// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <[email protected]>
 *          Mika Westerberg <[email protected]>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX	6
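
/*
 * Each USB4 port can have up to TB_MAX_RETIMER_INDEX on-board retimers
 * in front of it. They are addressed over the port sideband channel
 * using indices starting from 1, which is why the loops below run from
 * 1 to TB_MAX_RETIMER_INDEX.
 */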

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size)
{
	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

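/*
 * NVMem read callback for the active NVM part. Runtime resumes the
 * retimer before the sideband access and takes the domain lock with
 * mutex_trylock(): if the lock is contended the system call is
 * restarted instead of sleeping on the lock.
 */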
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

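/*
 * NVMem write callback for the non-active NVM part. The data is only
 * buffered with tb_nvm_write_buf() here; nothing is sent to the
 * retimer until the operation is triggered through nvm_authenticate.
 */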
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

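/*
 * Registers the active and non-active NVMem devices for the retimer.
 * If the NVM format is not recognized (tb_nvm_alloc() returns
 * -EOPNOTSUPP), the retimer is still usable but NVM upgrade is
 * disabled for it.
 */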
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_active(nvm, nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	return 0;

err_nvm:
	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
	rt->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

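/*
 * Validates the staged NVM image and, if it passes, flushes it to the
 * retimer NVM starting from offset 0. The flushed flag makes sure the
 * image is written out only once even if authentication is retried.
 */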
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(rt->nvm);
	if (ret)
		return ret;

	buf = rt->nvm->buf_data_start;
	image_size = rt->nvm->buf_data_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					  image_size);
	if (ret)
		return ret;

	rt->nvm->flushed = true;
	return 0;
}

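/*
 * Starts NVM authentication. With @auth_only the NVM offset is first
 * reset to 0 so that the retimer authenticates the image that is
 * already in its NVM, without a new write.
 */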
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Check the status now if the retimer is still accessible.
	 * Once authentication has started it usually is not, so the
	 * read below is expected to fail.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
	int i;

	tb_port_dbg(port, "reading NVM authentication status of retimers\n");

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
}

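/*
 * Enables inbound sideband transactions for each possible retimer
 * index when the USB4 port is offline; when the port is online the
 * sideband is already up. The matching tb_retimer_unset_inbound_sbtx()
 * below disables them again, walking the indices in reverse order.
 */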
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is online, the sideband communications
	 * are already up.
	 */
	if (!usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "enabling sideband transactions\n");

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is offline, we need to keep the sideband
	 * communications up to make it possible to communicate with
	 * the connected retimers.
	 */
	if (usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "disabling sideband transactions\n");

	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
		usb4_port_retimer_unset_inbound_sbtx(port, i);
}

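/*
 * Handles writes to the nvm_authenticate sysfs attribute. The written
 * value selects the operation:
 *
 *   AUTHENTICATE_ONLY      - authenticate the image already in NVM
 *   WRITE_ONLY             - only flush the staged image to NVM
 *   WRITE_AND_AUTHENTICATE - flush the staged image, then authenticate
 *
 * A rough userspace sequence for a full upgrade (the numeric values of
 * the constants above come from the shared NVM write op definitions in
 * tb.h) would be:
 *
 *   # cat retimer.nvm > nvm_non_active<N>/nvmem
 *   # echo <WRITE_AND_AUTHENTICATE> > nvm_authenticate
 */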
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		/*
		 * When NVM authentication starts the retimer is not
		 * accessible anymore, so calling
		 * tb_retimer_unset_inbound_sbtx() would fail and
		 * therefore we do not call it. The exceptions are when
		 * the validation fails or when we only write the new
		 * NVM image without authentication.
		 */
		tb_retimer_set_inbound_sbtx(rt->port);
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	if (ret || val == WRITE_ONLY)
		tb_retimer_unset_inbound_sbtx(rt->port);
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

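/*
 * Hides the NVM attributes when NVM upgrade has been disabled for the
 * retimer (for example because the NVM format was not recognized).
 */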
static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_retimer *rt = tb_to_retimer(dev);

	if (attr == &dev_attr_nvm_authenticate.attr ||
	    attr == &dev_attr_nvm_version.attr)
		return rt->no_nvm_upgrade ? 0 : attr->mode;

	return attr->mode;
}

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.is_visible = retimer_is_visible,
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

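/*
 * Probes the retimer at @index behind @port and, if it responds and
 * supports NVM operations, registers it as a child device of the USB4
 * port. Runtime PM is set up without callbacks, so the retimer simply
 * follows the runtime PM state of its parent.
 */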
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed to read retimer ProductId: %d\n", ret);
		return ret;
	}

	/*
	 * Check that it supports NVM operations. If not, then don't
	 * add the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

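/*
 * Used with device_find_child() to look up an already registered
 * retimer by (port, index) pair among the children of the USB4 port.
 * Note device_find_child() takes a reference to the matching device,
 * which the caller in tb_retimer_scan() drops with put_device().
 */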
static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Immediately after sending enumerate retimers read the
	 * authentication status of each retimer.
	 */
	tb_retimer_nvm_authenticate_status(port, status);

	/*
	 * Enable sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	tb_retimer_set_inbound_sbtx(port);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	tb_retimer_unset_inbound_sbtx(port);

	if (!last_idx)
		return 0;

	/* Add on-board retimers if they do not exist already */
	ret = 0;
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

	return ret;
}

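/*
 * device_for_each_child_reverse() callback: removes the retimer if it
 * sits behind the given port.
 */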
static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}