Kranthi Kuntala | dacb1287 | 2020-03-05 16:39:58 +0200 | [diff] [blame] | 1 | // SPDX-License-Identifier: GPL-2.0 |
| 2 | /* |
| 3 | * Thunderbolt/USB4 retimer support. |
| 4 | * |
| 5 | * Copyright (C) 2020, Intel Corporation |
| 6 | * Authors: Kranthi Kuntala <[email protected]> |
| 7 | * Mika Westerberg <[email protected]> |
| 8 | */ |
| 9 | |
#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>
#include <linux/sysfs.h>

#include "sb_regs.h"
#include "tb.h"
| 16 | |
| 17 | #define TB_MAX_RETIMER_INDEX 6 |
| 18 | |
| 19 | static int tb_retimer_nvm_read(void *priv, unsigned int offset, void *val, |
| 20 | size_t bytes) |
| 21 | { |
| 22 | struct tb_nvm *nvm = priv; |
| 23 | struct tb_retimer *rt = tb_to_retimer(nvm->dev); |
| 24 | int ret; |
| 25 | |
| 26 | pm_runtime_get_sync(&rt->dev); |
| 27 | |
| 28 | if (!mutex_trylock(&rt->tb->lock)) { |
| 29 | ret = restart_syscall(); |
| 30 | goto out; |
| 31 | } |
| 32 | |
| 33 | ret = usb4_port_retimer_nvm_read(rt->port, rt->index, offset, val, bytes); |
| 34 | mutex_unlock(&rt->tb->lock); |
| 35 | |
| 36 | out: |
| 37 | pm_runtime_mark_last_busy(&rt->dev); |
| 38 | pm_runtime_put_autosuspend(&rt->dev); |
| 39 | |
| 40 | return ret; |
| 41 | } |
| 42 | |
| 43 | static int tb_retimer_nvm_write(void *priv, unsigned int offset, void *val, |
| 44 | size_t bytes) |
| 45 | { |
| 46 | struct tb_nvm *nvm = priv; |
| 47 | struct tb_retimer *rt = tb_to_retimer(nvm->dev); |
| 48 | int ret = 0; |
| 49 | |
| 50 | if (!mutex_trylock(&rt->tb->lock)) |
| 51 | return restart_syscall(); |
| 52 | |
| 53 | ret = tb_nvm_write_buf(nvm, offset, val, bytes); |
| 54 | mutex_unlock(&rt->tb->lock); |
| 55 | |
| 56 | return ret; |
| 57 | } |
| 58 | |
/*
 * Register the retimer NVM with the tb_nvm framework: an active
 * (read-only, backed by hardware reads) partition and a non-active
 * (memory-buffered write) partition. On success rt->nvm is set.
 */
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	u32 val, nvm_size;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm))
		return PTR_ERR(nvm);

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_VERSION, &val,
					 sizeof(val));
	if (ret)
		goto err_nvm;

	/* Version dword: major in bits 23:16, minor in bits 15:8 */
	nvm->major = val >> 16;
	nvm->minor = val >> 8;

	ret = usb4_port_retimer_nvm_read(rt->port, rt->index, NVM_FLASH_SIZE,
					 &val, sizeof(val));
	if (ret)
		goto err_nvm;

	/*
	 * Low 3 bits of NVM_FLASH_SIZE select the flash size; the shift
	 * computes it in megabits, /8 converts to bytes. The usable
	 * active partition is presumably half the flash minus a 16k
	 * header region — TODO confirm against the NVM layout spec.
	 */
	nvm_size = (SZ_1M << (val & 7)) / 8;
	nvm_size = (nvm_size - SZ_16K) / 2;

	ret = tb_nvm_add_active(nvm, nvm_size, tb_retimer_nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, NVM_MAX_SIZE, tb_retimer_nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	return 0;

err_nvm:
	tb_nvm_free(nvm);
	return ret;
}
| 100 | |
/*
 * Sanity-check the buffered NVM image and, if it looks valid for this
 * retimer, write it (minus the headers) to the hardware. Returns
 * -EINVAL for any layout/ID mismatch.
 *
 * NOTE(review): the raw *(u32 *)/*(u16 *) loads assume a little-endian
 * host and sufficiently aligned offsets (buf is the nvm buffer, the
 * digital section is checked to be 4k aligned). Looks fine for x86 but
 * consider get_unaligned_le32()/le16() helpers — confirm.
 */
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size, hdr_size;
	const u8 *buf = rt->nvm->buf;
	u16 ds_size, device;

	/* Reject images outside the supported NVM size range */
	image_size = rt->nvm->buf_data_size;
	if (image_size < NVM_MIN_SIZE || image_size > NVM_MAX_SIZE)
		return -EINVAL;

	/*
	 * FARB pointer must point inside the image and must at least
	 * contain parts of the digital section we will be reading here.
	 */
	hdr_size = (*(u32 *)buf) & 0xffffff;
	if (hdr_size + NVM_DEVID + 2 >= image_size)
		return -EINVAL;

	/* Digital section start should be aligned to 4k page */
	if (!IS_ALIGNED(hdr_size, SZ_4K))
		return -EINVAL;

	/*
	 * Read digital section size and check that it also fits inside
	 * the image.
	 */
	ds_size = *(u16 *)(buf + hdr_size);
	if (ds_size >= image_size)
		return -EINVAL;

	/*
	 * Make sure the device ID in the image matches the retimer
	 * hardware.
	 */
	device = *(u16 *)(buf + hdr_size + NVM_DEVID);
	if (device != rt->device)
		return -EINVAL;

	/* Skip headers in the image */
	buf += hdr_size;
	image_size -= hdr_size;

	return usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					   image_size);
}
| 146 | |
| 147 | static ssize_t device_show(struct device *dev, struct device_attribute *attr, |
| 148 | char *buf) |
| 149 | { |
| 150 | struct tb_retimer *rt = tb_to_retimer(dev); |
| 151 | |
| 152 | return sprintf(buf, "%#x\n", rt->device); |
| 153 | } |
| 154 | static DEVICE_ATTR_RO(device); |
| 155 | |
| 156 | static ssize_t nvm_authenticate_show(struct device *dev, |
| 157 | struct device_attribute *attr, char *buf) |
| 158 | { |
| 159 | struct tb_retimer *rt = tb_to_retimer(dev); |
| 160 | int ret; |
| 161 | |
| 162 | if (!mutex_trylock(&rt->tb->lock)) |
| 163 | return restart_syscall(); |
| 164 | |
| 165 | if (!rt->nvm) |
| 166 | ret = -EAGAIN; |
| 167 | else |
| 168 | ret = sprintf(buf, "%#x\n", rt->auth_status); |
| 169 | |
| 170 | mutex_unlock(&rt->tb->lock); |
| 171 | |
| 172 | return ret; |
| 173 | } |
| 174 | |
/*
 * "nvm_authenticate" sysfs write: writing a true value validates and
 * flushes the buffered NVM image to the retimer and starts
 * authentication; any write first clears the stored status.
 */
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	bool val;
	int ret;

	/* Keep the retimer runtime-resumed for the whole operation */
	pm_runtime_get_sync(&rt->dev);

	/* trylock + restart_syscall() so we never block on the lock here */
	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	/* NVM not registered yet; tell userspace to retry */
	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtobool(buf, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		/* No image has been written to the non-active partition */
		if (!rt->nvm->buf) {
			ret = -EINVAL;
			goto exit_unlock;
		}

		ret = tb_retimer_nvm_validate_and_write(rt);
		if (ret)
			goto exit_unlock;

		ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	}

exit_unlock:
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);
| 225 | |
| 226 | static ssize_t nvm_version_show(struct device *dev, |
| 227 | struct device_attribute *attr, char *buf) |
| 228 | { |
| 229 | struct tb_retimer *rt = tb_to_retimer(dev); |
| 230 | int ret; |
| 231 | |
| 232 | if (!mutex_trylock(&rt->tb->lock)) |
| 233 | return restart_syscall(); |
| 234 | |
| 235 | if (!rt->nvm) |
| 236 | ret = -EAGAIN; |
| 237 | else |
| 238 | ret = sprintf(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor); |
| 239 | |
| 240 | mutex_unlock(&rt->tb->lock); |
| 241 | return ret; |
| 242 | } |
| 243 | static DEVICE_ATTR_RO(nvm_version); |
| 244 | |
| 245 | static ssize_t vendor_show(struct device *dev, struct device_attribute *attr, |
| 246 | char *buf) |
| 247 | { |
| 248 | struct tb_retimer *rt = tb_to_retimer(dev); |
| 249 | |
| 250 | return sprintf(buf, "%#x\n", rt->vendor); |
| 251 | } |
| 252 | static DEVICE_ATTR_RO(vendor); |
| 253 | |
/* sysfs attributes exposed by every retimer device */
static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.attrs = retimer_attrs,
};

/* Attribute groups attached via tb_retimer_type below */
static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};
| 270 | |
/* Final device-core release callback: frees the retimer structure. */
static void tb_retimer_release(struct device *dev)
{
	kfree(tb_to_retimer(dev));
}
| 277 | |
/* Device type for retimer children; used by tb_to_retimer() matching */
struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};
| 283 | |
/*
 * Probe retimer @index on @port, and if it responds with a supported
 * NVM format, register it as a child device of the USB4 port along
 * with its NVM devices. @auth_status is the authentication status read
 * earlier, stored for the new device instance.
 *
 * Returns -ENODEV quietly when no retimer answers at this index and
 * -EOPNOTSUPP for unsupported vendors (callers treat both as non-fatal).
 */
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct usb4_port *usb4;
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	/* Retimers hang off the usb4_port device; nothing to do without it */
	usb4 = port->usb4;
	if (!usb4)
		return -EINVAL;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		/* -ENODEV just means no retimer at this index */
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
		return ret;
	}

	/*
	 * Only the Intel NVM format is handled; 0x8087 is presumably
	 * Intel's alternate (USB) vendor ID — confirm.
	 */
	if (vendor != PCI_VENDOR_ID_INTEL && vendor != 0x8087) {
		tb_port_info(port, "retimer NVM format of vendor %#x is not supported\n",
			     vendor);
		return -EOPNOTSUPP;
	}

	/*
	 * Check that it supports NVM operations. If not then don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		/* After device_register() the release callback frees rt */
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	/* Runtime PM: no callbacks of its own, autosuspend like other tb devs */
	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}
| 368 | |
/*
 * Tear down one retimer: free its NVM devices first, then unregister
 * the device. The final reference drop frees rt via tb_retimer_release().
 */
static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}
| 375 | |
/* Search key for retimer_match(): (port, retimer index) pair */
struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};
| 380 | |
| 381 | static int retimer_match(struct device *dev, void *data) |
| 382 | { |
| 383 | const struct tb_retimer_lookup *lookup = data; |
| 384 | struct tb_retimer *rt = tb_to_retimer(dev); |
| 385 | |
| 386 | return rt && rt->port == lookup->port && rt->index == lookup->index; |
| 387 | } |
| 388 | |
| 389 | static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index) |
| 390 | { |
| 391 | struct tb_retimer_lookup lookup = { .port = port, .index = index }; |
| 392 | struct device *dev; |
| 393 | |
Mika Westerberg | cae5f51 | 2021-04-01 17:34:20 +0300 | [diff] [blame^] | 394 | dev = device_find_child(&port->usb4->dev, &lookup, retimer_match); |
Kranthi Kuntala | dacb1287 | 2020-03-05 16:39:58 +0200 | [diff] [blame] | 395 | if (dev) |
| 396 | return tb_to_retimer(dev); |
| 397 | |
| 398 | return NULL; |
| 399 | } |
| 400 | |
/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 *
 * Tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port. Does not scan for cable
 * retimers for now.
 */
int tb_retimer_scan(struct tb_port *port)
{
	/* Entry 0 is unused; retimer indices are 1..TB_MAX_RETIMER_INDEX */
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;

	/* Retimers only exist below USB4 ports */
	if (!port->cap_usb4)
		return 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	/* Errors are ignored here — best effort; status stays 0 on failure */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	/* No on-board retimers found */
	if (!last_idx)
		return 0;

	/* Add on-board retimers if they do not exist already */
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			/* Already registered; drop the lookup reference */
			put_device(&rt->dev);
		} else {
			ret = tb_retimer_add(port, i, status[i]);
			/* Unsupported NVM format is not fatal for the scan */
			if (ret && ret != -EOPNOTSUPP)
				return ret;
		}
	}

	return 0;
}
| 465 | |
| 466 | static int remove_retimer(struct device *dev, void *data) |
| 467 | { |
| 468 | struct tb_retimer *rt = tb_to_retimer(dev); |
| 469 | struct tb_port *port = data; |
| 470 | |
| 471 | if (rt && rt->port == port) |
| 472 | tb_retimer_remove(rt); |
| 473 | return 0; |
| 474 | } |
| 475 | |
| 476 | /** |
| 477 | * tb_retimer_remove_all() - Remove all retimers under port |
| 478 | * @port: USB4 port whose retimers to remove |
| 479 | * |
| 480 | * This removes all previously added retimers under @port. |
| 481 | */ |
| 482 | void tb_retimer_remove_all(struct tb_port *port) |
| 483 | { |
Mika Westerberg | cae5f51 | 2021-04-01 17:34:20 +0300 | [diff] [blame^] | 484 | struct usb4_port *usb4; |
| 485 | |
| 486 | usb4 = port->usb4; |
| 487 | if (usb4) |
| 488 | device_for_each_child_reverse(&usb4->dev, port, |
Kranthi Kuntala | dacb1287 | 2020-03-05 16:39:58 +0200 | [diff] [blame] | 489 | remove_retimer); |
| 490 | } |