// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <[email protected]>
 *          Mika Westerberg <[email protected]>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX 6

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
                        size_t size)
{
        return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

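/*
 * NVMem read callback for the retimer's active NVM. Reads go directly
 * to the retimer over the USB4 sideband, so the retimer is runtime
 * resumed first and the access is serialized with the domain lock.
 */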
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_retimer *rt = tb_to_retimer(nvm->dev);
        int ret;

        pm_runtime_get_sync(&rt->dev);

        if (!mutex_trylock(&rt->tb->lock)) {
                ret = restart_syscall();
                goto out;
        }

        ret = tb_retimer_nvm_read(rt, offset, val, bytes);
        mutex_unlock(&rt->tb->lock);

out:
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_put_autosuspend(&rt->dev);

        return ret;
}

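/*
 * NVMem write callback for the non-active NVM. The data is only
 * buffered here with tb_nvm_write_buf(); the actual flush to the
 * retimer happens when the user writes the nvm_authenticate attribute.
 */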
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
        struct tb_nvm *nvm = priv;
        struct tb_retimer *rt = tb_to_retimer(nvm->dev);
        int ret = 0;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        ret = tb_nvm_write_buf(nvm, offset, val, bytes);
        mutex_unlock(&rt->tb->lock);

        return ret;
}

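/*
 * Registers the active and non-active NVM devices for the retimer. If
 * tb_nvm_alloc() reports -EOPNOTSUPP, NVM upgrade is simply disabled
 * and the retimer is added without NVM devices.
 */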
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
        struct tb_nvm *nvm;
        int ret;

        nvm = tb_nvm_alloc(&rt->dev);
        if (IS_ERR(nvm)) {
                ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
                goto err_nvm;
        }

        ret = tb_nvm_read_version(nvm);
        if (ret)
                goto err_nvm;

        ret = tb_nvm_add_active(nvm, nvm_read);
        if (ret)
                goto err_nvm;

        ret = tb_nvm_add_non_active(nvm, nvm_write);
        if (ret)
                goto err_nvm;

        rt->nvm = nvm;
        return 0;

err_nvm:
        dev_dbg(&rt->dev, "NVM upgrade disabled\n");
        if (!IS_ERR(nvm))
                tb_nvm_free(nvm);

        return ret;
}

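/*
 * Validates the buffered NVM image and writes it to the retimer's
 * non-active NVM. The image is marked flushed so that it is not
 * written again by a subsequent authentication request.
 */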
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
        unsigned int image_size;
        const u8 *buf;
        int ret;

        ret = tb_nvm_validate(rt->nvm);
        if (ret)
                return ret;

        buf = rt->nvm->buf_data_start;
        image_size = rt->nvm->buf_data_size;

        ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
                                          image_size);
        if (ret)
                return ret;

        rt->nvm->flushed = true;
        return 0;
}

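/*
 * Starts NVM authentication for the retimer. With @auth_only the
 * previously written image is authenticated without flushing a new one
 * (the NVM offset is reset to 0 first). Once authentication starts the
 * retimer usually becomes inaccessible over the sideband, so a failing
 * status read afterwards is treated as success.
 */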
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
        u32 status;
        int ret;

        if (auth_only) {
                ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
                if (ret)
                        return ret;
        }

        ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
        if (ret)
                return ret;

        usleep_range(100, 150);

        /*
         * Check the status now if we can still access the retimer. It
         * is expected that the read below fails.
         */
        ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
                                                        &status);
        if (!ret) {
                rt->auth_status = status;
                return status ? -EINVAL : 0;
        }

        return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
        struct device_attribute *attr, char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int ret;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        if (!rt->nvm)
                ret = -EAGAIN;
        else if (rt->no_nvm_upgrade)
                ret = -EOPNOTSUPP;
        else
                ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

        mutex_unlock(&rt->tb->lock);

        return ret;
}

static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
        int i;

        tb_port_dbg(port, "reading NVM authentication status of retimers\n");

        /*
         * Before doing anything else, read the authentication status.
         * If the retimer has it set, store it for the new retimer
         * device instance.
         */
        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
                usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
}

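/*
 * Enable the sideband channel towards every possible retimer index on
 * the port; the unset variant below tears it down again in reverse
 * order.
 */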
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
        int i;

        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
                usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
        int i;

        for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
                usb4_port_retimer_unset_inbound_sbtx(port, i);
}

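/*
 * Writing nvm_authenticate drives the NVM upgrade flow: depending on
 * the value written (WRITE_ONLY, WRITE_AND_AUTHENTICATE or
 * AUTHENTICATE_ONLY) the buffered image is flushed to the retimer
 * and/or authentication is started. Writing 0 only clears the stored
 * authentication status.
 */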
static ssize_t nvm_authenticate_store(struct device *dev,
        struct device_attribute *attr, const char *buf, size_t count)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int val, ret;

        pm_runtime_get_sync(&rt->dev);

        if (!mutex_trylock(&rt->tb->lock)) {
                ret = restart_syscall();
                goto exit_rpm;
        }

        if (!rt->nvm) {
                ret = -EAGAIN;
                goto exit_unlock;
        }

        ret = kstrtoint(buf, 10, &val);
        if (ret)
                goto exit_unlock;

        /* Always clear status */
        rt->auth_status = 0;

        if (val) {
                /*
                 * When NVM authentication starts the retimer is not
                 * accessible so calling tb_retimer_unset_inbound_sbtx()
                 * will fail and therefore we do not call it. The
                 * exception is when the validation fails or we only
                 * write the new NVM image without authentication.
                 */
                tb_retimer_set_inbound_sbtx(rt->port);
                if (val == AUTHENTICATE_ONLY) {
                        ret = tb_retimer_nvm_authenticate(rt, true);
                } else {
                        if (!rt->nvm->flushed) {
                                if (!rt->nvm->buf) {
                                        ret = -EINVAL;
                                        goto exit_unlock;
                                }

                                ret = tb_retimer_nvm_validate_and_write(rt);
                                if (ret || val == WRITE_ONLY)
                                        goto exit_unlock;
                        }
                        if (val == WRITE_AND_AUTHENTICATE)
                                ret = tb_retimer_nvm_authenticate(rt, false);
                }
        }

exit_unlock:
        if (ret || val == WRITE_ONLY)
                tb_retimer_unset_inbound_sbtx(rt->port);
        mutex_unlock(&rt->tb->lock);
exit_rpm:
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_put_autosuspend(&rt->dev);

        if (ret)
                return ret;
        return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        int ret;

        if (!mutex_trylock(&rt->tb->lock))
                return restart_syscall();

        if (!rt->nvm)
                ret = -EAGAIN;
        else
                ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

        mutex_unlock(&rt->tb->lock);
        return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
                           char *buf)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

static struct attribute *retimer_attrs[] = {
        &dev_attr_device.attr,
        &dev_attr_nvm_authenticate.attr,
        &dev_attr_nvm_version.attr,
        &dev_attr_vendor.attr,
        NULL
};

static const struct attribute_group retimer_group = {
        .attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
        &retimer_group,
        NULL
};

static void tb_retimer_release(struct device *dev)
{
        struct tb_retimer *rt = tb_to_retimer(dev);

        kfree(rt);
}

struct device_type tb_retimer_type = {
        .name = "thunderbolt_retimer",
        .groups = retimer_groups,
        .release = tb_retimer_release,
};

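/*
 * Creates and registers a retimer device for the given port and
 * retimer index. The retimer must support NVM operations (readable
 * sector size), otherwise it is not added at all.
 */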
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
        struct tb_retimer *rt;
        u32 vendor, device;
        int ret;

        ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
                                     sizeof(vendor));
        if (ret) {
                if (ret != -ENODEV)
                        tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
                return ret;
        }

        ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
                                     sizeof(device));
        if (ret) {
                if (ret != -ENODEV)
                        tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
                return ret;
        }

        /*
         * Check that it supports NVM operations. If not then don't add
         * the device at all.
         */
        ret = usb4_port_retimer_nvm_sector_size(port, index);
        if (ret < 0)
                return ret;

        rt = kzalloc(sizeof(*rt), GFP_KERNEL);
        if (!rt)
                return -ENOMEM;

        rt->index = index;
        rt->vendor = vendor;
        rt->device = device;
        rt->auth_status = auth_status;
        rt->port = port;
        rt->tb = port->sw->tb;

        rt->dev.parent = &port->usb4->dev;
        rt->dev.bus = &tb_bus_type;
        rt->dev.type = &tb_retimer_type;
        dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
                     port->port, index);

        ret = device_register(&rt->dev);
        if (ret) {
                dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
                put_device(&rt->dev);
                return ret;
        }

        ret = tb_retimer_nvm_add(rt);
        if (ret) {
                dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
                device_unregister(&rt->dev);
                return ret;
        }

        dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
                 rt->vendor, rt->device);

        pm_runtime_no_callbacks(&rt->dev);
        pm_runtime_set_active(&rt->dev);
        pm_runtime_enable(&rt->dev);
        pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
        pm_runtime_mark_last_busy(&rt->dev);
        pm_runtime_use_autosuspend(&rt->dev);

        return 0;
}

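/* Tears down the retimer device and frees its NVM structures */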
static void tb_retimer_remove(struct tb_retimer *rt)
{
        dev_info(&rt->dev, "retimer disconnected\n");
        tb_nvm_free(rt->nvm);
        device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
        const struct tb_port *port;
        u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
        const struct tb_retimer_lookup *lookup = data;
        struct tb_retimer *rt = tb_to_retimer(dev);

        return rt && rt->port == lookup->port && rt->index == lookup->index;
}

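/*
 * Returns the already registered retimer at @index under @port, or
 * NULL if there is none. The caller must drop the reference taken by
 * device_find_child() with put_device() when done.
 */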
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
        struct tb_retimer_lookup lookup = { .port = port, .index = index };
        struct device *dev;

        dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
        if (dev)
                return tb_to_retimer(dev);

        return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
        u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
        int ret, i, last_idx = 0;

        /*
         * Send broadcast RT to make sure retimer indices facing this
         * port are set.
         */
        ret = usb4_port_enumerate_retimers(port);
        if (ret)
                return ret;

        /*
         * Immediately after sending enumerate retimers read the
         * authentication status of each retimer.
         */
        tb_retimer_nvm_authenticate_status(port, status);

        /*
         * Enable sideband channel for each retimer. We can do this
         * regardless of whether there is a device connected or not.
         */
        tb_retimer_set_inbound_sbtx(port);

        for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
                /*
                 * Last retimer is true only for the last on-board
                 * retimer (the one connected directly to the Type-C
                 * port).
                 */
                ret = usb4_port_retimer_is_last(port, i);
                if (ret > 0)
                        last_idx = i;
                else if (ret < 0)
                        break;
        }

        tb_retimer_unset_inbound_sbtx(port);

        if (!last_idx)
                return 0;

        /* Add on-board retimers if they do not exist already */
        ret = 0;
        for (i = 1; i <= last_idx; i++) {
                struct tb_retimer *rt;

                rt = tb_port_find_retimer(port, i);
                if (rt) {
                        put_device(&rt->dev);
                } else if (add) {
                        ret = tb_retimer_add(port, i, status[i]);
                        if (ret && ret != -EOPNOTSUPP)
                                break;
                }
        }

        return ret;
}

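/* device_for_each_child_reverse() callback used by tb_retimer_remove_all() */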
static int remove_retimer(struct device *dev, void *data)
{
        struct tb_retimer *rt = tb_to_retimer(dev);
        struct tb_port *port = data;

        if (rt && rt->port == port)
                tb_retimer_remove(rt);
        return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
        struct usb4_port *usb4;

        usb4 = port->usb4;
        if (usb4)
                device_for_each_child_reverse(&usb4->dev, port,
                                              remove_retimer);
}