// SPDX-License-Identifier: GPL-2.0
/*
 * Thunderbolt/USB4 retimer support.
 *
 * Copyright (C) 2020, Intel Corporation
 * Authors: Kranthi Kuntala <[email protected]>
 *          Mika Westerberg <[email protected]>
 */

#include <linux/delay.h>
#include <linux/pm_runtime.h>
#include <linux/sched/signal.h>

#include "sb_regs.h"
#include "tb.h"

#define TB_MAX_RETIMER_INDEX	6

/**
 * tb_retimer_nvm_read() - Read contents of retimer NVM
 * @rt: Retimer device
 * @address: NVM address (in bytes) to start reading
 * @buf: Data read from NVM is stored here
 * @size: Number of bytes to read
 *
 * Reads retimer NVM and copies the contents to @buf. Returns %0 if the
 * read was successful and negative errno in case of failure.
 */
int tb_retimer_nvm_read(struct tb_retimer *rt, unsigned int address, void *buf,
			size_t size)
{
	return usb4_port_retimer_nvm_read(rt->port, rt->index, address, buf, size);
}

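/*
 * NVM read callback registered for the active NVM portion via
 * tb_nvm_add_active(). Takes a runtime PM reference on the retimer and
 * serializes the sideband access with the domain lock (rt->tb->lock),
 * restarting the syscall if the lock is contended.
 */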
static int nvm_read(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto out;
	}

	ret = tb_retimer_nvm_read(rt, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

out:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	return ret;
}

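/*
 * NVM write callback registered for the non-active NVM portion via
 * tb_nvm_add_non_active(). The data is only buffered in host memory
 * here; it is flushed to the retimer from nvm_authenticate_store().
 */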
static int nvm_write(void *priv, unsigned int offset, void *val, size_t bytes)
{
	struct tb_nvm *nvm = priv;
	struct tb_retimer *rt = tb_to_retimer(nvm->dev);
	int ret = 0;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	ret = tb_nvm_write_buf(nvm, offset, val, bytes);
	mutex_unlock(&rt->tb->lock);

	return ret;
}

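/*
 * Registers the active and non-active NVM devices for the retimer. If
 * any step fails, NVM upgrade is marked as disabled for this retimer
 * and the related sysfs attributes are hidden by retimer_is_visible().
 */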
static int tb_retimer_nvm_add(struct tb_retimer *rt)
{
	struct tb_nvm *nvm;
	int ret;

	nvm = tb_nvm_alloc(&rt->dev);
	if (IS_ERR(nvm)) {
		ret = PTR_ERR(nvm) == -EOPNOTSUPP ? 0 : PTR_ERR(nvm);
		goto err_nvm;
	}

	ret = tb_nvm_read_version(nvm);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_active(nvm, nvm_read);
	if (ret)
		goto err_nvm;

	ret = tb_nvm_add_non_active(nvm, nvm_write);
	if (ret)
		goto err_nvm;

	rt->nvm = nvm;
	return 0;

err_nvm:
	dev_dbg(&rt->dev, "NVM upgrade disabled\n");
	rt->no_nvm_upgrade = true;
	if (!IS_ERR(nvm))
		tb_nvm_free(nvm);

	return ret;
}

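/*
 * Validates the buffered NVM image and writes it to the non-active
 * portion of the retimer NVM. Marks the buffer as flushed on success
 * so the image is not written again.
 */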
static int tb_retimer_nvm_validate_and_write(struct tb_retimer *rt)
{
	unsigned int image_size;
	const u8 *buf;
	int ret;

	ret = tb_nvm_validate(rt->nvm);
	if (ret)
		return ret;

	buf = rt->nvm->buf_data_start;
	image_size = rt->nvm->buf_data_size;

	ret = usb4_port_retimer_nvm_write(rt->port, rt->index, 0, buf,
					  image_size);
	if (ret)
		return ret;

	rt->nvm->flushed = true;
	return 0;
}

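/*
 * Starts NVM authentication. With @auth_only the previously written
 * image is authenticated without a new write (the NVM write offset is
 * reset to 0 first). The retimer typically becomes inaccessible right
 * after the authentication command, so a failing status read below is
 * expected.
 */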
static int tb_retimer_nvm_authenticate(struct tb_retimer *rt, bool auth_only)
{
	u32 status;
	int ret;

	if (auth_only) {
		ret = usb4_port_retimer_nvm_set_offset(rt->port, rt->index, 0);
		if (ret)
			return ret;
	}

	ret = usb4_port_retimer_nvm_authenticate(rt->port, rt->index);
	if (ret)
		return ret;

	usleep_range(100, 150);

	/*
	 * Check the status now if we can still access the retimer. It
	 * is expected that the call below fails.
	 */
	ret = usb4_port_retimer_nvm_authenticate_status(rt->port, rt->index,
							&status);
	if (!ret) {
		rt->auth_status = status;
		return status ? -EINVAL : 0;
	}

	return 0;
}

static ssize_t device_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->device);
}
static DEVICE_ATTR_RO(device);

static ssize_t nvm_authenticate_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%#x\n", rt->auth_status);

	mutex_unlock(&rt->tb->lock);

	return ret;
}

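/*
 * Reads the NVM authentication status of all possible on-board
 * retimers into @status (indexed by retimer index). Called before the
 * retimer devices are added so the result can be stored in each new
 * device instance.
 */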
static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status)
{
	int i;

	tb_port_dbg(port, "reading NVM authentication status of retimers\n");

	/*
	 * Before doing anything else, read the authentication status.
	 * If the retimer has it set, store it for the new retimer
	 * device instance.
	 */
	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]);
}

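/*
 * Enables inbound sideband transactions toward all possible retimer
 * indices. This is only needed when the USB4 port is offline; when it
 * is online the sideband is already up. tb_retimer_unset_inbound_sbtx()
 * below is the counterpart used to tear the access down again.
 */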
static void tb_retimer_set_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is online, sideband communications are
	 * already up.
	 */
	if (!usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "enabling sideband transactions\n");

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++)
		usb4_port_retimer_set_inbound_sbtx(port, i);
}

static void tb_retimer_unset_inbound_sbtx(struct tb_port *port)
{
	int i;

	/*
	 * When the USB4 port is offline, we need to keep the sideband
	 * communications up to make it possible to communicate with
	 * the connected retimers.
	 */
	if (usb4_port_device_is_offline(port->usb4))
		return;

	tb_port_dbg(port, "disabling sideband transactions\n");

	for (i = TB_MAX_RETIMER_INDEX; i >= 1; i--)
		usb4_port_retimer_unset_inbound_sbtx(port, i);
}

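/*
 * Sysfs store for nvm_authenticate. The accepted values map to the
 * WRITE_AND_AUTHENTICATE, WRITE_ONLY and AUTHENTICATE_ONLY operations
 * used below: flush the buffered image, authenticate an already
 * written image, or both.
 */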
static ssize_t nvm_authenticate_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t count)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int val, ret;

	pm_runtime_get_sync(&rt->dev);

	if (!mutex_trylock(&rt->tb->lock)) {
		ret = restart_syscall();
		goto exit_rpm;
	}

	if (!rt->nvm) {
		ret = -EAGAIN;
		goto exit_unlock;
	}

	ret = kstrtoint(buf, 10, &val);
	if (ret)
		goto exit_unlock;

	/* Always clear status */
	rt->auth_status = 0;

	if (val) {
		/*
		 * When NVM authentication starts, the retimer is not
		 * accessible, so calling tb_retimer_unset_inbound_sbtx()
		 * would fail and therefore we do not call it. The
		 * exception is when the validation fails or we only
		 * write the new NVM image without authentication.
		 */
		tb_retimer_set_inbound_sbtx(rt->port);
		if (val == AUTHENTICATE_ONLY) {
			ret = tb_retimer_nvm_authenticate(rt, true);
		} else {
			if (!rt->nvm->flushed) {
				if (!rt->nvm->buf) {
					ret = -EINVAL;
					goto exit_unlock;
				}

				ret = tb_retimer_nvm_validate_and_write(rt);
				if (ret || val == WRITE_ONLY)
					goto exit_unlock;
			}
			if (val == WRITE_AND_AUTHENTICATE)
				ret = tb_retimer_nvm_authenticate(rt, false);
		}
	}

exit_unlock:
	if (ret || val == WRITE_ONLY)
		tb_retimer_unset_inbound_sbtx(rt->port);
	mutex_unlock(&rt->tb->lock);
exit_rpm:
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_put_autosuspend(&rt->dev);

	if (ret)
		return ret;
	return count;
}
static DEVICE_ATTR_RW(nvm_authenticate);

static ssize_t nvm_version_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	int ret;

	if (!mutex_trylock(&rt->tb->lock))
		return restart_syscall();

	if (!rt->nvm)
		ret = -EAGAIN;
	else
		ret = sysfs_emit(buf, "%x.%x\n", rt->nvm->major, rt->nvm->minor);

	mutex_unlock(&rt->tb->lock);
	return ret;
}
static DEVICE_ATTR_RO(nvm_version);

static ssize_t vendor_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	return sysfs_emit(buf, "%#x\n", rt->vendor);
}
static DEVICE_ATTR_RO(vendor);

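/*
 * Hides the NVM upgrade attributes (nvm_authenticate and nvm_version)
 * when NVM upgrade is not supported for this retimer.
 */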
static umode_t retimer_is_visible(struct kobject *kobj, struct attribute *attr,
				  int n)
{
	struct device *dev = kobj_to_dev(kobj);
	struct tb_retimer *rt = tb_to_retimer(dev);

	if (attr == &dev_attr_nvm_authenticate.attr ||
	    attr == &dev_attr_nvm_version.attr)
		return rt->no_nvm_upgrade ? 0 : attr->mode;

	return attr->mode;
}

static struct attribute *retimer_attrs[] = {
	&dev_attr_device.attr,
	&dev_attr_nvm_authenticate.attr,
	&dev_attr_nvm_version.attr,
	&dev_attr_vendor.attr,
	NULL
};

static const struct attribute_group retimer_group = {
	.is_visible = retimer_is_visible,
	.attrs = retimer_attrs,
};

static const struct attribute_group *retimer_groups[] = {
	&retimer_group,
	NULL
};

static void tb_retimer_release(struct device *dev)
{
	struct tb_retimer *rt = tb_to_retimer(dev);

	kfree(rt);
}

struct device_type tb_retimer_type = {
	.name = "thunderbolt_retimer",
	.groups = retimer_groups,
	.release = tb_retimer_release,
};

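/*
 * Probes the retimer at @index behind @port and, if it answers the
 * vendor/product ID reads and supports NVM operations, registers it as
 * a child device of the USB4 port and sets up runtime PM for it.
 */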
static int tb_retimer_add(struct tb_port *port, u8 index, u32 auth_status)
{
	struct tb_retimer *rt;
	u32 vendor, device;
	int ret;

	ret = usb4_port_retimer_read(port, index, USB4_SB_VENDOR_ID, &vendor,
				     sizeof(vendor));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer VendorId: %d\n", ret);
		return ret;
	}

	ret = usb4_port_retimer_read(port, index, USB4_SB_PRODUCT_ID, &device,
				     sizeof(device));
	if (ret) {
		if (ret != -ENODEV)
			tb_port_warn(port, "failed read retimer ProductId: %d\n", ret);
		return ret;
	}

	/*
	 * Check that it supports NVM operations. If not then don't add
	 * the device at all.
	 */
	ret = usb4_port_retimer_nvm_sector_size(port, index);
	if (ret < 0)
		return ret;

	rt = kzalloc(sizeof(*rt), GFP_KERNEL);
	if (!rt)
		return -ENOMEM;

	rt->index = index;
	rt->vendor = vendor;
	rt->device = device;
	rt->auth_status = auth_status;
	rt->port = port;
	rt->tb = port->sw->tb;

	rt->dev.parent = &port->usb4->dev;
	rt->dev.bus = &tb_bus_type;
	rt->dev.type = &tb_retimer_type;
	dev_set_name(&rt->dev, "%s:%u.%u", dev_name(&port->sw->dev),
		     port->port, index);

	ret = device_register(&rt->dev);
	if (ret) {
		dev_err(&rt->dev, "failed to register retimer: %d\n", ret);
		put_device(&rt->dev);
		return ret;
	}

	ret = tb_retimer_nvm_add(rt);
	if (ret) {
		dev_err(&rt->dev, "failed to add NVM devices: %d\n", ret);
		device_unregister(&rt->dev);
		return ret;
	}

	dev_info(&rt->dev, "new retimer found, vendor=%#x device=%#x\n",
		 rt->vendor, rt->device);

	pm_runtime_no_callbacks(&rt->dev);
	pm_runtime_set_active(&rt->dev);
	pm_runtime_enable(&rt->dev);
	pm_runtime_set_autosuspend_delay(&rt->dev, TB_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&rt->dev);
	pm_runtime_use_autosuspend(&rt->dev);

	return 0;
}

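/* Tears down the NVM devices and unregisters the retimer device. */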
static void tb_retimer_remove(struct tb_retimer *rt)
{
	dev_info(&rt->dev, "retimer disconnected\n");
	tb_nvm_free(rt->nvm);
	device_unregister(&rt->dev);
}

struct tb_retimer_lookup {
	const struct tb_port *port;
	u8 index;
};

static int retimer_match(struct device *dev, void *data)
{
	const struct tb_retimer_lookup *lookup = data;
	struct tb_retimer *rt = tb_to_retimer(dev);

	return rt && rt->port == lookup->port && rt->index == lookup->index;
}

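/*
 * Finds an already registered retimer at @index under @port. Returns
 * the retimer with a device reference held (the caller is expected to
 * drop it with put_device()) or NULL if none is found.
 */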
static struct tb_retimer *tb_port_find_retimer(struct tb_port *port, u8 index)
{
	struct tb_retimer_lookup lookup = { .port = port, .index = index };
	struct device *dev;

	dev = device_find_child(&port->usb4->dev, &lookup, retimer_match);
	if (dev)
		return tb_to_retimer(dev);

	return NULL;
}

/**
 * tb_retimer_scan() - Scan for on-board retimers under port
 * @port: USB4 port to scan
 * @add: If true also registers found retimers
 *
 * Brings the sideband into a state where retimers can be accessed.
 * Then tries to enumerate on-board retimers connected to @port. Found
 * retimers are registered as children of @port if @add is set. Does
 * not scan for cable retimers for now.
 */
int tb_retimer_scan(struct tb_port *port, bool add)
{
	u32 status[TB_MAX_RETIMER_INDEX + 1] = {};
	int ret, i, last_idx = 0;

	/*
	 * Send broadcast RT to make sure retimer indices facing this
	 * port are set.
	 */
	ret = usb4_port_enumerate_retimers(port);
	if (ret)
		return ret;

	/*
	 * Immediately after sending the enumerate retimers request,
	 * read the authentication status of each retimer.
	 */
	tb_retimer_nvm_authenticate_status(port, status);

	/*
	 * Enable sideband channel for each retimer. We can do this
	 * regardless of whether there is a device connected or not.
	 */
	tb_retimer_set_inbound_sbtx(port);

	for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) {
		/*
		 * Last retimer is true only for the last on-board
		 * retimer (the one connected directly to the Type-C
		 * port).
		 */
		ret = usb4_port_retimer_is_last(port, i);
		if (ret > 0)
			last_idx = i;
		else if (ret < 0)
			break;
	}

	tb_retimer_unset_inbound_sbtx(port);

	if (!last_idx)
		return 0;

	/* Add on-board retimers if they do not exist already */
	ret = 0;
	for (i = 1; i <= last_idx; i++) {
		struct tb_retimer *rt;

		rt = tb_port_find_retimer(port, i);
		if (rt) {
			put_device(&rt->dev);
		} else if (add) {
			ret = tb_retimer_add(port, i, status[i]);
			if (ret && ret != -EOPNOTSUPP)
				break;
		}
	}

	return ret;
}

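/*
 * device_for_each_child_reverse() callback used by
 * tb_retimer_remove_all() to remove the retimers under a port.
 */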
static int remove_retimer(struct device *dev, void *data)
{
	struct tb_retimer *rt = tb_to_retimer(dev);
	struct tb_port *port = data;

	if (rt && rt->port == port)
		tb_retimer_remove(rt);
	return 0;
}

/**
 * tb_retimer_remove_all() - Remove all retimers under port
 * @port: USB4 port whose retimers to remove
 *
 * This removes all previously added retimers under @port.
 */
void tb_retimer_remove_all(struct tb_port *port)
{
	struct usb4_port *usb4;

	usb4 = port->usb4;
	if (usb4)
		device_for_each_child_reverse(&usb4->dev, port,
					      remove_retimer);
}