blob: 82ac5b4d3ae41daabdf283a435b7acba72a82e4c [file] [log] [blame]
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -08001/*
2 * Applied Micro X-Gene SoC Ethernet v2 Driver
3 *
4 * Copyright (c) 2017, Applied Micro Circuits Corporation
5 * Author(s): Iyappan Subramanian <[email protected]>
6 * Keyur Chudgar <[email protected]>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program. If not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include "main.h"
23
24static const struct acpi_device_id xge_acpi_match[];
25
26static int xge_get_resources(struct xge_pdata *pdata)
27{
28 struct platform_device *pdev;
29 struct net_device *ndev;
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -080030 int phy_mode, ret = 0;
Iyappan Subramanian70dbd9b2017-03-07 17:08:45 -080031 struct resource *res;
32 struct device *dev;
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -080033
34 pdev = pdata->pdev;
35 dev = &pdev->dev;
36 ndev = pdata->ndev;
37
38 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
39 if (!res) {
40 dev_err(dev, "Resource enet_csr not defined\n");
41 return -ENODEV;
42 }
43
44 pdata->resources.base_addr = devm_ioremap(dev, res->start,
45 resource_size(res));
46 if (!pdata->resources.base_addr) {
47 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
48 return -ENOMEM;
49 }
50
51 if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
52 eth_hw_addr_random(ndev);
53
54 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
55
56 phy_mode = device_get_phy_mode(dev);
57 if (phy_mode < 0) {
58 dev_err(dev, "Unable to get phy-connection-type\n");
59 return phy_mode;
60 }
61 pdata->resources.phy_mode = phy_mode;
62
63 if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
64 dev_err(dev, "Incorrect phy-connection-type specified\n");
65 return -ENODEV;
66 }
67
68 ret = platform_get_irq(pdev, 0);
69 if (ret <= 0) {
70 dev_err(dev, "Unable to get ENET IRQ\n");
71 ret = ret ? : -ENXIO;
72 return ret;
73 }
74 pdata->resources.irq = ret;
75
76 return 0;
77}
78
/* Allocate and DMA-map @nbuf receive skbs and hand them to hardware by
 * writing them into consecutive RX descriptors starting at the ring tail.
 *
 * Returns 0 on success, -ENOMEM on skb allocation failure or -EINVAL on a
 * DMA mapping failure.
 *
 * NOTE(review): on a mid-loop failure, ring->tail is not updated even though
 * earlier iterations already stored skbs in pkt_info[] and set E=1 on their
 * descriptors — those slots would be refilled again on the next call; confirm
 * whether the partially filled slots can leak.
 */
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;	/* ring size is a power of two */
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		/* remember skb/mapping so the RX poll can unmap and free it */
		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		/* preserve the next-descriptor link while setting PKT_ADDRH */
		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		/* m1 must be visible to the device before E=1 publishes m0 */
		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}
128
129static int xge_init_hw(struct net_device *ndev)
130{
131 struct xge_pdata *pdata = netdev_priv(ndev);
132 int ret;
133
134 ret = xge_port_reset(ndev);
135 if (ret)
136 return ret;
137
138 xge_port_init(ndev);
139 pdata->nbufs = NUM_BUFS;
140
141 return 0;
142}
143
144static irqreturn_t xge_irq(const int irq, void *data)
145{
146 struct xge_pdata *pdata = data;
147
148 if (napi_schedule_prep(&pdata->napi)) {
149 xge_intr_disable(pdata);
150 __napi_schedule(&pdata->napi);
151 }
152
153 return IRQ_HANDLED;
154}
155
156static int xge_request_irq(struct net_device *ndev)
157{
158 struct xge_pdata *pdata = netdev_priv(ndev);
159 struct device *dev = &pdata->pdev->dev;
160 int ret;
161
162 snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
163
164 ret = devm_request_irq(dev, pdata->resources.irq, xge_irq,
165 0, pdata->irq_name, pdata);
166 if (ret)
167 netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
168
169 return ret;
170}
171
172static void xge_free_irq(struct net_device *ndev)
173{
174 struct xge_pdata *pdata = netdev_priv(ndev);
175 struct device *dev = &pdata->pdev->dev;
176
177 devm_free_irq(dev, pdata->resources.irq, pdata);
178}
179
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800180static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
181{
182 if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
183 (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
184 return true;
185
186 return false;
187}
188
189static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
190{
191 struct xge_pdata *pdata = netdev_priv(ndev);
192 struct device *dev = &pdata->pdev->dev;
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800193 struct xge_desc_ring *tx_ring;
194 struct xge_raw_desc *raw_desc;
Iyappan Subramanian70dbd9b2017-03-07 17:08:45 -0800195 static dma_addr_t dma_addr;
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800196 u64 addr_lo, addr_hi;
197 void *pkt_buf;
198 u8 tail;
199 u16 len;
200
201 tx_ring = pdata->tx_ring;
202 tail = tx_ring->tail;
203 len = skb_headlen(skb);
204 raw_desc = &tx_ring->raw_desc[tail];
205
206 if (!is_tx_slot_available(raw_desc)) {
207 netif_stop_queue(ndev);
208 return NETDEV_TX_BUSY;
209 }
210
211 /* Packet buffers should be 64B aligned */
212 pkt_buf = dma_zalloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
213 GFP_ATOMIC);
214 if (unlikely(!pkt_buf)) {
215 dev_kfree_skb_any(skb);
216 return NETDEV_TX_OK;
217 }
218 memcpy(pkt_buf, skb->data, len);
219
220 addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
221 addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
222 raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
223 SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
224 SET_BITS(PKT_ADDRH,
225 upper_32_bits(dma_addr)));
226
227 tx_ring->pkt_info[tail].skb = skb;
228 tx_ring->pkt_info[tail].dma_addr = dma_addr;
229 tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
230
231 dma_wmb();
232
233 raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
234 SET_BITS(PKT_SIZE, len) |
235 SET_BITS(E, 0));
236 skb_tx_timestamp(skb);
237 xge_wr_csr(pdata, DMATXCTRL, 1);
238
239 tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
240
241 return NETDEV_TX_OK;
242}
243
244static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
245{
246 if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
247 !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
248 return true;
249
250 return false;
251}
252
/* Reap completed TX descriptors.
 *
 * Walks the TX ring from head; for every slot hardware has marked done,
 * updates stats, frees the coherent packet buffer and the skb, resets the
 * slot to its empty state and writes DMATXSTATUS (presumably acknowledging
 * one completion — verify against the DMA engine documentation).
 */
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	/* bail out early if hardware reports no completed packets */
	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		/* read descriptor/bookkeeping only after the done check */
		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	/* slots were just freed; restart the queue if xmit had stopped it */
	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}
301
/* Receive poll: process up to @budget completed RX descriptors starting at
 * the ring head, replenishing each consumed slot via xge_refill_buffers().
 * Returns the number of descriptors processed (error frames included).
 */
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	/* nothing to do unless hardware reports received packets */
	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		/* E still set means the slot is empty (owned by hardware) */
		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		/* read descriptor contents only after the ownership check */
		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		/* D bit in m2 flags a receive error for this frame */
		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		/* replenish the consumed slot and poke the DMA engine */
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		/* NOTE(review): on refill failure we break before advancing
		 * head, so this already-consumed slot (skb set to NULL above)
		 * is re-examined on the next poll — confirm this is intended.
		 */
		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}
367
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800368static void xge_delete_desc_ring(struct net_device *ndev,
369 struct xge_desc_ring *ring)
370{
371 struct xge_pdata *pdata = netdev_priv(ndev);
372 struct device *dev = &pdata->pdev->dev;
373 u16 size;
374
375 if (!ring)
376 return;
377
378 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
379 if (ring->desc_addr)
380 dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
381
382 kfree(ring->pkt_info);
383 kfree(ring);
384}
385
386static void xge_free_buffers(struct net_device *ndev)
387{
388 struct xge_pdata *pdata = netdev_priv(ndev);
389 struct xge_desc_ring *ring = pdata->rx_ring;
390 struct device *dev = &pdata->pdev->dev;
391 struct sk_buff *skb;
392 dma_addr_t dma_addr;
393 int i;
394
395 for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
396 skb = ring->pkt_info[i].skb;
397 dma_addr = ring->pkt_info[i].dma_addr;
398
399 if (!skb)
400 continue;
401
402 dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
403 DMA_FROM_DEVICE);
404 dev_kfree_skb_any(skb);
405 }
406}
407
/* Drain outstanding TX/RX work, then tear down both descriptor rings. */
static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	/* reap completed transmits before releasing the TX ring */
	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	/* flush pending receives, drop their buffers, then free the ring */
	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}
419
420static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
421{
422 struct xge_pdata *pdata = netdev_priv(ndev);
423 struct device *dev = &pdata->pdev->dev;
424 struct xge_desc_ring *ring;
425 u16 size;
426
427 ring = kzalloc(sizeof(struct xge_desc_ring), GFP_KERNEL);
428 if (!ring)
429 return NULL;
430
431 ring->ndev = ndev;
432
433 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
434 ring->desc_addr = dma_zalloc_coherent(dev, size, &ring->dma_addr,
435 GFP_KERNEL);
436 if (!ring->desc_addr)
437 goto err;
438
439 ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(struct pkt_info),
440 GFP_KERNEL);
441 if (!ring->pkt_info)
442 goto err;
443
444 xge_setup_desc(ring);
445
446 return ring;
447
448err:
449 xge_delete_desc_ring(ndev, ring);
450
451 return NULL;
452}
453
454static int xge_create_desc_rings(struct net_device *ndev)
455{
456 struct xge_pdata *pdata = netdev_priv(ndev);
457 struct xge_desc_ring *ring;
458 int ret;
459
460 /* create tx ring */
461 ring = xge_create_desc_ring(ndev);
462 if (!ring)
463 goto err;
464
465 pdata->tx_ring = ring;
466 xge_update_tx_desc_addr(pdata);
467
468 /* create rx ring */
469 ring = xge_create_desc_ring(ndev);
470 if (!ring)
471 goto err;
472
473 pdata->rx_ring = ring;
474 xge_update_rx_desc_addr(pdata);
475
476 ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
477 if (ret)
478 goto err;
479
480 return 0;
481err:
482 xge_delete_desc_rings(ndev);
483
484 return -ENOMEM;
485}
486
487static int xge_open(struct net_device *ndev)
488{
489 struct xge_pdata *pdata = netdev_priv(ndev);
490 int ret;
491
492 ret = xge_create_desc_rings(ndev);
493 if (ret)
494 return ret;
495
496 napi_enable(&pdata->napi);
497 ret = xge_request_irq(ndev);
498 if (ret)
499 return ret;
500
501 xge_intr_enable(pdata);
502 xge_wr_csr(pdata, DMARXCTRL, 1);
Iyappan Subramanianea8ab162017-03-21 18:18:02 -0700503
504 phy_start(ndev->phydev);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800505 xge_mac_enable(pdata);
506 netif_start_queue(ndev);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800507
508 return 0;
509}
510
/* ndo_stop: quiesce the port, tearing down in the reverse order of
 * xge_open() — queue, MAC, PHY, interrupts, NAPI, then the rings.
 */
static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}
526
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800527static int xge_napi(struct napi_struct *napi, const int budget)
528{
529 struct net_device *ndev = napi->dev;
Iyappan Subramanian70dbd9b2017-03-07 17:08:45 -0800530 struct xge_pdata *pdata;
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800531 int processed;
532
533 pdata = netdev_priv(ndev);
534
535 xge_txc_poll(ndev);
536 processed = xge_rx_poll(ndev, budget);
537
538 if (processed < budget) {
539 napi_complete_done(napi, processed);
540 xge_intr_enable(pdata);
541 }
542
543 return processed;
544}
545
/* ndo_set_mac_address: validate/store the new address, then program it
 * into the MAC station-address registers.
 */
static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int rc = eth_mac_addr(ndev, addr);

	if (rc)
		return rc;

	xge_mac_set_station_addr(pdata);

	return 0;
}
559
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800560static bool is_tx_pending(struct xge_raw_desc *raw_desc)
561{
562 if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
563 return true;
564
565 return false;
566}
567
568static void xge_free_pending_skb(struct net_device *ndev)
569{
570 struct xge_pdata *pdata = netdev_priv(ndev);
571 struct device *dev = &pdata->pdev->dev;
572 struct xge_desc_ring *tx_ring;
573 struct xge_raw_desc *raw_desc;
574 dma_addr_t dma_addr;
575 struct sk_buff *skb;
576 void *pkt_buf;
577 int i;
578
579 tx_ring = pdata->tx_ring;
580
581 for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
582 raw_desc = &tx_ring->raw_desc[i];
583
584 if (!is_tx_pending(raw_desc))
585 continue;
586
587 skb = tx_ring->pkt_info[i].skb;
588 dma_addr = tx_ring->pkt_info[i].dma_addr;
589 pkt_buf = tx_ring->pkt_info[i].pkt_buf;
590 dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
591 dev_kfree_skb_any(skb);
592 }
593}
594
/* ndo_tx_timeout: recover from a stalled transmitter by stopping TX DMA,
 * draining/releasing all queued packets, rebuilding the TX ring and
 * reinitializing the MAC, then restarting the queue.
 *
 * NOTE(review): ndo_tx_timeout is invoked from the netdev watchdog, while
 * rtnl_lock() and napi_disable() may sleep — confirm whether this recovery
 * should instead be deferred to process context (e.g. a workqueue).
 */
static void xge_timeout(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	if (netif_running(ndev)) {
		/* quiesce the stack and the device before touching the ring */
		netif_carrier_off(ndev);
		netif_stop_queue(ndev);
		xge_intr_disable(pdata);
		napi_disable(&pdata->napi);

		/* stop TX DMA, reap completions, drop unfinished packets */
		xge_wr_csr(pdata, DMATXCTRL, 0);
		xge_txc_poll(ndev);
		xge_free_pending_skb(ndev);
		xge_wr_csr(pdata, DMATXSTATUS, ~0U);

		/* rebuild the TX ring from scratch and reprogram the MAC */
		xge_setup_desc(pdata->tx_ring);
		xge_update_tx_desc_addr(pdata);
		xge_mac_init(pdata);

		napi_enable(&pdata->napi);
		xge_intr_enable(pdata);
		xge_mac_enable(pdata);
		netif_start_queue(ndev);
		netif_carrier_on(ndev);
	}

	rtnl_unlock();
}
625
626static void xge_get_stats64(struct net_device *ndev,
627 struct rtnl_link_stats64 *storage)
628{
629 struct xge_pdata *pdata = netdev_priv(ndev);
630 struct xge_stats *stats = &pdata->stats;
631
632 storage->tx_packets += stats->tx_packets;
633 storage->tx_bytes += stats->tx_bytes;
634
635 storage->rx_packets += stats->rx_packets;
636 storage->rx_bytes += stats->rx_bytes;
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800637 storage->rx_errors += stats->rx_errors;
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800638}
639
/* net_device callbacks implemented by this driver */
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};
648
649static int xge_probe(struct platform_device *pdev)
650{
651 struct device *dev = &pdev->dev;
652 struct net_device *ndev;
653 struct xge_pdata *pdata;
654 int ret;
655
656 ndev = alloc_etherdev(sizeof(struct xge_pdata));
657 if (!ndev)
658 return -ENOMEM;
659
660 pdata = netdev_priv(ndev);
661
662 pdata->pdev = pdev;
663 pdata->ndev = ndev;
664 SET_NETDEV_DEV(ndev, dev);
665 platform_set_drvdata(pdev, pdata);
666 ndev->netdev_ops = &xgene_ndev_ops;
667
668 ndev->features |= NETIF_F_GSO |
669 NETIF_F_GRO;
670
671 ret = xge_get_resources(pdata);
672 if (ret)
673 goto err;
674
675 ndev->hw_features = ndev->features;
676
677 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
678 if (ret) {
679 netdev_err(ndev, "No usable DMA configuration\n");
680 goto err;
681 }
682
683 ret = xge_init_hw(ndev);
684 if (ret)
685 goto err;
686
Iyappan Subramanianea8ab162017-03-21 18:18:02 -0700687 ret = xge_mdio_config(ndev);
688 if (ret)
689 goto err;
690
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800691 netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
692
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800693 ret = register_netdev(ndev);
694 if (ret) {
695 netdev_err(ndev, "Failed to register netdev\n");
696 goto err;
697 }
698
699 return 0;
700
701err:
702 free_netdev(ndev);
703
704 return ret;
705}
706
707static int xge_remove(struct platform_device *pdev)
708{
709 struct xge_pdata *pdata;
710 struct net_device *ndev;
711
712 pdata = platform_get_drvdata(pdev);
713 ndev = pdata->ndev;
714
715 rtnl_lock();
716 if (netif_running(ndev))
717 dev_close(ndev);
718 rtnl_unlock();
719
Iyappan Subramanianea8ab162017-03-21 18:18:02 -0700720 xge_mdio_remove(ndev);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800721 unregister_netdev(ndev);
722 free_netdev(ndev);
723
724 return 0;
725}
726
727static void xge_shutdown(struct platform_device *pdev)
728{
729 struct xge_pdata *pdata;
730
731 pdata = platform_get_drvdata(pdev);
732 if (!pdata)
733 return;
734
735 if (!pdata->ndev)
736 return;
737
738 xge_remove(pdev);
739}
740
/* ACPI IDs this driver binds to */
static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
746
/* Platform driver registration and module metadata */
static struct platform_driver xge_driver = {
	.driver = {
		.name = "xgene-enet-v2",
		.acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);

MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
MODULE_AUTHOR("Iyappan Subramanian <[email protected]>");
MODULE_VERSION(XGENE_ENET_V2_VERSION);
MODULE_LICENSE("GPL");