blob: 79048cc4670386b43a447cc46172150c20a7b51a [file] [log] [blame]
Thomas Gleixner1ccea772019-05-19 15:51:43 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -08002/*
3 * Applied Micro X-Gene SoC Ethernet v2 Driver
4 *
5 * Copyright (c) 2017, Applied Micro Circuits Corporation
6 * Author(s): Iyappan Subramanian <[email protected]>
7 * Keyur Chudgar <[email protected]>
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -08008 */
9
10#include "main.h"
11
12static const struct acpi_device_id xge_acpi_match[];
13
14static int xge_get_resources(struct xge_pdata *pdata)
15{
16 struct platform_device *pdev;
17 struct net_device *ndev;
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -080018 int phy_mode, ret = 0;
Iyappan Subramanian70dbd9b2017-03-07 17:08:45 -080019 struct resource *res;
20 struct device *dev;
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -080021
22 pdev = pdata->pdev;
23 dev = &pdev->dev;
24 ndev = pdata->ndev;
25
26 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
27 if (!res) {
28 dev_err(dev, "Resource enet_csr not defined\n");
29 return -ENODEV;
30 }
31
32 pdata->resources.base_addr = devm_ioremap(dev, res->start,
33 resource_size(res));
34 if (!pdata->resources.base_addr) {
35 dev_err(dev, "Unable to retrieve ENET Port CSR region\n");
36 return -ENOMEM;
37 }
38
39 if (!device_get_mac_address(dev, ndev->dev_addr, ETH_ALEN))
40 eth_hw_addr_random(ndev);
41
42 memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
43
44 phy_mode = device_get_phy_mode(dev);
45 if (phy_mode < 0) {
46 dev_err(dev, "Unable to get phy-connection-type\n");
47 return phy_mode;
48 }
49 pdata->resources.phy_mode = phy_mode;
50
51 if (pdata->resources.phy_mode != PHY_INTERFACE_MODE_RGMII) {
52 dev_err(dev, "Incorrect phy-connection-type specified\n");
53 return -ENODEV;
54 }
55
56 ret = platform_get_irq(pdev, 0);
Iyappan Subramanian1ffa8a72017-03-21 18:18:05 -070057 if (ret < 0) {
58 dev_err(dev, "Unable to get irq\n");
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -080059 return ret;
60 }
61 pdata->resources.irq = ret;
62
63 return 0;
64}
65
/* Post @nbuf empty receive buffers to the RX ring, starting at ring->tail.
 *
 * Each slot gets a freshly allocated skb of XGENE_ENET_STD_MTU bytes,
 * DMA-mapped for device writes.  The buffer address is published in the
 * descriptor and the slot is handed to hardware by setting the E bit.
 *
 * Returns 0 on success; -ENOMEM if skb allocation fails, -EINVAL on a DMA
 * mapping error.  On failure ring->tail is not advanced, even for slots
 * already armed during this call.
 */
static int xge_refill_buffers(struct net_device *ndev, u32 nbuf)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct xge_desc_ring *ring = pdata->rx_ring;
	const u8 slots = XGENE_ENET_NUM_DESC - 1;	/* ring size is a power of two */
	struct device *dev = &pdata->pdev->dev;
	struct xge_raw_desc *raw_desc;
	u64 addr_lo, addr_hi;
	u8 tail = ring->tail;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	u16 len;
	int i;

	for (i = 0; i < nbuf; i++) {
		raw_desc = &ring->raw_desc[tail];

		len = XGENE_ENET_STD_MTU;
		skb = netdev_alloc_skb(ndev, len);
		if (unlikely(!skb))
			return -ENOMEM;

		dma_addr = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dma_addr)) {
			netdev_err(ndev, "DMA mapping error\n");
			dev_kfree_skb_any(skb);
			return -EINVAL;
		}

		/* remember skb and mapping so the RX path can unmap/free it */
		ring->pkt_info[tail].skb = skb;
		ring->pkt_info[tail].dma_addr = dma_addr;

		/* preserve the next-descriptor link bits in m1 while filling
		 * in the upper half of the packet buffer address
		 */
		addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
		addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
		raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
					   SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
					   SET_BITS(PKT_ADDRH,
						    upper_32_bits(dma_addr)));

		/* make m1 visible to the device before the E bit hands the
		 * slot over in m0
		 */
		dma_wmb();
		raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
					   SET_BITS(E, 1));
		tail = (tail + 1) & slots;
	}

	ring->tail = tail;

	return 0;
}
115
116static int xge_init_hw(struct net_device *ndev)
117{
118 struct xge_pdata *pdata = netdev_priv(ndev);
119 int ret;
120
121 ret = xge_port_reset(ndev);
122 if (ret)
123 return ret;
124
125 xge_port_init(ndev);
126 pdata->nbufs = NUM_BUFS;
127
128 return 0;
129}
130
131static irqreturn_t xge_irq(const int irq, void *data)
132{
133 struct xge_pdata *pdata = data;
134
135 if (napi_schedule_prep(&pdata->napi)) {
136 xge_intr_disable(pdata);
137 __napi_schedule(&pdata->napi);
138 }
139
140 return IRQ_HANDLED;
141}
142
143static int xge_request_irq(struct net_device *ndev)
144{
145 struct xge_pdata *pdata = netdev_priv(ndev);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800146 int ret;
147
148 snprintf(pdata->irq_name, IRQ_ID_SIZE, "%s", ndev->name);
149
Iyappan Subramanian1ffa8a72017-03-21 18:18:05 -0700150 ret = request_irq(pdata->resources.irq, xge_irq, 0, pdata->irq_name,
151 pdata);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800152 if (ret)
153 netdev_err(ndev, "Failed to request irq %s\n", pdata->irq_name);
154
155 return ret;
156}
157
/* Release the DMA interrupt line acquired in xge_request_irq(). */
static void xge_free_irq(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	free_irq(pdata->resources.irq, pdata);
}
164
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800165static bool is_tx_slot_available(struct xge_raw_desc *raw_desc)
166{
167 if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
168 (GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)) == SLOT_EMPTY))
169 return true;
170
171 return false;
172}
173
174static netdev_tx_t xge_start_xmit(struct sk_buff *skb, struct net_device *ndev)
175{
176 struct xge_pdata *pdata = netdev_priv(ndev);
177 struct device *dev = &pdata->pdev->dev;
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800178 struct xge_desc_ring *tx_ring;
179 struct xge_raw_desc *raw_desc;
Iyappan Subramanian70dbd9b2017-03-07 17:08:45 -0800180 static dma_addr_t dma_addr;
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800181 u64 addr_lo, addr_hi;
182 void *pkt_buf;
183 u8 tail;
184 u16 len;
185
186 tx_ring = pdata->tx_ring;
187 tail = tx_ring->tail;
188 len = skb_headlen(skb);
189 raw_desc = &tx_ring->raw_desc[tail];
190
191 if (!is_tx_slot_available(raw_desc)) {
192 netif_stop_queue(ndev);
193 return NETDEV_TX_BUSY;
194 }
195
196 /* Packet buffers should be 64B aligned */
Luis Chamberlain750afb02019-01-04 09:23:09 +0100197 pkt_buf = dma_alloc_coherent(dev, XGENE_ENET_STD_MTU, &dma_addr,
198 GFP_ATOMIC);
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800199 if (unlikely(!pkt_buf)) {
200 dev_kfree_skb_any(skb);
201 return NETDEV_TX_OK;
202 }
203 memcpy(pkt_buf, skb->data, len);
204
205 addr_hi = GET_BITS(NEXT_DESC_ADDRH, le64_to_cpu(raw_desc->m1));
206 addr_lo = GET_BITS(NEXT_DESC_ADDRL, le64_to_cpu(raw_desc->m1));
207 raw_desc->m1 = cpu_to_le64(SET_BITS(NEXT_DESC_ADDRL, addr_lo) |
208 SET_BITS(NEXT_DESC_ADDRH, addr_hi) |
209 SET_BITS(PKT_ADDRH,
210 upper_32_bits(dma_addr)));
211
212 tx_ring->pkt_info[tail].skb = skb;
213 tx_ring->pkt_info[tail].dma_addr = dma_addr;
214 tx_ring->pkt_info[tail].pkt_buf = pkt_buf;
215
216 dma_wmb();
217
218 raw_desc->m0 = cpu_to_le64(SET_BITS(PKT_ADDRL, dma_addr) |
219 SET_BITS(PKT_SIZE, len) |
220 SET_BITS(E, 0));
221 skb_tx_timestamp(skb);
222 xge_wr_csr(pdata, DMATXCTRL, 1);
223
224 tx_ring->tail = (tail + 1) & (XGENE_ENET_NUM_DESC - 1);
225
226 return NETDEV_TX_OK;
227}
228
229static bool is_tx_hw_done(struct xge_raw_desc *raw_desc)
230{
231 if (GET_BITS(E, le64_to_cpu(raw_desc->m0)) &&
232 !GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0)))
233 return true;
234
235 return false;
236}
237
/* Reclaim TX descriptors that hardware has completed.
 *
 * Walks the ring from tx_ring->head, frees each finished slot's coherent
 * bounce buffer and skb, re-marks the slot empty, and acknowledges one
 * packet in DMATXSTATUS per reclaimed slot (write-1 semantics assumed —
 * verify against the DMA engine spec).  Wakes the queue if it had been
 * stopped for lack of slots.
 */
static void xge_txc_poll(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *tx_ring;
	struct xge_raw_desc *raw_desc;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	void *pkt_buf;
	u32 data;
	u8 head;

	tx_ring = pdata->tx_ring;
	head = tx_ring->head;

	/* bail out early if hardware reports no completed packets */
	data = xge_rd_csr(pdata, DMATXSTATUS);
	if (!GET_BITS(TXPKTCOUNT, data))
		return;

	while (1) {
		raw_desc = &tx_ring->raw_desc[head];

		if (!is_tx_hw_done(raw_desc))
			break;

		/* read the rest of the descriptor only after the ownership
		 * check above
		 */
		dma_rmb();

		skb = tx_ring->pkt_info[head].skb;
		dma_addr = tx_ring->pkt_info[head].dma_addr;
		pkt_buf = tx_ring->pkt_info[head].pkt_buf;
		pdata->stats.tx_packets++;
		pdata->stats.tx_bytes += skb->len;
		dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
		dev_kfree_skb_any(skb);

		/* clear pktstart address and pktsize */
		raw_desc->m0 = cpu_to_le64(SET_BITS(E, 1) |
					   SET_BITS(PKT_SIZE, SLOT_EMPTY));
		xge_wr_csr(pdata, DMATXSTATUS, 1);

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
	}

	if (netif_queue_stopped(ndev))
		netif_wake_queue(ndev);

	tx_ring->head = head;
}
286
/* Process up to @budget received frames from the RX ring.
 *
 * For each descriptor the hardware has released (E bit cleared), the skb
 * is unmapped and either dropped on a hardware-flagged error (D bit in
 * m2) or handed to GRO.  Every consumed slot is refilled with a fresh
 * buffer and one packet is acknowledged in DMARXSTATUS.
 *
 * Returns the number of descriptors processed, including dropped error
 * frames.
 */
static int xge_rx_poll(struct net_device *ndev, unsigned int budget)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	struct device *dev = &pdata->pdev->dev;
	struct xge_desc_ring *rx_ring;
	struct xge_raw_desc *raw_desc;
	struct sk_buff *skb;
	dma_addr_t dma_addr;
	int processed = 0;
	u8 head, rx_error;
	int i, ret;
	u32 data;
	u16 len;

	rx_ring = pdata->rx_ring;
	head = rx_ring->head;

	/* nothing to do if hardware has not reported any received packets */
	data = xge_rd_csr(pdata, DMARXSTATUS);
	if (!GET_BITS(RXPKTCOUNT, data))
		return 0;

	for (i = 0; i < budget; i++) {
		raw_desc = &rx_ring->raw_desc[head];

		/* slot still owned by hardware */
		if (GET_BITS(E, le64_to_cpu(raw_desc->m0)))
			break;

		/* read descriptor fields only after the ownership check */
		dma_rmb();

		skb = rx_ring->pkt_info[head].skb;
		rx_ring->pkt_info[head].skb = NULL;
		dma_addr = rx_ring->pkt_info[head].dma_addr;
		len = GET_BITS(PKT_SIZE, le64_to_cpu(raw_desc->m0));
		dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
				 DMA_FROM_DEVICE);

		/* D bit in m2 flags a receive error for this frame */
		rx_error = GET_BITS(D, le64_to_cpu(raw_desc->m2));
		if (unlikely(rx_error)) {
			pdata->stats.rx_errors++;
			dev_kfree_skb_any(skb);
			goto out;
		}

		skb_put(skb, len);
		skb->protocol = eth_type_trans(skb, ndev);

		pdata->stats.rx_packets++;
		pdata->stats.rx_bytes += len;
		napi_gro_receive(&pdata->napi, skb);
out:
		/* rearm this slot and acknowledge one packet to hardware */
		ret = xge_refill_buffers(ndev, 1);
		xge_wr_csr(pdata, DMARXSTATUS, 1);
		xge_wr_csr(pdata, DMARXCTRL, 1);

		if (ret)
			break;

		head = (head + 1) & (XGENE_ENET_NUM_DESC - 1);
		processed++;
	}

	rx_ring->head = head;

	return processed;
}
352
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800353static void xge_delete_desc_ring(struct net_device *ndev,
354 struct xge_desc_ring *ring)
355{
356 struct xge_pdata *pdata = netdev_priv(ndev);
357 struct device *dev = &pdata->pdev->dev;
358 u16 size;
359
360 if (!ring)
361 return;
362
363 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
364 if (ring->desc_addr)
365 dma_free_coherent(dev, size, ring->desc_addr, ring->dma_addr);
366
367 kfree(ring->pkt_info);
368 kfree(ring);
369}
370
371static void xge_free_buffers(struct net_device *ndev)
372{
373 struct xge_pdata *pdata = netdev_priv(ndev);
374 struct xge_desc_ring *ring = pdata->rx_ring;
375 struct device *dev = &pdata->pdev->dev;
376 struct sk_buff *skb;
377 dma_addr_t dma_addr;
378 int i;
379
380 for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
381 skb = ring->pkt_info[i].skb;
382 dma_addr = ring->pkt_info[i].dma_addr;
383
384 if (!skb)
385 continue;
386
387 dma_unmap_single(dev, dma_addr, XGENE_ENET_STD_MTU,
388 DMA_FROM_DEVICE);
389 dev_kfree_skb_any(skb);
390 }
391}
392
/* Tear down both descriptor rings.  Completed TX work and any pending RX
 * frames are drained first so their buffers are released before the ring
 * memory itself is freed.
 */
static void xge_delete_desc_rings(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	xge_txc_poll(ndev);
	xge_delete_desc_ring(ndev, pdata->tx_ring);

	/* 64 matches XGENE_ENET_NUM_DESC-sized drain — TODO confirm */
	xge_rx_poll(ndev, 64);
	xge_free_buffers(ndev);
	xge_delete_desc_ring(ndev, pdata->rx_ring);
}
404
405static struct xge_desc_ring *xge_create_desc_ring(struct net_device *ndev)
406{
407 struct xge_pdata *pdata = netdev_priv(ndev);
408 struct device *dev = &pdata->pdev->dev;
409 struct xge_desc_ring *ring;
410 u16 size;
411
Iyappan Subramanian1ffa8a72017-03-21 18:18:05 -0700412 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800413 if (!ring)
414 return NULL;
415
416 ring->ndev = ndev;
417
418 size = XGENE_ENET_DESC_SIZE * XGENE_ENET_NUM_DESC;
Luis Chamberlain750afb02019-01-04 09:23:09 +0100419 ring->desc_addr = dma_alloc_coherent(dev, size, &ring->dma_addr,
420 GFP_KERNEL);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800421 if (!ring->desc_addr)
422 goto err;
423
Iyappan Subramanian1ffa8a72017-03-21 18:18:05 -0700424 ring->pkt_info = kcalloc(XGENE_ENET_NUM_DESC, sizeof(*ring->pkt_info),
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800425 GFP_KERNEL);
426 if (!ring->pkt_info)
427 goto err;
428
429 xge_setup_desc(ring);
430
431 return ring;
432
433err:
434 xge_delete_desc_ring(ndev, ring);
435
436 return NULL;
437}
438
439static int xge_create_desc_rings(struct net_device *ndev)
440{
441 struct xge_pdata *pdata = netdev_priv(ndev);
442 struct xge_desc_ring *ring;
443 int ret;
444
445 /* create tx ring */
446 ring = xge_create_desc_ring(ndev);
447 if (!ring)
448 goto err;
449
450 pdata->tx_ring = ring;
451 xge_update_tx_desc_addr(pdata);
452
453 /* create rx ring */
454 ring = xge_create_desc_ring(ndev);
455 if (!ring)
456 goto err;
457
458 pdata->rx_ring = ring;
459 xge_update_rx_desc_addr(pdata);
460
461 ret = xge_refill_buffers(ndev, XGENE_ENET_NUM_DESC);
462 if (ret)
463 goto err;
464
465 return 0;
466err:
467 xge_delete_desc_rings(ndev);
468
469 return -ENOMEM;
470}
471
472static int xge_open(struct net_device *ndev)
473{
474 struct xge_pdata *pdata = netdev_priv(ndev);
475 int ret;
476
477 ret = xge_create_desc_rings(ndev);
478 if (ret)
479 return ret;
480
481 napi_enable(&pdata->napi);
482 ret = xge_request_irq(ndev);
483 if (ret)
484 return ret;
485
486 xge_intr_enable(pdata);
487 xge_wr_csr(pdata, DMARXCTRL, 1);
Iyappan Subramanianea8ab162017-03-21 18:18:02 -0700488
489 phy_start(ndev->phydev);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800490 xge_mac_enable(pdata);
491 netif_start_queue(ndev);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800492
493 return 0;
494}
495
/* ndo_stop: quiesce in the reverse order of xge_open() — stop the queue,
 * MAC and PHY first, then mask interrupts, release the IRQ, disable NAPI
 * and tear down the descriptor rings.
 */
static int xge_close(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	netif_stop_queue(ndev);
	xge_mac_disable(pdata);
	phy_stop(ndev->phydev);

	xge_intr_disable(pdata);
	xge_free_irq(ndev);
	napi_disable(&pdata->napi);
	xge_delete_desc_rings(ndev);

	return 0;
}
511
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800512static int xge_napi(struct napi_struct *napi, const int budget)
513{
514 struct net_device *ndev = napi->dev;
Iyappan Subramanian70dbd9b2017-03-07 17:08:45 -0800515 struct xge_pdata *pdata;
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800516 int processed;
517
518 pdata = netdev_priv(ndev);
519
520 xge_txc_poll(ndev);
521 processed = xge_rx_poll(ndev, budget);
522
523 if (processed < budget) {
524 napi_complete_done(napi, processed);
525 xge_intr_enable(pdata);
526 }
527
528 return processed;
529}
530
/* ndo_set_mac_address: validate and store the new address via
 * eth_mac_addr(), then push it into the MAC station-address registers.
 * Returns 0 or the error from eth_mac_addr().
 */
static int xge_set_mac_addr(struct net_device *ndev, void *addr)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int err = eth_mac_addr(ndev, addr);

	if (err)
		return err;

	xge_mac_set_station_addr(pdata);

	return 0;
}
544
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800545static bool is_tx_pending(struct xge_raw_desc *raw_desc)
546{
547 if (!GET_BITS(E, le64_to_cpu(raw_desc->m0)))
548 return true;
549
550 return false;
551}
552
553static void xge_free_pending_skb(struct net_device *ndev)
554{
555 struct xge_pdata *pdata = netdev_priv(ndev);
556 struct device *dev = &pdata->pdev->dev;
557 struct xge_desc_ring *tx_ring;
558 struct xge_raw_desc *raw_desc;
559 dma_addr_t dma_addr;
560 struct sk_buff *skb;
561 void *pkt_buf;
562 int i;
563
564 tx_ring = pdata->tx_ring;
565
566 for (i = 0; i < XGENE_ENET_NUM_DESC; i++) {
567 raw_desc = &tx_ring->raw_desc[i];
568
569 if (!is_tx_pending(raw_desc))
570 continue;
571
572 skb = tx_ring->pkt_info[i].skb;
573 dma_addr = tx_ring->pkt_info[i].dma_addr;
574 pkt_buf = tx_ring->pkt_info[i].pkt_buf;
575 dma_free_coherent(dev, XGENE_ENET_STD_MTU, pkt_buf, dma_addr);
576 dev_kfree_skb_any(skb);
577 }
578}
579
/* ndo_tx_timeout handler: recover from a stuck TX path.
 *
 * Quiesces the queue, interrupts and NAPI, stops TX DMA, reclaims
 * completed frames and drops the rest, then rebuilds the TX ring,
 * reinitializes the MAC and restarts everything.
 */
static void xge_timeout(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);

	rtnl_lock();

	/* interface may have been brought down before we got here */
	if (!netif_running(ndev))
		goto out;

	netif_stop_queue(ndev);
	xge_intr_disable(pdata);
	napi_disable(&pdata->napi);

	/* stop TX DMA, reclaim what finished, drop what did not, and
	 * clear the completion counter
	 */
	xge_wr_csr(pdata, DMATXCTRL, 0);
	xge_txc_poll(ndev);
	xge_free_pending_skb(ndev);
	xge_wr_csr(pdata, DMATXSTATUS, ~0U);

	/* rebuild the TX ring from scratch and re-program its address */
	xge_setup_desc(pdata->tx_ring);
	xge_update_tx_desc_addr(pdata);
	xge_mac_init(pdata);

	napi_enable(&pdata->napi);
	xge_intr_enable(pdata);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

out:
	rtnl_unlock();
}
610
611static void xge_get_stats64(struct net_device *ndev,
612 struct rtnl_link_stats64 *storage)
613{
614 struct xge_pdata *pdata = netdev_priv(ndev);
615 struct xge_stats *stats = &pdata->stats;
616
617 storage->tx_packets += stats->tx_packets;
618 storage->tx_bytes += stats->tx_bytes;
619
620 storage->rx_packets += stats->rx_packets;
621 storage->rx_bytes += stats->rx_bytes;
Iyappan Subramanianb105bcd2017-03-07 17:08:44 -0800622 storage->rx_errors += stats->rx_errors;
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800623}
624
/* net_device callbacks implemented by this driver */
static const struct net_device_ops xgene_ndev_ops = {
	.ndo_open = xge_open,
	.ndo_stop = xge_close,
	.ndo_start_xmit = xge_start_xmit,
	.ndo_set_mac_address = xge_set_mac_addr,
	.ndo_tx_timeout = xge_timeout,
	.ndo_get_stats64 = xge_get_stats64,
};
633
634static int xge_probe(struct platform_device *pdev)
635{
636 struct device *dev = &pdev->dev;
637 struct net_device *ndev;
638 struct xge_pdata *pdata;
639 int ret;
640
Iyappan Subramanian1ffa8a72017-03-21 18:18:05 -0700641 ndev = alloc_etherdev(sizeof(*pdata));
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800642 if (!ndev)
643 return -ENOMEM;
644
645 pdata = netdev_priv(ndev);
646
647 pdata->pdev = pdev;
648 pdata->ndev = ndev;
649 SET_NETDEV_DEV(ndev, dev);
650 platform_set_drvdata(pdev, pdata);
651 ndev->netdev_ops = &xgene_ndev_ops;
652
653 ndev->features |= NETIF_F_GSO |
654 NETIF_F_GRO;
655
656 ret = xge_get_resources(pdata);
657 if (ret)
658 goto err;
659
660 ndev->hw_features = ndev->features;
Iyappan Subramanian617d7952017-03-21 18:18:03 -0700661 xge_set_ethtool_ops(ndev);
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800662
663 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(64));
664 if (ret) {
665 netdev_err(ndev, "No usable DMA configuration\n");
666 goto err;
667 }
668
669 ret = xge_init_hw(ndev);
670 if (ret)
671 goto err;
672
Iyappan Subramanianea8ab162017-03-21 18:18:02 -0700673 ret = xge_mdio_config(ndev);
674 if (ret)
675 goto err;
676
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800677 netif_napi_add(ndev, &pdata->napi, xge_napi, NAPI_POLL_WEIGHT);
678
Iyappan Subramanian3b3f9a72017-03-07 17:08:43 -0800679 ret = register_netdev(ndev);
680 if (ret) {
681 netdev_err(ndev, "Failed to register netdev\n");
682 goto err;
683 }
684
685 return 0;
686
687err:
688 free_netdev(ndev);
689
690 return ret;
691}
692
/* Driver unbind: bring the interface down if it is running, then detach
 * the MDIO bus, unregister and free the net device.
 */
static int xge_remove(struct platform_device *pdev)
{
	struct xge_pdata *pdata;
	struct net_device *ndev;

	pdata = platform_get_drvdata(pdev);
	ndev = pdata->ndev;

	/* dev_close() must run under the RTNL lock */
	rtnl_lock();
	if (netif_running(ndev))
		dev_close(ndev);
	rtnl_unlock();

	xge_mdio_remove(ndev);
	unregister_netdev(ndev);
	free_netdev(ndev);

	return 0;
}
712
713static void xge_shutdown(struct platform_device *pdev)
714{
715 struct xge_pdata *pdata;
716
717 pdata = platform_get_drvdata(pdev);
718 if (!pdata)
719 return;
720
721 if (!pdata->ndev)
722 return;
723
724 xge_remove(pdev);
725}
726
/* ACPI ID the firmware advertises for the X-Gene v2 ethernet port */
static const struct acpi_device_id xge_acpi_match[] = {
	{ "APMC0D80" },
	{ }
};
MODULE_DEVICE_TABLE(acpi, xge_acpi_match);
732
/* Platform driver glue; devices are matched via the ACPI table above */
static struct platform_driver xge_driver = {
	.driver = {
		   .name = "xgene-enet-v2",
		   .acpi_match_table = ACPI_PTR(xge_acpi_match),
	},
	.probe = xge_probe,
	.remove = xge_remove,
	.shutdown = xge_shutdown,
};
module_platform_driver(xge_driver);
743
744MODULE_DESCRIPTION("APM X-Gene SoC Ethernet v2 driver");
745MODULE_AUTHOR("Iyappan Subramanian <[email protected]>");
746MODULE_VERSION(XGENE_ENET_V2_VERSION);
747MODULE_LICENSE("GPL");