Diffstat (limited to 'drivers/net/ethernet/freescale/fec_main.c')
-rw-r--r--	drivers/net/ethernet/freescale/fec_main.c	253
1 file changed, 158 insertions(+), 95 deletions(-)
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 42ec6ca3bf03..66b5cbdb43b9 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -397,7 +397,7 @@ static void fec_dump(struct net_device *ndev)
 			fec16_to_cpu(bdp->cbd_sc),
 			fec32_to_cpu(bdp->cbd_bufaddr),
 			fec16_to_cpu(bdp->cbd_datlen),
-			txq->tx_skbuff[index]);
+			txq->tx_buf[index].skb);
 		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		index++;
 	} while (bdp != txq->bd.base);
@@ -654,7 +654,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
 
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	/* Make sure the updates to rest of the descriptor are performed before
 	 * transferring ownership.
@@ -672,9 +672,7 @@ static int fec_enet_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
 
 	skb_tx_timestamp(skb);
 
-	/* Make sure the update to bdp and tx_skbuff are performed before
-	 * txq->bd.cur.
-	 */
+	/* Make sure the update to bdp is performed before txq->bd.cur. */
 	wmb();
 	txq->bd.cur = bdp;
 
@@ -862,7 +860,7 @@ static int fec_enet_txq_submit_tso(struct fec_enet_priv_tx_q *txq,
 	}
 
 	/* Save skb pointer */
-	txq->tx_skbuff[index] = skb;
+	txq->tx_buf[index].skb = skb;
 
 	skb_tx_timestamp(skb);
 	txq->bd.cur = bdp;
@@ -952,16 +950,33 @@ static void fec_enet_bd_init(struct net_device *dev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			/* Initialize the BD for every fragment in the page. */
 			bdp->cbd_sc = cpu_to_fec16(0);
-			if (bdp->cbd_bufaddr &&
-			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-				dma_unmap_single(&fep->pdev->dev,
-						 fec32_to_cpu(bdp->cbd_bufaddr),
-						 fec16_to_cpu(bdp->cbd_datlen),
-						 DMA_TO_DEVICE);
-			if (txq->tx_skbuff[i]) {
-				dev_kfree_skb_any(txq->tx_skbuff[i]);
-				txq->tx_skbuff[i] = NULL;
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				if (bdp->cbd_bufaddr &&
+				    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+				if (txq->tx_buf[i].skb) {
+					dev_kfree_skb_any(txq->tx_buf[i].skb);
+					txq->tx_buf[i].skb = NULL;
+				}
+			} else {
+				if (bdp->cbd_bufaddr)
+					dma_unmap_single(&fep->pdev->dev,
+							 fec32_to_cpu(bdp->cbd_bufaddr),
+							 fec16_to_cpu(bdp->cbd_datlen),
+							 DMA_TO_DEVICE);
+
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
 			}
+
 			bdp->cbd_bufaddr = cpu_to_fec32(0);
 			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
 		}
@@ -1011,24 +1026,6 @@ static void fec_enet_enable_ring(struct net_device *ndev)
 	}
 }
 
-static void fec_enet_reset_skb(struct net_device *ndev)
-{
-	struct fec_enet_private *fep = netdev_priv(ndev);
-	struct fec_enet_priv_tx_q *txq;
-	int i, j;
-
-	for (i = 0; i < fep->num_tx_queues; i++) {
-		txq = fep->tx_queue[i];
-
-		for (j = 0; j < txq->bd.ring_size; j++) {
-			if (txq->tx_skbuff[j]) {
-				dev_kfree_skb_any(txq->tx_skbuff[j]);
-				txq->tx_skbuff[j] = NULL;
-			}
-		}
-	}
-}
-
 /*
  * This function is called to start or restart the FEC during a link
  * change, transmit timeout, or to reconfigure the FEC. The network
@@ -1071,9 +1068,6 @@ fec_restart(struct net_device *ndev)
 
 	fec_enet_enable_ring(ndev);
 
-	/* Reset tx SKB buffers. */
-	fec_enet_reset_skb(ndev);
-
 	/* Enable MII mode */
 	if (fep->full_duplex == DUPLEX_FULL) {
 		/* FD enable */
@@ -1378,9 +1372,10 @@ fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
 }
 
 static void
-fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
+fec_enet_tx_queue(struct net_device *ndev, u16 queue_id, int budget)
 {
 	struct fec_enet_private *fep;
+	struct xdp_frame *xdpf;
 	struct bufdesc *bdp;
 	unsigned short status;
 	struct sk_buff *skb;
@@ -1408,16 +1403,39 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 
 		index = fec_enet_get_bd_index(bdp, &txq->bd);
 
-		skb = txq->tx_skbuff[index];
-		txq->tx_skbuff[index] = NULL;
-		if (!IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
-			dma_unmap_single(&fep->pdev->dev,
-					 fec32_to_cpu(bdp->cbd_bufaddr),
-					 fec16_to_cpu(bdp->cbd_datlen),
-					 DMA_TO_DEVICE);
-		bdp->cbd_bufaddr = cpu_to_fec32(0);
-		if (!skb)
-			goto skb_done;
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			skb = txq->tx_buf[index].skb;
+			txq->tx_buf[index].skb = NULL;
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!skb)
+				goto tx_buf_done;
+		} else {
+			/* Tx processing cannot call any XDP (or page pool) APIs if
+			 * the "budget" is 0. Because NAPI is called with budget of
+			 * 0 (such as netpoll) indicates we may be in an IRQ context,
+			 * however, we can't use the page pool from IRQ context.
+			 */
+			if (unlikely(!budget))
+				break;
+
+			xdpf = txq->tx_buf[index].xdp;
+			if (bdp->cbd_bufaddr)
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			if (!xdpf) {
+				txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+				goto tx_buf_done;
+			}
+		}
 
 		/* Check for errors. */
 		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
@@ -1436,21 +1454,11 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 				ndev->stats.tx_carrier_errors++;
 		} else {
 			ndev->stats.tx_packets++;
-			ndev->stats.tx_bytes += skb->len;
-		}
-
-		/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
-		 * are to time stamp the packet, so we still need to check time
-		 * stamping enabled flag.
-		 */
-		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
-			     fep->hwts_tx_en) &&
-		    fep->bufdesc_ex) {
-			struct skb_shared_hwtstamps shhwtstamps;
-			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
-
-			fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
-			skb_tstamp_tx(skb, &shhwtstamps);
+
+			if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB)
+				ndev->stats.tx_bytes += skb->len;
+			else
+				ndev->stats.tx_bytes += xdpf->len;
 		}
 
 		/* Deferred means some collisions occurred during transmit,
@@ -1459,10 +1467,32 @@ fec_enet_tx_queue(struct net_device *ndev, u16 queue_id)
 		if (status & BD_ENET_TX_DEF)
 			ndev->stats.collisions++;
 
-		/* Free the sk buffer associated with this last transmit */
-		dev_kfree_skb_any(skb);
-skb_done:
-		/* Make sure the update to bdp and tx_skbuff are performed
+		if (txq->tx_buf[index].type == FEC_TXBUF_T_SKB) {
+			/* NOTE: SKBTX_IN_PROGRESS being set does not imply it's we who
+			 * are to time stamp the packet, so we still need to check time
+			 * stamping enabled flag.
+			 */
+			if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS &&
+				     fep->hwts_tx_en) && fep->bufdesc_ex) {
+				struct skb_shared_hwtstamps shhwtstamps;
+				struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+
+				fec_enet_hwtstamp(fep, fec32_to_cpu(ebdp->ts), &shhwtstamps);
+				skb_tstamp_tx(skb, &shhwtstamps);
+			}
+
+			/* Free the sk buffer associated with this last transmit */
+			dev_kfree_skb_any(skb);
+		} else {
+			xdp_return_frame(xdpf);
+
+			txq->tx_buf[index].xdp = NULL;
+			/* restore default tx buffer type: FEC_TXBUF_T_SKB */
+			txq->tx_buf[index].type = FEC_TXBUF_T_SKB;
+		}
+
+tx_buf_done:
+		/* Make sure the update to bdp and tx_buf are performed
 		 * before dirty_tx
 		 */
 		wmb();
@@ -1486,14 +1516,14 @@ skb_done:
 		writel(0, txq->bd.reg_desc_active);
 }
 
-static void fec_enet_tx(struct net_device *ndev)
+static void fec_enet_tx(struct net_device *ndev, int budget)
 {
 	struct fec_enet_private *fep = netdev_priv(ndev);
 	int i;
 
 	/* Make sure that AVB queues are processed first. */
 	for (i = fep->num_tx_queues - 1; i >= 0; i--)
-		fec_enet_tx_queue(ndev, i);
+		fec_enet_tx_queue(ndev, i, budget);
 }
 
 static void fec_enet_update_cbd(struct fec_enet_priv_rx_q *rxq,
@@ -1836,7 +1866,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 
 	do {
 		done += fec_enet_rx(ndev, budget - done);
-		fec_enet_tx(ndev);
+		fec_enet_tx(ndev, budget);
 	} while ((done < budget) && fec_enet_collect_events(fep));
 
 	if (done < budget) {
@@ -2810,6 +2840,7 @@ static void fec_enet_get_xdp_stats(struct fec_enet_private *fep, u64 *data)
 
 static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
 {
+#ifdef CONFIG_PAGE_POOL_STATS
 	struct page_pool_stats stats = {};
 	struct fec_enet_priv_rx_q *rxq;
 	int i;
@@ -2824,6 +2855,7 @@ static void fec_enet_page_pool_stats(struct fec_enet_private *fep, u64 *data)
 	}
 
 	page_pool_ethtool_stats_get(data, &stats);
+#endif
 }
 
 static void fec_enet_get_ethtool_stats(struct net_device *dev,
@@ -3268,9 +3300,19 @@ static void fec_enet_free_buffers(struct net_device *ndev)
 		for (i = 0; i < txq->bd.ring_size; i++) {
 			kfree(txq->tx_bounce[i]);
 			txq->tx_bounce[i] = NULL;
-			skb = txq->tx_skbuff[i];
-			txq->tx_skbuff[i] = NULL;
-			dev_kfree_skb(skb);
+
+			if (txq->tx_buf[i].type == FEC_TXBUF_T_SKB) {
+				skb = txq->tx_buf[i].skb;
+				txq->tx_buf[i].skb = NULL;
+				dev_kfree_skb(skb);
+			} else {
+				if (txq->tx_buf[i].xdp) {
+					xdp_return_frame(txq->tx_buf[i].xdp);
+					txq->tx_buf[i].xdp = NULL;
+				}
+
+				txq->tx_buf[i].type = FEC_TXBUF_T_SKB;
+			}
 		}
 	}
 }
@@ -3315,8 +3357,7 @@ static int fec_enet_alloc_queue(struct net_device *ndev)
 		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
 
 		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
-		txq->tx_wake_threshold =
-			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
+		txq->tx_wake_threshold = FEC_MAX_SKB_DESCS + 2 * MAX_SKB_FRAGS;
 
 		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
 					txq->bd.ring_size * TSO_HEADER_SIZE,
@@ -3751,12 +3792,18 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
 			return -EOPNOTSUPP;
 
+		if (!bpf->prog)
+			xdp_features_clear_redirect_target(dev);
+
 		if (is_run) {
 			napi_disable(&fep->napi);
 			netif_tx_disable(dev);
 		}
 
 		old_prog = xchg(&fep->xdp_prog, bpf->prog);
+		if (old_prog)
+			bpf_prog_put(old_prog);
+
 		fec_restart(dev);
 
 		if (is_run) {
@@ -3764,8 +3811,8 @@ static int fec_enet_bpf(struct net_device *dev, struct netdev_bpf *bpf)
 			netif_tx_start_all_queues(dev);
 		}
 
-		if (old_prog)
-			bpf_prog_put(old_prog);
+		if (bpf->prog)
+			xdp_features_set_redirect_target(dev, false);
 
 		return 0;
 
@@ -3791,20 +3838,18 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 				   struct xdp_frame *frame)
 {
 	unsigned int index, status, estatus;
-	struct bufdesc *bdp, *last_bdp;
+	struct bufdesc *bdp;
 	dma_addr_t dma_addr;
 	int entries_free;
 
 	entries_free = fec_enet_get_free_txdesc_num(txq);
 	if (entries_free < MAX_SKB_FRAGS + 1) {
-		netdev_err(fep->netdev, "NOT enough BD for SG!\n");
-		xdp_return_frame(frame);
-		return NETDEV_TX_BUSY;
+		netdev_err_once(fep->netdev, "NOT enough BD for SG!\n");
+		return -EBUSY;
	}
 
 	/* Fill in a Tx ring entry */
 	bdp = txq->bd.cur;
-	last_bdp = bdp;
 	status = fec16_to_cpu(bdp->cbd_sc);
 	status &= ~BD_ENET_TX_STATS;
 
@@ -3813,7 +3858,7 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	dma_addr = dma_map_single(&fep->pdev->dev, frame->data,
 				  frame->len, DMA_TO_DEVICE);
 	if (dma_mapping_error(&fep->pdev->dev, dma_addr))
-		return FEC_ENET_XDP_CONSUMED;
+		return -ENOMEM;
 
 	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
 	if (fep->bufdesc_ex)
@@ -3832,8 +3877,13 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 		ebdp->cbd_esc = cpu_to_fec32(estatus);
 	}
 
-	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
-	txq->tx_skbuff[index] = NULL;
+	txq->tx_buf[index].type = FEC_TXBUF_T_XDP_NDO;
+	txq->tx_buf[index].xdp = frame;
+
+	/* Make sure the updates to rest of the descriptor are performed before
+	 * transferring ownership.
+	 */
+	dma_wmb();
 
 	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
 	 * it's the last BD of the frame, and to put the CRC on the end.
@@ -3842,10 +3892,16 @@ static int fec_enet_txq_xmit_frame(struct fec_enet_private *fep,
 	bdp->cbd_sc = cpu_to_fec16(status);
 
 	/* If this was the last BD in the ring, start at the beginning again. */
-	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+
+	/* Make sure the update to bdp are performed before txq->bd.cur. */
+	dma_wmb();
 
 	txq->bd.cur = bdp;
 
+	/* Trigger transmission start */
+	writel(0, txq->bd.reg_desc_active);
+
 	return 0;
 }
 
@@ -3868,18 +3924,14 @@ static int fec_enet_xdp_xmit(struct net_device *dev,
 
 	__netif_tx_lock(nq, cpu);
 
+	/* Avoid tx timeout as XDP shares the queue with kernel stack */
+	txq_trans_cond_update(nq);
 	for (i = 0; i < num_frames; i++) {
-		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) != 0)
+		if (fec_enet_txq_xmit_frame(fep, txq, frames[i]) < 0)
 			break;
 		sent_frames++;
 	}
 
-	/* Make sure the update to bdp and tx_skbuff are performed. */
-	wmb();
-
-	/* Trigger transmission start */
-	writel(0, txq->bd.reg_desc_active);
-
 	__netif_tx_unlock(nq);
 
 	return sent_frames;
@@ -4031,6 +4083,10 @@ static int fec_enet_init(struct net_device *ndev)
 
 	ndev->hw_features = ndev->features;
 
+	if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME))
+		ndev->xdp_features = NETDEV_XDP_ACT_BASIC |
+				     NETDEV_XDP_ACT_REDIRECT;
+
 	fec_restart(ndev);
 
 	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
@@ -4478,9 +4534,11 @@ fec_drv_remove(struct platform_device *pdev)
 	struct device_node *np = pdev->dev.of_node;
 	int ret;
 
-	ret = pm_runtime_resume_and_get(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0)
-		return ret;
+		dev_err(&pdev->dev,
+			"Failed to resume device in remove callback (%pe)\n",
+			ERR_PTR(ret));
 
 	cancel_work_sync(&fep->tx_timeout_work);
 	fec_ptp_stop(pdev);
@@ -4493,8 +4551,13 @@ fec_drv_remove(struct platform_device *pdev)
 		of_phy_deregister_fixed_link(np);
 	of_node_put(fep->phy_node);
 
-	clk_disable_unprepare(fep->clk_ahb);
-	clk_disable_unprepare(fep->clk_ipg);
+	/* After pm_runtime_get_sync() failed, the clks are still off, so skip
+	 * disabling them again.
+	 */
+	if (ret >= 0) {
+		clk_disable_unprepare(fep->clk_ahb);
+		clk_disable_unprepare(fep->clk_ipg);
+	}
 	pm_runtime_put_noidle(&pdev->dev);
 	pm_runtime_disable(&pdev->dev);
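
A note for readers of the hunks above: every new txq->tx_buf[index] access relies on a per-descriptor buffer record that replaces the old tx_skbuff array in fec.h, which this view (limited to fec_main.c) does not show. The following is a minimal sketch of what that definition would look like, inferred only from the identifiers used above (.skb, .xdp, FEC_TXBUF_T_SKB, FEC_TXBUF_T_XDP_NDO); naming and layout in the real header may differ.

/* Sketch (assumption): the fec.h counterpart of this series, reconstructed
 * from the accesses in fec_main.c above -- it is not part of this diff.
 */
enum fec_txbuf_type {
	FEC_TXBUF_T_SKB,	/* slot owns a socket buffer (the default) */
	FEC_TXBUF_T_XDP_NDO,	/* slot owns an xdp_frame from ndo_xdp_xmit */
};

struct fec_tx_buffer {
	union {
		struct sk_buff *skb;	/* valid when type == FEC_TXBUF_T_SKB */
		struct xdp_frame *xdp;	/* valid when type == FEC_TXBUF_T_XDP_NDO */
	};
	enum fec_txbuf_type type;
};

/* In struct fec_enet_priv_tx_q, the old
 *	struct sk_buff *tx_skbuff[TX_RING_SIZE];
 * array would then become
 *	struct fec_tx_buffer tx_buf[TX_RING_SIZE];
 */

The type tag is what lets fec_enet_tx_queue(), fec_enet_bd_init() and fec_enet_free_buffers() in the hunks above pick the matching release path for each completed descriptor (dev_kfree_skb_any() for socket buffers, xdp_return_frame() for XDP frames) before resetting the slot to the default FEC_TXBUF_T_SKB.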