Diffstat (limited to 'drivers/net/ethernet/intel/ixgbe')
 drivers/net/ethernet/intel/ixgbe/ixgbe.h         |  14
 drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c |  42
 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c    | 122
 drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h    |   3
 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c    | 259
 drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h     |   5
 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c   |  93
 drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h   |   2
 drivers/net/ethernet/intel/ixgbe/ixgbe_type.h    |  13
 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c    |  29
 10 files changed, 483 insertions(+), 99 deletions(-)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 7068e9c3691d..636f9e350162 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -151,6 +151,7 @@ struct vf_data_storage { u16 tx_rate; u16 vlan_count; u8 spoofchk_enabled; + bool rss_query_enabled; unsigned int vf_api; }; @@ -642,7 +643,6 @@ struct ixgbe_adapter { #define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) #define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 9) #define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) -#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11) /* Tx fast path data */ int num_tx_queues; @@ -722,6 +722,8 @@ struct ixgbe_adapter { u8 __iomem *io_addr; /* Mainly for iounmap use */ u32 wol; + u16 bridge_mode; + u16 eeprom_verh; u16 eeprom_verl; u16 eeprom_cap; @@ -765,6 +767,15 @@ struct ixgbe_adapter { u8 default_up; unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ + +/* maximum number of RETA entries among all devices supported by ixgbe + * driver: currently it's x550 device in non-SRIOV mode + */ +#define IXGBE_MAX_RETA_ENTRIES 512 + u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; + +#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ + u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)]; }; static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) @@ -954,4 +965,5 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct ixgbe_adapter *adapter, struct ixgbe_ring *tx_ring); +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); #endif /* _IXGBE_H_ */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index ccaecb1b8619..eafa9ec802ba 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2853,6 +2853,45 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) return ret; } +static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return sizeof(adapter->rss_key); +} + +static u32 ixgbe_rss_indir_size(struct net_device *netdev) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + return ixgbe_rss_indir_tbl_entries(adapter); +} + +static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) +{ + int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + for (i = 0; i < reta_size; i++) + indir[i] = adapter->rss_indir_tbl[i]; +} + +static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (hfunc) + *hfunc = ETH_RSS_HASH_TOP; + + if (indir) + ixgbe_get_reta(adapter, indir); + + if (key) + memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); + + return 0; +} + static int ixgbe_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { @@ -3110,6 +3149,9 @@ static const struct ethtool_ops ixgbe_ethtool_ops = { .set_coalesce = ixgbe_set_coalesce, .get_rxnfc = ixgbe_get_rxnfc, .set_rxnfc = ixgbe_set_rxnfc, + .get_rxfh_indir_size = ixgbe_rss_indir_size, + .get_rxfh_key_size = ixgbe_get_rxfh_key_size, + .get_rxfh = ixgbe_get_rxfh, .get_channels = ixgbe_get_channels, .set_channels = ixgbe_set_channels, .get_ts_info = ixgbe_get_ts_info, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c index 2ad91cb04dab..631c603fc966 100644 
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c @@ -71,6 +71,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) struct ixgbe_fcoe *fcoe; struct ixgbe_adapter *adapter; struct ixgbe_fcoe_ddp *ddp; + struct ixgbe_hw *hw; u32 fcbuff; if (!netdev) @@ -85,25 +86,51 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) if (!ddp->udl) return 0; + hw = &adapter->hw; len = ddp->len; - /* if there an error, force to invalidate ddp context */ - if (ddp->err) { + /* if no error then skip ddp context invalidation */ + if (!ddp->err) + goto skip_ddpinv; + + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP FCoE lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), + (xid | IXGBE_FCFLTRW_WE)); + + /* program FCBUFF */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); + + /* program FCDMARW */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_WE)); + + /* read FCBUFF to check context invalidated */ + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), + (xid | IXGBE_FCDMARW_RE)); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCDDC(2, xid)); + } else { + /* other hardware requires DDP FCoE lock */ spin_lock_bh(&fcoe->lock); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW, + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, (xid | IXGBE_FCFLTRW_WE)); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_WE)); /* guaranteed to be invalidated after 100us */ - IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, (xid | IXGBE_FCDMARW_RE)); - fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); + fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); spin_unlock_bh(&fcoe->lock); - if (fcbuff & IXGBE_FCBUFF_VALID) - udelay(100); - } + } + + if (fcbuff & IXGBE_FCBUFF_VALID) + usleep_range(100, 150); + +skip_ddpinv: if (ddp->sgl) dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, DMA_FROM_DEVICE); @@ -272,7 +299,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, /* program DMA context */ hw = &adapter->hw; - spin_lock_bh(&fcoe->lock); /* turn on last frame indication for target mode as FCP_RSPtarget is * supposed to send FCP_RSP when it is done. 
*/ @@ -283,16 +309,33 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); } - IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); - IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); - IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); - IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); - /* program filter context */ - IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); - IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); - IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); + if (hw->mac.type == ixgbe_mac_X550) { + /* X550 does not require DDP lock */ + + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), + ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), fcfltrw); + } else { + /* DDP lock for indirect DDP context access */ + spin_lock_bh(&fcoe->lock); + + IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); + IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); + IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); + IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); + /* program filter context */ + IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); + IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); + IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); - spin_unlock_bh(&fcoe->lock); + spin_unlock_bh(&fcoe->lock); + } return 1; @@ -371,6 +414,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, struct fcoe_crc_eof *crc; __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); __le32 ddp_err; + int ddp_max; u32 fctl; u16 xid; @@ -392,7 +436,11 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, else xid = be16_to_cpu(fh->fh_rx_id); - if (xid >= IXGBE_FCOE_DDP_MAX) + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + if (xid >= ddp_max) return -EINVAL; fcoe = &adapter->fcoe; @@ -612,7 +660,8 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) { struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; struct ixgbe_hw *hw = &adapter->hw; - int i, fcoe_q, fcoe_i; + int i, fcoe_q, fcoe_i, fcoe_q_h = 0; + int fcreta_size; u32 etqf; /* Minimal functionality for FCoE requires at least CRC offloads */ @@ -633,10 +682,23 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) return; /* Use one or more Rx queues for FCoE by redirection table */ - for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { + fcreta_size = IXGBE_FCRETA_SIZE; + if (adapter->hw.mac.type == ixgbe_mac_X550) + fcreta_size = IXGBE_FCRETA_SIZE_X550; + + for (i = 0; i < fcreta_size; i++) { + if (adapter->hw.mac.type == ixgbe_mac_X550) { + int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % + fcoe->indices); + fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; + fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & + IXGBE_FCRETA_ENTRY_HIGH_MASK; + } + fcoe_i = fcoe->offset + (i % fcoe->indices); fcoe_i &= IXGBE_FCRETA_ENTRY_MASK; fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; + fcoe_q |= fcoe_q_h; IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); } IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); @@ -672,13 +734,18 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) { struct 
ixgbe_fcoe *fcoe = &adapter->fcoe; - int cpu, i; + int cpu, i, ddp_max; /* do nothing if no DDP pools were allocated */ if (!fcoe->ddp_pool) return; - for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) + ddp_max = IXGBE_FCOE_DDP_MAX; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + ddp_max = IXGBE_FCOE_DDP_MAX_X550; + + for (i = 0; i < ddp_max; i++) ixgbe_fcoe_ddp_put(adapter->netdev, i); for_each_possible_cpu(cpu) @@ -758,6 +825,9 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) } adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + /* X550 has different DDP Max limit */ + if (adapter->hw.mac.type == ixgbe_mac_X550) + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h index 0772b7730fce..38385876effb 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h @@ -46,6 +46,7 @@ #define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ #define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ #define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ +#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ /* Default traffic class to use for FCoE */ #define IXGBE_FCOE_DEFTC 3 @@ -77,7 +78,7 @@ struct ixgbe_fcoe { struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; atomic_t refcnt; spinlock_t lock; - struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; + struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550]; void *extra_ddp_buffer; dma_addr_t extra_ddp_buffer_dma; unsigned long mode; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 395dc6bb5d82..d3f4b0ceb3f7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -3228,89 +3228,148 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); } -static void ixgbe_setup_reta(struct ixgbe_adapter *adapter, const u32 *seed) +/** + * Return a number of entries in the RSS indirection table + * + * @adapter: device handle + * + * - 82598/82599/X540: 128 + * - X550(non-SRIOV mode): 512 + * - X550(SRIOV mode): 64 + */ +u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) +{ + if (adapter->hw.mac.type < ixgbe_mac_X550) + return 128; + else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) + return 64; + else + return 512; +} + +/** + * Write the RETA table to HW + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +static void ixgbe_store_reta(struct ixgbe_adapter *adapter) { + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 reta = 0; - int i, j; - int reta_entries = 128; - u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; - int indices_multi; - - /* - * Program table for at least 2 queues w/ SR-IOV so that VFs can - * make full use of any rings they may have. We will use the - * PSRTYPE register to control how many rings we use within the PF. 
- */ - if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) - rss_i = 2; - - /* Fill out hash function seeds */ - for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); + u32 indices_multi; + u8 *indir_tbl = adapter->rss_indir_tbl; /* Fill out the redirection table as follows: - * 82598: 128 (8 bit wide) entries containing pair of 4 bit RSS indices - * 82599/X540: 128 (8 bit wide) entries containing 4 bit RSS index - * X550: 512 (8 bit wide) entries containing 6 bit RSS index + * - 82598: 8 bit wide entries containing pair of 4 bit RSS + * indices. + * - 82599/X540: 8 bit wide entries containing 4 bit RSS index + * - X550: 8 bit wide entries containing 6 bit RSS index */ if (adapter->hw.mac.type == ixgbe_mac_82598EB) indices_multi = 0x11; else indices_multi = 0x1; - switch (adapter->hw.mac.type) { - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) - reta_entries = 512; - default: - break; - } - - /* Fill out redirection table */ - for (i = 0, j = 0; i < reta_entries; i++, j++) { - if (j == rss_i) - j = 0; - reta = (reta << 8) | (j * indices_multi); + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; if ((i & 3) == 3) { if (i < 128) IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); else IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), reta); + reta = 0; } } } -static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter, const u32 *seed) +/** + * Write the RETA table to HW (for x550 devices in SRIOV mode) + * + * @adapter: device handle + * + * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. + */ +static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) { + u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); struct ixgbe_hw *hw = &adapter->hw; u32 vfreta = 0; + unsigned int pf_pool = adapter->num_vfs; + + /* Write redirection table to HW */ + for (i = 0; i < reta_entries; i++) { + vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; + if ((i & 3) == 3) { + IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), + vfreta); + vfreta = 0; + } + } +} + +static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; + u32 i, j; + u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + + /* Program table for at least 2 queues w/ SR-IOV so that VFs can + * make full use of any rings they may have. We will use the + * PSRTYPE register to control how many rings we use within the PF. 
+ */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) + rss_i = 2; + + /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) + IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); + + /* Fill out redirection table */ + memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); + + for (i = 0, j = 0; i < reta_entries; i++, j++) { + if (j == rss_i) + j = 0; + + adapter->rss_indir_tbl[i] = j; + } + + ixgbe_store_reta(adapter); +} + +static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) +{ + struct ixgbe_hw *hw = &adapter->hw; u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; unsigned int pf_pool = adapter->num_vfs; int i, j; /* Fill out hash function seeds */ for (i = 0; i < 10; i++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), seed[i]); + IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), + adapter->rss_key[i]); /* Fill out the redirection table */ for (i = 0, j = 0; i < 64; i++, j++) { if (j == rss_i) j = 0; - vfreta = (vfreta << 8) | j; - if ((i & 3) == 3) - IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), - vfreta); + + adapter->rss_indir_tbl[i] = j; } + + ixgbe_store_vfreta(adapter); } static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) { struct ixgbe_hw *hw = &adapter->hw; u32 mrqc = 0, rss_field = 0, vfmrqc = 0; - u32 rss_key[10]; u32 rxcsum; /* Disable indicating checksum in descriptor, enables RSS hash */ @@ -3354,7 +3413,7 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; - netdev_rss_key_fill(rss_key, sizeof(rss_key)); + netdev_rss_key_fill(adapter->rss_key, sizeof(adapter->rss_key)); if ((hw->mac.type >= ixgbe_mac_X550) && (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { unsigned int pf_pool = adapter->num_vfs; @@ -3364,12 +3423,12 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); /* Setup RSS through the VF registers */ - ixgbe_setup_vfreta(adapter, rss_key); + ixgbe_setup_vfreta(adapter); vfmrqc = IXGBE_MRQC_RSSEN; vfmrqc |= rss_field; IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); } else { - ixgbe_setup_reta(adapter, rss_key); + ixgbe_setup_reta(adapter); mrqc |= rss_field; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); } @@ -3553,7 +3612,7 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); - if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) + if (adapter->bridge_mode == BRIDGE_MODE_VEB) IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ @@ -3599,6 +3658,10 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) /* enable ethertype anti spoofing if hw supports it */ if (hw->mac.ops.set_ethertype_anti_spoofing) hw->mac.ops.set_ethertype_anti_spoofing(hw, true, i); + + /* Enable/Disable RSS query feature */ + ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, + adapter->vfinfo[i].rss_query_enabled); } } @@ -7870,6 +7933,80 @@ static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); } +/** + * ixgbe_configure_bridge_mode - set various bridge modes + * @adapter - the private structure + * @mode - requested bridge mode + * + * Configure some settings require for various bridge 
modes. + **/ +static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, + __u16 mode) +{ + struct ixgbe_hw *hw = &adapter->hw; + unsigned int p, num_pools; + u32 vmdctl; + + switch (mode) { + case BRIDGE_MODE_VEPA: + /* disable Tx loopback, rely on switch hairpin mode */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, 0); + + /* must enable Rx switching replication to allow multicast + * packet reception on all VFs, and to enable source address + * pruning. + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + vmdctl |= IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* enable Rx source address pruning. Note, this requires + * replication to be enabled or else it does nothing. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + true, + p); + } + break; + case BRIDGE_MODE_VEB: + /* enable Tx loopback for internal VF/PF communication */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, + IXGBE_PFDTXGSWC_VT_LBEN); + + /* disable Rx switching replication unless we have SR-IOV + * virtual functions + */ + vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); + if (!adapter->num_vfs) + vmdctl &= ~IXGBE_VT_CTL_REPLEN; + IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); + + /* disable Rx source address pruning, since we don't expect to + * be receiving external loopback of our transmitted frames. + */ + num_pools = adapter->num_vfs + adapter->num_rx_pools; + for (p = 0; p < num_pools; p++) { + if (hw->mac.ops.set_source_address_pruning) + hw->mac.ops.set_source_address_pruning(hw, + false, + p); + } + break; + default: + return -EINVAL; + } + + adapter->bridge_mode = mode; + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); + + return 0; +} + static int ixgbe_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, u16 flags) { @@ -7885,8 +8022,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EINVAL; nla_for_each_nested(attr, br_spec, rem) { + u32 status; __u16 mode; - u32 reg = 0; if (nla_type(attr) != IFLA_BRIDGE_MODE) continue; @@ -7895,19 +8032,11 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, return -EINVAL; mode = nla_get_u16(attr); - if (mode == BRIDGE_MODE_VEPA) { - reg = 0; - adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB; - } else if (mode == BRIDGE_MODE_VEB) { - reg = IXGBE_PFDTXGSWC_VT_LBEN; - adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; - } else - return -EINVAL; - - IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg); + status = ixgbe_configure_bridge_mode(adapter, mode); + if (status) + return status; - e_info(drv, "enabling bridge mode: %s\n", - mode == BRIDGE_MODE_VEPA ? 
"VEPA" : "VEB"); + break; } return 0; @@ -7918,17 +8047,12 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, u32 filter_mask) { struct ixgbe_adapter *adapter = netdev_priv(dev); - u16 mode; if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) return 0; - if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) - mode = BRIDGE_MODE_VEB; - else - mode = BRIDGE_MODE_VEPA; - - return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, + adapter->bridge_mode, 0, 0); } static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) @@ -8040,6 +8164,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, + .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, .ndo_get_vf_config = ixgbe_ndo_get_vf_config, .ndo_get_stats64 = ixgbe_get_stats64, #ifdef CONFIG_IXGBE_DCB @@ -8394,7 +8519,6 @@ skip_sriov: NETIF_F_IPV6_CSUM | NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXHASH | @@ -8416,6 +8540,7 @@ skip_sriov: } netdev->hw_features |= NETIF_F_RXALL; + netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; netdev->vlan_features |= NETIF_F_TSO; netdev->vlan_features |= NETIF_F_TSO6; @@ -8977,8 +9102,6 @@ static void __exit ixgbe_exit_module(void) pci_unregister_driver(&ixgbe_driver); ixgbe_dbg_exit(); - - rcu_barrier(); /* Wait for completion of call_rcu()'s */ } #ifdef CONFIG_IXGBE_DCA diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h index a5cb755de3a9..b1e4703ff2a5 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h @@ -73,6 +73,7 @@ enum ixgbe_pfvf_api_rev { ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ ixgbe_mbox_api_11, /* API version 1.1, linux/freebsd VF driver */ + ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ /* This value should always be last */ ixgbe_mbox_api_unknown, /* indicates that API version is not known */ }; @@ -97,6 +98,10 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_TRANS_VLAN 3 /* Indication of port vlan */ #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ +/* mailbox API, version 1.2 VF requests */ +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ + /* length of permanent address message returned from PF */ #define IXGBE_VF_PERMADDR_MSG_LEN 4 /* word in permanent address message with the current multicast type */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c index 09a291bb7c34..1d17b5872dd1 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c @@ -36,6 +36,7 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/ipv6.h> +#include <linux/if_bridge.h> #ifdef NETIF_F_HW_VLAN_CTAG_TX #include <linux/if_vlan.h> #endif @@ -79,7 +80,7 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) /* Initialize default switching mode VEB */ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); - adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; + adapter->bridge_mode = BRIDGE_MODE_VEB; /* If call to enable VFs succeeded then allocate memory * for per VF control structures. 
@@ -105,9 +106,18 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | IXGBE_FLAG2_RSC_ENABLED); - /* enable spoof checking for all VFs */ - for (i = 0; i < adapter->num_vfs; i++) + for (i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ adapter->vfinfo[i].spoofchk_enabled = true; + + /* We support VF RSS querying only for 82599 and x540 + * devices at the moment. These devices share RSS + * indirection table and RSS hash key with PF therefore + * we want to disable the querying by default. + */ + adapter->vfinfo[i].rss_query_enabled = 0; + } + return 0; } @@ -424,6 +434,7 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) #endif /* CONFIG_FCOE */ switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: /* * Version 1.1 supports jumbo frames on VFs if PF has * jumbo frames enabled which means legacy VFs are @@ -891,6 +902,7 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, switch (api) { case ixgbe_mbox_api_10: case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: adapter->vfinfo[vf].vf_api = api; return 0; default: @@ -914,6 +926,7 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, switch (adapter->vfinfo[vf].vf_api) { case ixgbe_mbox_api_20: case ixgbe_mbox_api_11: + case ixgbe_mbox_api_12: break; default: return -1; @@ -941,6 +954,53 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, return 0; } +static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) +{ + u32 i, j; + u32 *out_buf = &msgbuf[1]; + const u8 *reta = adapter->rss_indir_tbl; + u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); + + /* Check if operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + /* This mailbox command is supported (required) only for 82599 and x540 + * VFs which support up to 4 RSS queues. Therefore we will compress the + * RETA by saving only 2 bits from each entry. This way we will be able + * to transfer the whole RETA in a single mailbox operation. 
+ */ + for (i = 0; i < reta_size / 16; i++) { + out_buf[i] = 0; + for (j = 0; j < 16; j++) + out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); + } + + return 0; +} + +static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) +{ + u32 *rss_key = &msgbuf[1]; + + /* Check if the operation is permitted */ + if (!adapter->vfinfo[vf].rss_query_enabled) + return -EPERM; + + /* verify the PF is supporting the correct API */ + if (adapter->vfinfo[vf].vf_api != ixgbe_mbox_api_12) + return -EOPNOTSUPP; + + memcpy(rss_key, adapter->rss_key, sizeof(adapter->rss_key)); + + return 0; +} + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) { u32 mbx_size = IXGBE_VFMAILBOX_SIZE; @@ -997,6 +1057,12 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) case IXGBE_VF_GET_QUEUES: retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); break; + case IXGBE_VF_GET_RETA: + retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); + break; + case IXGBE_VF_GET_RSS_KEY: + retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); + break; default: e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); retval = IXGBE_ERR_MBX; @@ -1330,6 +1396,26 @@ int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) return 0; } +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting) +{ + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + /* This operation is currently supported only for 82599 and x540 + * devices. + */ + if (adapter->hw.mac.type < ixgbe_mac_82599EB || + adapter->hw.mac.type >= ixgbe_mac_X550) + return -EOPNOTSUPP; + + if (vf >= adapter->num_vfs) + return -EINVAL; + + adapter->vfinfo[vf].rss_query_enabled = setting; + + return 0; +} + int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi) { @@ -1343,5 +1429,6 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, ivi->vlan = adapter->vfinfo[vf].pf_vlan; ivi->qos = adapter->vfinfo[vf].pf_qos; ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; + ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; return 0; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h index 32c26d586c01..2c197e6d1fe7 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h @@ -47,6 +47,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate); int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); +int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, + bool setting); int ixgbe_ndo_get_vf_config(struct net_device *netdev, int vf, struct ifla_vf_info *ivi); void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h index c3ddc944f1e9..dd6ba5916dfe 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h @@ -285,6 +285,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ #define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +#define IXGBE_PFFLPL 0x050B0 +#define IXGBE_PFFLPH 0x050B4 #define IXGBE_VT_CTL 0x051B0 #define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total 
*/ #define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ @@ -608,6 +610,8 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_RTTBCNRM 0x04980 #define IXGBE_RTTQCNRM 0x04980 +/* FCoE Direct DMA Context */ +#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) /* FCoE DMA Context Registers */ #define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ #define IXGBE_FCPTRH 0x02414 /* FC USer Desc. PTR High */ @@ -634,6 +638,9 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ #define IXGBE_REOFF 0x05158 /* Rx FC EOF */ #define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ +/* FCoE Direct Filter Context */ +#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) +#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) /* FCoE Filter Context Registers */ #define IXGBE_FCFLT 0x05108 /* FC FLT Context */ #define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ @@ -664,6 +671,10 @@ struct ixgbe_thermal_sensor_data { #define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ #define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ #define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ +#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ +/* Higher 7 bits for the queue index */ +#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 +#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 /* Stats registers */ #define IXGBE_CRCERRS 0x04000 @@ -3069,6 +3080,8 @@ struct ixgbe_mac_operations { s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); void (*disable_rx)(struct ixgbe_hw *hw); void (*enable_rx)(struct ixgbe_hw *hw); + void (*set_source_address_pruning)(struct ixgbe_hw *, bool, + unsigned int); void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); /* DMA Coalescing */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c index 58a3155af7cd..cf5cf819a6b8 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c @@ -1363,6 +1363,33 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } +/** ixgbe_set_source_address_pruning_X550 - Enable/Disbale src address pruning + * @hw: pointer to hardware structure + * @enable: enable or disable source address pruning + * @pool: Rx pool to set source address pruning for + **/ +static void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, + bool enable, + unsigned int pool) +{ + u64 pfflp; + + /* max rx pool is 63 */ + if (pool > 63) + return; + + pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); + pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; + + if (enable) + pfflp |= (1ULL << pool); + else + pfflp &= ~(1ULL << pool); + + IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); + IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); +} + #define X550_COMMON_MAC \ .init_hw = &ixgbe_init_hw_generic, \ .start_hw = &ixgbe_start_hw_X540, \ @@ -1397,6 +1424,8 @@ static void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, .init_uta_tables = &ixgbe_init_uta_tables_generic, \ .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, \ .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, \ + .set_source_address_pruning = \ + &ixgbe_set_source_address_pruning_X550, \ .set_ethertype_anti_spoofing = \ &ixgbe_set_ethertype_anti_spoofing_X550, \ .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, \ |
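Editor's note (not part of the commit): the comment in ixgbe_get_vf_reta() above explains that, because 82599/x540 VFs expose at most 4 RSS queues, each redirection-table entry can be truncated to 2 bits, so 16 entries fit in one 32-bit mailbox word and the whole 128-entry RETA fits in a single mailbox exchange. The standalone C sketch below illustrates that bit layout; the packing loop mirrors the code in the patch, while the unpacking helper and all names here are illustrative assumptions (the matching VF-side code lives in the separate ixgbevf driver and is not part of this commit).

#include <stdint.h>
#include <stdio.h>

#define RETA_ENTRIES 128   /* 82599/x540 indirection table size (illustrative) */

/* Pack 8-bit RETA entries into 32-bit words, 2 bits per entry,
 * mirroring the layout used by ixgbe_get_vf_reta() in this patch.
 */
static void pack_vf_reta(const uint8_t *reta, uint32_t *out, unsigned int entries)
{
	unsigned int i, j;

	for (i = 0; i < entries / 16; i++) {
		out[i] = 0;
		for (j = 0; j < 16; j++)
			out[i] |= (uint32_t)(reta[16 * i + j] & 0x3) << (2 * j);
	}
}

/* The matching unpack a VF driver would perform (hypothetical helper). */
static void unpack_vf_reta(const uint32_t *in, uint8_t *reta, unsigned int entries)
{
	unsigned int i;

	for (i = 0; i < entries; i++)
		reta[i] = (in[i / 16] >> (2 * (i % 16))) & 0x3;
}

int main(void)
{
	uint8_t reta[RETA_ENTRIES], back[RETA_ENTRIES];
	uint32_t packed[RETA_ENTRIES / 16];
	unsigned int i;

	/* Fill the table round-robin across 4 RSS queues, as ixgbe_setup_reta() does. */
	for (i = 0; i < RETA_ENTRIES; i++)
		reta[i] = i % 4;

	pack_vf_reta(reta, packed, RETA_ENTRIES);
	unpack_vf_reta(packed, back, RETA_ENTRIES);

	for (i = 0; i < RETA_ENTRIES; i++)
		if (back[i] != reta[i])
			return 1;

	printf("%u entries packed into %zu mailbox words\n",
	       RETA_ENTRIES, sizeof(packed) / sizeof(packed[0]));
	return 0;
}

In the driver itself, the packed words are returned to the VF in msgbuf[1..] in response to an IXGBE_VF_GET_RETA mailbox request; the helper above only demonstrates the bit layout, not the mailbox transport.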