glort = l2_accel->dglort + 1 + i;
hw->mac.ops.update_xcast_mode(hw, glort,
- FM10K_XCAST_MODE_MULTI);
+ FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort,
sdev->dev_addr,
hw->mac.default_vid, true);
if (fm10k_host_mbx_ready(interface)) {
hw->mac.ops.update_xcast_mode(hw, glort,
- FM10K_XCAST_MODE_MULTI);
+ FM10K_XCAST_MODE_NONE);
fm10k_queue_mac_request(interface, glort, sdev->dev_addr,
hw->mac.default_vid, true);
}
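
Both fm10k hunks flip the offload glort from multicast-promiscuous to no xcast traffic: the station's unicast MAC is programmed explicitly via fm10k_queue_mac_request(), and multicast for offloaded macvlans now travels the software path instead. A condensed sketch of the resulting per-station bring-up (the helper name is hypothetical; the mac ops, FM10K_XCAST_MODE_* values, and request queue are the driver's own):

	/* Hypothetical helper condensing the pattern above; assumes the
	 * fm10k mac ops and MAC request queue from fm10k_netdev.c.
	 */
	static void example_setup_accel_glort(struct fm10k_intfc *interface,
					      u16 glort, const u8 *addr)
	{
		struct fm10k_hw *hw = &interface->hw;

		/* no xcast traffic: unicast is matched by an explicit filter */
		hw->mac.ops.update_xcast_mode(hw, glort, FM10K_XCAST_MODE_NONE);
		fm10k_queue_mac_request(interface, glort, addr,
					hw->mac.default_vid, true);
	}
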
static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- u32 reg_offset, vf_shift;
+ u16 pool = adapter->num_rx_pools;
+ u32 reg_offset, vf_shift, vmolr;
u32 gcr_ext, vmdctl;
int i;
vmdctl |= IXGBE_VT_CTL_REPLEN;
IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl);
+ /* accept untagged packets until a vlan tag is
+ * specifically set for the VMDQ queue/pool
+ */
+ vmolr = IXGBE_VMOLR_AUPE;
+ while (pool--)
+ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), vmolr);
+
vf_shift = VMDQ_P(0) % 32;
reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0;
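
The new loop gives every VMDq pool a conservative VMOLR baseline: only AUPE (accept untagged packets) is set, so a pool receives nothing beyond its programmed MAC/VLAN filters until someone asks for more. A minimal sketch of the same default applied to a single pool (the helper is hypothetical; the register accessors and the VMDQ_P() pool mapping are ixgbe's own):

	/* Hypothetical helper: reset one pool's VMOLR to the AUPE-only
	 * default established in ixgbe_configure_virtualization().
	 */
	static void example_reset_pool_vmolr(struct ixgbe_adapter *adapter,
					     u16 pool)
	{
		struct ixgbe_hw *hw = &adapter->hw;

		/* accept untagged packets; leave the BAM/MPE/ROMPE
		 * broadcast/multicast-promiscuous bits clear
		 */
		IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(pool)), IXGBE_VMOLR_AUPE);
	}
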
spin_unlock(&adapter->fdir_perfect_lock);
}
-static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool,
- struct ixgbe_adapter *adapter)
-{
- struct ixgbe_hw *hw = &adapter->hw;
- u32 vmolr;
-
- /* No unicast promiscuous support for VMDQ devices. */
- vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool));
- vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE);
-
- /* clear the affected bit */
- vmolr &= ~IXGBE_VMOLR_MPE;
-
- if (dev->flags & IFF_ALLMULTI) {
- vmolr |= IXGBE_VMOLR_MPE;
- } else {
- vmolr |= IXGBE_VMOLR_ROMPE;
- hw->mac.ops.update_mc_addr_list(hw, dev);
- }
- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr);
-}
-
/**
* ixgbe_clean_rx_ring - Free Rx Buffers per Queue
* @rx_ring: ring to free buffers from
*/
err = ixgbe_add_mac_filter(adapter, vdev->dev_addr,
VMDQ_P(accel->pool));
- if (err >= 0) {
- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter);
+ if (err >= 0)
return 0;
- }
for (i = 0; i < adapter->num_rx_queues_per_pool; i++)
adapter->rx_ring[baseq + i]->netdev = NULL;
ixgbe_del_mac_filter(adapter, accel->netdev->dev_addr,
VMDQ_P(accel->pool));
- /* disable ability to receive packets for this pool */
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_VMOLR(accel->pool), 0);
-
/* Allow remaining Rx packets to get flushed out of the
* Rx FIFO before we drop the netdev for the ring.
*/
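
With the explicit VMOLR teardown removed, the AUPE-only default set at configuration time persists across station add/remove, and the only addressing state an offloaded pool carries is its MAC filter. A hedged sketch of that add/remove pairing (the wrapper names are hypothetical; ixgbe_add_mac_filter()/ixgbe_del_mac_filter() are the existing driver helpers):

	static int example_fwd_station_add(struct ixgbe_adapter *adapter,
					   struct ixgbe_fwd_adapter *accel,
					   const u8 *addr)
	{
		/* a return >= 0 means a filter slot was claimed for the pool */
		int err = ixgbe_add_mac_filter(adapter, addr,
					       VMDQ_P(accel->pool));

		return err < 0 ? err : 0;
	}

	static void example_fwd_station_del(struct ixgbe_adapter *adapter,
					    struct ixgbe_fwd_adapter *accel,
					    const u8 *addr)
	{
		/* the pool keeps its AUPE default; nothing else to unwind */
		ixgbe_del_mac_filter(adapter, addr, VMDQ_P(accel->pool));
	}
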
const struct macvlan_dev *vlan = netdev_priv(dev);
const struct macvlan_port *port = vlan->port;
const struct macvlan_dev *dest;
+ void *accel_priv = NULL;
if (vlan->mode == MACVLAN_MODE_BRIDGE) {
const struct ethhdr *eth = (void *)skb->data;
}
}
+ /* For packets that are non-multicast and not bridged, we will pass

+ * the necessary information so that the lowerdev can distinguish
+ * the source of the packets via the accel_priv value.
+ */
+ accel_priv = vlan->accel_priv;
xmit_world:
skb->dev = vlan->lowerdev;
- return dev_queue_xmit(skb);
+ return dev_queue_xmit_accel(skb, accel_priv);
}
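
dev_queue_xmit_accel() is the pre-4.19 entry point that carries the offload cookie into the core transmit path; both entry points funnel into __dev_queue_xmit(), as the era-appropriate net/core/dev.c wrappers show (reproduced from memory as a sketch, not a verbatim quote):

	int dev_queue_xmit(struct sk_buff *skb)
	{
		return __dev_queue_xmit(skb, NULL);
	}

	int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
	{
		return __dev_queue_xmit(skb, accel_priv);
	}
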
static inline netdev_tx_t macvlan_netpoll_send_skb(struct macvlan_dev *vlan, struct sk_buff *skb)
static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
+ struct macvlan_dev *vlan = netdev_priv(dev);
unsigned int len = skb->len;
int ret;
- struct macvlan_dev *vlan = netdev_priv(dev);
if (unlikely(netpoll_tx_running(dev)))
return macvlan_netpoll_send_skb(vlan, skb);
- if (vlan->accel_priv) {
- skb->dev = vlan->lowerdev;
- ret = dev_queue_xmit_accel(skb, vlan->accel_priv);
- } else {
- ret = macvlan_queue_xmit(skb, dev);
- }
+ ret = macvlan_queue_xmit(skb, dev);
if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
struct vlan_pcpu_stats *pcpu_stats;
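
On the consumer side, the cookie reaches the lowerdev's queue selection so that offloaded unicast lands on the pool's dedicated tx queues. A sketch, assuming the 4.x-era ndo_select_queue signature and an ixgbe_fwd_adapter with a tx_base_queue field (the function name is hypothetical):

	static u16 example_select_queue(struct net_device *dev,
					struct sk_buff *skb,
					void *accel_priv,
					select_queue_fallback_t fallback)
	{
		struct ixgbe_fwd_adapter *fwd = accel_priv;

		/* offloaded macvlan traffic: remap onto the pool's queue range */
		if (fwd)
			return skb->queue_mapping + fwd->tx_base_queue;

		return fallback(dev, skb);
	}
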
/* Attempt to populate accel_priv which is used to offload the L2
* forwarding requests for unicast packets.
*/
- if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD) {
+ if (lowerdev->features & NETIF_F_HW_L2FW_DOFFLOAD)
vlan->accel_priv =
lowerdev->netdev_ops->ndo_dfwd_add_station(lowerdev, dev);
- /* If we get a NULL pointer back, or if we get an error
- * then we should just fall through to the non accelerated path
- */
- if (IS_ERR_OR_NULL(vlan->accel_priv))
- vlan->accel_priv = NULL;
- else
- return 0;
+ /* If the earlier attempt to offload failed, or accel_priv was not
+ * populated, we must add the unicast address to the lower device.
+ */
+ if (IS_ERR_OR_NULL(vlan->accel_priv)) {
+ vlan->accel_priv = NULL;
+ err = dev_uc_add(lowerdev, dev->dev_addr);
+ if (err < 0)
+ goto out;
}
- err = -EBUSY;
- if (macvlan_addr_busy(vlan->port, dev->dev_addr))
- goto out;
-
- err = dev_uc_add(lowerdev, dev->dev_addr);
- if (err < 0)
- goto out;
if (dev->flags & IFF_ALLMULTI) {
err = dev_set_allmulti(lowerdev, 1);
if (err < 0)
if (dev->flags & IFF_ALLMULTI)
dev_set_allmulti(lowerdev, -1);
del_unicast:
- dev_uc_del(lowerdev, dev->dev_addr);
-out:
if (vlan->accel_priv) {
lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
vlan->accel_priv);
vlan->accel_priv = NULL;
+ } else {
+ dev_uc_del(lowerdev, dev->dev_addr);
}
+out:
return err;
}
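
macvlan_open() now treats the two paths as mutually exclusive: either the lowerdev accepted a forwarding station (accel_priv is set) or the address went onto its unicast list, and the unwind mirrors whichever succeeded. A toy ndo_dfwd_add_station illustrating the contract macvlan relies on (the driver and struct names are hypothetical):

	struct toy_fwd {
		struct net_device *vdev;	/* the offloaded macvlan */
		int pool;			/* hardware pool claimed for it */
	};

	static void *toy_dfwd_add_station(struct net_device *pdev,
					  struct net_device *vdev)
	{
		struct toy_fwd *fwd = kzalloc(sizeof(*fwd), GFP_KERNEL);

		if (!fwd)
			return ERR_PTR(-ENOMEM); /* macvlan falls back to dev_uc_add() */
		fwd->vdev = vdev;
		/* a real driver would claim a pool and program
		 * vdev->dev_addr into it here
		 */
		return fwd;	/* opaque cookie stored in vlan->accel_priv */
	}
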
lowerdev->netdev_ops->ndo_dfwd_del_station(lowerdev,
vlan->accel_priv);
vlan->accel_priv = NULL;
- return 0;
}
dev_uc_unsync(lowerdev, dev);