Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking updates from David Miller:

 1) ax88796 does 64-bit divides, which cause link errors on ARM, fix
    from Arnd Bergmann.

 2) Once an improper offload setting is detected on an SKB we don't rate
    limit the log message, so we can very easily livelock.  From Ben
    Greear.  (See the rate-limit sketch right after this list.)

 3) Openvswitch cannot report vport configuration changes reliably
    because it doesn't preallocate the netlink notification message
    before changing state.  From Jesse Gross.

 4) The effective UID/GID SCM credentials fix, from Linus.

 5) When a user explicitly asks for wireless authentication, cfg80211
    isn't told about the AP detachment, leaving inconsistent state.  Fix
    from Johannes Berg.

 6) Fix self-MAC checks in batman-adv on multi-mesh nodes, from Antonio
    Quartulli.

 7) Revert build_skb() changes in IGB driver, they can result in memory
    corruption.  From Alexander Duyck.

 8) Fix setting VLANs on virtual functions in IXGBE, from Greg Rose.

 9) Fix TSO races in qlcnic driver, from Sritej Velaga.

10) In bnx2x the kernel driver and UNDI firmware can try to program the
    chip at the same time, resulting in corruption.  Add proper
    synchronization.  From Dmitry Kravkov.

11) Fix corruption of status block in firmware RAM in bnx2x, from Ariel
    Elior.

12) Fix load balancing hash regression of bonding driver in forwarding
    configurations, from Eric Dumazet.

13) Fix TS ECR regression in TCP by calling tcp_replace_ts_recent() in
    all the right spots, from Eric Dumazet.

14) Fix several bonding bugs having to do with address maintenance,
    including not removing addresses when configuration operations
    encounter errors, missed locking on the address lists, missing
    refcounting on VLAN objects, etc.  All from Nikolay Aleksandrov.

15) Add workarounds for firmware bugs in LTE qmi_wwan devices, wherein
    the devices fail to add a proper ethernet header while on LTE
    networks but otherwise properly do so on 2G and 3G ones.  From Bjørn
    Mork.
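
A minimal sketch of the rate-limit pattern referenced in item 2, assuming
the usual net_ratelimit() helper; the function name and message below are
illustrative, not the exact upstream skb_warn_bad_offload() code:

#include <linux/bug.h>
#include <linux/net.h>		/* net_ratelimit() */
#include <linux/skbuff.h>

/* Emit at most a bounded burst of warnings per interval, so a device
 * that keeps handing us badly-offloaded SKBs cannot flood the log with
 * one splat per packet and effectively livelock the machine. */
static void warn_bad_offload(const struct sk_buff *skb)
{
	if (!net_ratelimit())
		return;

	WARN(1, "bad offload: ip_summed=%d gso_type=%u gso_size=%u\n",
	     skb->ip_summed, skb_shinfo(skb)->gso_type,
	     skb_shinfo(skb)->gso_size);
}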

* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (38 commits)
  net: fix incorrect credentials passing
  net: rate-limit warn-bad-offload splats.
  net: ax88796: avoid 64 bit arithmetic
  qlge: Update version to 1.00.00.32.
  qlge: Fix ethtool autoneg advertising.
  qlge: Fix receive path to drop error frames
  net: qmi_wwan: prevent duplicate mac address on link (firmware bug workaround)
  net: qmi_wwan: fixup destination address (firmware bug workaround)
  net: qmi_wwan: fixup missing ethernet header (firmware bug workaround)
  bonding: in bond_mc_swap() bond's mc addr list is walked without lock
  bonding: disable netpoll on enslave failure
  bonding: primary_slave & curr_active_slave are not cleaned on enslave failure
  bonding: vlans don't get deleted on enslave failure
  bonding: mc addresses don't get deleted on enslave failure
  pkt_sched: fix error return code in fw_change_attrs()
  irda: small read past the end of array in debug code
  tcp: call tcp_replace_ts_recent() from tcp_ack()
  netfilter: xt_rpfilter: skip locally generated broadcast/multicast, too
  netfilter: ipset: bitmap:ip,mac: fix listing with timeout
  bonding: fix l23 and l34 load balancing in forwarding path
  ...
torvalds committed Apr 21, 2013
2 parents f068f5e + 83f1b4b commit c437d88
Showing 41 changed files with 544 additions and 393 deletions.
76 changes: 51 additions & 25 deletions drivers/net/bonding/bond_main.c
@@ -846,8 +846,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(old_active->dev, -1);

netif_addr_lock_bh(bond->dev);
netdev_for_each_mc_addr(ha, bond->dev)
dev_mc_del(old_active->dev, ha->addr);
netif_addr_unlock_bh(bond->dev);
}

if (new_active) {
@@ -858,8 +860,10 @@ static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
if (bond->dev->flags & IFF_ALLMULTI)
dev_set_allmulti(new_active->dev, 1);

netif_addr_lock_bh(bond->dev);
netdev_for_each_mc_addr(ha, bond->dev)
dev_mc_add(new_active->dev, ha->addr);
netif_addr_unlock_bh(bond->dev);
}
}

@@ -1901,9 +1905,26 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_destroy_slave_symlinks(bond_dev, slave_dev);

err_detach:
if (!USES_PRIMARY(bond->params.mode)) {
netif_addr_lock_bh(bond_dev);
bond_mc_list_flush(bond_dev, slave_dev);
netif_addr_unlock_bh(bond_dev);
}
bond_del_vlans_from_slave(bond, slave_dev);
write_lock_bh(&bond->lock);
bond_detach_slave(bond, new_slave);
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
write_unlock_bh(&bond->lock);
if (bond->curr_active_slave == new_slave) {
read_lock(&bond->lock);
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
read_unlock(&bond->lock);
}
slave_disable_netpoll(new_slave);

err_close:
slave_dev->priv_flags &= ~IFF_BONDING;
@@ -3296,20 +3317,22 @@ static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
*/
static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
{
struct ethhdr *data = (struct ethhdr *)skb->data;
struct iphdr *iph;
struct ipv6hdr *ipv6h;
const struct ethhdr *data;
const struct iphdr *iph;
const struct ipv6hdr *ipv6h;
u32 v6hash;
__be32 *s, *d;
const __be32 *s, *d;

if (skb->protocol == htons(ETH_P_IP) &&
skb_network_header_len(skb) >= sizeof(*iph)) {
pskb_network_may_pull(skb, sizeof(*iph))) {
iph = ip_hdr(skb);
data = (struct ethhdr *)skb->data;
return ((ntohl(iph->saddr ^ iph->daddr) & 0xffff) ^
(data->h_dest[5] ^ data->h_source[5])) % count;
} else if (skb->protocol == htons(ETH_P_IPV6) &&
skb_network_header_len(skb) >= sizeof(*ipv6h)) {
pskb_network_may_pull(skb, sizeof(*ipv6h))) {
ipv6h = ipv6_hdr(skb);
data = (struct ethhdr *)skb->data;
s = &ipv6h->saddr.s6_addr32[0];
d = &ipv6h->daddr.s6_addr32[0];
v6hash = (s[1] ^ d[1]) ^ (s[2] ^ d[2]) ^ (s[3] ^ d[3]);
@@ -3328,33 +3351,36 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
{
u32 layer4_xor = 0;
struct iphdr *iph;
struct ipv6hdr *ipv6h;
__be32 *s, *d;
__be16 *layer4hdr;
const struct iphdr *iph;
const struct ipv6hdr *ipv6h;
const __be32 *s, *d;
const __be16 *l4 = NULL;
__be16 _l4[2];
int noff = skb_network_offset(skb);
int poff;

if (skb->protocol == htons(ETH_P_IP) &&
skb_network_header_len(skb) >= sizeof(*iph)) {
pskb_may_pull(skb, noff + sizeof(*iph))) {
iph = ip_hdr(skb);
if (!ip_is_fragment(iph) &&
(iph->protocol == IPPROTO_TCP ||
iph->protocol == IPPROTO_UDP) &&
(skb_headlen(skb) - skb_network_offset(skb) >=
iph->ihl * sizeof(u32) + sizeof(*layer4hdr) * 2)) {
layer4hdr = (__be16 *)((u32 *)iph + iph->ihl);
layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
poff = proto_ports_offset(iph->protocol);

if (!ip_is_fragment(iph) && poff >= 0) {
l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
sizeof(_l4), &_l4);
if (l4)
layer4_xor = ntohs(l4[0] ^ l4[1]);
}
return (layer4_xor ^
((ntohl(iph->saddr ^ iph->daddr)) & 0xffff)) % count;
} else if (skb->protocol == htons(ETH_P_IPV6) &&
skb_network_header_len(skb) >= sizeof(*ipv6h)) {
pskb_may_pull(skb, noff + sizeof(*ipv6h))) {
ipv6h = ipv6_hdr(skb);
if ((ipv6h->nexthdr == IPPROTO_TCP ||
ipv6h->nexthdr == IPPROTO_UDP) &&
(skb_headlen(skb) - skb_network_offset(skb) >=
sizeof(*ipv6h) + sizeof(*layer4hdr) * 2)) {
layer4hdr = (__be16 *)(ipv6h + 1);
layer4_xor = ntohs(*layer4hdr ^ *(layer4hdr + 1));
poff = proto_ports_offset(ipv6h->nexthdr);
if (poff >= 0) {
l4 = skb_header_pointer(skb, noff + sizeof(*ipv6h) + poff,
sizeof(_l4), &_l4);
if (l4)
layer4_xor = ntohs(l4[0] ^ l4[1]);
}
s = &ipv6h->saddr.s6_addr32[0];
d = &ipv6h->daddr.s6_addr32[0];
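
The bond_xmit_hash_policy_l34() hunk above reads the L4 ports through
skb_header_pointer() instead of pointer arithmetic on skb->data, so short
or non-linear packets cannot cause an out-of-bounds read.  A condensed
sketch of that lookup for the IPv4 case, assuming the caller has already
validated the IP header (illustrative, not the exact bonding code):

#include <linux/ip.h>
#include <linux/skbuff.h>
#include <net/ip.h>		/* ip_is_fragment() */

/* XOR of source and destination L4 ports, or 0 when they cannot be
 * read safely (fragment, unknown protocol, or truncated packet). */
static u32 l4_ports_xor(const struct sk_buff *skb, const struct iphdr *iph,
			int noff)
{
	const __be16 *l4;
	__be16 _l4[2];
	int poff = proto_ports_offset(iph->protocol);

	if (ip_is_fragment(iph) || poff < 0)
		return 0;

	l4 = skb_header_pointer(skb, noff + (iph->ihl << 2) + poff,
				sizeof(_l4), &_l4);
	return l4 ? ntohs(l4[0] ^ l4[1]) : 0;
}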
2 changes: 1 addition & 1 deletion drivers/net/ethernet/8390/ax88796.c
@@ -828,7 +828,7 @@ static int ax_probe(struct platform_device *pdev)
struct ei_device *ei_local;
struct ax_device *ax;
struct resource *irq, *mem, *mem2;
resource_size_t mem_size, mem2_size = 0;
unsigned long mem_size, mem2_size = 0;
int ret = 0;

dev = ax__alloc_ei_netdev(sizeof(struct ax_device));
7 changes: 5 additions & 2 deletions drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -2614,6 +2614,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
}
}

/* initialize FW coalescing state machines in RAM */
bnx2x_update_coalesce(bp);

/* setup the leading queue */
rc = bnx2x_setup_leading(bp);
if (rc) {
@@ -4580,11 +4583,11 @@ static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
u32 addr = BAR_CSTRORM_INTMEM +
CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
u16 flags = REG_RD16(bp, addr);
u8 flags = REG_RD8(bp, addr);
/* clear and set */
flags &= ~HC_INDEX_DATA_HC_ENABLED;
flags |= enable_flag;
REG_WR16(bp, addr, flags);
REG_WR8(bp, addr, flags);
DP(NETIF_MSG_IFUP,
"port %x fw_sb_id %d sb_index %d disable %d\n",
port, fw_sb_id, sb_index, disable);
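
The storm_memset_hc_disable() hunk above narrows the flags update from a
16-bit to an 8-bit register access.  The idea, illustrated with a
hypothetical two-byte layout below (plain C, not driver code), is that a
wide read-modify-write also rewrites the neighbouring byte of the status
block, so a concurrent firmware update of that byte can be lost:

#include <stdint.h>

/* Hypothetical layout: host owns 'flags', firmware owns 'fw_byte', and
 * both share one aligned 16-bit word in adapter memory. */
struct sb_word {
	uint8_t flags;		/* host-owned; HC_ENABLED bit lives here  */
	uint8_t fw_byte;	/* firmware-owned; may change at any time  */
};

/* Wide RMW: the snapshot includes fw_byte, and writing it back can
 * overwrite whatever firmware stored there in the meantime. */
static void set_enabled_wide(volatile uint16_t *word, uint16_t enable_bit)
{
	uint16_t v = *word;
	*word = (uint16_t)((v & ~(uint16_t)1) | enable_bit);
}

/* Narrow RMW: only the host-owned byte is read and written, which is
 * what switching to REG_RD8()/REG_WR8() achieves. */
static void set_enabled_narrow(volatile struct sb_word *sb, uint8_t enable_bit)
{
	sb->flags = (uint8_t)((sb->flags & (uint8_t)~1) | enable_bit);
}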
4 changes: 4 additions & 0 deletions drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -9878,6 +9878,10 @@ static int bnx2x_prev_unload_common(struct bnx2x *bp)
REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
}
}
if (!CHIP_IS_E1x(bp))
/* block FW from writing to host */
REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);

/* wait until BRB is empty */
tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
while (timer_count) {
8 changes: 0 additions & 8 deletions drivers/net/ethernet/intel/igb/igb.h
@@ -284,18 +284,10 @@ struct igb_q_vector {
enum e1000_ring_flags_t {
IGB_RING_FLAG_RX_SCTP_CSUM,
IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
IGB_RING_FLAG_RX_BUILD_SKB_ENABLED,
IGB_RING_FLAG_TX_CTX_IDX,
IGB_RING_FLAG_TX_DETECT_HANG
};

#define ring_uses_build_skb(ring) \
test_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define set_ring_build_skb_enabled(ring) \
set_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)
#define clear_ring_build_skb_enabled(ring) \
clear_bit(IGB_RING_FLAG_RX_BUILD_SKB_ENABLED, &(ring)->flags)

#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)

#define IGB_RX_DESC(R, i) \
110 changes: 4 additions & 106 deletions drivers/net/ethernet/intel/igb/igb_main.c
@@ -3350,20 +3350,6 @@ void igb_configure_rx_ring(struct igb_adapter *adapter,
wr32(E1000_RXDCTL(reg_idx), rxdctl);
}

static void igb_set_rx_buffer_len(struct igb_adapter *adapter,
struct igb_ring *rx_ring)
{
#define IGB_MAX_BUILD_SKB_SIZE \
(SKB_WITH_OVERHEAD(IGB_RX_BUFSZ) - \
(NET_SKB_PAD + NET_IP_ALIGN + IGB_TS_HDR_LEN))

/* set build_skb flag */
if (adapter->max_frame_size <= IGB_MAX_BUILD_SKB_SIZE)
set_ring_build_skb_enabled(rx_ring);
else
clear_ring_build_skb_enabled(rx_ring);
}

/**
* igb_configure_rx - Configure receive Unit after Reset
* @adapter: board private structure
@@ -3383,11 +3369,8 @@ static void igb_configure_rx(struct igb_adapter *adapter)

/* Setup the HW Rx Head and Tail Descriptor Pointers and
* the Base and Length of the Rx Descriptor Ring */
for (i = 0; i < adapter->num_rx_queues; i++) {
struct igb_ring *rx_ring = adapter->rx_ring[i];
igb_set_rx_buffer_len(adapter, rx_ring);
igb_configure_rx_ring(adapter, rx_ring);
}
for (i = 0; i < adapter->num_rx_queues; i++)
igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
}

/**
@@ -6203,78 +6186,6 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
return igb_can_reuse_rx_page(rx_buffer, page, truesize);
}

static struct sk_buff *igb_build_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc)
{
struct igb_rx_buffer *rx_buffer;
struct sk_buff *skb;
struct page *page;
void *page_addr;
unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);
#if (PAGE_SIZE < 8192)
unsigned int truesize = IGB_RX_BUFSZ;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
SKB_DATA_ALIGN(NET_SKB_PAD +
NET_IP_ALIGN +
size);
#endif

/* If we spanned a buffer we have a huge mess so test for it */
BUG_ON(unlikely(!igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)));

rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
page = rx_buffer->page;
prefetchw(page);

page_addr = page_address(page) + rx_buffer->page_offset;

/* prefetch first cache line of first page */
prefetch(page_addr + NET_SKB_PAD + NET_IP_ALIGN);
#if L1_CACHE_BYTES < 128
prefetch(page_addr + L1_CACHE_BYTES + NET_SKB_PAD + NET_IP_ALIGN);
#endif

/* build an skb to around the page buffer */
skb = build_skb(page_addr, truesize);
if (unlikely(!skb)) {
rx_ring->rx_stats.alloc_failed++;
return NULL;
}

/* we are reusing so sync this buffer for CPU use */
dma_sync_single_range_for_cpu(rx_ring->dev,
rx_buffer->dma,
rx_buffer->page_offset,
IGB_RX_BUFSZ,
DMA_FROM_DEVICE);

/* update pointers within the skb to store the data */
skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD);
__skb_put(skb, size);

/* pull timestamp out of packet data */
if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
igb_ptp_rx_pktstamp(rx_ring->q_vector, skb->data, skb);
__skb_pull(skb, IGB_TS_HDR_LEN);
}

if (igb_can_reuse_rx_page(rx_buffer, page, truesize)) {
/* hand second half of page back to the ring */
igb_reuse_rx_page(rx_ring, rx_buffer);
} else {
/* we are not reusing the buffer so unmap it */
dma_unmap_page(rx_ring->dev, rx_buffer->dma,
PAGE_SIZE, DMA_FROM_DEVICE);
}

/* clear contents of buffer_info */
rx_buffer->dma = 0;
rx_buffer->page = NULL;

return skb;
}

static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
union e1000_adv_rx_desc *rx_desc,
struct sk_buff *skb)
@@ -6690,10 +6601,7 @@ static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
rmb();

/* retrieve a buffer from the ring */
if (ring_uses_build_skb(rx_ring))
skb = igb_build_rx_buffer(rx_ring, rx_desc);
else
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);
skb = igb_fetch_rx_buffer(rx_ring, rx_desc, skb);

/* exit if we failed to retrieve a buffer */
if (!skb)
@@ -6780,14 +6688,6 @@ static bool igb_alloc_mapped_page(struct igb_ring *rx_ring,
return true;
}

static inline unsigned int igb_rx_offset(struct igb_ring *rx_ring)
{
if (ring_uses_build_skb(rx_ring))
return NET_SKB_PAD + NET_IP_ALIGN;
else
return 0;
}

/**
* igb_alloc_rx_buffers - Replace used receive buffers; packet split
* @adapter: address of board private structure
@@ -6814,9 +6714,7 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
* Refresh the desc even if buffer_addrs didn't change
* because each write-back erases this info.
*/
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma +
bi->page_offset +
igb_rx_offset(rx_ring));
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset);

rx_desc++;
bi++;
6 changes: 6 additions & 0 deletions drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -1049,6 +1049,12 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
return -EINVAL;
if (vlan || qos) {
if (adapter->vfinfo[vf].pf_vlan)
err = ixgbe_set_vf_vlan(adapter, false,
adapter->vfinfo[vf].pf_vlan,
vf);
if (err)
goto out;
err = ixgbe_set_vf_vlan(adapter, true, vlan, vf);
if (err)
goto out;