Merge ra.kernel.org:/pub/scm/linux/kernel/git/davem/net
davem330 committed Aug 12, 2018
2 parents 9a95d9c + ec0c967 commit 6a92ef0
Showing 19 changed files with 92 additions and 71 deletions.
2 changes: 1 addition & 1 deletion MAINTAINERS
@@ -5930,7 +5930,7 @@ F: Documentation/dev-tools/gcov.rst

GDB KERNEL DEBUGGING HELPER SCRIPTS
M: Jan Kiszka <[email protected]>
M: Kieran Bingham <[email protected]>
M: Kieran Bingham <[email protected]>
S: Supported
F: scripts/gdb/

1 change: 0 additions & 1 deletion arch/parisc/Kconfig
@@ -11,7 +11,6 @@ config PARISC
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_STRICT_KERNEL_RWX
select ARCH_HAS_UBSAN_SANITIZE_ALL
select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_SUPPORTS_MEMORY_FAILURE
select RTC_CLASS
select RTC_DRV_GENERIC
1 change: 0 additions & 1 deletion arch/s390/Kconfig
@@ -106,7 +106,6 @@ config S390
select ARCH_USE_BUILTIN_BSWAP
select ARCH_USE_CMPXCHG_LOCKREF
select ARCH_WANTS_DYNAMIC_TASK_STRUCT
select ARCH_WANTS_UBSAN_NO_NULL
select ARCH_WANT_IPC_PARSE_VERSION
select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS2
15 changes: 14 additions & 1 deletion drivers/block/zram/zram_drv.c
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
zram->backing_dev = NULL;
zram->old_block_size = 0;
zram->bdev = NULL;

zram->disk->queue->backing_dev_info->capabilities |=
BDI_CAP_SYNCHRONOUS_IO;
kvfree(zram->bitmap);
zram->bitmap = NULL;
}
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
zram->backing_dev = backing_dev;
zram->bitmap = bitmap;
zram->nr_pages = nr_pages;
/*
 * With the writeback feature, zram does asynchronous IO, so it is no
 * longer a synchronous device; remove the synchronous io flag.
 * Otherwise, the upper layer (e.g., swap) could wait for IO completion
 * rather than submit-and-return, making the system sluggish.
 * Furthermore, when the IO function returns (e.g., swap_readpage), the
 * upper layer assumes the IO is done and may free the page while the
 * IO is still in flight, eventually causing a use-after-free when the
 * IO really completes.
 */
zram->disk->queue->backing_dev_info->capabilities &=
~BDI_CAP_SYNCHRONOUS_IO;
up_write(&zram->init_lock);

pr_info("setup backing device %s\n", file_name);
41 changes: 28 additions & 13 deletions drivers/i2c/busses/i2c-xlp9xx.c
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
if (priv->len_recv) {
/* read length byte */
rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);

/*
 * We expect at least 2 interrupts for I2C_M_RECV_LEN
 * transactions. The length is updated during the first
 * interrupt, and the buffer contents are only copied
 * during subsequent interrupts. If the interrupts get
 * merged, we would complete the transaction without
 * copying out the bytes from the RX fifo. To avoid this,
 * we now drain the fifo whenever data is available.
 * Since the rlen byte has already been drained, decrement
 * the total length by one.
 */

len--;
if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
rlen = 0; /* abort transfer */
priv->msg_buf_remaining = 0;
priv->msg_len = 0;
} else {
*buf++ = rlen;
if (priv->client_pec)
++rlen; /* account for error check byte */
/* update remaining bytes and message length */
priv->msg_buf_remaining = rlen;
priv->msg_len = rlen + 1;
xlp9xx_i2c_update_rlen(priv);
return;
}

*buf++ = rlen;
if (priv->client_pec)
++rlen; /* account for error check byte */
/* update remaining bytes and message length */
priv->msg_buf_remaining = rlen;
priv->msg_len = rlen + 1;
xlp9xx_i2c_update_rlen(priv);
priv->len_recv = false;
} else {
len = min(priv->msg_buf_remaining, len);
for (i = 0; i < len; i++, buf++)
*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);

priv->msg_buf_remaining -= len;
}

len = min(priv->msg_buf_remaining, len);
for (i = 0; i < len; i++, buf++)
*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);

priv->msg_buf_remaining -= len;
priv->msg_buf = buf;

if (priv->msg_buf_remaining)
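For reference, a userspace sketch of the block-length validation performed above; I2C_SMBUS_BLOCK_MAX mirrors the kernel's uapi value of 32.

#include <stdbool.h>
#include <stdio.h>

#define I2C_SMBUS_BLOCK_MAX 32

/* A zero or oversized length byte means the transfer must be aborted */
static bool rlen_valid(unsigned int rlen)
{
	return rlen != 0 && rlen <= I2C_SMBUS_BLOCK_MAX;
}

int main(void)
{
	printf("%d %d %d\n", rlen_valid(0), rlen_valid(16), rlen_valid(33));
	return 0;
}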
25 changes: 11 additions & 14 deletions drivers/net/ethernet/ti/cpsw.c
@@ -2358,14 +2358,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
int i;

for (i = 0; i < cpsw->data.slaves; i++) {
if (vid == cpsw->slaves[i].port_vlan)
return -EINVAL;
if (vid == cpsw->slaves[i].port_vlan) {
ret = -EINVAL;
goto err;
}
}
}

dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
ret = cpsw_add_vlan_ale_entry(priv, vid);

err:
pm_runtime_put(cpsw->dev);
return ret;
}
@@ -2391,22 +2393,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,

for (i = 0; i < cpsw->data.slaves; i++) {
if (vid == cpsw->slaves[i].port_vlan)
return -EINVAL;
goto err;
}
}

dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
if (ret != 0)
return ret;

ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
if (ret != 0)
return ret;

ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
HOST_PORT_NUM, ALE_VLAN, vid);
ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
0, ALE_VLAN, vid);
err:
pm_runtime_put(cpsw->dev);
return ret;
}
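The change above replaces early returns with a goto so that pm_runtime_put() runs on every exit path, and ORs the ALE deletion results together rather than returning on the first failure. A compilable userspace sketch of that single-exit pattern, with stub helpers standing in for the cpsw calls:

#include <errno.h>
#include <stdio.h>

struct priv { int refs; };

static int acquire_resource(struct priv *p) { p->refs++; return 0; }	/* like pm_runtime_get_sync() */
static void release_resource(struct priv *p) { p->refs--; }		/* like pm_runtime_put() */
static int vid_is_reserved(struct priv *p, int vid) { (void)p; return vid == 1; }
static int do_delete(struct priv *p, int vid) { (void)p; (void)vid; return 0; }

static int example_del_vid(struct priv *priv, int vid)
{
	int ret = acquire_resource(priv);

	if (ret < 0)
		return ret;

	if (vid_is_reserved(priv, vid)) {
		ret = -EINVAL;
		goto err;	/* an early return here would leak the reference */
	}

	ret = do_delete(priv, vid);
err:
	release_resource(priv);
	return ret;
}

int main(void)
{
	struct priv p = { 0 };

	printf("del vid 1 -> %d, refs now %d\n", example_del_vid(&p, 1), p.refs);
	return 0;
}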
2 changes: 1 addition & 1 deletion drivers/net/ethernet/ti/cpsw_ale.c
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,

idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
if (idx < 0)
return -EINVAL;
return -ENOENT;

cpsw_ale_read(ale, idx, ale_entry);

8 changes: 4 additions & 4 deletions drivers/net/xen-netfront.c
@@ -895,7 +895,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
struct sk_buff *skb,
struct sk_buff_head *list)
{
struct skb_shared_info *shinfo = skb_shinfo(skb);
RING_IDX cons = queue->rx.rsp_cons;
struct sk_buff *nskb;

@@ -904,15 +903,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
RING_GET_RESPONSE(&queue->rx, ++cons);
skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];

if (shinfo->nr_frags == MAX_SKB_FRAGS) {
if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;

BUG_ON(pull_to <= skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(nfrag),
rx->offset, rx->status, PAGE_SIZE);

skb_shinfo(nskb)->nr_frags = 0;
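The rewrite above stops caching skb_shinfo(skb): __pskb_pull_tail() can reallocate skb->head, which skb_shinfo() is computed from, leaving a cached pointer stale. A userspace sketch of the hazard, with realloc() standing in for the head reallocation (allocation failure handling elided):

#include <stdlib.h>

struct buf { char *head; };

int main(void)
{
	struct buf b = { malloc(64) };
	char *cached = b.head + 32;	/* like caching skb_shinfo(skb) */

	b.head = realloc(b.head, 4096);	/* like __pskb_pull_tail() */
	cached = b.head + 32;		/* must recompute; the old pointer may dangle */

	(void)cached;
	free(b.head);
	return 0;
}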
15 changes: 9 additions & 6 deletions kernel/bpf/cpumap.c
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
};

static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
struct xdp_bulk_queue *bq);
struct xdp_bulk_queue *bq, bool in_napi_ctx);

static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
{
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);

/* No concurrent bq_enqueue can run at this point */
bq_flush_to_queue(rcpu, bq);
bq_flush_to_queue(rcpu, bq, false);
}
free_percpu(rcpu->bulkq);
/* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
};

static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
struct xdp_bulk_queue *bq)
struct xdp_bulk_queue *bq, bool in_napi_ctx)
{
unsigned int processed = 0, drops = 0;
const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
err = __ptr_ring_produce(q, xdpf);
if (err) {
drops++;
xdp_return_frame_rx_napi(xdpf);
if (likely(in_napi_ctx))
xdp_return_frame_rx_napi(xdpf);
else
xdp_return_frame(xdpf);
}
processed++;
}
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
bq_flush_to_queue(rcpu, bq);
bq_flush_to_queue(rcpu, bq, true);

/* Notice, xdp_buff/page MUST be queued here, long enough for
* the driver code invoking us to finish, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)

/* Flush all frames in bulkq to real queue */
bq = this_cpu_ptr(rcpu->bulkq);
bq_flush_to_queue(rcpu, bq);
bq_flush_to_queue(rcpu, bq, true);

/* If already running, costs spin_lock_irqsave + smp_mb */
wake_up_process(rcpu->kthread);
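The new in_napi_ctx parameter lets callers running under NAPI keep the faster per-CPU recycle path, while teardown paths such as __cpu_map_entry_free() fall back to the generic free. A userspace sketch of the pattern, with stubs standing in for xdp_return_frame_rx_napi() and xdp_return_frame():

#include <stdbool.h>
#include <stdio.h>

static void return_frame_rx_napi(void) { puts("fast per-CPU recycle"); }
static void return_frame(void) { puts("generic free"); }

static void drop_frame(bool in_napi_ctx)
{
	if (in_napi_ctx)
		return_frame_rx_napi();	/* safe only in softirq/NAPI context */
	else
		return_frame();		/* teardown runs outside NAPI */
}

int main(void)
{
	drop_frame(true);	/* bq_enqueue() / __cpu_map_flush() */
	drop_frame(false);	/* __cpu_map_entry_free() */
	return 0;
}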
14 changes: 9 additions & 5 deletions kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
}

static int bq_xmit_all(struct bpf_dtab_netdev *obj,
struct xdp_bulk_queue *bq, u32 flags)
struct xdp_bulk_queue *bq, u32 flags,
bool in_napi_ctx)
{
struct net_device *dev = obj->dev;
int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
struct xdp_frame *xdpf = bq->q[i];

/* RX path under NAPI protection, can return frames faster */
xdp_return_frame_rx_napi(xdpf);
if (likely(in_napi_ctx))
xdp_return_frame_rx_napi(xdpf);
else
xdp_return_frame(xdpf);
drops++;
}
goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
__clear_bit(bit, bitmap);

bq = this_cpu_ptr(dev->bulkq);
bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
}
}

@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);

if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
bq_xmit_all(obj, bq, 0);
bq_xmit_all(obj, bq, 0, true);

/* Ingress dev_rx will be the same for all xdp_frames in
* bulk_queue, because bq is stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
__clear_bit(dev->bit, bitmap);

bq = per_cpu_ptr(dev->bulkq, cpu);
bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
}
}
}
9 changes: 6 additions & 3 deletions kernel/bpf/sockmap.c
@@ -1045,12 +1045,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

while (msg_data_left(msg)) {
struct sk_msg_buff *m;
struct sk_msg_buff *m = NULL;
bool enospc = false;
int copy;

if (sk->sk_err) {
err = sk->sk_err;
err = -sk->sk_err;
goto out_err;
}

@@ -1113,8 +1113,11 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
err = sk_stream_wait_memory(sk, &timeo);
if (err)
if (err) {
if (m && m != psock->cork)
free_start_sg(sk, m);
goto out_err;
}
}
out_err:
if (err < 0)
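Two fixes appear above: sk->sk_err stores a positive errno value while the function must report a negative one, and a partially built sk_msg_buff is now freed when the memory wait fails. A minimal sketch of the sign convention:

#include <errno.h>
#include <stdio.h>

int main(void)
{
	int sk_err = EPIPE;	/* stored positive, like sk->sk_err */
	int err = -sk_err;	/* reported negative, like -EPIPE */

	printf("sk_err=%d err=%d\n", sk_err, err);
	return 0;
}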
11 changes: 0 additions & 11 deletions lib/Kconfig.ubsan
@@ -1,9 +1,6 @@
config ARCH_HAS_UBSAN_SANITIZE_ALL
bool

config ARCH_WANTS_UBSAN_NO_NULL
def_bool n

config UBSAN
bool "Undefined behaviour sanity checker"
help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.

config UBSAN_NULL
bool "Enable checking of null pointers"
depends on UBSAN
default y if !ARCH_WANTS_UBSAN_NO_NULL
help
This option enables detection of memory accesses via a
null pointer.

config TEST_UBSAN
tristate "Module for testing for undefined behavior detection"
depends on m && UBSAN
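The removed UBSAN_NULL option controlled -fsanitize=null instrumentation. For context, a minimal program that the check flags at runtime when built with gcc -fsanitize=null; the dereference is deliberately undefined behaviour:

#include <stdio.h>

int main(void)
{
	int *p = NULL;

	/* UBSAN reports: load of null pointer of type 'int' */
	printf("%d\n", *p);
	return 0;
}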
3 changes: 3 additions & 0 deletions mm/memory.c
@@ -4395,6 +4395,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
return -EINVAL;

maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
if (!maddr)
return -ENOMEM;

if (write)
memcpy_toio(maddr + offset, buf, len);
else
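The fix above checks the ioremap_prot() return value before copying. A userspace analogue of the same rule, with mmap() standing in for ioremap_prot():

#include <string.h>
#include <sys/mman.h>

int main(void)
{
	void *maddr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (maddr == MAP_FAILED)	/* like: if (!maddr) return -ENOMEM; */
		return 1;

	memset(maddr, 0, 4096);	/* only touch the mapping once it is known valid */
	munmap(maddr, 4096);
	return 0;
}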
2 changes: 1 addition & 1 deletion samples/bpf/xdp_redirect_cpu_kern.c
@@ -14,7 +14,7 @@
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

#define MAX_CPUS 12 /* WARNING - sync with _user.c */
#define MAX_CPUS 64 /* WARNING - sync with _user.c */

/* Special map type that can XDP_REDIRECT frames to another CPU */
struct bpf_map_def SEC("maps") cpu_map = {
4 changes: 2 additions & 2 deletions samples/bpf/xdp_redirect_cpu_user.c
@@ -19,7 +19,7 @@ static const char *__doc__ =
#include <arpa/inet.h>
#include <linux/if_link.h>

#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
#define MAX_CPUS 64 /* WARNING - sync with _kern.c */

/* How many xdp_progs are defined in _kern.c */
#define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
* procedure.
*/
create_cpu_entry(1, 1024, 0, false);
create_cpu_entry(1, 128, 0, false);
create_cpu_entry(1, 8, 0, false);
create_cpu_entry(1, 16000, 0, false);
}
