diff --git a/MAINTAINERS b/MAINTAINERS
index 9777a1dd6ff647..4c05e18ced505d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5930,7 +5930,7 @@ F:	Documentation/dev-tools/gcov.rst
 
 GDB KERNEL DEBUGGING HELPER SCRIPTS
 M:	Jan Kiszka
-M:	Kieran Bingham
+M:	Kieran Bingham
 S:	Supported
 F:	scripts/gdb/
 
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 46f656b8fc233a..e7705dde953fc7 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -11,7 +11,6 @@ config PARISC
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_UBSAN_SANITIZE_ALL
-	select ARCH_WANTS_UBSAN_NO_NULL
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select RTC_CLASS
 	select RTC_DRV_GENERIC
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8a1863d9ed5338..4fe5b2affa2330 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -106,7 +106,6 @@ config S390
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANTS_DYNAMIC_TASK_STRUCT
-	select ARCH_WANTS_UBSAN_NO_NULL
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT
 	select CLONE_BACKWARDS2
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 7436b2d27fa385..a390c6d4f72df9 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -298,7 +298,8 @@ static void reset_bdev(struct zram *zram)
 	zram->backing_dev = NULL;
 	zram->old_block_size = 0;
 	zram->bdev = NULL;
-
+	zram->disk->queue->backing_dev_info->capabilities |=
+		BDI_CAP_SYNCHRONOUS_IO;
 	kvfree(zram->bitmap);
 	zram->bitmap = NULL;
 }
@@ -400,6 +401,18 @@ static ssize_t backing_dev_store(struct device *dev,
 	zram->backing_dev = backing_dev;
 	zram->bitmap = bitmap;
 	zram->nr_pages = nr_pages;
+	/*
+	 * With the writeback feature, zram does asynchronous IO, so it is
+	 * no longer a synchronous device; remove the synchronous IO flag.
+	 * Otherwise, an upper layer (e.g., swap) could wait for IO
+	 * completion instead of submit-and-return, making the system
+	 * sluggish. Furthermore, when the IO function (e.g., swap_readpage)
+	 * returns, the upper layer assumes the IO is done and may free
+	 * the page while it is still in flight, causing a use-after-free
+	 * when the IO really completes.
+	 */
+	zram->disk->queue->backing_dev_info->capabilities &=
+		~BDI_CAP_SYNCHRONOUS_IO;
 	up_write(&zram->init_lock);
 
 	pr_info("setup backing device %s\n", file_name);
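A note on the zram hunks above: BDI_CAP_SYNCHRONOUS_IO is a promise to callers such as swap_readpage() that the IO is complete by the time the submission call returns. The standalone C sketch below (plain userspace code with stub helpers; the flag value and the submit_* helper names are illustrative, not the real block-layer API) shows the contract the patch keeps consistent:

/*
 * Sketch of the invariant behind the zram BDI_CAP_SYNCHRONOUS_IO change:
 * a caller may reuse or free the page as soon as a "synchronous"
 * submission returns, so the flag must only be set while the device
 * really completes IO inline.
 */
#include <stdbool.h>
#include <stdio.h>

#define BDI_CAP_SYNCHRONOUS_IO (1u << 0)	/* value is illustrative */

struct fake_bdi { unsigned int capabilities; };

/* Hypothetical submission helpers standing in for the block layer. */
static int submit_and_complete_inline(void) { return 0; }
static int submit_and_complete_later(void)  { return 0; }

static int read_page(const struct fake_bdi *bdi)
{
	if (bdi->capabilities & BDI_CAP_SYNCHRONOUS_IO) {
		/* Caller may free the page right after this returns. */
		return submit_and_complete_inline();
	}
	/* Caller must wait for a completion callback before freeing. */
	return submit_and_complete_later();
}

int main(void)
{
	struct fake_bdi bdi = { .capabilities = BDI_CAP_SYNCHRONOUS_IO };

	/* Enabling writeback makes zram asynchronous: clear the flag so
	 * callers stop assuming inline completion (otherwise the
	 * use-after-free described in the patch comment can occur). */
	bdi.capabilities &= ~BDI_CAP_SYNCHRONOUS_IO;

	printf("synchronous: %d\n",
	       !!(bdi.capabilities & BDI_CAP_SYNCHRONOUS_IO));
	return read_page(&bdi);
}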
diff --git a/drivers/i2c/busses/i2c-xlp9xx.c b/drivers/i2c/busses/i2c-xlp9xx.c
index 1f41a4f89c08f4..8a873975cf1252 100644
--- a/drivers/i2c/busses/i2c-xlp9xx.c
+++ b/drivers/i2c/busses/i2c-xlp9xx.c
@@ -191,28 +191,43 @@ static void xlp9xx_i2c_drain_rx_fifo(struct xlp9xx_i2c_dev *priv)
 	if (priv->len_recv) {
 		/* read length byte */
 		rlen = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+		/*
+		 * We expect at least 2 interrupts for I2C_M_RECV_LEN
+		 * transactions. The length is updated during the first
+		 * interrupt, and the buffer contents are only copied
+		 * during subsequent interrupts. If the interrupts get
+		 * merged, we would complete the transaction without
+		 * copying out the bytes from the RX fifo. To avoid this,
+		 * drain the fifo as and when data is available.
+		 * We have already drained the rlen byte, so decrement
+		 * the total length by one.
+		 */
+
+		len--;
 		if (rlen > I2C_SMBUS_BLOCK_MAX || rlen == 0) {
 			rlen = 0;	/*abort transfer */
 			priv->msg_buf_remaining = 0;
 			priv->msg_len = 0;
-		} else {
-			*buf++ = rlen;
-			if (priv->client_pec)
-				++rlen; /* account for error check byte */
-			/* update remaining bytes and message length */
-			priv->msg_buf_remaining = rlen;
-			priv->msg_len = rlen + 1;
+			xlp9xx_i2c_update_rlen(priv);
+			return;
 		}
+
+		*buf++ = rlen;
+		if (priv->client_pec)
+			++rlen; /* account for error check byte */
+		/* update remaining bytes and message length */
+		priv->msg_buf_remaining = rlen;
+		priv->msg_len = rlen + 1;
 		xlp9xx_i2c_update_rlen(priv);
 		priv->len_recv = false;
-	} else {
-		len = min(priv->msg_buf_remaining, len);
-		for (i = 0; i < len; i++, buf++)
-			*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
-
-		priv->msg_buf_remaining -= len;
 	}
+
+	len = min(priv->msg_buf_remaining, len);
+	for (i = 0; i < len; i++, buf++)
+		*buf = xlp9xx_read_i2c_reg(priv, XLP9XX_I2C_MRXFIFO);
+
+	priv->msg_buf_remaining -= len;
 	priv->msg_buf = buf;
 
 	if (priv->msg_buf_remaining)
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f051ce35a440aa..832bce07c38519 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2358,14 +2358,16 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
 		int i;
 
 		for (i = 0; i < cpsw->data.slaves; i++) {
-			if (vid == cpsw->slaves[i].port_vlan)
-				return -EINVAL;
+			if (vid == cpsw->slaves[i].port_vlan) {
+				ret = -EINVAL;
+				goto err;
+			}
 		}
 	}
 
 	dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
 	ret = cpsw_add_vlan_ale_entry(priv, vid);
-
+err:
 	pm_runtime_put(cpsw->dev);
 	return ret;
 }
@@ -2391,22 +2393,17 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
 
 		for (i = 0; i < cpsw->data.slaves; i++) {
 			if (vid == cpsw->slaves[i].port_vlan)
-				return -EINVAL;
+				goto err;
 		}
 	}
 
 	dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
 	ret = cpsw_ale_del_vlan(cpsw->ale, vid, 0);
-	if (ret != 0)
-		return ret;
-
-	ret = cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
-				 HOST_PORT_NUM, ALE_VLAN, vid);
-	if (ret != 0)
-		return ret;
-
-	ret = cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
-				 0, ALE_VLAN, vid);
+	ret |= cpsw_ale_del_ucast(cpsw->ale, priv->mac_addr,
+				  HOST_PORT_NUM, ALE_VLAN, vid);
+	ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast,
+				  0, ALE_VLAN, vid);
+err:
 	pm_runtime_put(cpsw->dev);
 	return ret;
 }
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 93dc05c194d381..5766225a4ce117 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -394,7 +394,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
 
 	idx = cpsw_ale_match_addr(ale, addr, (flags & ALE_VLAN) ? vid : 0);
 	if (idx < 0)
-		return -EINVAL;
+		return -ENOENT;
 
 	cpsw_ale_read(ale, idx, ale_entry);
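The cpsw hunks above replace early returns with a single error label so that the runtime-PM reference taken at function entry is always dropped. A standalone C sketch of that pattern (stub pm_get()/pm_put() counters, not the driver's real API):

/*
 * Single-exit error path: every failure branches to one label so the
 * reference taken at entry is always released.
 */
#include <errno.h>
#include <stdio.h>

static int refs;

static int pm_get(void)  { refs++; return 0; }
static void pm_put(void) { refs--; }

static int add_vid(int vid, int reserved_vid)
{
	int ret = 0;

	if (pm_get())
		return -EIO;

	if (vid == reserved_vid) {
		ret = -EINVAL;
		goto err;	/* was: return -EINVAL, leaking a ref */
	}

	/* ... program the VLAN entry here ... */
err:
	pm_put();		/* single exit keeps get/put balanced */
	return ret;
}

int main(void)
{
	add_vid(100, 100);
	printf("outstanding refs: %d\n", refs);	/* prints 0 */
	return 0;
}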
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 799cba4624a5e0..2ff874b6ec89ab 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -895,7 +895,6 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 				  struct sk_buff *skb,
 				  struct sk_buff_head *list)
 {
-	struct skb_shared_info *shinfo = skb_shinfo(skb);
 	RING_IDX cons = queue->rx.rsp_cons;
 	struct sk_buff *nskb;
 
@@ -904,15 +903,16 @@ static RING_IDX xennet_fill_frags(struct netfront_queue *queue,
 			RING_GET_RESPONSE(&queue->rx, ++cons);
 		skb_frag_t *nfrag = &skb_shinfo(nskb)->frags[0];
 
-		if (shinfo->nr_frags == MAX_SKB_FRAGS) {
+		if (skb_shinfo(skb)->nr_frags == MAX_SKB_FRAGS) {
 			unsigned int pull_to = NETFRONT_SKB_CB(skb)->pull_to;
 
 			BUG_ON(pull_to <= skb_headlen(skb));
 			__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
 		}
-		BUG_ON(shinfo->nr_frags >= MAX_SKB_FRAGS);
+		BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
 
-		skb_add_rx_frag(skb, shinfo->nr_frags, skb_frag_page(nfrag),
+		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+				skb_frag_page(nfrag),
 				rx->offset, rx->status, PAGE_SIZE);
 
 		skb_shinfo(nskb)->nr_frags = 0;
diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c
index e0918d180f08e6..46f5f29605d474 100644
--- a/kernel/bpf/cpumap.c
+++ b/kernel/bpf/cpumap.c
@@ -69,7 +69,7 @@ struct bpf_cpu_map {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-			     struct xdp_bulk_queue *bq);
+			     struct xdp_bulk_queue *bq, bool in_napi_ctx);
 
 static u64 cpu_map_bitmap_size(const union bpf_attr *attr)
 {
@@ -375,7 +375,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
 		struct xdp_bulk_queue *bq = per_cpu_ptr(rcpu->bulkq, cpu);
 
 		/* No concurrent bq_enqueue can run at this point */
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, false);
 	}
 	free_percpu(rcpu->bulkq);
 	/* Cannot kthread_stop() here, last put free rcpu resources */
@@ -558,7 +558,7 @@ const struct bpf_map_ops cpu_map_ops = {
 };
 
 static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
-			     struct xdp_bulk_queue *bq)
+			     struct xdp_bulk_queue *bq, bool in_napi_ctx)
 {
 	unsigned int processed = 0, drops = 0;
 	const int to_cpu = rcpu->cpu;
@@ -578,7 +578,10 @@ static int bq_flush_to_queue(struct bpf_cpu_map_entry *rcpu,
 		err = __ptr_ring_produce(q, xdpf);
 		if (err) {
 			drops++;
-			xdp_return_frame_rx_napi(xdpf);
+			if (likely(in_napi_ctx))
+				xdp_return_frame_rx_napi(xdpf);
+			else
+				xdp_return_frame(xdpf);
 		}
 		processed++;
 	}
@@ -598,7 +601,7 @@ static int bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
 	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);
 
 	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 	/* Notice, xdp_buff/page MUST be queued here, long enough for
 	 * driver to code invoking us to finished, due to driver
@@ -661,7 +664,7 @@ void __cpu_map_flush(struct bpf_map *map)
 
 		/* Flush all frames in bulkq to real queue */
 		bq = this_cpu_ptr(rcpu->bulkq);
-		bq_flush_to_queue(rcpu, bq);
+		bq_flush_to_queue(rcpu, bq, true);
 
 		/* If already running, costs spin_lock_irqsave + smb_mb */
 		wake_up_process(rcpu->kthread);
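The cpumap change (and the devmap change that follows) plumbs a context flag down to the flush routine: frames dropped while flushing may only be freed with the fast xdp_return_frame_rx_napi() variant when the call actually happens under NAPI; the teardown path must use the plain variant. A minimal standalone sketch of that plumbing, with stub functions standing in for xdp_return_frame{,_rx_napi}():

/*
 * Context-aware frame return: the caller tells the flush path whether
 * it runs in NAPI softirq context, and the slow (teardown) path picks
 * the safe free variant.
 */
#include <stdbool.h>
#include <stdio.h>

static void return_frame(void)         { puts("plain free"); }
static void return_frame_rx_napi(void) { puts("fast NAPI free"); }

static void flush_queue(bool in_napi_ctx)
{
	/* On ring-full drops, use the free variant matching the calling
	 * context; the NAPI variant is unsafe from process context. */
	if (in_napi_ctx)
		return_frame_rx_napi();
	else
		return_frame();
}

int main(void)
{
	flush_queue(true);	/* from the XDP receive path */
	flush_queue(false);	/* from __cpu_map_entry_free()-style teardown */
	return 0;
}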
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index d361fc1e3bf35f..750d45edae7989 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -217,7 +217,8 @@ void __dev_map_insert_ctx(struct bpf_map *map, u32 bit)
 }
 
 static int bq_xmit_all(struct bpf_dtab_netdev *obj,
-		       struct xdp_bulk_queue *bq, u32 flags)
+		       struct xdp_bulk_queue *bq, u32 flags,
+		       bool in_napi_ctx)
 {
 	struct net_device *dev = obj->dev;
 	int sent = 0, drops = 0, err = 0;
@@ -254,7 +255,10 @@ static int bq_xmit_all(struct bpf_dtab_netdev *obj,
 		struct xdp_frame *xdpf = bq->q[i];
 
 		/* RX path under NAPI protection, can return frames faster */
-		xdp_return_frame_rx_napi(xdpf);
+		if (likely(in_napi_ctx))
+			xdp_return_frame_rx_napi(xdpf);
+		else
+			xdp_return_frame(xdpf);
 		drops++;
 	}
 	goto out;
@@ -286,7 +290,7 @@ void __dev_map_flush(struct bpf_map *map)
 		__clear_bit(bit, bitmap);
 
 		bq = this_cpu_ptr(dev->bulkq);
-		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+		bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, true);
 	}
 }
 
@@ -316,7 +320,7 @@ static int bq_enqueue(struct bpf_dtab_netdev *obj, struct xdp_frame *xdpf,
 	struct xdp_bulk_queue *bq = this_cpu_ptr(obj->bulkq);
 
 	if (unlikely(bq->count == DEV_MAP_BULK_SIZE))
-		bq_xmit_all(obj, bq, 0);
+		bq_xmit_all(obj, bq, 0, true);
 
 	/* Ingress dev_rx will be the same for all xdp_frame's in
 	 * bulk_queue, because bq stored per-CPU and must be flushed
@@ -385,7 +389,7 @@ static void dev_map_flush_old(struct bpf_dtab_netdev *dev)
 			__clear_bit(dev->bit, bitmap);
 
 			bq = per_cpu_ptr(dev->bulkq, cpu);
-			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH);
+			bq_xmit_all(dev, bq, XDP_XMIT_FLUSH, false);
 		}
 	}
 }
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 0b38be5a955c25..70c0755e8fc4e1 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -1045,12 +1045,12 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
 
 	while (msg_data_left(msg)) {
-		struct sk_msg_buff *m;
+		struct sk_msg_buff *m = NULL;
 		bool enospc = false;
 		int copy;
 
 		if (sk->sk_err) {
-			err = sk->sk_err;
+			err = -sk->sk_err;
 			goto out_err;
 		}
 
@@ -1113,8 +1113,11 @@ static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
 		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
 wait_for_memory:
 		err = sk_stream_wait_memory(sk, &timeo);
-		if (err)
+		if (err) {
+			if (m && m != psock->cork)
+				free_start_sg(sk, m);
 			goto out_err;
+		}
 	}
 out_err:
 	if (err < 0)
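The sockmap hunks fix two distinct bugs: sk->sk_err stores a positive errno, so the sendmsg path must negate it before returning, and a message buffer built during the current loop iteration must be freed when the memory wait fails (unless it is the cached cork buffer). A standalone C sketch of both fixes, with an illustrative msg_buf type and free_msg() helper rather than the kernel's sk_msg_buff API:

/*
 * Two fixes in one path: negate the positive errno from the socket
 * error field, and free a partially-built message on wait failure.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct msg_buf { char *data; };

static void free_msg(struct msg_buf *m) { free(m->data); free(m); }

static int sendmsg_step(int sk_err, int wait_err, struct msg_buf *cork)
{
	struct msg_buf *m = NULL;	/* initialized, as in the patch */
	int err = 0;

	if (sk_err) {
		err = -sk_err;		/* was: err = sk_err (positive) */
		goto out_err;
	}

	m = calloc(1, sizeof(*m));
	if (!m)
		return -ENOMEM;

	err = wait_err;			/* stands in for sk_stream_wait_memory() */
	if (err) {
		if (m != cork)		/* don't free a cached cork buffer */
			free_msg(m);
		goto out_err;
	}
	free_msg(m);
out_err:
	return err;
}

int main(void)
{
	printf("%d\n", sendmsg_step(EPIPE, 0, NULL));	/* -32, negated */
	printf("%d\n", sendmsg_step(0, -EAGAIN, NULL));	/* -11, no leak */
	return 0;
}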
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 19d42ea75ec225..98fa559ebd8086 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -1,9 +1,6 @@
 config ARCH_HAS_UBSAN_SANITIZE_ALL
 	bool
 
-config ARCH_WANTS_UBSAN_NO_NULL
-	def_bool n
-
 config UBSAN
 	bool "Undefined behaviour sanity checker"
 	help
@@ -39,14 +36,6 @@ config UBSAN_ALIGNMENT
 	  Enabling this option on architectures that support unaligned
 	  accesses may produce a lot of false positives.
 
-config UBSAN_NULL
-	bool "Enable checking of null pointers"
-	depends on UBSAN
-	default y if !ARCH_WANTS_UBSAN_NO_NULL
-	help
-	  This option enables detection of memory accesses via a
-	  null pointer.
-
 config TEST_UBSAN
 	tristate "Module for testing for undefined behavior detection"
 	depends on m && UBSAN
diff --git a/mm/memory.c b/mm/memory.c
index dab1511294add1..c5e87a3a82ba34 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4395,6 +4395,9 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
 		return -EINVAL;
 
 	maddr = ioremap_prot(phys_addr, PAGE_ALIGN(len + offset), prot);
+	if (!maddr)
+		return -ENOMEM;
+
 	if (write)
 		memcpy_toio(maddr + offset, buf, len);
 	else
diff --git a/samples/bpf/xdp_redirect_cpu_kern.c b/samples/bpf/xdp_redirect_cpu_kern.c
index 8cb703671b0468..0cc3d71057f03d 100644
--- a/samples/bpf/xdp_redirect_cpu_kern.c
+++ b/samples/bpf/xdp_redirect_cpu_kern.c
@@ -14,7 +14,7 @@
 #include
 #include "bpf_helpers.h"
 
-#define MAX_CPUS 12 /* WARNING - sync with _user.c */
+#define MAX_CPUS 64 /* WARNING - sync with _user.c */
 
 /* Special map type that can XDP_REDIRECT frames to another CPU */
 struct bpf_map_def SEC("maps") cpu_map = {
diff --git a/samples/bpf/xdp_redirect_cpu_user.c b/samples/bpf/xdp_redirect_cpu_user.c
index f6efaefd485b15..4b4d78fffe3042 100644
--- a/samples/bpf/xdp_redirect_cpu_user.c
+++ b/samples/bpf/xdp_redirect_cpu_user.c
@@ -19,7 +19,7 @@ static const char *__doc__ =
 #include
 #include
 
-#define MAX_CPUS 12 /* WARNING - sync with _kern.c */
+#define MAX_CPUS 64 /* WARNING - sync with _kern.c */
 
 /* How many xdp_progs are defined in _kern.c */
 #define MAX_PROG 5
@@ -527,7 +527,7 @@ static void stress_cpumap(void)
 	 * procedure.
 	 */
 	create_cpu_entry(1, 1024, 0, false);
-	create_cpu_entry(1, 128, 0, false);
+	create_cpu_entry(1, 8, 0, false);
 	create_cpu_entry(1, 16000, 0, false);
 }
diff --git a/scripts/Makefile.ubsan b/scripts/Makefile.ubsan
index b593b36ccff869..38b2b4818e8ebf 100644
--- a/scripts/Makefile.ubsan
+++ b/scripts/Makefile.ubsan
@@ -14,10 +14,6 @@ ifdef CONFIG_UBSAN_ALIGNMENT
       CFLAGS_UBSAN += $(call cc-option, -fsanitize=alignment)
 endif
 
-ifdef CONFIG_UBSAN_NULL
-      CFLAGS_UBSAN += $(call cc-option, -fsanitize=null)
-endif
-
 # -fsanitize=* options makes GCC less smart than usual and
 # increase number of 'maybe-uninitialized false-positives
 CFLAGS_UBSAN += $(call cc-option, -Wno-maybe-uninitialized)
diff --git a/tools/lib/bpf/btf.c b/tools/lib/bpf/btf.c
index 09ecf8162f7a8a..cf94b0770522ff 100644
--- a/tools/lib/bpf/btf.c
+++ b/tools/lib/bpf/btf.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: LGPL-2.1
 /* Copyright (c) 2018 Facebook */
 
 #include
diff --git a/tools/lib/bpf/btf.h b/tools/lib/bpf/btf.h
index 43c658ccfc2ba4..4897e0724d4ec6 100644
--- a/tools/lib/bpf/btf.h
+++ b/tools/lib/bpf/btf.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+/* SPDX-License-Identifier: LGPL-2.1 */
 /* Copyright (c) 2018 Facebook */
 
 #ifndef __BPF_BTF_H
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 9e78df20791936..0c7d9e556b47d0 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -354,7 +354,7 @@ static int msg_loop(int fd, int iov_count, int iov_length, int cnt,
 	while (s->bytes_recvd < total_bytes) {
 		if (txmsg_cork) {
 			timeout.tv_sec = 0;
-			timeout.tv_usec = 1000;
+			timeout.tv_usec = 300000;
 		} else {
 			timeout.tv_sec = 1;
 			timeout.tv_usec = 0;
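The generic_access_phys() hunk above adds the missing NULL check after ioremap_prot(): mapping helpers can fail, and the returned pointer must not be dereferenced before checking. A userspace analogue of the same pattern (mmap() standing in for ioremap_prot(); access_phys() here is an illustrative stand-in, not the kernel function):

/*
 * Check the mapping helper's result before touching the mapping,
 * mirroring the -ENOMEM bail-out the patch adds.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static int access_phys(char *buf, size_t len, int write)
{
	/* MAP_ANONYMOUS stands in for ioremap_prot() here. */
	char *maddr = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (maddr == MAP_FAILED)
		return -ENOMEM;	/* the check the patch adds */

	if (write)
		memcpy(maddr, buf, len);
	else
		memcpy(buf, maddr, len);

	munmap(maddr, 4096);
	return (int)len;
}

int main(void)
{
	char buf[16] = "test";
	return access_phys(buf, sizeof(buf), 1) < 0;
}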