From 9f591ae60e1be026901398ef99eede91237aa3a1 Mon Sep 17 00:00:00 2001 From: Gerd Hoffmann Date: Wed, 21 Mar 2018 15:08:47 +0100 Subject: [PATCH 001/182] drm/i915/gvt: throw error on unhandled vfio ioctls On unknown/unhandled ioctls the driver should return an error, so userspace knows it tried to use something unsupported. Cc: stable@vger.kernel.org Signed-off-by: Gerd Hoffmann Reviewed-by: Alex Williamson Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/kvmgt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c index 520fe3d0a882..f8540cc67b44 100644 --- a/drivers/gpu/drm/i915/gvt/kvmgt.c +++ b/drivers/gpu/drm/i915/gvt/kvmgt.c @@ -1254,7 +1254,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd, } - return 0; + return -ENOTTY; } static ssize_t From ac0fd9cfc837d1d30cb958dc34769e90aad43078 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Thu, 22 Mar 2018 12:27:54 -0500 Subject: [PATCH 002/182] drm/i915/gvt: Mark expected switch fall-through in handle_g2v_notification In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Addresses-Coverity-ID: 1466154 ("Missing break in switch") Signed-off-by: Gustavo A. R. Silva Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 8c5d5d005854..a33c1c3e4a21 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -1150,6 +1150,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification) switch (notification) { case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE: root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY; + /* fall through */ case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE: mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps); return PTR_ERR_OR_ZERO(mm); From 5da795b061a139b55d7393e79c7af688c58a4267 Mon Sep 17 00:00:00 2001 From: Zhipeng Gong Date: Mon, 26 Mar 2018 15:18:56 +0800 Subject: [PATCH 003/182] drm/i915/gvt: Make MI_USER_INTERRUPT nop in cmd parser GVT-g dispatches request to host i915 and depends on i915 notify ring interrupt mechanism to check completion of request. For now MI_USER_INTERRUPT in guest requests is passed through in GVT-g cmd parser and i915 does not use it, which causes unnecessary interrupt handling in i915. On the other hand, if several requests from guest are combined into one request in and contain MI_USER_INTERRUPT in the middle of combined request. GVT-g still has to wait on the whole request to complete to inject user interrupts to guest. This patch makes all the MI_USER_INTERRUPT nop to save some interrupt handling. Here is test result to run glmark2 on guest for 10 seconds: host master interrupts number is reduced from 16021 to 11162 host user interrupts number is reduced from 7936 to 3536 v2: - revise commit message. 
(Kevin) Reviewed-by: Kevin Tian Signed-off-by: Zhipeng Gong Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c8454ac43fae..d301a204e981 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1079,6 +1079,7 @@ static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s) { set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt, s->workload->pending_events); + patch_value(s, cmd_ptr(s, 0), MI_NOOP); return 0; } From 7598e8700e9a507496660219924ca8c5aa3088d6 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 27 Mar 2018 15:35:14 +0800 Subject: [PATCH 004/182] drm/i915/gvt: Missed to cancel dma map for ggtt entries We have canceled dma map for ppgtt entries. Also we need to do it for ggtt entries when them are invalidated. This can fix task hung issue as: [13517.791767] INFO: task gvt_service_thr:1081 blocked for more than 120 seconds. [13517.792584] Not tainted 4.14.15+ #3 [13517.793417] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [13517.794267] gvt_service_thr D 0 1081 2 0x80000000 [13517.795132] Call Trace: [13517.795996] ? __schedule+0x493/0x77b [13517.796859] schedule+0x79/0x82 [13517.797740] schedule_preempt_disabled+0x5/0x6 [13517.798614] __mutex_lock.isra.0+0x2b5/0x445 [13517.799504] ? __switch_to_asm+0x24/0x60 [13517.800381] ? intel_gvt_cleanup+0x10/0x10 [13517.801261] ? intel_gvt_schedule+0x19/0x2b9 [13517.802107] intel_gvt_schedule+0x19/0x2b9 [13517.802954] ? intel_gvt_cleanup+0x10/0x10 [13517.803824] gvt_service_thread+0xe3/0x10d [13517.804704] ? wait_woken+0x68/0x68 [13517.805588] kthread+0x118/0x120 [13517.806478] ? kthread_create_on_node+0x3a/0x3a [13517.807381] ? call_usermodehelper_exec_async+0x113/0x11a [13517.808307] ret_from_fork+0x35/0x40 v3: split out ggtt reset case. v2: also unmap ggtt during reset. 
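As a reading aid, the rule this change enforces can be condensed into the sketch below. The wrapper name ggtt_drop_old_mapping is hypothetical; the calls and the scratch-page check are the ones used in the diff that follows.

	/*
	 * Simplified sketch (not part of the patch): before a guest GGTT slot
	 * is rewritten or cleared, DMA-unmap the page it used to map, unless
	 * the slot still points at the shared scratch page, which is never
	 * DMA-mapped per vGPU.
	 */
	static void ggtt_drop_old_mapping(struct intel_vgpu *vgpu,
					  struct intel_vgpu_mm *ggtt_mm,
					  unsigned long index)
	{
		struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
		struct intel_gvt_gtt_entry old;
		unsigned long pfn;

		ggtt_get_host_entry(ggtt_mm, &old, index);
		pfn = ops->get_pfn(&old);
		if (pfn != vgpu->gvt->gtt.scratch_mfn)
			intel_gvt_hypervisor_dma_unmap_guest_page(vgpu,
								  pfn << PAGE_SHIFT);
	}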
Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 30 ++++++++++++++++++++++++++++-- 1 file changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index d29281231507..8c9477c272be 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -530,6 +530,16 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, false, 0, mm->vgpu); } +static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, + struct intel_gvt_gtt_entry *entry, unsigned long index) +{ + struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + + GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); + + pte_ops->get_entry(NULL, entry, index, false, 0, mm->vgpu); +} + static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { @@ -1818,6 +1828,18 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, return ret; } +static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, + struct intel_gvt_gtt_entry *entry) +{ + struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + unsigned long pfn; + + pfn = pte_ops->get_pfn(entry); + if (pfn != vgpu->gvt->gtt.scratch_mfn) + intel_gvt_hypervisor_dma_unmap_guest_page(vgpu, + pfn << PAGE_SHIFT); +} + static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, void *p_data, unsigned int bytes) { @@ -1844,10 +1866,10 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data, bytes); - m = e; if (ops->test_present(&e)) { gfn = ops->get_pfn(&e); + m = e; /* one PTE update may be issued in multiple writes and the * first write may not construct a valid gfn @@ -1868,8 +1890,12 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, ops->set_pfn(&m, gvt->gtt.scratch_mfn); } else ops->set_pfn(&m, dma_addr >> PAGE_SHIFT); - } else + } else { + ggtt_get_host_entry(ggtt_mm, &m, g_gtt_index); + ggtt_invalidate_pte(vgpu, &m); ops->set_pfn(&m, gvt->gtt.scratch_mfn); + ops->clear_present(&m); + } out: ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index); From f4c43db356198d8091442f593871b0beb5c139b2 Mon Sep 17 00:00:00 2001 From: Changbin Du Date: Tue, 27 Mar 2018 15:35:15 +0800 Subject: [PATCH 005/182] drm/i915/gvt: Cancel dma map when resetting ggtt entries Ditto, don't forget ggtt entries during reset. Signed-off-by: Changbin Du Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 22 +++++++++++++++++----- drivers/gpu/drm/i915/gvt/gtt.h | 2 +- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 8c9477c272be..78e55aafc8bc 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -2056,7 +2056,7 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu) return PTR_ERR(gtt->ggtt_mm); } - intel_vgpu_reset_ggtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, false); return create_scratch_page_tree(vgpu); } @@ -2341,17 +2341,19 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) /** * intel_vgpu_reset_ggtt - reset the GGTT entry * @vgpu: a vGPU + * @invalidate_old: invalidate old entries * * This function is called at the vGPU create stage * to reset all the GGTT entries. 
* */ -void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *dev_priv = gvt->dev_priv; struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; + struct intel_gvt_gtt_entry old_entry; u32 index; u32 num_entries; @@ -2360,13 +2362,23 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu) index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT; - while (num_entries--) + while (num_entries--) { + if (invalidate_old) { + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); + ggtt_invalidate_pte(vgpu, &old_entry); + } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); + } index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT; num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT; - while (num_entries--) + while (num_entries--) { + if (invalidate_old) { + ggtt_get_host_entry(vgpu->gtt.ggtt_mm, &old_entry, index); + ggtt_invalidate_pte(vgpu, &old_entry); + } ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++); + } ggtt_invalidate(dev_priv); } @@ -2386,5 +2398,5 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu) * removing the shadow pages. */ intel_vgpu_destroy_all_ppgtt_mm(vgpu); - intel_vgpu_reset_ggtt(vgpu); + intel_vgpu_reset_ggtt(vgpu, true); } diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index a8b369cd352b..3792f2b7f4ff 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -193,7 +193,7 @@ struct intel_vgpu_gtt { extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu); extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu); -void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu); +void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old); void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu); extern int intel_gvt_init_gtt(struct intel_gvt *gvt); From a733390f9a79876635013e57da25f91b6e78ffe6 Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Wed, 28 Mar 2018 05:30:13 +0800 Subject: [PATCH 006/182] drm/i915/gvt: Delete redundant error message in fb_decode.c Much error message exist in host dmesg when guest boot up with local display enabled. [ 167.680011] gvt: vgpu 1: invalid range gmadr 0x0 size 0x0 [ 167.680013] gvt: vgpu 1: invalid gma address: 0 The second error line duplicate with the first error line, so this patch remove this redundant error message and make the next error message much clearer. 
Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/fb_decoder.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 6b50fe78dc1b..1c120683e958 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -245,16 +245,13 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->hw_format = fmt; plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate primary plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } @@ -371,16 +368,13 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, alpha_plane, alpha_force); plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate cursor plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } @@ -476,16 +470,13 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, plane->drm_format = drm_format; plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK; - if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) return -EINVAL; - } plane->base_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm, plane->base); if (plane->base_gpa == INTEL_GVT_INVALID_ADDR) { - gvt_vgpu_err("invalid gma address: %lx\n", - (unsigned long)plane->base); + gvt_vgpu_err("Translate sprite plane gma 0x%x to gpa fail\n", + plane->base); return -EINVAL; } From 65eff272330c72689fb5e20dd6491826fd87a39c Mon Sep 17 00:00:00 2001 From: Xiong Zhang Date: Wed, 28 Mar 2018 05:30:14 +0800 Subject: [PATCH 007/182] drm/i915/gvt: Disable primary/sprite/cursor plane at virtual display initialization Much error exist in host dmesg during guest boot up with loca display enabled. gvt: vgpu 1: invalid range gmadr 0x0 size 0x0 This error happens when qemu get dmabuf info in case that the virtual display plane is enabled but its base address is an invalid 0, such case may be true before guest enable its plane. At this moment, its state is copied from host where the plane may be enabled. This patch disable primary/sprite/cursor plane at virtual display initialization, so intel_vgpu_decode_primary/cursor/sprite could return early as plane is disabled, then plane base check is skipped and error message disapper. 
Signed-off-by: Xiong Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/display.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index dd96ffc878ac..6d8180e8d1e2 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -169,6 +169,8 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = { static void emulate_monitor_status_change(struct intel_vgpu *vgpu) { struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; + int pipe; + vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT); @@ -267,6 +269,14 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) if (IS_BROADWELL(dev_priv)) vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK; + /* Disable Primary/Sprite/Cursor plane */ + for_each_pipe(dev_priv, pipe) { + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~CURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= CURSOR_MODE_DISABLE; + } + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; } From 10996f802109c83421ca30556cfe36ffc3bebae3 Mon Sep 17 00:00:00 2001 From: Tina Zhang Date: Wed, 28 Mar 2018 13:49:29 +0800 Subject: [PATCH 008/182] drm/i915/gvt: Add drm_format_mod update Add drm_format_mod update, which is omitted. Fixes: e546e281("drm/i915/gvt: Dmabuf support for GVT-g") Cc: stable@vger.kernel.org Signed-off-by: Tina Zhang Signed-off-by: Zhenyu Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index b555eb26f9ce..6f4f8e941fc2 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -323,6 +323,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf, struct intel_vgpu_fb_info *fb_info) { gvt_dmabuf->drm_format = fb_info->drm_format; + gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod; gvt_dmabuf->width = fb_info->width; gvt_dmabuf->height = fb_info->height; gvt_dmabuf->stride = fb_info->stride; From 38057aa1639b74d5c1e0dc1ca7c22bc7a31de714 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 24 Mar 2018 12:58:29 +0000 Subject: [PATCH 009/182] drm/i915/execlists: Clear user-active flag on preemption completion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When cancelling the requests and clearing out the ports following a successful preemption completion, also clear the active flag. I had assumed that all preemptions would be followed by an immediate dequeue (preserving the active user flag), but under rare circumstances we may be triggering a preemption for the second port only for it to have completed before the preemotion kicks in; leaving execlists->active set even though the system is now idle. We can clear the flag inside the common execlists_cancel_port_requests() as the other users also expect the semantics of active being cleared. 
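In short, the invariant being restored is that EXECLISTS_ACTIVE_USER mirrors whether any ELSP port still holds a request; the assertion added at the end of the submission tasklet (quoted from the diff below) captures it:

	/* engine idle (no port set) must imply user-active flag clear, and vice versa */
	GEM_BUG_ON(execlists_is_active(&engine->execlists,
				       EXECLISTS_ACTIVE_USER) ==
		   !port_isset(engine->execlists.port));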
Fixes: f6322eddaff7 ("drm/i915/preemption: Allow preemption between submission ports") Signed-off-by: Chris Wilson Cc: Michał Winiarski Cc: Michel Thierry Cc: Mika Kuoppala Cc: Tvrtko Ursulin Reviewed-by: Mika Kuoppala Link: https://patchwork.freedesktop.org/patch/msgid/20180324125829.27026-1-chris@chris-wilson.co.uk (cherry picked from commit eed7ec52f214bac2f25395ccaad610fbeb842a6e) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/intel_lrc.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c index 697af5add78b..e3a5f673ff67 100644 --- a/drivers/gpu/drm/i915/intel_lrc.c +++ b/drivers/gpu/drm/i915/intel_lrc.c @@ -577,6 +577,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine) * know the next preemption status we see corresponds * to this ELSP update. */ + GEM_BUG_ON(!execlists_is_active(execlists, + EXECLISTS_ACTIVE_USER)); GEM_BUG_ON(!port_count(&port[0])); if (port_count(&port[0]) > 1) goto unlock; @@ -738,6 +740,8 @@ execlists_cancel_port_requests(struct intel_engine_execlists * const execlists) memset(port, 0, sizeof(*port)); port++; } + + execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER); } static void execlists_cancel_requests(struct intel_engine_cs *engine) @@ -1001,6 +1005,11 @@ static void execlists_submission_tasklet(unsigned long data) if (fw) intel_uncore_forcewake_put(dev_priv, execlists->fw_domains); + + /* If the engine is now idle, so should be the flag; and vice versa. */ + GEM_BUG_ON(execlists_is_active(&engine->execlists, + EXECLISTS_ACTIVE_USER) == + !port_isset(engine->execlists.port)); } static void queue_request(struct intel_engine_cs *engine, From 876c27314ce51fe7e7e2aeb24a6448da1a26c78f Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 3 Apr 2018 12:10:09 +0200 Subject: [PATCH 010/182] netfilter: nf_conntrack_sip: allow duplicate SDP expectations Callum Sinclair reported SIP IP Phone errors that he tracked down to such phones sending session descriptions for different media types but with same port numbers. The expect core will only 'refresh' existing expectation if it is from same master AND same expectation class (media type). As expectation class is different, we get an error. The SIP connection tracking code will then 1). drop the SDP packet 2). if an rtp expectation was already installed successfully, error on rtcp expectation will cancel the rtp one. Make the expect core report back to caller when the conflict is due to different expectation class and have SIP tracker ignore soft-error. 
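Condensed from the diff below as a reading aid: the SIP helper then counts the new soft error as success, and only rolls back the RTP expectation when this call actually registered it (errp == 0), not when it merely collided with an existing one (-EALREADY):

	int errp = nf_ct_expect_related(rtp_exp);

	if (errp == 0 || errp == -EALREADY) {
		int errcp = nf_ct_expect_related(rtcp_exp);

		if (errcp == 0 || errcp == -EALREADY)
			ret = NF_ACCEPT;
		else if (errp == 0)	/* only drop an expectation we created */
			nf_ct_unexpect_related(rtp_exp);
	}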
Reported-by: Callum Sinclair Tested-by: Callum Sinclair Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_expect.c | 5 ++++- net/netfilter/nf_conntrack_sip.c | 16 ++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index 8ef21d9f9a00..4b2b3d53acfc 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -252,7 +252,7 @@ static inline int expect_clash(const struct nf_conntrack_expect *a, static inline int expect_matches(const struct nf_conntrack_expect *a, const struct nf_conntrack_expect *b) { - return a->master == b->master && a->class == b->class && + return a->master == b->master && nf_ct_tuple_equal(&a->tuple, &b->tuple) && nf_ct_tuple_mask_equal(&a->mask, &b->mask) && net_eq(nf_ct_net(a->master), nf_ct_net(b->master)) && @@ -421,6 +421,9 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) h = nf_ct_expect_dst_hash(net, &expect->tuple); hlist_for_each_entry_safe(i, next, &nf_ct_expect_hash[h], hnode) { if (expect_matches(i, expect)) { + if (i->class != expect->class) + return -EALREADY; + if (nf_ct_remove_expect(i)) break; } else if (expect_clash(i, expect)) { diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 4dbb5bad4363..908e51e2dc2b 100644 --- a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -938,11 +938,19 @@ static int set_expected_rtp_rtcp(struct sk_buff *skb, unsigned int protoff, datalen, rtp_exp, rtcp_exp, mediaoff, medialen, daddr); else { - if (nf_ct_expect_related(rtp_exp) == 0) { - if (nf_ct_expect_related(rtcp_exp) != 0) - nf_ct_unexpect_related(rtp_exp); - else + /* -EALREADY handling works around end-points that send + * SDP messages with identical port but different media type, + * we pretend expectation was set up. + */ + int errp = nf_ct_expect_related(rtp_exp); + + if (errp == 0 || errp == -EALREADY) { + int errcp = nf_ct_expect_related(rtcp_exp); + + if (errcp == 0 || errcp == -EALREADY) ret = NF_ACCEPT; + else if (errp == 0) + nf_ct_unexpect_related(rtp_exp); } } nf_ct_expect_put(rtcp_exp); From 5c64576a77894a50be80be0024bed27171b55989 Mon Sep 17 00:00:00 2001 From: Julian Anastasov Date: Sat, 7 Apr 2018 15:50:47 +0300 Subject: [PATCH 011/182] ipvs: fix rtnl_lock lockups caused by start_sync_thread syzkaller reports for wrong rtnl_lock usage in sync code [1] and [2] We have 2 problems in start_sync_thread if error path is taken, eg. on memory allocation error or failure to configure sockets for mcast group or addr/port binding: 1. recursive locking: holding rtnl_lock while calling sock_release which in turn calls again rtnl_lock in ip_mc_drop_socket to leave the mcast group, as noticed by Florian Westphal. Additionally, sock_release can not be called while holding sync_mutex (ABBA deadlock). 2. task hung: holding rtnl_lock while calling kthread_stop to stop the running kthreads. As the kthreads do the same to leave the mcast group (sock_release -> ip_mc_drop_socket -> rtnl_lock) they hang. Fix the problems by calling rtnl_unlock early in the error path, now sock_release is called after unlocking both mutexes. Problem 3 (task hung reported by syzkaller [2]) is variant of problem 2: use _trylock to prevent one user to call rtnl_lock and then while waiting for sync_mutex to block kthreads that execute sock_release when they are stopped by stop_sync_thread. 
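The acquisition loop used in start_sync_thread() (as in the diff below) takes both locks without ever blocking on one while holding the other, so the kthreads that need rtnl_lock to leave the mcast group can always make progress:

	/* Do not hold one mutex and then block on the other */
	for (;;) {
		rtnl_lock();
		if (mutex_trylock(&ipvs->sync_mutex))
			break;
		rtnl_unlock();
		mutex_lock(&ipvs->sync_mutex);
		if (rtnl_trylock())
			break;
		mutex_unlock(&ipvs->sync_mutex);
	}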
[1] IPVS: stopping backup sync thread 4500 ... WARNING: possible recursive locking detected 4.16.0-rc7+ #3 Not tainted -------------------------------------------- syzkaller688027/4497 is trying to acquire lock: (rtnl_mutex){+.+.}, at: [<00000000bb14d7fb>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 but task is already holding lock: IPVS: stopping backup sync thread 4495 ... (rtnl_mutex){+.+.}, at: [<00000000bb14d7fb>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 other info that might help us debug this: Possible unsafe locking scenario: CPU0 ---- lock(rtnl_mutex); lock(rtnl_mutex); *** DEADLOCK *** May be due to missing lock nesting notation 2 locks held by syzkaller688027/4497: #0: (rtnl_mutex){+.+.}, at: [<00000000bb14d7fb>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 #1: (ipvs->sync_mutex){+.+.}, at: [<00000000703f78e3>] do_ip_vs_set_ctl+0x10f8/0x1cc0 net/netfilter/ipvs/ip_vs_ctl.c:2388 stack backtrace: CPU: 1 PID: 4497 Comm: syzkaller688027 Not tainted 4.16.0-rc7+ #3 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:17 [inline] dump_stack+0x194/0x24d lib/dump_stack.c:53 print_deadlock_bug kernel/locking/lockdep.c:1761 [inline] check_deadlock kernel/locking/lockdep.c:1805 [inline] validate_chain kernel/locking/lockdep.c:2401 [inline] __lock_acquire+0xe8f/0x3e00 kernel/locking/lockdep.c:3431 lock_acquire+0x1d5/0x580 kernel/locking/lockdep.c:3920 __mutex_lock_common kernel/locking/mutex.c:756 [inline] __mutex_lock+0x16f/0x1a80 kernel/locking/mutex.c:893 mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:908 rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 ip_mc_drop_socket+0x88/0x230 net/ipv4/igmp.c:2643 inet_release+0x4e/0x1c0 net/ipv4/af_inet.c:413 sock_release+0x8d/0x1e0 net/socket.c:595 start_sync_thread+0x2213/0x2b70 net/netfilter/ipvs/ip_vs_sync.c:1924 do_ip_vs_set_ctl+0x1139/0x1cc0 net/netfilter/ipvs/ip_vs_ctl.c:2389 nf_sockopt net/netfilter/nf_sockopt.c:106 [inline] nf_setsockopt+0x67/0xc0 net/netfilter/nf_sockopt.c:115 ip_setsockopt+0x97/0xa0 net/ipv4/ip_sockglue.c:1261 udp_setsockopt+0x45/0x80 net/ipv4/udp.c:2406 sock_common_setsockopt+0x95/0xd0 net/core/sock.c:2975 SYSC_setsockopt net/socket.c:1849 [inline] SyS_setsockopt+0x189/0x360 net/socket.c:1828 do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 RIP: 0033:0x446a69 RSP: 002b:00007fa1c3a64da8 EFLAGS: 00000246 ORIG_RAX: 0000000000000036 RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 0000000000446a69 RDX: 000000000000048b RSI: 0000000000000000 RDI: 0000000000000003 RBP: 00000000006e29fc R08: 0000000000000018 R09: 0000000000000000 R10: 00000000200000c0 R11: 0000000000000246 R12: 00000000006e29f8 R13: 00676e697279656b R14: 00007fa1c3a659c0 R15: 00000000006e2b60 [2] IPVS: sync thread started: state = BACKUP, mcast_ifn = syz_tun, syncid = 4, id = 0 IPVS: stopping backup sync thread 25415 ... INFO: task syz-executor7:25421 blocked for more than 120 seconds. Not tainted 4.16.0-rc6+ #284 "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. 
syz-executor7 D23688 25421 4408 0x00000004 Call Trace: context_switch kernel/sched/core.c:2862 [inline] __schedule+0x8fb/0x1ec0 kernel/sched/core.c:3440 schedule+0xf5/0x430 kernel/sched/core.c:3499 schedule_timeout+0x1a3/0x230 kernel/time/timer.c:1777 do_wait_for_common kernel/sched/completion.c:86 [inline] __wait_for_common kernel/sched/completion.c:107 [inline] wait_for_common kernel/sched/completion.c:118 [inline] wait_for_completion+0x415/0x770 kernel/sched/completion.c:139 kthread_stop+0x14a/0x7a0 kernel/kthread.c:530 stop_sync_thread+0x3d9/0x740 net/netfilter/ipvs/ip_vs_sync.c:1996 do_ip_vs_set_ctl+0x2b1/0x1cc0 net/netfilter/ipvs/ip_vs_ctl.c:2394 nf_sockopt net/netfilter/nf_sockopt.c:106 [inline] nf_setsockopt+0x67/0xc0 net/netfilter/nf_sockopt.c:115 ip_setsockopt+0x97/0xa0 net/ipv4/ip_sockglue.c:1253 sctp_setsockopt+0x2ca/0x63e0 net/sctp/socket.c:4154 sock_common_setsockopt+0x95/0xd0 net/core/sock.c:3039 SYSC_setsockopt net/socket.c:1850 [inline] SyS_setsockopt+0x189/0x360 net/socket.c:1829 do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 RIP: 0033:0x454889 RSP: 002b:00007fc927626c68 EFLAGS: 00000246 ORIG_RAX: 0000000000000036 RAX: ffffffffffffffda RBX: 00007fc9276276d4 RCX: 0000000000454889 RDX: 000000000000048c RSI: 0000000000000000 RDI: 0000000000000017 RBP: 000000000072bf58 R08: 0000000000000018 R09: 0000000000000000 R10: 0000000020000000 R11: 0000000000000246 R12: 00000000ffffffff R13: 000000000000051c R14: 00000000006f9b40 R15: 0000000000000001 Showing all locks held in the system: 2 locks held by khungtaskd/868: #0: (rcu_read_lock){....}, at: [<00000000a1a8f002>] check_hung_uninterruptible_tasks kernel/hung_task.c:175 [inline] #0: (rcu_read_lock){....}, at: [<00000000a1a8f002>] watchdog+0x1c5/0xd60 kernel/hung_task.c:249 #1: (tasklist_lock){.+.+}, at: [<0000000037c2f8f9>] debug_show_all_locks+0xd3/0x3d0 kernel/locking/lockdep.c:4470 1 lock held by rsyslogd/4247: #0: (&f->f_pos_lock){+.+.}, at: [<000000000d8d6983>] __fdget_pos+0x12b/0x190 fs/file.c:765 2 locks held by getty/4338: #0: (&tty->ldisc_sem){++++}, at: [<00000000bee98654>] ldsem_down_read+0x37/0x40 drivers/tty/tty_ldsem.c:365 #1: (&ldata->atomic_read_lock){+.+.}, at: [<00000000c1d180aa>] n_tty_read+0x2ef/0x1a40 drivers/tty/n_tty.c:2131 2 locks held by getty/4339: #0: (&tty->ldisc_sem){++++}, at: [<00000000bee98654>] ldsem_down_read+0x37/0x40 drivers/tty/tty_ldsem.c:365 #1: (&ldata->atomic_read_lock){+.+.}, at: [<00000000c1d180aa>] n_tty_read+0x2ef/0x1a40 drivers/tty/n_tty.c:2131 2 locks held by getty/4340: #0: (&tty->ldisc_sem){++++}, at: [<00000000bee98654>] ldsem_down_read+0x37/0x40 drivers/tty/tty_ldsem.c:365 #1: (&ldata->atomic_read_lock){+.+.}, at: [<00000000c1d180aa>] n_tty_read+0x2ef/0x1a40 drivers/tty/n_tty.c:2131 2 locks held by getty/4341: #0: (&tty->ldisc_sem){++++}, at: [<00000000bee98654>] ldsem_down_read+0x37/0x40 drivers/tty/tty_ldsem.c:365 #1: (&ldata->atomic_read_lock){+.+.}, at: [<00000000c1d180aa>] n_tty_read+0x2ef/0x1a40 drivers/tty/n_tty.c:2131 2 locks held by getty/4342: #0: (&tty->ldisc_sem){++++}, at: [<00000000bee98654>] ldsem_down_read+0x37/0x40 drivers/tty/tty_ldsem.c:365 #1: (&ldata->atomic_read_lock){+.+.}, at: [<00000000c1d180aa>] n_tty_read+0x2ef/0x1a40 drivers/tty/n_tty.c:2131 2 locks held by getty/4343: #0: (&tty->ldisc_sem){++++}, at: [<00000000bee98654>] ldsem_down_read+0x37/0x40 drivers/tty/tty_ldsem.c:365 #1: (&ldata->atomic_read_lock){+.+.}, at: [<00000000c1d180aa>] n_tty_read+0x2ef/0x1a40 drivers/tty/n_tty.c:2131 2 locks held by 
getty/4344: #0: (&tty->ldisc_sem){++++}, at: [<00000000bee98654>] ldsem_down_read+0x37/0x40 drivers/tty/tty_ldsem.c:365 #1: (&ldata->atomic_read_lock){+.+.}, at: [<00000000c1d180aa>] n_tty_read+0x2ef/0x1a40 drivers/tty/n_tty.c:2131 3 locks held by kworker/0:5/6494: #0: ((wq_completion)"%s"("ipv6_addrconf")){+.+.}, at: [<00000000a062b18e>] work_static include/linux/workqueue.h:198 [inline] #0: ((wq_completion)"%s"("ipv6_addrconf")){+.+.}, at: [<00000000a062b18e>] set_work_data kernel/workqueue.c:619 [inline] #0: ((wq_completion)"%s"("ipv6_addrconf")){+.+.}, at: [<00000000a062b18e>] set_work_pool_and_clear_pending kernel/workqueue.c:646 [inline] #0: ((wq_completion)"%s"("ipv6_addrconf")){+.+.}, at: [<00000000a062b18e>] process_one_work+0xb12/0x1bb0 kernel/workqueue.c:2084 #1: ((addr_chk_work).work){+.+.}, at: [<00000000278427d5>] process_one_work+0xb89/0x1bb0 kernel/workqueue.c:2088 #2: (rtnl_mutex){+.+.}, at: [<00000000066e35ac>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 1 lock held by syz-executor7/25421: #0: (ipvs->sync_mutex){+.+.}, at: [<00000000d414a689>] do_ip_vs_set_ctl+0x277/0x1cc0 net/netfilter/ipvs/ip_vs_ctl.c:2393 2 locks held by syz-executor7/25427: #0: (rtnl_mutex){+.+.}, at: [<00000000066e35ac>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 #1: (ipvs->sync_mutex){+.+.}, at: [<00000000e6d48489>] do_ip_vs_set_ctl+0x10f8/0x1cc0 net/netfilter/ipvs/ip_vs_ctl.c:2388 1 lock held by syz-executor7/25435: #0: (rtnl_mutex){+.+.}, at: [<00000000066e35ac>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 1 lock held by ipvs-b:2:0/25415: #0: (rtnl_mutex){+.+.}, at: [<00000000066e35ac>] rtnl_lock+0x17/0x20 net/core/rtnetlink.c:74 Reported-and-tested-by: syzbot+a46d6abf9d56b1365a72@syzkaller.appspotmail.com Reported-and-tested-by: syzbot+5fe074c01b2032ce9618@syzkaller.appspotmail.com Fixes: e0b26cc997d5 ("ipvs: call rtnl_lock early") Signed-off-by: Julian Anastasov Signed-off-by: Simon Horman Signed-off-by: Pablo Neira Ayuso --- net/netfilter/ipvs/ip_vs_ctl.c | 8 -- net/netfilter/ipvs/ip_vs_sync.c | 155 ++++++++++++++++---------------- 2 files changed, 80 insertions(+), 83 deletions(-) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 5ebde4b15810..f36098887ad0 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -2384,11 +2384,7 @@ do_ip_vs_set_ctl(struct sock *sk, int cmd, void __user *user, unsigned int len) strlcpy(cfg.mcast_ifn, dm->mcast_ifn, sizeof(cfg.mcast_ifn)); cfg.syncid = dm->syncid; - rtnl_lock(); - mutex_lock(&ipvs->sync_mutex); ret = start_sync_thread(ipvs, &cfg, dm->state); - mutex_unlock(&ipvs->sync_mutex); - rtnl_unlock(); } else { mutex_lock(&ipvs->sync_mutex); ret = stop_sync_thread(ipvs, dm->state); @@ -3481,12 +3477,8 @@ static int ip_vs_genl_new_daemon(struct netns_ipvs *ipvs, struct nlattr **attrs) if (ipvs->mixed_address_family_dests > 0) return -EINVAL; - rtnl_lock(); - mutex_lock(&ipvs->sync_mutex); ret = start_sync_thread(ipvs, &c, nla_get_u32(attrs[IPVS_DAEMON_ATTR_STATE])); - mutex_unlock(&ipvs->sync_mutex); - rtnl_unlock(); return ret; } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index fbaf3bd05b2e..001501e25625 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -49,6 +49,7 @@ #include #include #include +#include #include /* Used for ntoh_seq and hton_seq */ @@ -1360,15 +1361,9 @@ static void set_mcast_pmtudisc(struct sock *sk, int val) /* * Specifiy default interface for outgoing multicasts */ -static int set_mcast_if(struct sock 
*sk, char *ifname) +static int set_mcast_if(struct sock *sk, struct net_device *dev) { - struct net_device *dev; struct inet_sock *inet = inet_sk(sk); - struct net *net = sock_net(sk); - - dev = __dev_get_by_name(net, ifname); - if (!dev) - return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; @@ -1396,19 +1391,14 @@ static int set_mcast_if(struct sock *sk, char *ifname) * in the in_addr structure passed in as a parameter. */ static int -join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) +join_mcast_group(struct sock *sk, struct in_addr *addr, struct net_device *dev) { - struct net *net = sock_net(sk); struct ip_mreqn mreq; - struct net_device *dev; int ret; memset(&mreq, 0, sizeof(mreq)); memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); - dev = __dev_get_by_name(net, ifname); - if (!dev) - return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; @@ -1423,15 +1413,10 @@ join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) #ifdef CONFIG_IP_VS_IPV6 static int join_mcast_group6(struct sock *sk, struct in6_addr *addr, - char *ifname) + struct net_device *dev) { - struct net *net = sock_net(sk); - struct net_device *dev; int ret; - dev = __dev_get_by_name(net, ifname); - if (!dev) - return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; @@ -1443,24 +1428,18 @@ static int join_mcast_group6(struct sock *sk, struct in6_addr *addr, } #endif -static int bind_mcastif_addr(struct socket *sock, char *ifname) +static int bind_mcastif_addr(struct socket *sock, struct net_device *dev) { - struct net *net = sock_net(sock->sk); - struct net_device *dev; __be32 addr; struct sockaddr_in sin; - dev = __dev_get_by_name(net, ifname); - if (!dev) - return -ENODEV; - addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); if (!addr) pr_err("You probably need to specify IP address on " "multicast interface.\n"); IP_VS_DBG(7, "binding socket with (%s) %pI4\n", - ifname, &addr); + dev->name, &addr); /* Now bind the socket with the address of multicast interface */ sin.sin_family = AF_INET; @@ -1493,7 +1472,8 @@ static void get_mcast_sockaddr(union ipvs_sockaddr *sa, int *salen, /* * Set up sending multicast socket over UDP */ -static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) +static int make_send_sock(struct netns_ipvs *ipvs, int id, + struct net_device *dev, struct socket **sock_ret) { /* multicast addr */ union ipvs_sockaddr mcast_addr; @@ -1505,9 +1485,10 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); - return ERR_PTR(result); + goto error; } - result = set_mcast_if(sock->sk, ipvs->mcfg.mcast_ifn); + *sock_ret = sock; + result = set_mcast_if(sock->sk, dev); if (result < 0) { pr_err("Error setting outbound mcast interface\n"); goto error; @@ -1522,7 +1503,7 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) set_sock_size(sock->sk, 1, result); if (AF_INET == ipvs->mcfg.mcast_af) - result = bind_mcastif_addr(sock, ipvs->mcfg.mcast_ifn); + result = bind_mcastif_addr(sock, dev); else result = 0; if (result < 0) { @@ -1538,19 +1519,18 @@ static struct socket *make_send_sock(struct netns_ipvs *ipvs, int id) goto error; } - return sock; + return 0; error: - sock_release(sock); - return ERR_PTR(result); + return result; } /* * Set up receiving multicast socket over UDP */ -static struct socket 
*make_receive_sock(struct netns_ipvs *ipvs, int id, - int ifindex) +static int make_receive_sock(struct netns_ipvs *ipvs, int id, + struct net_device *dev, struct socket **sock_ret) { /* multicast addr */ union ipvs_sockaddr mcast_addr; @@ -1562,8 +1542,9 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); - return ERR_PTR(result); + goto error; } + *sock_ret = sock; /* it is equivalent to the REUSEADDR option in user-space */ sock->sk->sk_reuse = SK_CAN_REUSE; result = sysctl_sync_sock_size(ipvs); @@ -1571,7 +1552,7 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, set_sock_size(sock->sk, 0, result); get_mcast_sockaddr(&mcast_addr, &salen, &ipvs->bcfg, id); - sock->sk->sk_bound_dev_if = ifindex; + sock->sk->sk_bound_dev_if = dev->ifindex; result = sock->ops->bind(sock, (struct sockaddr *)&mcast_addr, salen); if (result < 0) { pr_err("Error binding to the multicast addr\n"); @@ -1582,21 +1563,20 @@ static struct socket *make_receive_sock(struct netns_ipvs *ipvs, int id, #ifdef CONFIG_IP_VS_IPV6 if (ipvs->bcfg.mcast_af == AF_INET6) result = join_mcast_group6(sock->sk, &mcast_addr.in6.sin6_addr, - ipvs->bcfg.mcast_ifn); + dev); else #endif result = join_mcast_group(sock->sk, &mcast_addr.in.sin_addr, - ipvs->bcfg.mcast_ifn); + dev); if (result < 0) { pr_err("Error joining to the multicast group\n"); goto error; } - return sock; + return 0; error: - sock_release(sock); - return ERR_PTR(result); + return result; } @@ -1778,13 +1758,12 @@ static int sync_thread_backup(void *data) int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, int state) { - struct ip_vs_sync_thread_data *tinfo; + struct ip_vs_sync_thread_data *tinfo = NULL; struct task_struct **array = NULL, *task; - struct socket *sock; struct net_device *dev; char *name; int (*threadfn)(void *data); - int id, count, hlen; + int id = 0, count, hlen; int result = -ENOMEM; u16 mtu, min_mtu; @@ -1792,6 +1771,18 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); + /* Do not hold one mutex and then to block on another */ + for (;;) { + rtnl_lock(); + if (mutex_trylock(&ipvs->sync_mutex)) + break; + rtnl_unlock(); + mutex_lock(&ipvs->sync_mutex); + if (rtnl_trylock()) + break; + mutex_unlock(&ipvs->sync_mutex); + } + if (!ipvs->sync_state) { count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); ipvs->threads_mask = count - 1; @@ -1810,7 +1801,8 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, dev = __dev_get_by_name(ipvs->net, c->mcast_ifn); if (!dev) { pr_err("Unknown mcast interface: %s\n", c->mcast_ifn); - return -ENODEV; + result = -ENODEV; + goto out_early; } hlen = (AF_INET6 == c->mcast_af) ? 
sizeof(struct ipv6hdr) + sizeof(struct udphdr) : @@ -1827,26 +1819,30 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, c->sync_maxlen = mtu - hlen; if (state == IP_VS_STATE_MASTER) { + result = -EEXIST; if (ipvs->ms) - return -EEXIST; + goto out_early; ipvs->mcfg = *c; name = "ipvs-m:%d:%d"; threadfn = sync_thread_master; } else if (state == IP_VS_STATE_BACKUP) { + result = -EEXIST; if (ipvs->backup_threads) - return -EEXIST; + goto out_early; ipvs->bcfg = *c; name = "ipvs-b:%d:%d"; threadfn = sync_thread_backup; } else { - return -EINVAL; + result = -EINVAL; + goto out_early; } if (state == IP_VS_STATE_MASTER) { struct ipvs_master_sync_state *ms; + result = -ENOMEM; ipvs->ms = kcalloc(count, sizeof(ipvs->ms[0]), GFP_KERNEL); if (!ipvs->ms) goto out; @@ -1862,39 +1858,38 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, } else { array = kcalloc(count, sizeof(struct task_struct *), GFP_KERNEL); + result = -ENOMEM; if (!array) goto out; } - tinfo = NULL; for (id = 0; id < count; id++) { - if (state == IP_VS_STATE_MASTER) - sock = make_send_sock(ipvs, id); - else - sock = make_receive_sock(ipvs, id, dev->ifindex); - if (IS_ERR(sock)) { - result = PTR_ERR(sock); - goto outtinfo; - } + result = -ENOMEM; tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); if (!tinfo) - goto outsocket; + goto out; tinfo->ipvs = ipvs; - tinfo->sock = sock; + tinfo->sock = NULL; if (state == IP_VS_STATE_BACKUP) { tinfo->buf = kmalloc(ipvs->bcfg.sync_maxlen, GFP_KERNEL); if (!tinfo->buf) - goto outtinfo; + goto out; } else { tinfo->buf = NULL; } tinfo->id = id; + if (state == IP_VS_STATE_MASTER) + result = make_send_sock(ipvs, id, dev, &tinfo->sock); + else + result = make_receive_sock(ipvs, id, dev, &tinfo->sock); + if (result < 0) + goto out; task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); if (IS_ERR(task)) { result = PTR_ERR(task); - goto outtinfo; + goto out; } tinfo = NULL; if (state == IP_VS_STATE_MASTER) @@ -1911,20 +1906,20 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, ipvs->sync_state |= state; spin_unlock_bh(&ipvs->sync_buff_lock); + mutex_unlock(&ipvs->sync_mutex); + rtnl_unlock(); + /* increase the module use count */ ip_vs_use_count_inc(); return 0; -outsocket: - sock_release(sock); - -outtinfo: - if (tinfo) { - sock_release(tinfo->sock); - kfree(tinfo->buf); - kfree(tinfo); - } +out: + /* We do not need RTNL lock anymore, release it here so that + * sock_release below and in the kthreads can use rtnl_lock + * to leave the mcast group. + */ + rtnl_unlock(); count = id; while (count-- > 0) { if (state == IP_VS_STATE_MASTER) @@ -1932,13 +1927,23 @@ int start_sync_thread(struct netns_ipvs *ipvs, struct ipvs_sync_daemon_cfg *c, else kthread_stop(array[count]); } - kfree(array); - -out: if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { kfree(ipvs->ms); ipvs->ms = NULL; } + mutex_unlock(&ipvs->sync_mutex); + if (tinfo) { + if (tinfo->sock) + sock_release(tinfo->sock); + kfree(tinfo->buf); + kfree(tinfo); + } + kfree(array); + return result; + +out_early: + mutex_unlock(&ipvs->sync_mutex); + rtnl_unlock(); return result; } From 3f1e53abff84cf40b1adb3455d480dd295bf42e8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 4 Apr 2018 21:13:30 +0200 Subject: [PATCH 012/182] netfilter: ebtables: don't attempt to allocate 0-sized compat array Dmitry reports 32bit ebtables on 64bit kernel got broken by a recent change that returns -EINVAL when ruleset has no entries. 
ebtables however only counts user-defined chains, so for the initial table nentries will be 0. Don't try to allocate the compat array in this case, as no user defined rules exist no rule will need 64bit translation. Reported-by: Dmitry Vyukov Fixes: 7d7d7e02111e9 ("netfilter: compat: reject huge allocation requests") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/ebtables.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 032e0fe45940..28a4c3490359 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c @@ -1825,13 +1825,14 @@ static int compat_table_info(const struct ebt_table_info *info, { unsigned int size = info->entries_size; const void *entries = info->entries; - int ret; newinfo->entries_size = size; - - ret = xt_compat_init_offsets(NFPROTO_BRIDGE, info->nentries); - if (ret) - return ret; + if (info->nentries) { + int ret = xt_compat_init_offsets(NFPROTO_BRIDGE, + info->nentries); + if (ret) + return ret; + } return EBT_ENTRY_ITERATE(entries, size, compat_calc_entry, info, entries, newinfo); From c0db1b677e1d584fab5d7ac76a32e1c0157542e0 Mon Sep 17 00:00:00 2001 From: Daniel J Blueman Date: Mon, 2 Apr 2018 15:10:35 +0800 Subject: [PATCH 013/182] drm/vc4: Fix memory leak during BO teardown During BO teardown, an indirect list 'uniform_addr_offsets' wasn't being freed leading to leaking many 128B allocations. Fix the memory leak by releasing it at teardown time. Cc: stable@vger.kernel.org Fixes: 6d45c81d229d ("drm/vc4: Add support for branching in shader validation.") Signed-off-by: Daniel J Blueman Signed-off-by: Eric Anholt Reviewed-by: Eric Anholt Link: https://patchwork.freedesktop.org/patch/msgid/20180402071035.25356-1-daniel@quora.org --- drivers/gpu/drm/vc4/vc4_bo.c | 2 ++ drivers/gpu/drm/vc4/vc4_validate_shaders.c | 1 + 2 files changed, 3 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c index 2decc8e2c79f..add9cc97a3b6 100644 --- a/drivers/gpu/drm/vc4/vc4_bo.c +++ b/drivers/gpu/drm/vc4/vc4_bo.c @@ -195,6 +195,7 @@ static void vc4_bo_destroy(struct vc4_bo *bo) vc4_bo_set_label(obj, -1); if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; @@ -591,6 +592,7 @@ void vc4_free_object(struct drm_gem_object *gem_bo) } if (bo->validated_shader) { + kfree(bo->validated_shader->uniform_addr_offsets); kfree(bo->validated_shader->texture_samples); kfree(bo->validated_shader); bo->validated_shader = NULL; diff --git a/drivers/gpu/drm/vc4/vc4_validate_shaders.c b/drivers/gpu/drm/vc4/vc4_validate_shaders.c index d3f15bf60900..7cf82b071de2 100644 --- a/drivers/gpu/drm/vc4/vc4_validate_shaders.c +++ b/drivers/gpu/drm/vc4/vc4_validate_shaders.c @@ -942,6 +942,7 @@ vc4_validate_shader(struct drm_gem_cma_object *shader_obj) fail: kfree(validation_state.branch_targets); if (validated_shader) { + kfree(validated_shader->uniform_addr_offsets); kfree(validated_shader->texture_samples); kfree(validated_shader); } From 0abf854d7cbbb405e39e0f93d5c1da98dca24bc0 Mon Sep 17 00:00:00 2001 From: Anders Roxell Date: Tue, 10 Apr 2018 14:24:21 +0200 Subject: [PATCH 014/182] selftests: bpf: update .gitignore with missing generated files Signed-off-by: Anders Roxell Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/.gitignore | 3 +++ 1 file changed, 3 insertions(+) 
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore index 9cf83f895d98..5e1ab2f0eb79 100644 --- a/tools/testing/selftests/bpf/.gitignore +++ b/tools/testing/selftests/bpf/.gitignore @@ -12,3 +12,6 @@ test_tcpbpf_user test_verifier_log feature test_libbpf_open +test_sock +test_sock_addr +urandom_read From 3a38bb98d9abdc3856f26b5ed4332803065cd7cf Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Tue, 10 Apr 2018 09:37:32 -0700 Subject: [PATCH 015/182] bpf/tracing: fix a deadlock in perf_event_detach_bpf_prog syzbot reported a possible deadlock in perf_event_detach_bpf_prog. The error details: ====================================================== WARNING: possible circular locking dependency detected 4.16.0-rc7+ #3 Not tainted ------------------------------------------------------ syz-executor7/24531 is trying to acquire lock: (bpf_event_mutex){+.+.}, at: [<000000008a849b07>] perf_event_detach_bpf_prog+0x92/0x3d0 kernel/trace/bpf_trace.c:854 but task is already holding lock: (&mm->mmap_sem){++++}, at: [<0000000038768f87>] vm_mmap_pgoff+0x198/0x280 mm/util.c:353 which lock already depends on the new lock. the existing dependency chain (in reverse order) is: -> #1 (&mm->mmap_sem){++++}: __might_fault+0x13a/0x1d0 mm/memory.c:4571 _copy_to_user+0x2c/0xc0 lib/usercopy.c:25 copy_to_user include/linux/uaccess.h:155 [inline] bpf_prog_array_copy_info+0xf2/0x1c0 kernel/bpf/core.c:1694 perf_event_query_prog_array+0x1c7/0x2c0 kernel/trace/bpf_trace.c:891 _perf_ioctl kernel/events/core.c:4750 [inline] perf_ioctl+0x3e1/0x1480 kernel/events/core.c:4770 vfs_ioctl fs/ioctl.c:46 [inline] do_vfs_ioctl+0x1b1/0x1520 fs/ioctl.c:686 SYSC_ioctl fs/ioctl.c:701 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:692 do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 -> #0 (bpf_event_mutex){+.+.}: lock_acquire+0x1d5/0x580 kernel/locking/lockdep.c:3920 __mutex_lock_common kernel/locking/mutex.c:756 [inline] __mutex_lock+0x16f/0x1a80 kernel/locking/mutex.c:893 mutex_lock_nested+0x16/0x20 kernel/locking/mutex.c:908 perf_event_detach_bpf_prog+0x92/0x3d0 kernel/trace/bpf_trace.c:854 perf_event_free_bpf_prog kernel/events/core.c:8147 [inline] _free_event+0xbdb/0x10f0 kernel/events/core.c:4116 put_event+0x24/0x30 kernel/events/core.c:4204 perf_mmap_close+0x60d/0x1010 kernel/events/core.c:5172 remove_vma+0xb4/0x1b0 mm/mmap.c:172 remove_vma_list mm/mmap.c:2490 [inline] do_munmap+0x82a/0xdf0 mm/mmap.c:2731 mmap_region+0x59e/0x15a0 mm/mmap.c:1646 do_mmap+0x6c0/0xe00 mm/mmap.c:1483 do_mmap_pgoff include/linux/mm.h:2223 [inline] vm_mmap_pgoff+0x1de/0x280 mm/util.c:355 SYSC_mmap_pgoff mm/mmap.c:1533 [inline] SyS_mmap_pgoff+0x462/0x5f0 mm/mmap.c:1491 SYSC_mmap arch/x86/kernel/sys_x86_64.c:100 [inline] SyS_mmap+0x16/0x20 arch/x86/kernel/sys_x86_64.c:91 do_syscall_64+0x281/0x940 arch/x86/entry/common.c:287 entry_SYSCALL_64_after_hwframe+0x42/0xb7 other info that might help us debug this: Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&mm->mmap_sem); lock(bpf_event_mutex); lock(&mm->mmap_sem); lock(bpf_event_mutex); *** DEADLOCK *** ====================================================== The bug is introduced by Commit f371b304f12e ("bpf/tracing: allow user space to query prog array on the same tp") where copy_to_user, which requires mm->mmap_sem, is called inside bpf_event_mutex lock. At the same time, during perf_event file descriptor close, mm->mmap_sem is held first and then subsequent perf_event_detach_bpf_prog needs bpf_event_mutex lock. 
Such a senario caused a deadlock. As suggested by Daniel, moving copy_to_user out of the bpf_event_mutex lock should fix the problem. Fixes: f371b304f12e ("bpf/tracing: allow user space to query prog array on the same tp") Reported-by: syzbot+dc5ca0e4c9bfafaf2bae@syzkaller.appspotmail.com Signed-off-by: Yonghong Song Signed-off-by: Daniel Borkmann --- include/linux/bpf.h | 4 ++-- kernel/bpf/core.c | 45 ++++++++++++++++++++++++++-------------- kernel/trace/bpf_trace.c | 25 ++++++++++++++++++---- 3 files changed, 52 insertions(+), 22 deletions(-) diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 95a7abd0ee92..486e65e3db26 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -339,8 +339,8 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, struct bpf_prog *old_prog); int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, - __u32 __user *prog_ids, u32 request_cnt, - __u32 __user *prog_cnt); + u32 *prog_ids, u32 request_cnt, + u32 *prog_cnt); int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, struct bpf_prog *exclude_prog, struct bpf_prog *include_prog, diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c index d315b393abdd..ba03ec39efb3 100644 --- a/kernel/bpf/core.c +++ b/kernel/bpf/core.c @@ -1572,13 +1572,32 @@ int bpf_prog_array_length(struct bpf_prog_array __rcu *progs) return cnt; } +static bool bpf_prog_array_copy_core(struct bpf_prog **prog, + u32 *prog_ids, + u32 request_cnt) +{ + int i = 0; + + for (; *prog; prog++) { + if (*prog == &dummy_bpf_prog.prog) + continue; + prog_ids[i] = (*prog)->aux->id; + if (++i == request_cnt) { + prog++; + break; + } + } + + return !!(*prog); +} + int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, __u32 __user *prog_ids, u32 cnt) { struct bpf_prog **prog; unsigned long err = 0; - u32 i = 0, *ids; bool nospc; + u32 *ids; /* users of this function are doing: * cnt = bpf_prog_array_length(); @@ -1595,16 +1614,7 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, return -ENOMEM; rcu_read_lock(); prog = rcu_dereference(progs)->progs; - for (; *prog; prog++) { - if (*prog == &dummy_bpf_prog.prog) - continue; - ids[i] = (*prog)->aux->id; - if (++i == cnt) { - prog++; - break; - } - } - nospc = !!(*prog); + nospc = bpf_prog_array_copy_core(prog, ids, cnt); rcu_read_unlock(); err = copy_to_user(prog_ids, ids, cnt * sizeof(u32)); kfree(ids); @@ -1683,22 +1693,25 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, } int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, - __u32 __user *prog_ids, u32 request_cnt, - __u32 __user *prog_cnt) + u32 *prog_ids, u32 request_cnt, + u32 *prog_cnt) { + struct bpf_prog **prog; u32 cnt = 0; if (array) cnt = bpf_prog_array_length(array); - if (copy_to_user(prog_cnt, &cnt, sizeof(cnt))) - return -EFAULT; + *prog_cnt = cnt; /* return early if user requested only program count or nothing to copy */ if (!request_cnt || !cnt) return 0; - return bpf_prog_array_copy_to_user(array, prog_ids, request_cnt); + /* this function is called under trace/bpf_trace.c: bpf_event_mutex */ + prog = rcu_dereference_check(array, 1)->progs; + return bpf_prog_array_copy_core(prog, prog_ids, request_cnt) ? 
-ENOSPC + : 0; } static void bpf_prog_free_deferred(struct work_struct *work) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index d88e96d4e12c..56ba0f2a01db 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -977,6 +977,7 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info) { struct perf_event_query_bpf __user *uquery = info; struct perf_event_query_bpf query = {}; + u32 *ids, prog_cnt, ids_len; int ret; if (!capable(CAP_SYS_ADMIN)) @@ -985,16 +986,32 @@ int perf_event_query_prog_array(struct perf_event *event, void __user *info) return -EINVAL; if (copy_from_user(&query, uquery, sizeof(query))) return -EFAULT; - if (query.ids_len > BPF_TRACE_MAX_PROGS) + + ids_len = query.ids_len; + if (ids_len > BPF_TRACE_MAX_PROGS) return -E2BIG; + ids = kcalloc(ids_len, sizeof(u32), GFP_USER | __GFP_NOWARN); + if (!ids) + return -ENOMEM; + /* + * The above kcalloc returns ZERO_SIZE_PTR when ids_len = 0, which + * is required when user only wants to check for uquery->prog_cnt. + * There is no need to check for it since the case is handled + * gracefully in bpf_prog_array_copy_info. + */ mutex_lock(&bpf_event_mutex); ret = bpf_prog_array_copy_info(event->tp_event->prog_array, - uquery->ids, - query.ids_len, - &uquery->prog_cnt); + ids, + ids_len, + &prog_cnt); mutex_unlock(&bpf_event_mutex); + if (copy_to_user(&uquery->prog_cnt, &prog_cnt, sizeof(prog_cnt)) || + copy_to_user(uquery->ids, ids, ids_len * sizeof(u32))) + ret = -EFAULT; + + kfree(ids); return ret; } From 1f5781725dcbb026438e77091c91a94f678c3522 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Tue, 10 Apr 2018 15:15:16 +0900 Subject: [PATCH 016/182] commoncap: Handle memory allocation failure. syzbot is reporting NULL pointer dereference at xattr_getsecurity() [1], for cap_inode_getsecurity() is returning sizeof(struct vfs_cap_data) when memory allocation failed. Return -ENOMEM if memory allocation failed. [1] https://syzkaller.appspot.com/bug?id=a55ba438506fe68649a5f50d2d82d56b365e0107 Signed-off-by: Tetsuo Handa Fixes: 8db6c34f1dbc8e06 ("Introduce v3 namespaced file capabilities") Reported-by: syzbot Cc: stable # 4.14+ Acked-by: Serge E. Hallyn Acked-by: James Morris Signed-off-by: Eric W. Biederman --- security/commoncap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/security/commoncap.c b/security/commoncap.c index 48620c93d697..1ce701fcb3f3 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -449,6 +449,8 @@ int cap_inode_getsecurity(struct inode *inode, const char *name, void **buffer, magic |= VFS_CAP_FLAGS_EFFECTIVE; memcpy(&cap->data, &nscap->data, sizeof(__le32) * 2 * VFS_CAP_U32); cap->magic_etc = cpu_to_le32(magic); + } else { + size = -ENOMEM; } } kfree(tmpbuf); From cf43ae63c024971e6df94665e829c01c22202a19 Mon Sep 17 00:00:00 2001 From: Jack Ma Date: Fri, 6 Apr 2018 15:45:16 +1200 Subject: [PATCH 017/182] netfilter: xt_connmark: Add bit mapping for bit-shift operation. With the addition of bit-shift operations, we are able to shift ct/skbmark based on user requirements. However, this change might also cause the most left/right hand- side mark to be accidentially lost during shift operations. This patch adds the ability to 'grep' certain bits based on ctmask or nfmask out of the original mark. Then, apply shift operations to achieve a new mapping between ctmark and skb->mark. For example: If someone would like save the fourth F bits of ctmark 0xFFF(F)000F into the seventh hexadecimal (0) skb->mark 0xABC000(0)E. 
new_targetmark = (ctmark & ctmask) >> 12; (new) skb->mark = (skb->mark &~nfmask) ^ new_targetmark; This will preserve the other bits that are not related to this operation. Fixes: 472a73e00757 ("netfilter: xt_conntrack: Support bit-shifting for CONNMARK & MARK targets.") Reviewed-by: Florian Westphal Signed-off-by: Jack Ma Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_connmark.c | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 773da82190dc..4b424e6caf3e 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -41,6 +41,7 @@ connmark_tg_shift(struct sk_buff *skb, u8 shift_bits, u8 shift_dir) { enum ip_conntrack_info ctinfo; + u_int32_t new_targetmark; struct nf_conn *ct; u_int32_t newmark; @@ -61,24 +62,26 @@ connmark_tg_shift(struct sk_buff *skb, } break; case XT_CONNMARK_SAVE: - newmark = (ct->mark & ~info->ctmask) ^ - (skb->mark & info->nfmask); + new_targetmark = (skb->mark & info->nfmask); if (shift_dir == D_SHIFT_RIGHT) - newmark >>= shift_bits; + new_targetmark >>= shift_bits; else - newmark <<= shift_bits; + new_targetmark <<= shift_bits; + newmark = (ct->mark & ~info->ctmask) ^ + new_targetmark; if (ct->mark != newmark) { ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, ct); } break; case XT_CONNMARK_RESTORE: - newmark = (skb->mark & ~info->nfmask) ^ - (ct->mark & info->ctmask); + new_targetmark = (ct->mark & info->ctmask); if (shift_dir == D_SHIFT_RIGHT) - newmark >>= shift_bits; + new_targetmark >>= shift_bits; else - newmark <<= shift_bits; + new_targetmark <<= shift_bits; + newmark = (skb->mark & ~info->nfmask) ^ + new_targetmark; skb->mark = newmark; break; } From 7ecb46e9ee9af18e304eb9e7d6804c59a408e846 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fabi=C3=A1n=20Inostroza?= Date: Thu, 12 Apr 2018 00:37:35 -0300 Subject: [PATCH 018/182] ALSA: line6: Use correct endpoint type for midi output MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sending MIDI messages to a PODxt through the USB connection shows "usb_submit_urb failed" in dmesg and the message is not received by the POD. The error is caused because in the funcion send_midi_async() in midi.c there is a call to usb_sndbulkpipe() for endpoint 3 OUT, but the PODxt USB descriptor shows that this endpoint it's an interrupt endpoint. Patch tested with PODxt only. 
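As a minimal sketch (not from the driver itself), this is how the pipe macro could be chosen from what the endpoint descriptor actually advertises, using the standard USB helpers; 'epd' and 'ep' here are hypothetical handles for the PODxt MIDI OUT endpoint descriptor and its endpoint number:

#include <linux/usb.h>

/* Sketch only: pick the OUT pipe type from the descriptor instead of
 * assuming bulk; the PODxt advertises an interrupt endpoint here. */
static unsigned int midi_out_pipe(struct usb_device *dev, int ep,
                                  const struct usb_endpoint_descriptor *epd)
{
        if (usb_endpoint_xfer_int(epd))
                return usb_sndintpipe(dev, ep);
        return usb_sndbulkpipe(dev, ep);
}

usb_fill_int_urb() additionally needs the endpoint's polling interval, which the driver already carries as line6->interval in the hunk below.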
[ The bug has been present from the very beginning in the staging driver time, but Fixes below points to the commit moving to sound/ directory so that the fix can be cleanly applied -- tiwai ] Fixes: 61864d844c29 ("ALSA: move line6 usb driver into sound/usb") Signed-off-by: Fabián Inostroza Cc: Signed-off-by: Takashi Iwai --- sound/usb/line6/midi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/usb/line6/midi.c b/sound/usb/line6/midi.c index 6d7cde56a355..e2cf55c53ea8 100644 --- a/sound/usb/line6/midi.c +++ b/sound/usb/line6/midi.c @@ -125,7 +125,7 @@ static int send_midi_async(struct usb_line6 *line6, unsigned char *data, } usb_fill_int_urb(urb, line6->usbdev, - usb_sndbulkpipe(line6->usbdev, + usb_sndintpipe(line6->usbdev, line6->properties->ep_ctrl_w), transfer_buffer, length, midi_sent, line6, line6->interval); From 9dc9a95f03a69ab926d9ff1986ab2087f34a5dce Mon Sep 17 00:00:00 2001 From: Alexey Budankov Date: Tue, 3 Apr 2018 21:18:33 +0300 Subject: [PATCH 019/182] perf stat: Enable 1ms interval for printing event counters values Currently print count interval for performance counters values is limited by 10ms so reading the values at frequencies higher than 100Hz is restricted by the tool. This change makes perf stat -I possible on frequencies up to 1KHz and, to some extent, makes perf stat -I to be on-par with perf record sampling profiling. When running perf stat -I for monitoring e.g. PCIe uncore counters and at the same time profiling some I/O workload by perf record e.g. for cpu-cycles and context switches, it is then possible to observe consolidated CPU/OS/IO(Uncore) performance picture for that workload. Tool overhead warning printed when specifying -v option can be missed due to screen scrolling in case you have output to the console so message is moved into help available by running perf stat -h. Signed-off-by: Alexey Budankov Acked-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/b842ad6a-d606-32e4-afe5-974071b5198e@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-stat.txt | 2 +- tools/perf/builtin-stat.c | 14 ++------------ 2 files changed, 3 insertions(+), 13 deletions(-) diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt index f15b306be183..e6c3b4e555c2 100644 --- a/tools/perf/Documentation/perf-stat.txt +++ b/tools/perf/Documentation/perf-stat.txt @@ -153,7 +153,7 @@ perf stat --repeat 10 --null --sync --pre 'make -s O=defconfig-build/clean' -- m -I msecs:: --interval-print msecs:: -Print count deltas every N milliseconds (minimum: 10ms) +Print count deltas every N milliseconds (minimum: 1ms) The overhead percentage could be high in some cases, for instance with small, sub 100ms intervals. Use with caution. 
example: 'perf stat -I 1000 -e cycles -a sleep 5' diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index f5c454855908..147a27e8c937 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c @@ -1943,7 +1943,8 @@ static const struct option stat_options[] = { OPT_STRING(0, "post", &post_cmd, "command", "command to run after to the measured command"), OPT_UINTEGER('I', "interval-print", &stat_config.interval, - "print counts at regular interval in ms (>= 10)"), + "print counts at regular interval in ms " + "(overhead is possible for values <= 100ms)"), OPT_INTEGER(0, "interval-count", &stat_config.times, "print counts for fixed number of times"), OPT_UINTEGER(0, "timeout", &stat_config.timeout, @@ -2923,17 +2924,6 @@ int cmd_stat(int argc, const char **argv) } } - if (interval && interval < 100) { - if (interval < 10) { - pr_err("print interval must be >= 10ms\n"); - parse_options_usage(stat_usage, stat_options, "I", 1); - goto out; - } else - pr_warning("print interval < 100ms. " - "The overhead percentage could be high in some cases. " - "Please proceed with caution.\n"); - } - if (stat_config.times && interval) interval_count = true; else if (stat_config.times && !interval) { From 4d3b57da1593c66835d8e3a757e4751b35493fb8 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 4 Apr 2018 17:34:45 +0100 Subject: [PATCH 020/182] tools headers: Restore READ_ONCE() C++ compatibility Our userspace defines READ_ONCE() in a way that clang doesn't like, as we have an anonymous union in which neither field is initialized. WRITE_ONCE() is fine since it initializes the __val field. For READ_ONCE() we can keep clang and GCC happy with a dummy initialization of the __c field, so let's do that. At the same time, let's split READ_ONCE() and WRITE_ONCE() over several lines for legibility, as we do in the in-kernel . Reported-by: Li Zhijian Reported-by: Sandipan Das Tested-by: Sandipan Das Signed-off-by: Mark Rutland Fixes: 6aa7de059173a986 ("locking/atomics: COCCINELLE/treewide: Convert trivial ACCESS_ONCE() patterns to READ_ONCE()/WRITE_ONCE()") Link: http://lkml.kernel.org/r/20180404163445.16492-1-mark.rutland@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/linux/compiler.h | 20 +++++++++++++++----- 1 file changed, 15 insertions(+), 5 deletions(-) diff --git a/tools/include/linux/compiler.h b/tools/include/linux/compiler.h index 04e32f965ad7..1827c2f973f9 100644 --- a/tools/include/linux/compiler.h +++ b/tools/include/linux/compiler.h @@ -151,11 +151,21 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s * required ordering. 
*/ -#define READ_ONCE(x) \ - ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) - -#define WRITE_ONCE(x, val) \ - ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; }) +#define READ_ONCE(x) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__c = { 0 } }; \ + __read_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) + +#define WRITE_ONCE(x, val) \ +({ \ + union { typeof(x) __val; char __c[1]; } __u = \ + { .__val = (val) }; \ + __write_once_size(&(x), __u.__c, sizeof(x)); \ + __u.__val; \ +}) #ifndef __fallthrough From af72cfb80af5e4cafd8e0b58ac54f222c913aa1b Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Tue, 10 Apr 2018 19:16:24 -0500 Subject: [PATCH 021/182] perf tests: Run dwarf unwind test on arm32 Enable the unwind test on arm32: $ perf test unwind 58: DWARF unwind : Ok Signed-off-by: Kim Phillips Cc: Alexander Shishkin Cc: Brian Robbins Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20180410191624.a3a468670dd4548c66d3d094@arm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/arm/include/arch-tests.h | 12 ++++++++++++ tools/perf/arch/arm/tests/Build | 2 ++ tools/perf/arch/arm/tests/arch-tests.c | 16 ++++++++++++++++ 3 files changed, 30 insertions(+) create mode 100644 tools/perf/arch/arm/include/arch-tests.h create mode 100644 tools/perf/arch/arm/tests/arch-tests.c diff --git a/tools/perf/arch/arm/include/arch-tests.h b/tools/perf/arch/arm/include/arch-tests.h new file mode 100644 index 000000000000..90ec4c8cb880 --- /dev/null +++ b/tools/perf/arch/arm/include/arch-tests.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef ARCH_TESTS_H +#define ARCH_TESTS_H + +#ifdef HAVE_DWARF_UNWIND_SUPPORT +struct thread; +struct perf_sample; +#endif + +extern struct test arch_tests[]; + +#endif diff --git a/tools/perf/arch/arm/tests/Build b/tools/perf/arch/arm/tests/Build index b30eff9bcc83..883c57ff0c08 100644 --- a/tools/perf/arch/arm/tests/Build +++ b/tools/perf/arch/arm/tests/Build @@ -1,2 +1,4 @@ libperf-y += regs_load.o libperf-y += dwarf-unwind.o + +libperf-y += arch-tests.o diff --git a/tools/perf/arch/arm/tests/arch-tests.c b/tools/perf/arch/arm/tests/arch-tests.c new file mode 100644 index 000000000000..5b1543c98022 --- /dev/null +++ b/tools/perf/arch/arm/tests/arch-tests.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +#include +#include "tests/tests.h" +#include "arch-tests.h" + +struct test arch_tests[] = { +#ifdef HAVE_DWARF_UNWIND_SUPPORT + { + .desc = "DWARF unwind", + .func = test__dwarf_unwind, + }, +#endif + { + .func = NULL, + }, +}; From 592c10e217f3edb35c7e0deba161fef69ad1a336 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 11 Apr 2018 10:30:03 -0300 Subject: [PATCH 022/182] perf annotate: Allow showing offsets in more than just jump targets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Jesper wanted to see offsets at callq sites when doing some performance investigation related to retpolines, so save him some time by providing an 'struct annotation_options' to control where offsets should appear: just on jump targets? That + call instructions? All? 
This puts in place the logic to show the offsets, now we need to wire this up in the TUI browser (next patch) and on the 'perf annotate --stdio2" interface, where we need a more general mechanism to setup the 'annotation_options' struct from the command line. Suggested-by: Jesper Dangaard Brouer Cc: Adrian Hunter Cc: Alexei Starovoitov Cc: Andi Kleen Cc: Daniel Borkmann Cc: David Ahern Cc: Jin Yao Cc: Jiri Olsa Cc: Linus Torvalds Cc: Martin Liška Cc: Namhyung Kim Cc: Ravi Bangoria Cc: Thomas Richter Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-m3jc9c3swobye9tj08gnh5i7@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/annotate.c | 11 +++++++++-- tools/perf/util/annotate.h | 9 +++++++++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index fbad8dfbb186..5edc565d86c4 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -46,6 +46,7 @@ struct annotation_options annotation__default_options = { .use_offset = true, .jump_arrows = true, + .offset_level = ANNOTATION__OFFSET_JUMP_TARGETS, }; const char *disassembler_style; @@ -2512,7 +2513,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati if (!notes->options->use_offset) { printed = scnprintf(bf, sizeof(bf), "%" PRIx64 ": ", addr); } else { - if (al->jump_sources) { + if (al->jump_sources && + notes->options->offset_level >= ANNOTATION__OFFSET_JUMP_TARGETS) { if (notes->options->show_nr_jumps) { int prev; printed = scnprintf(bf, sizeof(bf), "%*d ", @@ -2523,9 +2525,14 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati obj__printf(obj, bf); obj__set_color(obj, prev); } - +print_addr: printed = scnprintf(bf, sizeof(bf), "%*" PRIx64 ": ", notes->widths.target, addr); + } else if (ins__is_call(&disasm_line(al)->ins) && + notes->options->offset_level >= ANNOTATION__OFFSET_CALL) { + goto print_addr; + } else if (notes->options->offset_level == ANNOTATION__MAX_OFFSET_LEVEL) { + goto print_addr; } else { printed = scnprintf(bf, sizeof(bf), "%-*s ", notes->widths.addr, " "); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index db8d09bea07e..f28a9e43421d 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -70,8 +70,17 @@ struct annotation_options { show_nr_jumps, show_nr_samples, show_total_period; + u8 offset_level; }; +enum { + ANNOTATION__OFFSET_JUMP_TARGETS = 1, + ANNOTATION__OFFSET_CALL, + ANNOTATION__MAX_OFFSET_LEVEL, +}; + +#define ANNOTATION__MIN_OFFSET_LEVEL ANNOTATION__OFFSET_JUMP_TARGETS + extern struct annotation_options annotation__default_options; struct annotation; From 51f39603b5f260c73635f4d06d390476b32df6a5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 11 Apr 2018 10:41:23 -0300 Subject: [PATCH 023/182] perf annotate browser: Allow showing offsets in more than just jump targets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Jesper wanted to see offsets at callq sites when doing some performance investigation related to retpolines, so save him some time by providing a 'O' hotkey to allow showing offsets from function start at call instructions or in all instructions, just go on pressing 'O' till the offsets you need appear. 
Example: Starts with: Samples: 64 of event 'cycles:ppp', 100000 Hz, Event count (approx.): 318963 ixgbe_read_reg /proc/kcore Percent│ ↑ je 2a │ ┌──cmp $0xffffffff,%r13d │ ├──je d0 │ │ mov $0x53e3,%edi │ │→ callq __const_udelay │ │ sub $0x1,%r15d │ │↑ jne 83 │ │ mov 0x8(%rbp),%rax │ │ testb $0x20,0x1799(%rax) │ │↑ je 2a │ │ mov 0x200(%rax),%rdi │ │ mov %r13d,%edx │ │ mov $0xffffffffc02595d8,%rsi │ │→ callq netdev_warn │ │↑ jmpq 2a │d0:└─→mov 0x8(%rbp),%rsi │ mov %rbp,%rdi │ mov %eax,0x4(%rsp) │ → callq ixgbe_remove_adapter.isra.77 │ mov 0x4(%rsp),%eax Press 'h' for help on key bindings ============================================================================ Pess 'O': Samples: 64 of event 'cycles:ppp', 100000 Hz, Event count (approx.): 318963 ixgbe_read_reg /proc/kcore Percent│ ↑ je 2a │ ┌──cmp $0xffffffff,%r13d │ ├──je d0 │ │ mov $0x53e3,%edi │99:│→ callq __const_udelay │ │ sub $0x1,%r15d │ │↑ jne 83 │ │ mov 0x8(%rbp),%rax │ │ testb $0x20,0x1799(%rax) │ │↑ je 2a │ │ mov 0x200(%rax),%rdi │ │ mov %r13d,%edx │ │ mov $0xffffffffc02595d8,%rsi │c6:│→ callq netdev_warn │ │↑ jmpq 2a │d0:└─→mov 0x8(%rbp),%rsi │ mov %rbp,%rdi │ mov %eax,0x4(%rsp) │db: → callq ixgbe_remove_adapter.isra.77 │ mov 0x4(%rsp),%eax Press 'h' for help on key bindings ============================================================================ Press 'O' again: Samples: 64 of event 'cycles:ppp', 100000 Hz, Event count (approx.): 318963 ixgbe_read_reg /proc/kcore Percent│8c: ↑ je 2a │8e:┌──cmp $0xffffffff,%r13d │92:├──je d0 │94:│ mov $0x53e3,%edi │99:│→ callq __const_udelay │9e:│ sub $0x1,%r15d │a2:│↑ jne 83 │a4:│ mov 0x8(%rbp),%rax │a8:│ testb $0x20,0x1799(%rax) │af:│↑ je 2a │b5:│ mov 0x200(%rax),%rdi │bc:│ mov %r13d,%edx │bf:│ mov $0xffffffffc02595d8,%rsi │c6:│→ callq netdev_warn │cb:│↑ jmpq 2a │d0:└─→mov 0x8(%rbp),%rsi │d4: mov %rbp,%rdi │d7: mov %eax,0x4(%rsp) │db: → callq ixgbe_remove_adapter.isra.77 │e0: mov 0x4(%rsp),%eax Press 'h' for help on key bindings ============================================================================ Press 'O' again and it will show just jump target offsets. 
Suggested-by: Jesper Dangaard Brouer Cc: Adrian Hunter Cc: Alexei Starovoitov Cc: Andi Kleen Cc: Daniel Borkmann Cc: David Ahern Cc: Jin Yao Cc: Jiri Olsa Cc: Linus Torvalds Cc: Martin Liška Cc: Namhyung Kim Cc: Ravi Bangoria Cc: Thomas Richter Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-upp6pfdetwlsx18ec2uf1od4@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/annotate.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c index 12c099a87f8b..3781d74088a7 100644 --- a/tools/perf/ui/browsers/annotate.c +++ b/tools/perf/ui/browsers/annotate.c @@ -692,6 +692,7 @@ static int annotate_browser__run(struct annotate_browser *browser, "J Toggle showing number of jump sources on targets\n" "n Search next string\n" "o Toggle disassembler output/simplified view\n" + "O Bump offset level (jump targets -> +call -> all -> cycle thru)\n" "s Toggle source code view\n" "t Circulate percent, total period, samples view\n" "/ Search string\n" @@ -719,6 +720,10 @@ static int annotate_browser__run(struct annotate_browser *browser, notes->options->use_offset = !notes->options->use_offset; annotation__update_column_widths(notes); continue; + case 'O': + if (++notes->options->offset_level > ANNOTATION__MAX_OFFSET_LEVEL) + notes->options->offset_level = ANNOTATION__MIN_OFFSET_LEVEL; + continue; case 'j': notes->options->jump_arrows = !notes->options->jump_arrows; continue; From e14b733c5dc62f574b4dbc045b2cc52b03d83d4c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 11 Apr 2018 12:08:53 -0300 Subject: [PATCH 024/182] perf jvmti: Give hints about package names needed to build Give as examples of package names to install to have this built for fedora and debian, to help the user a bit. The part from 'e.g.:' onwards: No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel Cc: Andi Kleen Cc: David Ahern Cc: Jiri Olsa Cc: Namhyung Kim Cc: Stephane Eranian Cc: William Cohen Link: https://lkml.kernel.org/n/tip-edbi4r2pvzn7no6ebxbtczng@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index c7abd83a8e19..6b307e97dc57 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -847,7 +847,7 @@ ifndef NO_JVMTI ifeq ($(feature-jvmti), 1) $(call detected_var,JDIR) else - $(warning No openjdk development package found, please install JDK package) + $(warning No openjdk development package found, please install JDK package, e.g. openjdk-8-jdk, java-1.8.0-openjdk-devel) NO_JVMTI := 1 endif endif From c13009c1ef3a94cfea212c86bbb94c8361e5de0c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 11 Apr 2018 17:57:32 -0300 Subject: [PATCH 025/182] perf tests bpf: Remove unused ptrace.h include from LLVM test The bpf-script-test-kbuild.c script, used in one of the LLVM subtests, includes ptrace.h unnecessarily, and that ends up making it include a header that uses asm(_ASM_SP), a feature that is not supported by clang <= 4.0, breaking that 'perf test' entry. This ended up leading to the ca26cffa4e4a ("x86/asm: Allow again using asm.h when building for the 'bpf' clang target"), adding an ifndef __BPF__ to the arch/x86/include/asm/asm.h file. 
Newer clang versions accept that asm(_ASM_SP) construct, so just remove the ptrace.h include, which paves the way for reverting ca26cffa4e4a ("x86/asm: Allow again using asm.h when building for the 'bpf' clang target"). Suggested-by: Yonghong Song Acked-by: Yonghong Song Link: https://lkml.kernel.org/r/613f0a0d-c433-8f4d-dcc1-c9889deae39e@fb.com Cc: Adrian Hunter Cc: Alexander Potapenko Cc: Alexei Starovoitov Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Daniel Borkmann Cc: David Ahern Cc: Dmitriy Vyukov Cc: Jiri Olsa Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Matthias Kaehlcke Cc: Miguel Bernal Marin Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-clbcnzbakdp18ibme4wt43ib@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/bpf-script-test-kbuild.c | 1 - 1 file changed, 1 deletion(-) diff --git a/tools/perf/tests/bpf-script-test-kbuild.c b/tools/perf/tests/bpf-script-test-kbuild.c index 3626924740d8..ff3ec8337f0a 100644 --- a/tools/perf/tests/bpf-script-test-kbuild.c +++ b/tools/perf/tests/bpf-script-test-kbuild.c @@ -9,7 +9,6 @@ #define SEC(NAME) __attribute__((section(NAME), used)) #include -#include SEC("func=vfs_llseek") int bpf_func__vfs_llseek(void *ctx) From fd97d39b0aa49a4beb429aec344604c1b689f089 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 11 Apr 2018 18:03:33 -0300 Subject: [PATCH 026/182] Revert "x86/asm: Allow again using asm.h when building for the 'bpf' clang target" This reverts commit ca26cffa4e4aaeb09bb9e308f95c7835cb149248. Newer clang versions accept that asm(_ASM_SP) construct, and now that the bpf-script-test-kbuild.c script, used in one of the 'perf test LLVM' subtests doesn't include ptrace.h, which ended up including arch/x86/include/asm/asm.h, we can revert this patch. Suggested-by: Yonghong Song Link: https://lkml.kernel.org/r/613f0a0d-c433-8f4d-dcc1-c9889deae39e@fb.com Acked-by: Yonghong Song Cc: Adrian Hunter Cc: Alexander Potapenko Cc: Alexei Starovoitov Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Daniel Borkmann Cc: David Ahern Cc: Dmitriy Vyukov Cc: Jiri Olsa Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Matthias Kaehlcke Cc: Miguel Bernal Marin Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-nqozcv8loq40tkqpfw997993@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- arch/x86/include/asm/asm.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h index 386a6900e206..219faaec51df 100644 --- a/arch/x86/include/asm/asm.h +++ b/arch/x86/include/asm/asm.h @@ -136,7 +136,6 @@ #endif #ifndef __ASSEMBLY__ -#ifndef __BPF__ /* * This output constraint should be used for any inline asm which has a "call" * instruction. Otherwise the asm may be inserted before the frame pointer @@ -146,6 +145,5 @@ register unsigned long current_stack_pointer asm(_ASM_SP); #define ASM_CALL_CONSTRAINT "+r" (current_stack_pointer) #endif -#endif #endif /* _ASM_X86_ASM_H */ From 90ce61b91903f9c357cbceced45d41642f2aa812 Mon Sep 17 00:00:00 2001 From: Jin Yao Date: Mon, 9 Apr 2018 18:26:47 +0800 Subject: [PATCH 027/182] perf script: Use HAVE_LIBXXX_SUPPORT to replace NO_LIBXXX In Makefile.config, we define the conditional compilation variables HAVE_LIBPERL_SUPPORT and HAVE_LIBPYTHON_SUPPORT. To make the C code more consistent, this patch replaces NO_LIBPERL/NO_LIBPYTHON in C code with HAVE_LIBPERL_SUPPORT/ HAVE_LIBPYTHON_SUPPORT. 
Signed-off-by: Jin Yao Suggested-by: Ingo Molnar Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Kan Liang Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1523269609-28824-2-git-send-email-yao.jin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-script.c | 4 ++-- tools/perf/util/trace-event-scripting.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index 313c42423393..b17edbcd98cc 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -2801,11 +2801,11 @@ int find_scripts(char **scripts_array, char **scripts_path_array) for_each_lang(scripts_path, scripts_dir, lang_dirent) { scnprintf(lang_path, MAXPATHLEN, "%s/%s", scripts_path, lang_dirent->d_name); -#ifdef NO_LIBPERL +#ifndef HAVE_LIBPERL_SUPPORT if (strstr(lang_path, "perl")) continue; #endif -#ifdef NO_LIBPYTHON +#ifndef HAVE_LIBPYTHON_SUPPORT if (strstr(lang_path, "python")) continue; #endif diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index 0ac9077f62a2..b1e5c3a2b8e3 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -98,7 +98,7 @@ static void register_python_scripting(struct scripting_ops *scripting_ops) } } -#ifdef NO_LIBPYTHON +#ifndef HAVE_LIBPYTHON_SUPPORT void setup_python_scripting(void) { register_python_scripting(&python_scripting_unsupported_ops); @@ -161,7 +161,7 @@ static void register_perl_scripting(struct scripting_ops *scripting_ops) } } -#ifdef NO_LIBPERL +#ifndef HAVE_LIBPERL_SUPPORT void setup_perl_scripting(void) { register_perl_scripting(&perl_scripting_unsupported_ops); From 22e9af4e94801bbdf6945e55db64b877be7c71b3 Mon Sep 17 00:00:00 2001 From: Jin Yao Date: Mon, 9 Apr 2018 18:26:48 +0800 Subject: [PATCH 028/182] perf tools: Rename HAVE_SYSCALL_TABLE to HAVE_SYSCALL_TABLE_SUPPORT To be consistent with other HAVE_XXX_SUPPORT uses in Makefile.config, this patch renames HAVE_SYSCALL_TABLE to HAVE_SYSCALL_TABLE_SUPPORT and updates the C code accordingly. Signed-off-by: Jin Yao Suggested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Kan Liang Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1523269609-28824-3-git-send-email-yao.jin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Makefile.config | 2 +- tools/perf/builtin-help.c | 2 +- tools/perf/perf.c | 4 ++-- tools/perf/util/generate-cmdlist.sh | 2 +- tools/perf/util/syscalltbl.c | 6 +++--- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config index 6b307e97dc57..ae7dc46e8f8a 100644 --- a/tools/perf/Makefile.config +++ b/tools/perf/Makefile.config @@ -68,7 +68,7 @@ ifeq ($(NO_PERF_REGS),0) endif ifneq ($(NO_SYSCALL_TABLE),1) - CFLAGS += -DHAVE_SYSCALL_TABLE + CFLAGS += -DHAVE_SYSCALL_TABLE_SUPPORT endif # So far there's only x86 and arm libdw unwind support merged in perf. 
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c index 4aca13f23b9d..1c41b4eaf73c 100644 --- a/tools/perf/builtin-help.c +++ b/tools/perf/builtin-help.c @@ -439,7 +439,7 @@ int cmd_help(int argc, const char **argv) #ifdef HAVE_LIBELF_SUPPORT "probe", #endif -#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) +#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) "trace", #endif NULL }; diff --git a/tools/perf/perf.c b/tools/perf/perf.c index 1659029d03fc..20a08cb32332 100644 --- a/tools/perf/perf.c +++ b/tools/perf/perf.c @@ -73,7 +73,7 @@ static struct cmd_struct commands[] = { { "lock", cmd_lock, 0 }, { "kvm", cmd_kvm, 0 }, { "test", cmd_test, 0 }, -#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) +#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) { "trace", cmd_trace, 0 }, #endif { "inject", cmd_inject, 0 }, @@ -491,7 +491,7 @@ int main(int argc, const char **argv) argv[0] = cmd; } if (strstarts(cmd, "trace")) { -#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE) +#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT) setup_path(); argv[0] = "trace"; return cmd_trace(argc, argv); diff --git a/tools/perf/util/generate-cmdlist.sh b/tools/perf/util/generate-cmdlist.sh index ff17920a5ebc..c3cef36d4176 100755 --- a/tools/perf/util/generate-cmdlist.sh +++ b/tools/perf/util/generate-cmdlist.sh @@ -38,7 +38,7 @@ do done echo "#endif /* HAVE_LIBELF_SUPPORT */" -echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE)" +echo "#if defined(HAVE_LIBAUDIT_SUPPORT) || defined(HAVE_SYSCALL_TABLE_SUPPORT)" sed -n -e 's/^perf-\([^ ]*\)[ ].* audit*/\1/p' command-list.txt | sort | while read cmd diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c index 895122d638dd..0ee7f568d60c 100644 --- a/tools/perf/util/syscalltbl.c +++ b/tools/perf/util/syscalltbl.c @@ -17,7 +17,7 @@ #include #include -#ifdef HAVE_SYSCALL_TABLE +#ifdef HAVE_SYSCALL_TABLE_SUPPORT #include #include "string2.h" #include "util.h" @@ -139,7 +139,7 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); } -#else /* HAVE_SYSCALL_TABLE */ +#else /* HAVE_SYSCALL_TABLE_SUPPORT */ #include @@ -176,4 +176,4 @@ int syscalltbl__strglobmatch_first(struct syscalltbl *tbl, const char *syscall_g { return syscalltbl__strglobmatch_next(tbl, syscall_glob, idx); } -#endif /* HAVE_SYSCALL_TABLE */ +#endif /* HAVE_SYSCALL_TABLE_SUPPORT */ From 8a812bf552d98f6f887f860d3910f201b4a97b26 Mon Sep 17 00:00:00 2001 From: Jin Yao Date: Mon, 9 Apr 2018 18:26:49 +0800 Subject: [PATCH 029/182] perf version: Print status for syscall_table This patch doesn't print "libaudit" line if HAVE_SYSCALL_TABLE_SUPPORT is available and add a line for HAVE_SYSCALL_TABLE_SUPPORT. 
For example, $ ./perf -vv perf version 4.13.rc5.gc2f8af9 dwarf: [ on ] # HAVE_DWARF_SUPPORT dwarf_getlocations: [ on ] # HAVE_DWARF_GETLOCATIONS_SUPPORT glibc: [ on ] # HAVE_GLIBC_SUPPORT gtk2: [ on ] # HAVE_GTK2_SUPPORT syscall_table: [ on ] # HAVE_SYSCALL_TABLE_SUPPORT libbfd: [ on ] # HAVE_LIBBFD_SUPPORT libelf: [ on ] # HAVE_LIBELF_SUPPORT libnuma: [ on ] # HAVE_LIBNUMA_SUPPORT numa_num_possible_cpus: [ on ] # HAVE_LIBNUMA_SUPPORT libperl: [ on ] # HAVE_LIBPERL_SUPPORT libpython: [ on ] # HAVE_LIBPYTHON_SUPPORT libslang: [ on ] # HAVE_SLANG_SUPPORT libcrypto: [ on ] # HAVE_LIBCRYPTO_SUPPORT libunwind: [ on ] # HAVE_LIBUNWIND_SUPPORT libdw-dwarf-unwind: [ on ] # HAVE_DWARF_SUPPORT zlib: [ on ] # HAVE_ZLIB_SUPPORT lzma: [ on ] # HAVE_LZMA_SUPPORT get_cpuid: [ on ] # HAVE_AUXTRACE_SUPPORT bpf: [ on ] # HAVE_LIBBPF_SUPPORT The line "syscall_table: [ on ] # HAVE_SYSCALL_TABLE_SUPPORT" is new created. Signed-off-by: Jin Yao Suggested-by: Arnaldo Carvalho de Melo Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Kan Liang Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1523269609-28824-4-git-send-email-yao.jin@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/builtin-version.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/builtin-version.c b/tools/perf/builtin-version.c index 2abe3910d6b6..50df168be326 100644 --- a/tools/perf/builtin-version.c +++ b/tools/perf/builtin-version.c @@ -60,7 +60,10 @@ static void library_status(void) STATUS(HAVE_DWARF_GETLOCATIONS_SUPPORT, dwarf_getlocations); STATUS(HAVE_GLIBC_SUPPORT, glibc); STATUS(HAVE_GTK2_SUPPORT, gtk2); +#ifndef HAVE_SYSCALL_TABLE_SUPPORT STATUS(HAVE_LIBAUDIT_SUPPORT, libaudit); +#endif + STATUS(HAVE_SYSCALL_TABLE_SUPPORT, syscall_table); STATUS(HAVE_LIBBFD_SUPPORT, libbfd); STATUS(HAVE_LIBELF_SUPPORT, libelf); STATUS(HAVE_LIBNUMA_SUPPORT, libnuma); From e8103e44cebe2ef891bc3a5c6c4b74854846968b Mon Sep 17 00:00:00 2001 From: Takuya Yamamoto Date: Tue, 10 Apr 2018 23:35:39 +0900 Subject: [PATCH 030/182] perf sched: Fix documentation for timehist Fixed a incorrect option and usage to those shown by "perf sched timehist -h", i.e. the default is really --call-graph, which is equivalent to -g. Signed-off-by: Takuya Yamamoto Cc: Peter Zijlstra Link: https://lkml.kernel.org/n/tip-8fzo0dlsi1mku5aqx8brep5s@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-sched.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt index bb33601a823b..63f938b887dd 100644 --- a/tools/perf/Documentation/perf-sched.txt +++ b/tools/perf/Documentation/perf-sched.txt @@ -104,8 +104,8 @@ OPTIONS for 'perf sched timehist' kallsyms pathname -g:: ---no-call-graph:: - Do not display call chains if present. +--call-graph:: + Display call chains if present (default on). --max-stack:: Maximum number of functions to display in backtrace, default 5. From 4b163ca343b45855b03114ef2ab47c454989d55c Mon Sep 17 00:00:00 2001 From: Sandipan Das Date: Thu, 12 Apr 2018 21:51:40 +0530 Subject: [PATCH 031/182] perf tests: Disable breakpoint accounting test for powerpc We disable this test as instruction breakpoints (HW_BREAKPOINT_X) are not available for powerpc. 
Before applying patch: 21: Breakpoint accounting : --- start --- test child forked, pid 3635 failed opening event 0 failed opening event 0 watchpoints count 1, breakpoints count 0, has_ioctl 1, share 0 test child finished with -2 ---- end ---- Breakpoint accounting: Skip After applying patch: 21: Breakpoint accounting : Disabled Signed-off-by: Sandipan Das Cc: Jiri Olsa Cc: Naveen N. Rao Cc: Ravi Bangoria Link: http://lkml.kernel.org/r/20180412162140.2992-1-sandipan@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/builtin-test.c | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c index 625f5a6772af..cac8f8889bc3 100644 --- a/tools/perf/tests/builtin-test.c +++ b/tools/perf/tests/builtin-test.c @@ -118,6 +118,7 @@ static struct test generic_tests[] = { { .desc = "Breakpoint accounting", .func = test__bp_accounting, + .is_supported = test__bp_signal_is_supported, }, { .desc = "Number of exit events of a simple workload", From 4f75f1cbf95e2f0853cd229d042b203931b899af Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Thu, 12 Apr 2018 15:32:46 +0200 Subject: [PATCH 032/182] perf record: Change warning for missing sysfs entry to debug Using perf on 4.16.0 kernel on s390 shows this warning: failed: can't open node sysfs data each time I run command perf record ... for example: [root@s35lp76 perf]# ./perf record -e rB0000 -- sleep 1 [ perf record: Woken up 1 times to write data ] failed: can't open node sysfs data [ perf record: Captured and wrote 0.001 MB perf.data (4 samples) ] [root@s35lp76 perf]# It turns out commit e2091cedd51bf ("perf tools: Add MEM_TOPOLOGY feature to perf data file") tries to open directory named /sys/devices/system/node/ which does not exist on s390. This is the call stack: __cmd_record +---> perf_session__write_header +---> perf_header__adds_write +---> do_write_feat +---> write_mem_topology +---> build_mem_topology prints warning The issue starts in do_write_feat() which unconditionally loops over all features and now includes HEADER_MEM_TOPOLOGY and calls write_mem_topology(). Function record__init_features() at the beginning of __cmd_record() sets all features and then turns off some of them. Fix this by changing the warning to a level 2 debug output statement. So it is only shown when debug level 2 or higher is set. 
Signed-off-by: Thomas Richter Cc: Heiko Carstens Cc: Hendrik Brueckner Cc: Martin Schwidefsky Link: http://lkml.kernel.org/r/20180412133246.92801-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/header.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 121df1683c36..a8bff2178fbc 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -1320,7 +1320,8 @@ static int build_mem_topology(struct memory_node *nodes, u64 size, u64 *cntp) dir = opendir(path); if (!dir) { - pr_warning("failed: can't open node sysfs data\n"); + pr_debug2("%s: could't read %s, does this arch have topology information?\n", + __func__, path); return -1; } From 7b366142a50ad79e48de8e67c5b3e8cfb9fa82dd Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 12 Apr 2018 14:58:24 -0300 Subject: [PATCH 033/182] perf report: Fix switching to another perf.data file MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In the TUI the 's' hotkey can be used to switch to another perf.data file in the current directory, but that got broken in Fixes: b01141f4f59c ("perf annotate: Initialize the priv are in symbol__new()"), that would show this once another file was chosen: ┌─Fatal Error─────────────────────────────────────┐ │Annotation needs to be init before symbol__init()│ │ │ │ │ │Press any key... │ └─────────────────────────────────────────────────┘ Fix it by just silently bailing out if symbol__annotation_init() was already called, just like is done with symbol__init(), i.e. they are done just once at session start, not when switching to a new perf.data file. Cc: Adrian Hunter Cc: Andi Kleen Cc: David Ahern Cc: Jin Yao Cc: Jiri Olsa Cc: Martin Liška Cc: Namhyung Kim Cc: Ravi Bangoria Cc: Thomas Richter Cc: Wang Nan Fixes: b01141f4f59c ("perf annotate: Initialize the priv are in symbol__new()") Link: https://lkml.kernel.org/n/tip-ogppdtpzfax7y1h6gjdv5s6u@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/symbol.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index 62b2dd2253eb..1466814ebada 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -2091,16 +2091,14 @@ static bool symbol__read_kptr_restrict(void) int symbol__annotation_init(void) { + if (symbol_conf.init_annotation) + return 0; + if (symbol_conf.initialized) { pr_err("Annotation needs to be init before symbol__init()\n"); return -1; } - if (symbol_conf.init_annotation) { - pr_warning("Annotation being initialized multiple times\n"); - return 0; - } - symbol_conf.priv_size += sizeof(struct annotation); symbol_conf.init_annotation = true; return 0; From 43c4023152a9c5742948ac919e58ade127fa4e2e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 12 Apr 2018 15:23:02 -0300 Subject: [PATCH 034/182] perf annotate: Allow setting the offset level in .perfconfig MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The default is 1 (jump_target): # perf annotate --ignore-vmlinux --stdio2 _raw_spin_lock_irqsave Samples: 3K of event 'cycles:ppp', 3000 Hz, Event count (approx.): 2766398574 _raw_spin_lock_irqsave() /proc/kcore 0.26 nop 4.61 push %rbx 19.33 pushfq 7.97 pop %rax 0.32 nop 0.06 mov %rax,%rbx 14.63 cli 0.06 nop xor %eax,%eax mov $0x1,%edx 49.94 lock cmpxchg %edx,(%rdi) 0.16 test %eax,%eax ↓ jne 2b 2.66 mov %rbx,%rax pop %rbx ← 
retq 2b: mov %eax,%esi → callq *ffffffffb30eaed0 mov %rbx,%rax pop %rbx ← retq # But one can ask for showing offsets for call instructions by setting this: # perf annotate --ignore-vmlinux --stdio2 _raw_spin_lock_irqsave Samples: 3K of event 'cycles:ppp', 3000 Hz, Event count (approx.): 2766398574 _raw_spin_lock_irqsave() /proc/kcore 0.26 nop 4.61 push %rbx 19.33 pushfq 7.97 pop %rax 0.32 nop 0.06 mov %rax,%rbx 14.63 cli 0.06 nop xor %eax,%eax mov $0x1,%edx 49.94 lock cmpxchg %edx,(%rdi) 0.16 test %eax,%eax ↓ jne 2b 2.66 mov %rbx,%rax pop %rbx ← retq 2b: mov %eax,%esi 2d: → callq *ffffffffb30eaed0 mov %rbx,%rax pop %rbx ← retq # Or using a big value to ask for all offsets to be shown: # cat ~/.perfconfig [annotate] offset_level = 100 hide_src_code = true # perf annotate --ignore-vmlinux --stdio2 _raw_spin_lock_irqsave Samples: 3K of event 'cycles:ppp', 3000 Hz, Event count (approx.): 2766398574 _raw_spin_lock_irqsave() /proc/kcore 0.26 0: nop 4.61 5: push %rbx 19.33 6: pushfq 7.97 7: pop %rax 0.32 8: nop 0.06 d: mov %rax,%rbx 14.63 10: cli 0.06 11: nop 17: xor %eax,%eax 19: mov $0x1,%edx 49.94 1e: lock cmpxchg %edx,(%rdi) 0.16 22: test %eax,%eax 24: ↓ jne 2b 2.66 26: mov %rbx,%rax 29: pop %rbx 2a: ← retq 2b: mov %eax,%esi 2d: → callq *ffffffffb30eaed0 32: mov %rbx,%rax 35: pop %rbx 36: ← retq # This also affects the TUI, i.e. the default 'perf annotate' and 'perf top/report' -> A hotkey -> annotate interfaces, when slang-devel is present in the build, i.e.: # perf version --build-options | grep slang libslang: [ on ] # HAVE_SLANG_SUPPORT # Cc: Adrian Hunter Cc: Andi Kleen Cc: David Ahern Cc: Jin Yao Cc: Jiri Olsa Cc: Martin Liška Cc: Namhyung Kim Cc: Ravi Bangoria Cc: Thomas Richter Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-venm6x5zrt40eu8hxdsmqxz6@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-config.txt | 5 +++++ tools/perf/util/annotate.c | 15 ++++++++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt index 5b4fff3adc4b..32f4a898e3f2 100644 --- a/tools/perf/Documentation/perf-config.txt +++ b/tools/perf/Documentation/perf-config.txt @@ -334,6 +334,11 @@ annotate.*:: 99.93 │ mov %eax,%eax + annotate.offset_level:: + Default is '1', meaning just jump targets will have offsets show right beside + the instruction. When set to '2' 'call' instructions will also have its offsets + shown, 3 or higher will show offsets for all instructions. 
+ hist.*:: hist.percentage:: This option control the way to calculate overhead of filtered entries - diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 5edc565d86c4..536ee148bff8 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -2649,10 +2649,11 @@ int __annotation__scnprintf_samples_period(struct annotation *notes, */ static struct annotation_config { const char *name; - bool *value; + void *value; } annotation__configs[] = { ANNOTATION__CFG(hide_src_code), ANNOTATION__CFG(jump_arrows), + ANNOTATION__CFG(offset_level), ANNOTATION__CFG(show_linenr), ANNOTATION__CFG(show_nr_jumps), ANNOTATION__CFG(show_nr_samples), @@ -2684,8 +2685,16 @@ static int annotation__config(const char *var, const char *value, if (cfg == NULL) pr_debug("%s variable unknown, ignoring...", var); - else - *cfg->value = perf_config_bool(name, value); + else if (strcmp(var, "annotate.offset_level") == 0) { + perf_config_int(cfg->value, name, value); + + if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL) + *(int *)cfg->value = ANNOTATION__MAX_OFFSET_LEVEL; + else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL) + *(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL; + } else { + *(bool *)cfg->value = perf_config_bool(name, value); + } return 0; } From b0d5c81e872ed21de1e56feb0fa6e4161da7be61 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 12 Apr 2018 16:28:18 -0300 Subject: [PATCH 035/182] perf annotate: Handle variables in 'sub', 'or' and many other instructions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just like is done for 'mov' and others that can have as source or targets variables resolved by objdump, to make them more compact: - orb $0x4,0x224d71(%rip) # 226ca4 <_rtld_global+0xca4> + orb $0x4,_rtld_global+0xca4 Cc: Adrian Hunter Cc: Andi Kleen Cc: David Ahern Cc: Jin Yao Cc: Jiri Olsa Cc: Martin Liška Cc: Namhyung Kim Cc: Ravi Bangoria Cc: Thomas Richter Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-efex7746id4w4wa03nqxvh3m@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/arch/x86/annotate/instructions.c | 67 ++++++++++++++++++++- 1 file changed, 66 insertions(+), 1 deletion(-) diff --git a/tools/perf/arch/x86/annotate/instructions.c b/tools/perf/arch/x86/annotate/instructions.c index 5bd1ba8c0282..44f5aba78210 100644 --- a/tools/perf/arch/x86/annotate/instructions.c +++ b/tools/perf/arch/x86/annotate/instructions.c @@ -1,21 +1,43 @@ // SPDX-License-Identifier: GPL-2.0 static struct ins x86__instructions[] = { + { .name = "adc", .ops = &mov_ops, }, + { .name = "adcb", .ops = &mov_ops, }, + { .name = "adcl", .ops = &mov_ops, }, { .name = "add", .ops = &mov_ops, }, { .name = "addl", .ops = &mov_ops, }, { .name = "addq", .ops = &mov_ops, }, + { .name = "addsd", .ops = &mov_ops, }, { .name = "addw", .ops = &mov_ops, }, { .name = "and", .ops = &mov_ops, }, + { .name = "andb", .ops = &mov_ops, }, + { .name = "andl", .ops = &mov_ops, }, + { .name = "andpd", .ops = &mov_ops, }, + { .name = "andps", .ops = &mov_ops, }, + { .name = "andq", .ops = &mov_ops, }, + { .name = "andw", .ops = &mov_ops, }, + { .name = "bsr", .ops = &mov_ops, }, + { .name = "bt", .ops = &mov_ops, }, + { .name = "btr", .ops = &mov_ops, }, { .name = "bts", .ops = &mov_ops, }, + { .name = "btsq", .ops = &mov_ops, }, { .name = "call", .ops = &call_ops, }, { .name = "callq", .ops = &call_ops, }, + { .name = "cmovbe", .ops = &mov_ops, }, + { .name = "cmove", .ops = &mov_ops, }, + { .name = "cmovae", .ops = 
&mov_ops, }, { .name = "cmp", .ops = &mov_ops, }, { .name = "cmpb", .ops = &mov_ops, }, { .name = "cmpl", .ops = &mov_ops, }, { .name = "cmpq", .ops = &mov_ops, }, { .name = "cmpw", .ops = &mov_ops, }, { .name = "cmpxch", .ops = &mov_ops, }, + { .name = "cmpxchg", .ops = &mov_ops, }, + { .name = "cs", .ops = &mov_ops, }, { .name = "dec", .ops = &dec_ops, }, { .name = "decl", .ops = &dec_ops, }, + { .name = "divsd", .ops = &mov_ops, }, + { .name = "divss", .ops = &mov_ops, }, + { .name = "gs", .ops = &mov_ops, }, { .name = "imul", .ops = &mov_ops, }, { .name = "inc", .ops = &dec_ops, }, { .name = "incl", .ops = &dec_ops, }, @@ -57,25 +79,68 @@ static struct ins x86__instructions[] = { { .name = "lea", .ops = &mov_ops, }, { .name = "lock", .ops = &lock_ops, }, { .name = "mov", .ops = &mov_ops, }, + { .name = "movapd", .ops = &mov_ops, }, + { .name = "movaps", .ops = &mov_ops, }, { .name = "movb", .ops = &mov_ops, }, { .name = "movdqa", .ops = &mov_ops, }, + { .name = "movdqu", .ops = &mov_ops, }, { .name = "movl", .ops = &mov_ops, }, { .name = "movq", .ops = &mov_ops, }, + { .name = "movsd", .ops = &mov_ops, }, { .name = "movslq", .ops = &mov_ops, }, + { .name = "movss", .ops = &mov_ops, }, + { .name = "movupd", .ops = &mov_ops, }, + { .name = "movups", .ops = &mov_ops, }, + { .name = "movw", .ops = &mov_ops, }, { .name = "movzbl", .ops = &mov_ops, }, { .name = "movzwl", .ops = &mov_ops, }, + { .name = "mulsd", .ops = &mov_ops, }, + { .name = "mulss", .ops = &mov_ops, }, { .name = "nop", .ops = &nop_ops, }, { .name = "nopl", .ops = &nop_ops, }, { .name = "nopw", .ops = &nop_ops, }, { .name = "or", .ops = &mov_ops, }, + { .name = "orb", .ops = &mov_ops, }, { .name = "orl", .ops = &mov_ops, }, + { .name = "orps", .ops = &mov_ops, }, + { .name = "orq", .ops = &mov_ops, }, + { .name = "pand", .ops = &mov_ops, }, + { .name = "paddq", .ops = &mov_ops, }, + { .name = "pcmpeqb", .ops = &mov_ops, }, + { .name = "por", .ops = &mov_ops, }, + { .name = "rclb", .ops = &mov_ops, }, + { .name = "rcll", .ops = &mov_ops, }, + { .name = "retq", .ops = &ret_ops, }, + { .name = "sbb", .ops = &mov_ops, }, + { .name = "sbbl", .ops = &mov_ops, }, + { .name = "sete", .ops = &mov_ops, }, + { .name = "sub", .ops = &mov_ops, }, + { .name = "subl", .ops = &mov_ops, }, + { .name = "subq", .ops = &mov_ops, }, + { .name = "subsd", .ops = &mov_ops, }, + { .name = "subw", .ops = &mov_ops, }, { .name = "test", .ops = &mov_ops, }, { .name = "testb", .ops = &mov_ops, }, { .name = "testl", .ops = &mov_ops, }, + { .name = "ucomisd", .ops = &mov_ops, }, + { .name = "ucomiss", .ops = &mov_ops, }, + { .name = "vaddsd", .ops = &mov_ops, }, + { .name = "vandpd", .ops = &mov_ops, }, + { .name = "vmovdqa", .ops = &mov_ops, }, + { .name = "vmovq", .ops = &mov_ops, }, + { .name = "vmovsd", .ops = &mov_ops, }, + { .name = "vmulsd", .ops = &mov_ops, }, + { .name = "vorpd", .ops = &mov_ops, }, + { .name = "vsubsd", .ops = &mov_ops, }, + { .name = "vucomisd", .ops = &mov_ops, }, { .name = "xadd", .ops = &mov_ops, }, { .name = "xbeginl", .ops = &jump_ops, }, { .name = "xbeginq", .ops = &jump_ops, }, - { .name = "retq", .ops = &ret_ops, }, + { .name = "xchg", .ops = &mov_ops, }, + { .name = "xor", .ops = &mov_ops, }, + { .name = "xorb", .ops = &mov_ops, }, + { .name = "xorpd", .ops = &mov_ops, }, + { .name = "xorps", .ops = &mov_ops, }, }; static bool x86__ins_is_fused(struct arch *arch, const char *ins1, From 43838a23a05fbd13e47d750d3dfd77001536dd33 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 11 Apr 2018 13:27:52 -0400 Subject: 
[PATCH 036/182] random: fix crng_ready() test The crng_init variable has three states: 0: The CRNG is not initialized at all 1: The CRNG has a small amount of entropy, hopefully good enough for early-boot, non-cryptographical use cases 2: The CRNG is fully initialized and we are sure it is safe for cryptographic use cases. The crng_ready() function should only return true once we are in the last state. This addresses CVE-2018-1108. Reported-by: Jann Horn Fixes: e192be9d9a30 ("random: replace non-blocking pool...") Cc: stable@kernel.org # 4.8+ Signed-off-by: Theodore Ts'o Reviewed-by: Jann Horn --- drivers/char/random.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index e027e7fa1472..c8ec1e70abde 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -427,7 +427,7 @@ struct crng_state primary_crng = { * its value (from 0->1->2). */ static int crng_init = 0; -#define crng_ready() (likely(crng_init > 0)) +#define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) static void _extract_crng(struct crng_state *crng, @@ -794,7 +794,7 @@ static int crng_fast_load(const char *cp, size_t len) if (!spin_trylock_irqsave(&primary_crng.lock, flags)) return 0; - if (crng_ready()) { + if (crng_init != 0) { spin_unlock_irqrestore(&primary_crng.lock, flags); return 0; } @@ -856,7 +856,7 @@ static void _extract_crng(struct crng_state *crng, { unsigned long v, flags; - if (crng_init > 1 && + if (crng_ready() && time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); spin_lock_irqsave(&crng->lock, flags); @@ -1139,7 +1139,7 @@ void add_interrupt_randomness(int irq, int irq_flags) fast_mix(fast_pool); add_interrupt_bench(cycles); - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { if ((fast_pool->count >= 64) && crng_fast_load((char *) fast_pool->pool, sizeof(fast_pool->pool))) { @@ -2212,7 +2212,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, { struct entropy_store *poolp = &input_pool; - if (!crng_ready()) { + if (unlikely(crng_init == 0)) { crng_fast_load(buffer, count); return; } From dc12baacb95f205948f64dc936a47d89ee110117 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 11 Apr 2018 14:58:27 -0400 Subject: [PATCH 037/182] random: use a different mixing algorithm for add_device_randomness() add_device_randomness() use of crng_fast_load() was highly problematic. Some callers of add_device_randomness() can pass in a large amount of static information. This would immediately promote the crng_init state from 0 to 1, without really doing much to initialize the primary_crng's internal state with something even vaguely unpredictable. Since we don't have the speed constraints of add_interrupt_randomness(), we can do a better job mixing in the what unpredictability a device driver or architecture maintainer might see fit to give us, and do it in a way which does not bump the crng_init_cnt variable. Also, since add_device_randomness() doesn't bump any entropy accounting in crng_init state 0, mix the device randomness into the input_pool entropy pool as well. This is related to CVE-2018-1108. 
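For context, a minimal sketch (hypothetical driver code, not from this patch) of the kind of caller crng_slow_load() has to cope with: data that is unique to the machine but completely static, so it may carry little or no entropy and must not advance crng_init_cnt:

#include <linux/random.h>
#include <linux/string.h>

/* Hypothetical probe path: 'serial' never changes across boots of the
 * same board, so it is mixed into the primary_crng state and the input
 * pool without being credited toward CRNG initialization. */
static void example_probe(const char *serial)
{
        add_device_randomness(serial, strlen(serial));
}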
Reported-by: Jann Horn Fixes: ee7998c50c26 ("random: do not ignore early device randomness") Cc: stable@kernel.org # 4.13+ Signed-off-by: Theodore Ts'o --- drivers/char/random.c | 55 +++++++++++++++++++++++++++++++++++++++---- 1 file changed, 51 insertions(+), 4 deletions(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index c8ec1e70abde..6baa828c0493 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -787,6 +787,10 @@ static void crng_initialize(struct crng_state *crng) crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } +/* + * crng_fast_load() can be called by code in the interrupt service + * path. So we can't afford to dilly-dally. + */ static int crng_fast_load(const char *cp, size_t len) { unsigned long flags; @@ -813,6 +817,51 @@ static int crng_fast_load(const char *cp, size_t len) return 1; } +/* + * crng_slow_load() is called by add_device_randomness, which has two + * attributes. (1) We can't trust the buffer passed to it is + * guaranteed to be unpredictable (so it might not have any entropy at + * all), and (2) it doesn't have the performance constraints of + * crng_fast_load(). + * + * So we do something more comprehensive which is guaranteed to touch + * all of the primary_crng's state, and which uses a LFSR with a + * period of 255 as part of the mixing algorithm. Finally, we do + * *not* advance crng_init_cnt since buffer we may get may be something + * like a fixed DMI table (for example), which might very well be + * unique to the machine, but is otherwise unvarying. + */ +static int crng_slow_load(const char *cp, size_t len) +{ + unsigned long flags; + static unsigned char lfsr = 1; + unsigned char tmp; + unsigned i, max = CHACHA20_KEY_SIZE; + const char * src_buf = cp; + char * dest_buf = (char *) &primary_crng.state[4]; + + if (!spin_trylock_irqsave(&primary_crng.lock, flags)) + return 0; + if (crng_init != 0) { + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 0; + } + if (len > max) + max = len; + + for (i = 0; i < max ; i++) { + tmp = lfsr; + lfsr >>= 1; + if (tmp & 1) + lfsr ^= 0xE1; + tmp = dest_buf[i % CHACHA20_KEY_SIZE]; + dest_buf[i % CHACHA20_KEY_SIZE] ^= src_buf[i % len] ^ lfsr; + lfsr += (tmp << 3) | (tmp >> 5); + } + spin_unlock_irqrestore(&primary_crng.lock, flags); + return 1; +} + static void crng_reseed(struct crng_state *crng, struct entropy_store *r) { unsigned long flags; @@ -981,10 +1030,8 @@ void add_device_randomness(const void *buf, unsigned int size) unsigned long time = random_get_entropy() ^ jiffies; unsigned long flags; - if (!crng_ready()) { - crng_fast_load(buf, size); - return; - } + if (!crng_ready() && size) + crng_slow_load(buf, size); trace_add_device_randomness(size, _RET_IP_); spin_lock_irqsave(&input_pool.lock, flags); From 8ef35c866f8862df074a49a93b0309725812dea8 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 11 Apr 2018 15:23:56 -0400 Subject: [PATCH 038/182] random: set up the NUMA crng instances after the CRNG is fully initialized Until the primary_crng is fully initialized, don't initialize the NUMA crng nodes. Otherwise users of /dev/urandom on NUMA systems before the CRNG is fully initialized can get very bad quality randomness. Of course everyone should move to getrandom(2) where this won't be an issue, but there's a lot of legacy code out there. This related to CVE-2018-1108. 
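Since the fix recommends getrandom(2), here is a minimal userspace sketch (assuming the glibc >= 2.25 wrapper in <sys/random.h>); with flags == 0 the call blocks until the CRNG is fully initialized, which is exactly the guarantee early /dev/urandom readers do not get:

#include <sys/random.h>
#include <stdio.h>

int main(void)
{
        unsigned char key[32];

        /* Blocks until the kernel CRNG is initialized; requests of up
         * to 256 bytes are filled completely or fail with -1. */
        if (getrandom(key, sizeof(key), 0) < 0) {
                perror("getrandom");
                return 1;
        }
        return 0;
}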
Reported-by: Jann Horn Fixes: 1e7f583af67b ("random: make /dev/urandom scalable for silly...") Cc: stable@kernel.org # 4.8+ Signed-off-by: Theodore Ts'o --- drivers/char/random.c | 46 +++++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 19 deletions(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index 6baa828c0493..02d792f7933f 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -787,6 +787,32 @@ static void crng_initialize(struct crng_state *crng) crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1; } +#ifdef CONFIG_NUMA +static void numa_crng_init(void) +{ + int i; + struct crng_state *crng; + struct crng_state **pool; + + pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); + for_each_online_node(i) { + crng = kmalloc_node(sizeof(struct crng_state), + GFP_KERNEL | __GFP_NOFAIL, i); + spin_lock_init(&crng->lock); + crng_initialize(crng); + pool[i] = crng; + } + mb(); + if (cmpxchg(&crng_node_pool, NULL, pool)) { + for_each_node(i) + kfree(pool[i]); + kfree(pool); + } +} +#else +static void numa_crng_init(void) {} +#endif + /* * crng_fast_load() can be called by code in the interrupt service * path. So we can't afford to dilly-dally. @@ -893,6 +919,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) spin_unlock_irqrestore(&primary_crng.lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); + numa_crng_init(); crng_init = 2; process_random_ready_list(); wake_up_interruptible(&crng_init_wait); @@ -1727,28 +1754,9 @@ static void init_std_data(struct entropy_store *r) */ static int rand_initialize(void) { -#ifdef CONFIG_NUMA - int i; - struct crng_state *crng; - struct crng_state **pool; -#endif - init_std_data(&input_pool); init_std_data(&blocking_pool); crng_initialize(&primary_crng); - -#ifdef CONFIG_NUMA - pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL); - for_each_online_node(i) { - crng = kmalloc_node(sizeof(struct crng_state), - GFP_KERNEL | __GFP_NOFAIL, i); - spin_lock_init(&crng->lock); - crng_initialize(crng); - pool[i] = crng; - } - mb(); - crng_node_pool = pool; -#endif return 0; } early_initcall(rand_initialize); From 0bb29a849a6433b72e249eea7695477b02056e94 Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Thu, 12 Apr 2018 00:50:45 -0400 Subject: [PATCH 039/182] random: crng_reseed() should lock the crng instance that it is modifying Reported-by: Jann Horn Fixes: 1e7f583af67b ("random: make /dev/urandom scalable for silly...") Cc: stable@kernel.org # 4.8+ Signed-off-by: Theodore Ts'o Reviewed-by: Jann Horn --- drivers/char/random.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index 02d792f7933f..898233f594b4 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -906,7 +906,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) _crng_backtrack_protect(&primary_crng, buf.block, CHACHA20_KEY_SIZE); } - spin_lock_irqsave(&primary_crng.lock, flags); + spin_lock_irqsave(&crng->lock, flags); for (i = 0; i < 8; i++) { unsigned long rv; if (!arch_get_random_seed_long(&rv) && @@ -916,7 +916,7 @@ static void crng_reseed(struct crng_state *crng, struct entropy_store *r) } memzero_explicit(&buf, sizeof(buf)); crng->init_time = jiffies; - spin_unlock_irqrestore(&primary_crng.lock, flags); + spin_unlock_irqrestore(&crng->lock, flags); if (crng == &primary_crng && crng_init < 2) { invalidate_batched_entropy(); numa_crng_init(); From 
d848e5f8e1ebdb227d045db55fe4f825e82965fa Mon Sep 17 00:00:00 2001 From: Theodore Ts'o Date: Wed, 11 Apr 2018 16:32:17 -0400 Subject: [PATCH 040/182] random: add new ioctl RNDRESEEDCRNG Add a new ioctl which forces the the crng to be reseeded. Signed-off-by: Theodore Ts'o Cc: stable@kernel.org --- drivers/char/random.c | 13 ++++++++++++- include/uapi/linux/random.h | 3 +++ 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/drivers/char/random.c b/drivers/char/random.c index 898233f594b4..3cd3aae24d6d 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -429,6 +429,7 @@ struct crng_state primary_crng = { static int crng_init = 0; #define crng_ready() (likely(crng_init > 1)) static int crng_init_cnt = 0; +static unsigned long crng_global_init_time = 0; #define CRNG_INIT_CNT_THRESH (2*CHACHA20_KEY_SIZE) static void _extract_crng(struct crng_state *crng, __u32 out[CHACHA20_BLOCK_WORDS]); @@ -933,7 +934,8 @@ static void _extract_crng(struct crng_state *crng, unsigned long v, flags; if (crng_ready() && - time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)) + (time_after(crng_global_init_time, crng->init_time) || + time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL))) crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL); spin_lock_irqsave(&crng->lock, flags); if (arch_get_random_long(&v)) @@ -1757,6 +1759,7 @@ static int rand_initialize(void) init_std_data(&input_pool); init_std_data(&blocking_pool); crng_initialize(&primary_crng); + crng_global_init_time = jiffies; return 0; } early_initcall(rand_initialize); @@ -1930,6 +1933,14 @@ static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg) input_pool.entropy_count = 0; blocking_pool.entropy_count = 0; return 0; + case RNDRESEEDCRNG: + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + if (crng_init < 2) + return -ENODATA; + crng_reseed(&primary_crng, NULL); + crng_global_init_time = jiffies - 1; + return 0; default: return -EINVAL; } diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h index c34f4490d025..26ee91300e3e 100644 --- a/include/uapi/linux/random.h +++ b/include/uapi/linux/random.h @@ -35,6 +35,9 @@ /* Clear the entropy pool and associated counters. (Superuser only.) */ #define RNDCLEARPOOL _IO( 'R', 0x06 ) +/* Reseed CRNG. (Superuser only.) */ +#define RNDRESEEDCRNG _IO( 'R', 0x07 ) + struct rand_pool_info { int entropy_count; int buf_size; From 5f20407a2415181ce3af06fc84a86280a2708dd4 Mon Sep 17 00:00:00 2001 From: Jerry Hoemann Date: Mon, 9 Oct 2017 12:56:56 -0600 Subject: [PATCH 041/182] watchdog: hpwdt: change maintainer. Signed-off-by: Jerry Hoemann Reviewed-by: Guenter Roeck Acked-by: Jimmy Vance Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- MAINTAINERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..2a8a542a2cdb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6256,7 +6256,7 @@ S: Odd Fixes F: drivers/media/usb/hdpvr/ HEWLETT PACKARD ENTERPRISE ILO NMI WATCHDOG DRIVER -M: Jimmy Vance +M: Jerry Hoemann S: Supported F: Documentation/watchdog/hpwdt.txt F: drivers/watchdog/hpwdt.c From a03cc689e5c9d89d500f4a4dae1a81ea512dbb25 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 27 Mar 2018 14:25:40 -0500 Subject: [PATCH 042/182] watchdog: sch311x_wdt: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. 
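As a generic, self-contained illustration (made up for this description, not taken from any driver) of the annotation -Wimplicit-fallthrough looks for, build the following with gcc -Wimplicit-fallthrough and no warning is emitted because of the comment placed right before the next case label:

  #include <stdio.h>

  static int handle(int cmd, int val)
  {
      switch (cmd) {
      case 0:                 /* "set": report the value, then behave like "get" */
          printf("timeout set to %d\n", val);
          /* fall through */
      case 1:                 /* "get" */
          return val;
      default:
          return -1;
      }
  }

  int main(void)
  {
      return handle(0, 60) == 60 ? 0 : 1;
  }
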
Notice that in this particular case I replaced "Fall" with a proper "Fall through" comment, which is what GCC is expecting to find. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/sch311x_wdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/watchdog/sch311x_wdt.c b/drivers/watchdog/sch311x_wdt.c index 43d0cbb7ba0b..814cdf539b0f 100644 --- a/drivers/watchdog/sch311x_wdt.c +++ b/drivers/watchdog/sch311x_wdt.c @@ -299,7 +299,7 @@ static long sch311x_wdt_ioctl(struct file *file, unsigned int cmd, if (sch311x_wdt_set_heartbeat(new_timeout)) return -EINVAL; sch311x_wdt_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: From 3ff0751fa04208459b74cfbe492df79468b8425c Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 27 Mar 2018 14:30:41 -0500 Subject: [PATCH 043/182] watchdog: w83977f_wdt: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case I replaced "Fall" with a proper "Fall through" comment, which is what GCC is expecting to find. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/w83977f_wdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/watchdog/w83977f_wdt.c b/drivers/watchdog/w83977f_wdt.c index 20e2bba10400..672b61a7f9a3 100644 --- a/drivers/watchdog/w83977f_wdt.c +++ b/drivers/watchdog/w83977f_wdt.c @@ -427,7 +427,7 @@ static long wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return -EINVAL; wdt_keepalive(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, uarg.i); From e30d69df78fb2667dc58e906cabd0f70ed0af95d Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Tue, 27 Mar 2018 14:33:49 -0500 Subject: [PATCH 044/182] watchdog: wafer5823wdt: Mark expected switch fall-through In preparation to enabling -Wimplicit-fallthrough, mark switch cases where we are expecting to fall through. Notice that in this particular case I replaced "Fall" with a proper "Fall through" comment, which is what GCC is expecting to find. Signed-off-by: Gustavo A. R. Silva Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/wafer5823wdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/watchdog/wafer5823wdt.c b/drivers/watchdog/wafer5823wdt.c index db0da7ea4fd8..93c5b610e264 100644 --- a/drivers/watchdog/wafer5823wdt.c +++ b/drivers/watchdog/wafer5823wdt.c @@ -178,7 +178,7 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd, timeout = new_timeout; wafwdt_stop(); wafwdt_start(); - /* Fall */ + /* Fall through */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); From fdac6a90d2d151abdbb7e5ec14bb9ab64e2931ec Mon Sep 17 00:00:00 2001 From: Veeraiyan Chidambaram Date: Fri, 13 Apr 2018 16:19:24 +0200 Subject: [PATCH 045/182] watchdog: renesas-wdt: Add support for WDIOF_CARDRESET This patch adds the WDIOF_CARDRESET support for the Renesas platform watchdog, to know if the board reboot is due to a watchdog reset. This is done via the WOVF bit (bit 4) of the RWTCSRA register, which indicates if RWTCNT overflowed, triggering the reset in last boot. 
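For completeness, a minimal userspace sketch that consumes this flag through the standard WDIOC_GETBOOTSTATUS ioctl from <linux/watchdog.h>. The device node name is an assumption, and since opening a watchdog device normally arms it, the sketch performs the magic-close write before exiting:

  #include <linux/watchdog.h>
  #include <sys/ioctl.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <stdio.h>

  int main(void)
  {
      int status = 0;
      int fd = open("/dev/watchdog0", O_WRONLY);  /* node name is an assumption */

      if (fd < 0) {
          perror("open");
          return 1;
      }
      if (ioctl(fd, WDIOC_GETBOOTSTATUS, &status) == 0 &&
          (status & WDIOF_CARDRESET))
          printf("previous reset was caused by the watchdog\n");
      /* magic close: tell the driver this is an orderly shutdown */
      if (write(fd, "V", 1) < 0)
          perror("write");
      close(fd);
      return 0;
  }
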
Signed-off-by: Veeraiyan Chidambaram [takeshi.kihara.df: changed to read the RWTCSRA register while clock is enabled] Signed-off-by: Takeshi Kihara Signed-off-by: Wolfram Sang Reviewed-by: Guenter Roeck Reviewed-by: Vladimir Zapolskiy Reviewed-by: Geert Uytterhoeven Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/renesas_wdt.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/watchdog/renesas_wdt.c b/drivers/watchdog/renesas_wdt.c index 6b8c6ddfe30b..514db5cc1595 100644 --- a/drivers/watchdog/renesas_wdt.c +++ b/drivers/watchdog/renesas_wdt.c @@ -121,7 +121,8 @@ static int rwdt_restart(struct watchdog_device *wdev, unsigned long action, } static const struct watchdog_info rwdt_ident = { - .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT, + .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | + WDIOF_CARDRESET, .identity = "Renesas WDT Watchdog", }; @@ -197,9 +198,10 @@ static int rwdt_probe(struct platform_device *pdev) return PTR_ERR(clk); pm_runtime_enable(&pdev->dev); - pm_runtime_get_sync(&pdev->dev); priv->clk_rate = clk_get_rate(clk); + priv->wdev.bootstatus = (readb_relaxed(priv->base + RWTCSRA) & + RWTCSRA_WOVF) ? WDIOF_CARDRESET : 0; pm_runtime_put(&pdev->dev); if (!priv->clk_rate) { From 49d4d277ca54e04170d39484c8758a0ea9bca37d Mon Sep 17 00:00:00 2001 From: Eddie James Date: Tue, 27 Mar 2018 15:09:27 -0500 Subject: [PATCH 046/182] aspeed: watchdog: Set bootstatus during probe Check the aspeed timeout status register to see if the system has booted from the secondary boot source. If so, set the watchdog device bootstatus flag for "Card previously reset the CPU." Signed-off-by: Eddie James Reviewed-by: Guenter Roeck Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck --- drivers/watchdog/aspeed_wdt.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/watchdog/aspeed_wdt.c b/drivers/watchdog/aspeed_wdt.c index a5b8eb21201f..1abe4d021fd2 100644 --- a/drivers/watchdog/aspeed_wdt.c +++ b/drivers/watchdog/aspeed_wdt.c @@ -55,6 +55,8 @@ MODULE_DEVICE_TABLE(of, aspeed_wdt_of_table); #define WDT_CTRL_WDT_INTR BIT(2) #define WDT_CTRL_RESET_SYSTEM BIT(1) #define WDT_CTRL_ENABLE BIT(0) +#define WDT_TIMEOUT_STATUS 0x10 +#define WDT_TIMEOUT_STATUS_BOOT_SECONDARY BIT(1) /* * WDT_RESET_WIDTH controls the characteristics of the external pulse (if @@ -192,6 +194,7 @@ static int aspeed_wdt_probe(struct platform_device *pdev) struct device_node *np; const char *reset_type; u32 duration; + u32 status; int ret; wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL); @@ -307,6 +310,10 @@ static int aspeed_wdt_probe(struct platform_device *pdev) writel(duration - 1, wdt->base + WDT_RESET_WIDTH); } + status = readl(wdt->base + WDT_TIMEOUT_STATUS); + if (status & WDT_TIMEOUT_STATUS_BOOT_SECONDARY) + wdt->wdd.bootstatus = WDIOF_CARDRESET; + ret = devm_watchdog_register_device(&pdev->dev, &wdt->wdd); if (ret) { dev_err(&pdev->dev, "failed to register\n"); From af52f9982e410edac21ca4b49563053ffc9da1eb Mon Sep 17 00:00:00 2001 From: David Wang Date: Mon, 16 Apr 2018 17:48:09 +0800 Subject: [PATCH 047/182] ALSA: hda - New VIA controller suppor no-snoop path This patch is used to tell kernel that new VIA HDAC controller also support no-snoop path. 
[ minor coding style fix by tiwai ] Signed-off-by: David Wang Cc: Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_intel.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index ccffce068634..fece779f6260 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -1645,7 +1645,8 @@ static void azx_check_snoop_available(struct azx *chip) */ u8 val; pci_read_config_byte(chip->pci, 0x42, &val); - if (!(val & 0x80) && chip->pci->revision == 0x30) + if (!(val & 0x80) && (chip->pci->revision == 0x30 || + chip->pci->revision == 0x20)) snoop = false; } From e7c5a571a8d6a266aee9ca3f3f26e5afe3717eca Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Apr 2018 12:34:24 -0700 Subject: [PATCH 048/182] libnvdimm, dimm: handle EACCES failures from label reads The new support for the standard _LSR and _LSW methods neglected to also update the nvdimm_init_config_data() and nvdimm_set_config_data() to return the translated error code from failed commands. This precision is necessary because the locked status that was previously returned on ND_CMD_GET_CONFIG_SIZE commands is now returned on ND_CMD_{GET,SET}_CONFIG_DATA commands. If the kernel misses this indication it can inadvertently fall back to label-less mode when it should otherwise avoid all access to locked regions. Cc: Fixes: 4b27db7e26cd ("acpi, nfit: add support for the _LSI, _LSR, and...") Signed-off-by: Dan Williams --- drivers/nvdimm/dimm_devs.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c index e00d45522b80..8d348b22ba45 100644 --- a/drivers/nvdimm/dimm_devs.c +++ b/drivers/nvdimm/dimm_devs.c @@ -88,9 +88,9 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd) int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) { struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); + int rc = validate_dimm(ndd), cmd_rc = 0; struct nd_cmd_get_config_data_hdr *cmd; struct nvdimm_bus_descriptor *nd_desc; - int rc = validate_dimm(ndd); u32 max_cmd_size, config_size; size_t offset; @@ -124,9 +124,11 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) cmd->in_offset = offset; rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), ND_CMD_GET_CONFIG_DATA, cmd, - cmd->in_length + sizeof(*cmd), NULL); - if (rc || cmd->status) { - rc = -ENXIO; + cmd->in_length + sizeof(*cmd), &cmd_rc); + if (rc < 0) + break; + if (cmd_rc < 0) { + rc = cmd_rc; break; } memcpy(ndd->data + offset, cmd->out_buf, cmd->in_length); @@ -140,9 +142,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd) int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, void *buf, size_t len) { - int rc = validate_dimm(ndd); size_t max_cmd_size, buf_offset; struct nd_cmd_set_config_hdr *cmd; + int rc = validate_dimm(ndd), cmd_rc = 0; struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev); struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc; @@ -164,7 +166,6 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, for (buf_offset = 0; len; len -= cmd->in_length, buf_offset += cmd->in_length) { size_t cmd_size; - u32 *status; cmd->in_offset = offset + buf_offset; cmd->in_length = min(max_cmd_size, len); @@ -172,12 +173,13 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset, /* status is output in the last 4-bytes of the command buffer */ cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32); - status = ((void *) cmd) + cmd_size - 
sizeof(u32); rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev), - ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, NULL); - if (rc || *status) { - rc = rc ? rc : -ENXIO; + ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc); + if (rc < 0) + break; + if (cmd_rc < 0) { + rc = cmd_rc; break; } } From 55c72ab62e47fc584131901baddb2752e949ebcd Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Apr 2018 13:56:43 -0700 Subject: [PATCH 049/182] tools/testing/nvdimm: allow custom error code injection Given that libnvdimm driver stack takes specific actions on DIMM command error codes like -EACCES, provide a facility to inject custom failures. Signed-off-by: Dan Williams --- tools/testing/nvdimm/test/nfit.c | 38 +++++++++++++++++++++++++++++++- 1 file changed, 37 insertions(+), 1 deletion(-) diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index cb166be4918d..dc6cf5630280 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -138,6 +138,7 @@ static u32 handle[] = { }; static unsigned long dimm_fail_cmd_flags[NUM_DCR]; +static int dimm_fail_cmd_code[NUM_DCR]; struct nfit_test_fw { enum intel_fw_update_state state; @@ -892,8 +893,11 @@ static int get_dimm(struct nfit_mem *nfit_mem, unsigned int func) if (i >= ARRAY_SIZE(handle)) return -ENXIO; - if ((1 << func) & dimm_fail_cmd_flags[i]) + if ((1 << func) & dimm_fail_cmd_flags[i]) { + if (dimm_fail_cmd_code[i]) + return dimm_fail_cmd_code[i]; return -EIO; + } return i; } @@ -1225,8 +1229,40 @@ static ssize_t fail_cmd_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR_RW(fail_cmd); +static ssize_t fail_cmd_code_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + int dimm = dimm_name_to_id(dev); + + if (dimm < 0) + return dimm; + + return sprintf(buf, "%d\n", dimm_fail_cmd_code[dimm]); +} + +static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t size) +{ + int dimm = dimm_name_to_id(dev); + unsigned long val; + ssize_t rc; + + if (dimm < 0) + return dimm; + + rc = kstrtol(buf, 0, &val); + if (rc) + return rc; + + dimm_fail_cmd_code[dimm] = val; + return size; +} +static DEVICE_ATTR_RW(fail_cmd_code); + + static struct attribute *nfit_test_dimm_attributes[] = { &dev_attr_fail_cmd.attr, + &dev_attr_fail_cmd_code.attr, &dev_attr_handle.attr, NULL, }; From 718fda67d2f69cc6074b4b6a740a6e4aacd44eff Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Apr 2018 16:38:01 -0700 Subject: [PATCH 050/182] tools/testing/nvdimm: support nfit_test_dimm attributes under nfit_test.1 The nfit_test.1 bus provides a pmem topology without blk-aperture enabling, so it presents different failure modes for label space handling. Allow custom DSM command error injection. 
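A small sketch of how a test harness might drive these attributes. The /sys/class/nfit_test_dimm path and the pairing of fail_cmd (a bitmask of DSM functions to fail) with fail_cmd_code (the errno to return) are assumptions based on the test module, not a documented ABI; the caller supplies both values:

  #include <stdio.h>

  /* write one value into a sysfs attribute of the emulated DIMM */
  static int write_attr(const char *attr, const char *val)
  {
      char path[256];
      FILE *f;

      snprintf(path, sizeof(path),
               "/sys/class/nfit_test_dimm/test_dimm0/%s", attr);  /* assumed path */
      f = fopen(path, "w");
      if (!f)
          return -1;
      fprintf(f, "%s\n", val);
      return fclose(f);
  }

  int main(int argc, char **argv)
  {
      if (argc != 3) {
          fprintf(stderr, "usage: %s <fail-mask> <errno>\n", argv[0]);
          return 1;
      }
      /* argv[1]: bitmask of DSM functions to fail, argv[2]: negative errno to inject */
      if (write_attr("fail_cmd", argv[1]) || write_attr("fail_cmd_code", argv[2]))
          return 1;
      return 0;
  }
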
Signed-off-by: Dan Williams --- tools/testing/nvdimm/test/nfit.c | 43 +++++++++++++++++++------------- 1 file changed, 25 insertions(+), 18 deletions(-) diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index dc6cf5630280..c8c88363311b 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -1166,12 +1166,12 @@ static int ars_state_init(struct device *dev, struct ars_state *ars_state) static void put_dimms(void *data) { - struct device **dimm_dev = data; + struct nfit_test *t = data; int i; - for (i = 0; i < NUM_DCR; i++) - if (dimm_dev[i]) - device_unregister(dimm_dev[i]); + for (i = 0; i < t->num_dcr; i++) + if (t->dimm_dev[i]) + device_unregister(t->dimm_dev[i]); } static struct class *nfit_test_dimm; @@ -1180,13 +1180,11 @@ static int dimm_name_to_id(struct device *dev) { int dimm; - if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1 - || dimm >= NUM_DCR || dimm < 0) + if (sscanf(dev_name(dev), "test_dimm%d", &dimm) != 1) return -ENXIO; return dimm; } - static ssize_t handle_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1259,7 +1257,6 @@ static ssize_t fail_cmd_code_store(struct device *dev, struct device_attribute * } static DEVICE_ATTR_RW(fail_cmd_code); - static struct attribute *nfit_test_dimm_attributes[] = { &dev_attr_fail_cmd.attr, &dev_attr_fail_cmd_code.attr, @@ -1276,6 +1273,23 @@ static const struct attribute_group *nfit_test_dimm_attribute_groups[] = { NULL, }; +static int nfit_test_dimm_init(struct nfit_test *t) +{ + int i; + + if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t)) + return -ENOMEM; + for (i = 0; i < t->num_dcr; i++) { + t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, + &t->pdev.dev, 0, NULL, + nfit_test_dimm_attribute_groups, + "test_dimm%d", i + t->dcr_idx); + if (!t->dimm_dev[i]) + return -ENOMEM; + } + return 0; +} + static void smart_init(struct nfit_test *t) { int i; @@ -1371,17 +1385,8 @@ static int nfit_test0_alloc(struct nfit_test *t) if (!t->_fit) return -ENOMEM; - if (devm_add_action_or_reset(&t->pdev.dev, put_dimms, t->dimm_dev)) + if (nfit_test_dimm_init(t)) return -ENOMEM; - for (i = 0; i < NUM_DCR; i++) { - t->dimm_dev[i] = device_create_with_groups(nfit_test_dimm, - &t->pdev.dev, 0, NULL, - nfit_test_dimm_attribute_groups, - "test_dimm%d", i); - if (!t->dimm_dev[i]) - return -ENOMEM; - } - smart_init(t); return ars_state_init(&t->pdev.dev, &t->ars_state); } @@ -1413,6 +1418,8 @@ static int nfit_test1_alloc(struct nfit_test *t) if (!t->spa_set[1]) return -ENOMEM; + if (nfit_test_dimm_init(t)) + return -ENOMEM; smart_init(t); return ars_state_init(&t->pdev.dev, &t->ars_state); } From 19357a685e870eeb825cbb7fd3104082ab041987 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Tue, 10 Apr 2018 14:23:47 -0700 Subject: [PATCH 051/182] tools/testing/nvdimm: fix missing newline in nfit_test_dimm 'handle' attribute Sysfs userspace tooling generally expects the kernel to emit a newlines when reading sysfs attributes. 
Signed-off-by: Dan Williams --- tools/testing/nvdimm/test/nfit.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index c8c88363311b..51fc41c24a77 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -1193,7 +1193,7 @@ static ssize_t handle_show(struct device *dev, struct device_attribute *attr, if (dimm < 0) return dimm; - return sprintf(buf, "%#x", handle[dimm]); + return sprintf(buf, "%#x\n", handle[dimm]); } DEVICE_ATTR_RO(handle); From 9484e12d7999a3c82d8732b60c0149f038bdd985 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Mon, 9 Apr 2018 14:29:33 -0700 Subject: [PATCH 052/182] tools/testing/nvdimm: enable labels for nfit_test.1 dimms Enable test cases for the kernel's fallback to label-less mode. Signed-off-by: Dan Williams --- tools/testing/nvdimm/test/nfit.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c index 51fc41c24a77..4ea385be528f 100644 --- a/tools/testing/nvdimm/test/nfit.c +++ b/tools/testing/nvdimm/test/nfit.c @@ -2265,6 +2265,9 @@ static void nfit_test1_setup(struct nfit_test *t) set_bit(ND_CMD_ARS_STATUS, &acpi_desc->bus_cmd_force_en); set_bit(ND_CMD_CLEAR_ERROR, &acpi_desc->bus_cmd_force_en); set_bit(ND_INTEL_ENABLE_LSS_STATUS, &acpi_desc->dimm_cmd_force_en); + set_bit(ND_CMD_GET_CONFIG_SIZE, &acpi_desc->dimm_cmd_force_en); + set_bit(ND_CMD_GET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); + set_bit(ND_CMD_SET_CONFIG_DATA, &acpi_desc->dimm_cmd_force_en); } static int nfit_test_blk_do_io(struct nd_blk_region *ndbr, resource_size_t dpa, From 114aa35d06d4920c537b72f9fa935de5dd205260 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Fri, 30 Mar 2018 13:22:06 -0700 Subject: [PATCH 053/182] netfilter: conntrack: silent a memory leak warning The following memory leak is false postive: unreferenced object 0xffff8f37f156fb38 (size 128): comm "softirq", pid 0, jiffies 4294899665 (age 11.292s) hex dump (first 32 bytes): 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b kkkkkkkkkkkkkkkk 00 00 00 00 30 00 20 00 48 6b 6b 6b 6b 6b 6b 6b ....0. .Hkkkkkkk backtrace: [<000000004fda266a>] __kmalloc_track_caller+0x10d/0x141 [<000000007b0a7e3c>] __krealloc+0x45/0x62 [<00000000d08e0bfb>] nf_ct_ext_add+0xdc/0x133 [<0000000099b47fd8>] init_conntrack+0x1b1/0x392 [<0000000086dc36ec>] nf_conntrack_in+0x1ee/0x34b [<00000000940592de>] nf_hook_slow+0x36/0x95 [<00000000d1bd4da7>] nf_hook.constprop.43+0x1c3/0x1dd [<00000000c3673266>] __ip_local_out+0xae/0xb4 [<000000003e4192a6>] ip_local_out+0x17/0x33 [<00000000b64356de>] igmp_ifc_timer_expire+0x23e/0x26f [<000000006a8f3032>] call_timer_fn+0x14c/0x2a5 [<00000000650c1725>] __run_timers.part.34+0x150/0x182 [<0000000090e6946e>] run_timer_softirq+0x2a/0x4c [<000000004d1e7293>] __do_softirq+0x1d1/0x3c2 [<000000004643557d>] irq_exit+0x53/0xa2 [<0000000029ddee8f>] smp_apic_timer_interrupt+0x22a/0x235 because __krealloc() is not supposed to release the old memory and it is released later via kfree_rcu(). Since this is the only external user of __krealloc(), just mark it as not leak here. 
Signed-off-by: Cong Wang Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_extend.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 9fe0ddc333fb..bd71a828ebde 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -71,6 +71,7 @@ void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp) rcu_read_unlock(); alloc = max(newlen, NF_CT_EXT_PREALLOC); + kmemleak_not_leak(old); new = __krealloc(old, alloc, gfp); if (!new) return NULL; From a6615743704fdc179e227f84b7903edd1f0b4241 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Mon, 9 Apr 2018 12:53:12 +0200 Subject: [PATCH 054/182] netfilter: fix CONFIG_NF_REJECT_IPV6=m link error We get a new link error with CONFIG_NFT_REJECT_INET=y and CONFIG_NF_REJECT_IPV6=m after larger parts of the nftables modules are linked together: net/netfilter/nft_reject_inet.o: In function `nft_reject_inet_eval': nft_reject_inet.c:(.text+0x17c): undefined reference to `nf_send_unreach6' nft_reject_inet.c:(.text+0x190): undefined reference to `nf_send_reset6' The problem is that with NF_TABLES_INET set, we implicitly try to use the ipv6 version as well for NFT_REJECT, but when CONFIG_IPV6 is set to a loadable module, it's impossible to reach that. The best workaround I found is to express the above as a Kconfig dependency, forcing NFT_REJECT itself to be 'm' in that particular configuration. Fixes: 02c7b25e5f54 ("netfilter: nf_tables: build-in filter chain type") Signed-off-by: Arnd Bergmann Signed-off-by: Pablo Neira Ayuso --- net/netfilter/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig index 704b3832dbad..44d8a55e9721 100644 --- a/net/netfilter/Kconfig +++ b/net/netfilter/Kconfig @@ -594,6 +594,7 @@ config NFT_QUOTA config NFT_REJECT default m if NETFILTER_ADVANCED=n tristate "Netfilter nf_tables reject support" + depends on !NF_TABLES_INET || (IPV6!=m || m) help This option adds the "reject" expression that you can use to explicitly deny and notify via TCP reset/ICMP informational errors From 569ccae68b38654f04b6842b034aa33857f605fe Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 10 Apr 2018 09:30:27 +0200 Subject: [PATCH 055/182] netfilter: nf_tables: can't fail after linking rule into active rule list rules in nftables a free'd using kfree, but protected by rcu, i.e. we must wait for a grace period to elapse. Normal removal patch does this, but nf_tables_newrule() doesn't obey this rule during error handling. It calls nft_trans_rule_add() *after* linking rule, and, if that fails to allocate memory, it unlinks the rule and then kfree() it -- this is unsafe. Switch order -- first add rule to transaction list, THEN link it to public list. Note: nft_trans_rule_add() uses GFP_KERNEL; it will not fail so this is not a problem in practice (spotted only during code review). 
Fixes: 0628b123c96d12 ("netfilter: nfnetlink: add batch support and use it from nf_tables") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 59 +++++++++++++++++++---------------- 1 file changed, 32 insertions(+), 27 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 9134cc429ad4..b1984f8f7253 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2361,41 +2361,46 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk, } if (nlh->nlmsg_flags & NLM_F_REPLACE) { - if (nft_is_active_next(net, old_rule)) { - trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, - old_rule); - if (trans == NULL) { - err = -ENOMEM; - goto err2; - } - nft_deactivate_next(net, old_rule); - chain->use--; - list_add_tail_rcu(&rule->list, &old_rule->list); - } else { + if (!nft_is_active_next(net, old_rule)) { err = -ENOENT; goto err2; } - } else if (nlh->nlmsg_flags & NLM_F_APPEND) - if (old_rule) - list_add_rcu(&rule->list, &old_rule->list); - else - list_add_tail_rcu(&rule->list, &chain->rules); - else { - if (old_rule) - list_add_tail_rcu(&rule->list, &old_rule->list); - else - list_add_rcu(&rule->list, &chain->rules); - } + trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE, + old_rule); + if (trans == NULL) { + err = -ENOMEM; + goto err2; + } + nft_deactivate_next(net, old_rule); + chain->use--; - if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { - err = -ENOMEM; - goto err3; + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { + err = -ENOMEM; + goto err2; + } + + list_add_tail_rcu(&rule->list, &old_rule->list); + } else { + if (nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, rule) == NULL) { + err = -ENOMEM; + goto err2; + } + + if (nlh->nlmsg_flags & NLM_F_APPEND) { + if (old_rule) + list_add_rcu(&rule->list, &old_rule->list); + else + list_add_tail_rcu(&rule->list, &chain->rules); + } else { + if (old_rule) + list_add_tail_rcu(&rule->list, &old_rule->list); + else + list_add_rcu(&rule->list, &chain->rules); + } } chain->use++; return 0; -err3: - list_del_rcu(&rule->list); err2: nf_tables_rule_destroy(&ctx, rule); err1: From 2f6adf481527c8ab8033c601f55bfb5b3712b2ac Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 10 Apr 2018 09:00:24 +0200 Subject: [PATCH 056/182] netfilter: nf_tables: free set name in error path set->name must be free'd here in case ops->init fails. 
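The bug class is easy to reproduce outside the kernel; here is a made-up, self-contained sketch of the unwind-label idiom this fix restores, where every allocation that precedes a failing step needs its own label on the error path:

  #include <stdlib.h>
  #include <string.h>

  struct set { char *name; int *elems; };

  static struct set *set_create(const char *name, size_t n)
  {
      struct set *s = malloc(sizeof(*s));

      if (!s)
          goto err1;
      s->name = strdup(name);
      if (!s->name)
          goto err2;
      s->elems = calloc(n, sizeof(*s->elems));
      if (!s->elems)
          goto err3;      /* must release the name allocated above */
      return s;
  err3:
      free(s->name);
  err2:
      free(s);
  err1:
      return NULL;
  }

  int main(void)
  {
      struct set *s = set_create("example", 16);

      if (!s)
          return 1;
      free(s->elems);
      free(s->name);
      free(s);
      return 0;
  }
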
Fixes: 387454901bd6 ("netfilter: nf_tables: Allow set names of up to 255 chars") Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b1984f8f7253..102ad873acb4 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3212,18 +3212,20 @@ static int nf_tables_newset(struct net *net, struct sock *nlsk, err = ops->init(set, &desc, nla); if (err < 0) - goto err2; + goto err3; err = nft_trans_set_add(&ctx, NFT_MSG_NEWSET, set); if (err < 0) - goto err3; + goto err4; list_add_tail_rcu(&set->list, &table->sets); table->use++; return 0; -err3: +err4: ops->destroy(set); +err3: + kfree(set->name); err2: kvfree(set); err1: From b8e47d87be65aec931846ced9a34a22d2021c311 Mon Sep 17 00:00:00 2001 From: Ramalingam C Date: Thu, 5 Apr 2018 17:33:22 +0530 Subject: [PATCH 057/182] drm: Fix HDCP downstream dev count read In both HDMI and DP, device count is represented by 6:0 bits of a register(BInfo/Bstatus) So macro for bitmasking the device_count is fixed(0x3F->0x7F). v3: Retained the Rb-ed. v4: %s/drm\/i915/drm [rodrigo] v5: Added "Fixes:" and HDCP keyword in subject [Rodrigo, Sean Paul] Signed-off-by: Ramalingam C Fixes: 495eb7f877ab drm: Add some HDCP related #defines cc: Sean Paul Reviewed-by: Sean Paul Signed-off-by: Sean Paul Link: https://patchwork.freedesktop.org/patch/msgid/1522929802-22850-1-git-send-email-ramalingam.c@intel.com --- include/drm/drm_hdcp.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h index 562fa7df2637..98e63d870139 100644 --- a/include/drm/drm_hdcp.h +++ b/include/drm/drm_hdcp.h @@ -19,7 +19,7 @@ #define DRM_HDCP_RI_LEN 2 #define DRM_HDCP_V_PRIME_PART_LEN 4 #define DRM_HDCP_V_PRIME_NUM_PARTS 5 -#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x3f) +#define DRM_HDCP_NUM_DOWNSTREAM(x) (x & 0x7f) #define DRM_HDCP_MAX_CASCADE_EXCEEDED(x) (x & BIT(3)) #define DRM_HDCP_MAX_DEVICE_EXCEEDED(x) (x & BIT(7)) From 5c8dad48e4f53d6fd0a7e4f95d7c1c983374de88 Mon Sep 17 00:00:00 2001 From: Song Liu Date: Fri, 13 Apr 2018 11:55:13 -0700 Subject: [PATCH 058/182] trace_kprobe: Remove warning message "Could not insert probe at..." This warning message is not very helpful, as the return value should already show information about the error. Also, this message will spam dmesg if the user space does testing in a loop, like: for x in {0..5} do echo p:xx xx+$x >> /sys/kernel/debug/tracing/kprobe_events done Reported-by: Vince Weaver Signed-off-by: Song Liu Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: kernel-team@fb.com Link: http://lkml.kernel.org/r/20180413185513.3626052-1-songliubraving@fb.com Signed-off-by: Ingo Molnar --- kernel/trace/trace_kprobe.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 1cd3fb4d70f8..02aed76e0978 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -512,8 +512,6 @@ static int __register_trace_kprobe(struct trace_kprobe *tk) if (ret == 0) tk->tp.flags |= TP_FLAG_REGISTERED; else { - pr_warn("Could not insert probe at %s+%lu: %d\n", - trace_kprobe_symbol(tk), trace_kprobe_offset(tk), ret); if (ret == -ENOENT && trace_kprobe_is_on_module(tk)) { pr_warn("This probe might be able to register after target module is loaded. 
Continue.\n"); ret = 0; From b11954a6971baa5842e24e6c0dcf56f117249638 Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Fri, 30 Mar 2018 15:11:30 +0100 Subject: [PATCH 059/182] drm/exynos: Move GEM BOs to drm_framebuffer Since drm_framebuffer can now store GEM objects directly, place them there rather than in our own subclass. As this makes the framebuffer create_handle and destroy functions the same as the GEM framebuffer helper, we can reuse those. Signed-off-by: Daniel Stone Signed-off-by: Inki Dae Cc: Inki Dae Cc: Joonyoung Shim Cc: Seung-Woo Kim Cc: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_fb.c | 39 +++----------------------- 1 file changed, 4 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 0faaf829f5bf..f28ce493e314 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "exynos_drm_drv.h" @@ -36,7 +37,6 @@ */ struct exynos_drm_fb { struct drm_framebuffer fb; - struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER]; dma_addr_t dma_addr[MAX_FB_BUFFER]; }; @@ -66,40 +66,9 @@ static int check_fb_gem_memory_type(struct drm_device *drm_dev, return 0; } -static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) -{ - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - unsigned int i; - - drm_framebuffer_cleanup(fb); - - for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem); i++) { - struct drm_gem_object *obj; - - if (exynos_fb->exynos_gem[i] == NULL) - continue; - - obj = &exynos_fb->exynos_gem[i]->base; - drm_gem_object_unreference_unlocked(obj); - } - - kfree(exynos_fb); - exynos_fb = NULL; -} - -static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb, - struct drm_file *file_priv, - unsigned int *handle) -{ - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); - - return drm_gem_handle_create(file_priv, - &exynos_fb->exynos_gem[0]->base, handle); -} - static const struct drm_framebuffer_funcs exynos_drm_fb_funcs = { - .destroy = exynos_drm_fb_destroy, - .create_handle = exynos_drm_fb_create_handle, + .destroy = drm_gem_fb_destroy, + .create_handle = drm_gem_fb_create_handle, }; struct drm_framebuffer * @@ -121,7 +90,7 @@ exynos_drm_framebuffer_init(struct drm_device *dev, if (ret < 0) goto err; - exynos_fb->exynos_gem[i] = exynos_gem[i]; + exynos_fb->fb.obj[i] = &exynos_gem[i]->base; exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr + mode_cmd->offsets[i]; } From 7b30508f5116574f94b50c71d3da1089d145e603 Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Fri, 30 Mar 2018 15:11:31 +0100 Subject: [PATCH 060/182] drm/exynos: Move dma_addr out of exynos_drm_fb This can be calculated from the GEM BO DMA address as well as the offset stored in the base framebuffer. 
Signed-off-by: Daniel Stone Signed-off-by: Inki Dae Cc: Inki Dae Cc: Joonyoung Shim Cc: Seung-Woo Kim Cc: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_fb.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index f28ce493e314..168c71f80b72 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -37,7 +37,6 @@ */ struct exynos_drm_fb { struct drm_framebuffer fb; - dma_addr_t dma_addr[MAX_FB_BUFFER]; }; static int check_fb_gem_memory_type(struct drm_device *drm_dev, @@ -91,8 +90,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev, goto err; exynos_fb->fb.obj[i] = &exynos_gem[i]->base; - exynos_fb->dma_addr[i] = exynos_gem[i]->dma_addr - + mode_cmd->offsets[i]; } drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); @@ -160,12 +157,13 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv, dma_addr_t exynos_drm_fb_dma_addr(struct drm_framebuffer *fb, int index) { - struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); + struct exynos_drm_gem *exynos_gem; if (WARN_ON_ONCE(index >= MAX_FB_BUFFER)) return 0; - return exynos_fb->dma_addr[index]; + exynos_gem = to_exynos_gem(fb->obj[index]); + return exynos_gem->dma_addr + fb->offsets[index]; } static struct drm_mode_config_helper_funcs exynos_drm_mode_config_helpers = { From ff059fcbeed9cbed7421f82d1463dd74c472636e Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Fri, 30 Mar 2018 15:11:32 +0100 Subject: [PATCH 061/182] drm/exynos: exynos_drm_fb -> drm_framebuffer Now exynos_drm_fb is just an empty wrapper around drm_framebuffer, we can drop it. Signed-off-by: Daniel Stone Signed-off-by: Inki Dae Cc: Inki Dae Cc: Joonyoung Shim Cc: Seung-Woo Kim Cc: Kyungmin Park --- drivers/gpu/drm/exynos/exynos_drm_fb.c | 28 ++++++++------------------ 1 file changed, 8 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c index 168c71f80b72..f0e79178bde6 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_fb.c +++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c @@ -27,18 +27,6 @@ #include "exynos_drm_iommu.h" #include "exynos_drm_crtc.h" -#define to_exynos_fb(x) container_of(x, struct exynos_drm_fb, fb) - -/* - * exynos specific framebuffer structure. - * - * @fb: drm framebuffer obejct. - * @exynos_gem: array of exynos specific gem object containing a gem object. 
- */ -struct exynos_drm_fb { - struct drm_framebuffer fb; -}; - static int check_fb_gem_memory_type(struct drm_device *drm_dev, struct exynos_drm_gem *exynos_gem) { @@ -76,12 +64,12 @@ exynos_drm_framebuffer_init(struct drm_device *dev, struct exynos_drm_gem **exynos_gem, int count) { - struct exynos_drm_fb *exynos_fb; + struct drm_framebuffer *fb; int i; int ret; - exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL); - if (!exynos_fb) + fb = kzalloc(sizeof(*fb), GFP_KERNEL); + if (!fb) return ERR_PTR(-ENOMEM); for (i = 0; i < count; i++) { @@ -89,21 +77,21 @@ exynos_drm_framebuffer_init(struct drm_device *dev, if (ret < 0) goto err; - exynos_fb->fb.obj[i] = &exynos_gem[i]->base; + fb->obj[i] = &exynos_gem[i]->base; } - drm_helper_mode_fill_fb_struct(dev, &exynos_fb->fb, mode_cmd); + drm_helper_mode_fill_fb_struct(dev, fb, mode_cmd); - ret = drm_framebuffer_init(dev, &exynos_fb->fb, &exynos_drm_fb_funcs); + ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs); if (ret < 0) { DRM_ERROR("failed to initialize framebuffer\n"); goto err; } - return &exynos_fb->fb; + return fb; err: - kfree(exynos_fb); + kfree(fb); return ERR_PTR(ret); } From 596a9f6768af58d7034f6ebe559a0d489ae61467 Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Thu, 12 Apr 2018 14:21:54 -0500 Subject: [PATCH 062/182] objtool: Support HOSTCFLAGS and HOSTLDFLAGS It may be useful to compile host programs with different flags (e.g. hardening). Ensure that objtool picks up the appropriate flags. Signed-off-by: Laura Abbott Signed-off-by: Josh Poimboeuf Cc: Linus Torvalds Cc: Masahiro Yamada Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kbuild@vger.kernel.org Link: http://lkml.kernel.org/r/05a360681176f1423cb2fde8faae3a0a0261afc5.1523560825.git.jpoimboe@redhat.com Signed-off-by: Ingo Molnar --- tools/objtool/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile index 8ae824dbfca3..f76d9914686a 100644 --- a/tools/objtool/Makefile +++ b/tools/objtool/Makefile @@ -31,8 +31,8 @@ INCLUDES := -I$(srctree)/tools/include \ -I$(srctree)/tools/arch/$(HOSTARCH)/include/uapi \ -I$(srctree)/tools/objtool/arch/$(ARCH)/include WARNINGS := $(EXTRA_WARNINGS) -Wno-switch-default -Wno-switch-enum -Wno-packed -CFLAGS += -Wall -Werror $(WARNINGS) -fomit-frame-pointer -O2 -g $(INCLUDES) -LDFLAGS += -lelf $(LIBSUBCMD) +CFLAGS += -Werror $(WARNINGS) $(HOSTCFLAGS) -g $(INCLUDES) +LDFLAGS += -lelf $(LIBSUBCMD) $(HOSTLDFLAGS) # Allow old libelf to be used: elfshdr := $(shell echo '$(pound)include ' | $(CC) $(CFLAGS) -x c -E - | grep elf_getshdr) From 765cca91b895c8b747bca0b5fa54d1dc85c867a7 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Tue, 17 Apr 2018 09:28:59 +1000 Subject: [PATCH 063/182] netfilter: conntrack: include kmemleak.h for kmemleak_not_leak() After merging the netfilter tree, today's linux-next build (powerpc ppc64_defconfig) failed like this: net/netfilter/nf_conntrack_extend.c: In function 'nf_ct_ext_add': net/netfilter/nf_conntrack_extend.c:74:2: error: implicit declaration of function 'kmemleak_not_leak' [-Werror=implicit-function-declaration] kmemleak_not_leak(old); ^~~~~~~~~~~~~~~~~ cc1: some warnings being treated as errors Fixes: 114aa35d06d4 ("netfilter: conntrack: silent a memory leak warning") Signed-off-by: Stephen Rothwell Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_conntrack_extend.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/nf_conntrack_extend.c b/net/netfilter/nf_conntrack_extend.c index 
bd71a828ebde..277bbfe26478 100644 --- a/net/netfilter/nf_conntrack_extend.c +++ b/net/netfilter/nf_conntrack_extend.c @@ -9,6 +9,7 @@ * 2 of the License, or (at your option) any later version. */ #include +#include #include #include #include From ef97837db916075af54554552628bcb0840df62f Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 16 Apr 2018 14:44:42 +0200 Subject: [PATCH 064/182] x86: Remove pci-nommu.c The commit that switched x86 to dma_direct_ops stopped using and building this file, but accidentally left it in the tree. Remove it. Signed-off-by: Christoph Hellwig Signed-off-by: Thomas Gleixner Cc: iommu@lists.infradead.org Link: https://lkml.kernel.org/r/20180416124442.13831-1-hch@lst.de --- arch/x86/kernel/pci-nommu.c | 90 ------------------------------------- 1 file changed, 90 deletions(-) delete mode 100644 arch/x86/kernel/pci-nommu.c diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c deleted file mode 100644 index ac7ea3a8242f..000000000000 --- a/arch/x86/kernel/pci-nommu.c +++ /dev/null @@ -1,90 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* Fallback functions when the main IOMMU code is not compiled in. This - code is roughly equivalent to i386. */ -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -#define NOMMU_MAPPING_ERROR 0 - -static int -check_addr(char *name, struct device *hwdev, dma_addr_t bus, size_t size) -{ - if (hwdev && !dma_capable(hwdev, bus, size)) { - if (*hwdev->dma_mask >= DMA_BIT_MASK(32)) - printk(KERN_ERR - "nommu_%s: overflow %Lx+%zu of device mask %Lx\n", - name, (long long)bus, size, - (long long)*hwdev->dma_mask); - return 0; - } - return 1; -} - -static dma_addr_t nommu_map_page(struct device *dev, struct page *page, - unsigned long offset, size_t size, - enum dma_data_direction dir, - unsigned long attrs) -{ - dma_addr_t bus = phys_to_dma(dev, page_to_phys(page)) + offset; - WARN_ON(size == 0); - if (!check_addr("map_single", dev, bus, size)) - return NOMMU_MAPPING_ERROR; - return bus; -} - -/* Map a set of buffers described by scatterlist in streaming - * mode for DMA. This is the scatter-gather version of the - * above pci_map_single interface. Here the scatter gather list - * elements are each tagged with the appropriate dma address - * and length. They are obtained via sg_dma_{address,length}(SG). - * - * NOTE: An implementation may be able to use a smaller number of - * DMA address/length pairs than there are SG table elements. - * (for example via virtual mapping capabilities) - * The routine returns the number of addr/length pairs actually - * used, at most nents. - * - * Device ownership issues as mentioned above for pci_map_single are - * the same here. 
- */ -static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg, - int nents, enum dma_data_direction dir, - unsigned long attrs) -{ - struct scatterlist *s; - int i; - - WARN_ON(nents == 0 || sg[0].length == 0); - - for_each_sg(sg, s, nents, i) { - BUG_ON(!sg_page(s)); - s->dma_address = sg_phys(s); - if (!check_addr("map_sg", hwdev, s->dma_address, s->length)) - return 0; - s->dma_length = s->length; - } - return nents; -} - -static int nommu_mapping_error(struct device *dev, dma_addr_t dma_addr) -{ - return dma_addr == NOMMU_MAPPING_ERROR; -} - -const struct dma_map_ops nommu_dma_ops = { - .alloc = dma_generic_alloc_coherent, - .free = dma_generic_free_coherent, - .map_sg = nommu_map_sg, - .map_page = nommu_map_page, - .is_phys = 1, - .mapping_error = nommu_mapping_error, - .dma_supported = x86_dma_supported, -}; From d3878e164dcd3925a237a20e879432400e369172 Mon Sep 17 00:00:00 2001 From: Xiaoming Gao Date: Fri, 13 Apr 2018 17:48:08 +0800 Subject: [PATCH 065/182] x86/tsc: Prevent 32bit truncation in calc_hpet_ref() The TSC calibration code uses HPET as reference. The conversion normalizes the delta of two HPET timestamps: hpetref = ((tshpet1 - tshpet2) * HPET_PERIOD) / 1e6 and then divides the normalized delta of the corresponding TSC timestamps by the result to calulate the TSC frequency. tscfreq = ((tstsc1 - tstsc2 ) * 1e6) / hpetref This uses do_div() which takes an u32 as the divisor, which worked so far because the HPET frequency was low enough that 'hpetref' never exceeded 32bit. On Skylake machines the HPET frequency increased so 'hpetref' can exceed 32bit. do_div() truncates the divisor, which causes the calibration to fail. Use div64_u64() to avoid the problem. [ tglx: Fixes whitespace mangled patch and rewrote changelog ] Signed-off-by: Xiaoming Gao Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Cc: peterz@infradead.org Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/38894564-4fc9-b8ec-353f-de702839e44e@gmail.com --- arch/x86/kernel/tsc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index ef32297ff17e..91e6da48cbb6 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -317,7 +317,7 @@ static unsigned long calc_hpet_ref(u64 deltatsc, u64 hpet1, u64 hpet2) hpet2 -= hpet1; tmp = ((u64)hpet2 * hpet_readl(HPET_PERIOD)); do_div(tmp, 1000000); - do_div(deltatsc, tmp); + deltatsc = div64_u64(deltatsc, tmp); return (unsigned long) deltatsc; } From 10daf10ab154e31237a8c07242be3063fb6a9bf4 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Thu, 12 Apr 2018 09:40:52 +0800 Subject: [PATCH 066/182] x86/acpi: Prevent X2APIC id 0xffffffff from being accounted MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit RongQing reported that there are some X2APIC id 0xffffffff in his machine's ACPI MADT table, which makes the number of possible CPU inaccurate. The reason is that the ACPI X2APIC parser has no sanity check for APIC ID 0xffffffff, which is an invalid id in all APIC types. See "Intel® 64 Architecture x2APIC Specification", Chapter 2.4.1. Add a sanity check to acpi_parse_x2apic() which ignores the invalid id. 
Reported-by: Li RongQing Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Cc: len.brown@intel.com Cc: rjw@rjwysocki.net Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180412014052.25186-1-douly.fnst@cn.fujitsu.com --- arch/x86/kernel/acpi/boot.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index dde444f932c1..3b20607d581b 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -215,6 +215,10 @@ acpi_parse_x2apic(struct acpi_subtable_header *header, const unsigned long end) apic_id = processor->local_apic_id; enabled = processor->lapic_flags & ACPI_MADT_ENABLED; + /* Ignore invalid ID */ + if (apic_id == 0xffffffff) + return 0; + /* * We need to register disabled CPU as well to permit * counting disabled CPUs. This allows us to size From 451cf3ca7d4615631443014ee769c25e267c25ff Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Wed, 4 Apr 2018 14:45:27 +0800 Subject: [PATCH 067/182] x86/processor: Remove two unused function declarations early_trap_init() and cpu_set_gdt() have been removed, so remove the stale declarations as well. Signed-off-by: Dou Liyang Signed-off-by: Thomas Gleixner Cc: keescook@chromium.org Cc: luto@kernel.org Cc: hpa@zytor.com Cc: bp@suse.de Cc: kirill.shutemov@linux.intel.com Link: https://lkml.kernel.org/r/20180404064527.10562-1-douly.fnst@cn.fujitsu.com --- arch/x86/include/asm/processor.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 4fa4206029e3..21a114914ba4 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -749,13 +749,11 @@ enum idle_boot_override {IDLE_NO_OVERRIDE=0, IDLE_HALT, IDLE_NOMWAIT, extern void enable_sep_cpu(void); extern int sysenter_setup(void); -extern void early_trap_init(void); void early_trap_pf_init(void); /* Defined in head.S */ extern struct desc_ptr early_gdt_descr; -extern void cpu_set_gdt(int); extern void switch_to_new_gdt(int); extern void load_direct_gdt(int); extern void load_fixmap_gdt(int); From e2f73a1828e9ffd2765ce1726b9a9c6e022e3cd6 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Mon, 16 Apr 2018 08:18:22 +0200 Subject: [PATCH 068/182] tools/headers: Synchronize kernel ABI headers, v4.17-rc1 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sync the following tooling headers with the latest kernel version: tools/arch/arm/include/uapi/asm/kvm.h - New ABI: KVM_REG_ARM_* tools/arch/x86/include/asm/required-features.h - Removal of NEED_LA57 dependency tools/arch/x86/include/uapi/asm/kvm.h - New KVM ABI: KVM_SYNC_X86_* tools/include/uapi/asm-generic/mman-common.h - New ABI: MAP_FIXED_NOREPLACE flag tools/include/uapi/linux/bpf.h - New ABI: BPF_F_SEQ_NUMBER functions tools/include/uapi/linux/if_link.h - New ABI: IFLA tun and rmnet support tools/include/uapi/linux/kvm.h - New ABI: hyperv eventfd and CONN_ID_MASK support plus header cleanups tools/include/uapi/sound/asound.h - New ABI: SNDRV_PCM_FORMAT_FIRST PCM format specifier tools/perf/arch/x86/entry/syscalls/syscall_64.tbl - The x86 system call table description changed due to the ptregs changes and the renames, in: d5a00528b58c: syscalls/core, syscalls/x86: Rename struct pt_regs-based sys_*() to __x64_sys_*() 5ac9efa3c50d: syscalls/core, syscalls/x86: Clean up compat syscall stub naming convention ebeb8c82ffaf: syscalls/x86: Use 'struct pt_regs' based syscall calling for IA32_EMULATION and x32 Also 
fix the x86 syscall table warning: -Warning: Kernel ABI header at 'tools/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl' +Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl' None of these changes impact existing tooling code, so we only have to copy the kernel version. Signed-off-by: Ingo Molnar Cc: Adrian Hunter Cc: Alexander Potapenko Cc: Alexander Shishkin Cc: Alexei Starovoitov Cc: Alexey Budankov Cc: Andi Kleen Cc: Andrey Ryabinin Cc: Andy Lutomirski Cc: Arnd Bergmann Cc: Brian Robbins Cc: Clark Williams Cc: Daniel Borkmann Cc: David Ahern Cc: Dmitriy Vyukov Cc: Heiko Carstens Cc: Hendrik Brueckner Cc: Jesper Dangaard Brouer Cc: Jin Yao Cc: Jiri Olsa Cc: Josh Poimboeuf Cc: Kan Liang Cc: Kim Phillips Cc: Linus Torvalds Cc: Li Zhijian Cc: Mark Rutland Cc: Martin Liška Cc: Martin Schwidefsky Cc: Matthias Kaehlcke Cc: Miguel Bernal Marin Cc: Namhyung Kim Cc: Naveen N. Rao Cc: Peter Zijlstra Cc: Ravi Bangoria Cc: Sandipan Das Cc: Stephane Eranian Cc: Stephen Rothwell Cc: Takuya Yamamoto Cc: Thomas Gleixner Cc: Thomas Richter Cc: Wang Nan Cc: William Cohen Cc: Yonghong Song Link: http://lkml.kernel.org/r/20180416064024.ofjtrz5yuu3ykhvl@gmail.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/arm/include/uapi/asm/kvm.h | 9 + .../arch/x86/include/asm/required-features.h | 8 +- tools/arch/x86/include/uapi/asm/kvm.h | 19 +- tools/include/uapi/asm-generic/mman-common.h | 3 + tools/include/uapi/linux/bpf.h | 1 + tools/include/uapi/linux/if_link.h | 39 + tools/include/uapi/linux/kvm.h | 21 +- tools/include/uapi/sound/asound.h | 1 + tools/perf/arch/x86/Makefile | 2 +- .../arch/x86/entry/syscalls/syscall_64.tbl | 712 +++++++++--------- 10 files changed, 451 insertions(+), 364 deletions(-) diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h index 6edd177bb1c7..2ba95d6fe852 100644 --- a/tools/arch/arm/include/uapi/asm/kvm.h +++ b/tools/arch/arm/include/uapi/asm/kvm.h @@ -135,6 +135,15 @@ struct kvm_arch_memory_slot { #define KVM_REG_ARM_CRM_SHIFT 7 #define KVM_REG_ARM_32_CRN_MASK 0x0000000000007800 #define KVM_REG_ARM_32_CRN_SHIFT 11 +/* + * For KVM currently all guest registers are nonsecure, but we reserve a bit + * in the encoding to distinguish secure from nonsecure for AArch32 system + * registers that are banked by security. This is 1 for the secure banked + * register, and 0 for the nonsecure banked register or if the register is + * not banked by security. 
+ */ +#define KVM_REG_ARM_SECURE_MASK 0x0000000010000000 +#define KVM_REG_ARM_SECURE_SHIFT 28 #define ARM_CP15_REG_SHIFT_MASK(x,n) \ (((x) << KVM_REG_ARM_ ## n ## _SHIFT) & KVM_REG_ARM_ ## n ## _MASK) diff --git a/tools/arch/x86/include/asm/required-features.h b/tools/arch/x86/include/asm/required-features.h index fb3a6de7440b..6847d85400a8 100644 --- a/tools/arch/x86/include/asm/required-features.h +++ b/tools/arch/x86/include/asm/required-features.h @@ -53,12 +53,6 @@ # define NEED_MOVBE 0 #endif -#ifdef CONFIG_X86_5LEVEL -# define NEED_LA57 (1<<(X86_FEATURE_LA57 & 31)) -#else -# define NEED_LA57 0 -#endif - #ifdef CONFIG_X86_64 #ifdef CONFIG_PARAVIRT /* Paravirtualized systems may not have PSE or PGE available */ @@ -104,7 +98,7 @@ #define REQUIRED_MASK13 0 #define REQUIRED_MASK14 0 #define REQUIRED_MASK15 0 -#define REQUIRED_MASK16 (NEED_LA57) +#define REQUIRED_MASK16 0 #define REQUIRED_MASK17 0 #define REQUIRED_MASK18 0 #define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 19) diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h index f3a960488eae..c535c2fdea13 100644 --- a/tools/arch/x86/include/uapi/asm/kvm.h +++ b/tools/arch/x86/include/uapi/asm/kvm.h @@ -354,8 +354,25 @@ struct kvm_xcrs { __u64 padding[16]; }; -/* definition of registers in kvm_run */ +#define KVM_SYNC_X86_REGS (1UL << 0) +#define KVM_SYNC_X86_SREGS (1UL << 1) +#define KVM_SYNC_X86_EVENTS (1UL << 2) + +#define KVM_SYNC_X86_VALID_FIELDS \ + (KVM_SYNC_X86_REGS| \ + KVM_SYNC_X86_SREGS| \ + KVM_SYNC_X86_EVENTS) + +/* kvm_sync_regs struct included by kvm_run struct */ struct kvm_sync_regs { + /* Members of this structure are potentially malicious. + * Care must be taken by code reading, esp. interpreting, + * data fields from them inside KVM to prevent TOCTOU and + * double-fetch types of vulnerabilities. + */ + struct kvm_regs regs; + struct kvm_sregs sregs; + struct kvm_vcpu_events events; }; #define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0) diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h index f8b134f5608f..e7ee32861d51 100644 --- a/tools/include/uapi/asm-generic/mman-common.h +++ b/tools/include/uapi/asm-generic/mman-common.h @@ -27,6 +27,9 @@ # define MAP_UNINITIALIZED 0x0 /* Don't support this flag */ #endif +/* 0x0100 - 0x80000 flags are defined in asm-generic/mman.h */ +#define MAP_FIXED_NOREPLACE 0x100000 /* MAP_FIXED which doesn't unmap underlying mapping */ + /* * Flags for mlock */ diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h index 9d07465023a2..c5ec89732a8d 100644 --- a/tools/include/uapi/linux/bpf.h +++ b/tools/include/uapi/linux/bpf.h @@ -864,6 +864,7 @@ enum bpf_func_id { /* BPF_FUNC_skb_set_tunnel_key flags. */ #define BPF_F_ZERO_CSUM_TX (1ULL << 1) #define BPF_F_DONT_FRAGMENT (1ULL << 2) +#define BPF_F_SEQ_NUMBER (1ULL << 3) /* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and * BPF_FUNC_perf_event_read_value flags. 
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h index 6d9447700e18..68699f654118 100644 --- a/tools/include/uapi/linux/if_link.h +++ b/tools/include/uapi/linux/if_link.h @@ -941,4 +941,43 @@ enum { IFLA_EVENT_BONDING_OPTIONS, /* change in bonding options */ }; +/* tun section */ + +enum { + IFLA_TUN_UNSPEC, + IFLA_TUN_OWNER, + IFLA_TUN_GROUP, + IFLA_TUN_TYPE, + IFLA_TUN_PI, + IFLA_TUN_VNET_HDR, + IFLA_TUN_PERSIST, + IFLA_TUN_MULTI_QUEUE, + IFLA_TUN_NUM_QUEUES, + IFLA_TUN_NUM_DISABLED_QUEUES, + __IFLA_TUN_MAX, +}; + +#define IFLA_TUN_MAX (__IFLA_TUN_MAX - 1) + +/* rmnet section */ + +#define RMNET_FLAGS_INGRESS_DEAGGREGATION (1U << 0) +#define RMNET_FLAGS_INGRESS_MAP_COMMANDS (1U << 1) +#define RMNET_FLAGS_INGRESS_MAP_CKSUMV4 (1U << 2) +#define RMNET_FLAGS_EGRESS_MAP_CKSUMV4 (1U << 3) + +enum { + IFLA_RMNET_UNSPEC, + IFLA_RMNET_MUX_ID, + IFLA_RMNET_FLAGS, + __IFLA_RMNET_MAX, +}; + +#define IFLA_RMNET_MAX (__IFLA_RMNET_MAX - 1) + +struct ifla_rmnet_flags { + __u32 flags; + __u32 mask; +}; + #endif /* _UAPI_LINUX_IF_LINK_H */ diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h index 6b89f87db200..1065006c9bf5 100644 --- a/tools/include/uapi/linux/kvm.h +++ b/tools/include/uapi/linux/kvm.h @@ -396,6 +396,10 @@ struct kvm_run { char padding[256]; }; + /* 2048 is the size of the char array used to bound/pad the size + * of the union that holds sync regs. + */ + #define SYNC_REGS_SIZE_BYTES 2048 /* * shared registers between kvm and userspace. * kvm_valid_regs specifies the register classes set by the host @@ -407,7 +411,7 @@ struct kvm_run { __u64 kvm_dirty_regs; union { struct kvm_sync_regs regs; - char padding[2048]; + char padding[SYNC_REGS_SIZE_BYTES]; } s; }; @@ -936,6 +940,7 @@ struct kvm_ppc_resize_hpt { #define KVM_CAP_PPC_GET_CPU_CHAR 151 #define KVM_CAP_S390_BPB 152 #define KVM_CAP_GET_MSR_FEATURES 153 +#define KVM_CAP_HYPERV_EVENTFD 154 #ifdef KVM_CAP_IRQ_ROUTING @@ -1375,6 +1380,10 @@ struct kvm_enc_region { #define KVM_MEMORY_ENCRYPT_REG_REGION _IOR(KVMIO, 0xbb, struct kvm_enc_region) #define KVM_MEMORY_ENCRYPT_UNREG_REGION _IOR(KVMIO, 0xbc, struct kvm_enc_region) +/* Available with KVM_CAP_HYPERV_EVENTFD */ +#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd) + + /* Secure Encrypted Virtualization command */ enum sev_cmd_id { /* Guest initialization commands */ @@ -1515,4 +1524,14 @@ struct kvm_assigned_msix_entry { #define KVM_ARM_DEV_EL1_PTIMER (1 << 1) #define KVM_ARM_DEV_PMU (1 << 2) +struct kvm_hyperv_eventfd { + __u32 conn_id; + __s32 fd; + __u32 flags; + __u32 padding[3]; +}; + +#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff +#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0) + #endif /* __LINUX_KVM_H */ diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h index 07d61583fd02..ed0a120d4f08 100644 --- a/tools/include/uapi/sound/asound.h +++ b/tools/include/uapi/sound/asound.h @@ -242,6 +242,7 @@ typedef int __bitwise snd_pcm_format_t; #define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */ #define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */ #define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE +#define SNDRV_PCM_FORMAT_FIRST SNDRV_PCM_FORMAT_S8 #ifdef SNDRV_LITTLE_ENDIAN #define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE diff --git a/tools/perf/arch/x86/Makefile b/tools/perf/arch/x86/Makefile index d74eaa7aa927..1a38e78117ce 100644 --- 
a/tools/perf/arch/x86/Makefile +++ b/tools/perf/arch/x86/Makefile @@ -21,7 +21,7 @@ _dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') $(header): $(sys)/syscall_64.tbl $(systbl) @(test -d ../../kernel -a -d ../../tools -a -d ../perf && ( \ (diff -B arch/x86/entry/syscalls/syscall_64.tbl ../../arch/x86/entry/syscalls/syscall_64.tbl >/dev/null) \ - || echo "Warning: Kernel ABI header at 'tools/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true + || echo "Warning: Kernel ABI header at 'tools/perf/arch/x86/entry/syscalls/syscall_64.tbl' differs from latest version at 'arch/x86/entry/syscalls/syscall_64.tbl'" >&2 )) || true $(Q)$(SHELL) '$(systbl)' $(sys)/syscall_64.tbl 'x86_64' > $@ clean:: diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl index 5aef183e2f85..4dfe42666d0c 100644 --- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl +++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl @@ -4,379 +4,383 @@ # The format is: # # +# The __x64_sys_*() stubs are created on-the-fly for sys_*() system calls +# # The abi is "common", "64" or "x32" for this file. # -0 common read sys_read -1 common write sys_write -2 common open sys_open -3 common close sys_close -4 common stat sys_newstat -5 common fstat sys_newfstat -6 common lstat sys_newlstat -7 common poll sys_poll -8 common lseek sys_lseek -9 common mmap sys_mmap -10 common mprotect sys_mprotect -11 common munmap sys_munmap -12 common brk sys_brk -13 64 rt_sigaction sys_rt_sigaction -14 common rt_sigprocmask sys_rt_sigprocmask -15 64 rt_sigreturn sys_rt_sigreturn/ptregs -16 64 ioctl sys_ioctl -17 common pread64 sys_pread64 -18 common pwrite64 sys_pwrite64 -19 64 readv sys_readv -20 64 writev sys_writev -21 common access sys_access -22 common pipe sys_pipe -23 common select sys_select -24 common sched_yield sys_sched_yield -25 common mremap sys_mremap -26 common msync sys_msync -27 common mincore sys_mincore -28 common madvise sys_madvise -29 common shmget sys_shmget -30 common shmat sys_shmat -31 common shmctl sys_shmctl -32 common dup sys_dup -33 common dup2 sys_dup2 -34 common pause sys_pause -35 common nanosleep sys_nanosleep -36 common getitimer sys_getitimer -37 common alarm sys_alarm -38 common setitimer sys_setitimer -39 common getpid sys_getpid -40 common sendfile sys_sendfile64 -41 common socket sys_socket -42 common connect sys_connect -43 common accept sys_accept -44 common sendto sys_sendto -45 64 recvfrom sys_recvfrom -46 64 sendmsg sys_sendmsg -47 64 recvmsg sys_recvmsg -48 common shutdown sys_shutdown -49 common bind sys_bind -50 common listen sys_listen -51 common getsockname sys_getsockname -52 common getpeername sys_getpeername -53 common socketpair sys_socketpair -54 64 setsockopt sys_setsockopt -55 64 getsockopt sys_getsockopt -56 common clone sys_clone/ptregs -57 common fork sys_fork/ptregs -58 common vfork sys_vfork/ptregs -59 64 execve sys_execve/ptregs -60 common exit sys_exit -61 common wait4 sys_wait4 -62 common kill sys_kill -63 common uname sys_newuname -64 common semget sys_semget -65 common semop sys_semop -66 common semctl sys_semctl -67 common shmdt sys_shmdt -68 common msgget sys_msgget -69 common msgsnd sys_msgsnd -70 common msgrcv sys_msgrcv -71 common msgctl sys_msgctl -72 common fcntl sys_fcntl -73 common flock sys_flock -74 common fsync sys_fsync -75 common fdatasync sys_fdatasync -76 common truncate sys_truncate -77 common ftruncate sys_ftruncate -78 common 
getdents sys_getdents -79 common getcwd sys_getcwd -80 common chdir sys_chdir -81 common fchdir sys_fchdir -82 common rename sys_rename -83 common mkdir sys_mkdir -84 common rmdir sys_rmdir -85 common creat sys_creat -86 common link sys_link -87 common unlink sys_unlink -88 common symlink sys_symlink -89 common readlink sys_readlink -90 common chmod sys_chmod -91 common fchmod sys_fchmod -92 common chown sys_chown -93 common fchown sys_fchown -94 common lchown sys_lchown -95 common umask sys_umask -96 common gettimeofday sys_gettimeofday -97 common getrlimit sys_getrlimit -98 common getrusage sys_getrusage -99 common sysinfo sys_sysinfo -100 common times sys_times -101 64 ptrace sys_ptrace -102 common getuid sys_getuid -103 common syslog sys_syslog -104 common getgid sys_getgid -105 common setuid sys_setuid -106 common setgid sys_setgid -107 common geteuid sys_geteuid -108 common getegid sys_getegid -109 common setpgid sys_setpgid -110 common getppid sys_getppid -111 common getpgrp sys_getpgrp -112 common setsid sys_setsid -113 common setreuid sys_setreuid -114 common setregid sys_setregid -115 common getgroups sys_getgroups -116 common setgroups sys_setgroups -117 common setresuid sys_setresuid -118 common getresuid sys_getresuid -119 common setresgid sys_setresgid -120 common getresgid sys_getresgid -121 common getpgid sys_getpgid -122 common setfsuid sys_setfsuid -123 common setfsgid sys_setfsgid -124 common getsid sys_getsid -125 common capget sys_capget -126 common capset sys_capset -127 64 rt_sigpending sys_rt_sigpending -128 64 rt_sigtimedwait sys_rt_sigtimedwait -129 64 rt_sigqueueinfo sys_rt_sigqueueinfo -130 common rt_sigsuspend sys_rt_sigsuspend -131 64 sigaltstack sys_sigaltstack -132 common utime sys_utime -133 common mknod sys_mknod +0 common read __x64_sys_read +1 common write __x64_sys_write +2 common open __x64_sys_open +3 common close __x64_sys_close +4 common stat __x64_sys_newstat +5 common fstat __x64_sys_newfstat +6 common lstat __x64_sys_newlstat +7 common poll __x64_sys_poll +8 common lseek __x64_sys_lseek +9 common mmap __x64_sys_mmap +10 common mprotect __x64_sys_mprotect +11 common munmap __x64_sys_munmap +12 common brk __x64_sys_brk +13 64 rt_sigaction __x64_sys_rt_sigaction +14 common rt_sigprocmask __x64_sys_rt_sigprocmask +15 64 rt_sigreturn __x64_sys_rt_sigreturn/ptregs +16 64 ioctl __x64_sys_ioctl +17 common pread64 __x64_sys_pread64 +18 common pwrite64 __x64_sys_pwrite64 +19 64 readv __x64_sys_readv +20 64 writev __x64_sys_writev +21 common access __x64_sys_access +22 common pipe __x64_sys_pipe +23 common select __x64_sys_select +24 common sched_yield __x64_sys_sched_yield +25 common mremap __x64_sys_mremap +26 common msync __x64_sys_msync +27 common mincore __x64_sys_mincore +28 common madvise __x64_sys_madvise +29 common shmget __x64_sys_shmget +30 common shmat __x64_sys_shmat +31 common shmctl __x64_sys_shmctl +32 common dup __x64_sys_dup +33 common dup2 __x64_sys_dup2 +34 common pause __x64_sys_pause +35 common nanosleep __x64_sys_nanosleep +36 common getitimer __x64_sys_getitimer +37 common alarm __x64_sys_alarm +38 common setitimer __x64_sys_setitimer +39 common getpid __x64_sys_getpid +40 common sendfile __x64_sys_sendfile64 +41 common socket __x64_sys_socket +42 common connect __x64_sys_connect +43 common accept __x64_sys_accept +44 common sendto __x64_sys_sendto +45 64 recvfrom __x64_sys_recvfrom +46 64 sendmsg __x64_sys_sendmsg +47 64 recvmsg __x64_sys_recvmsg +48 common shutdown __x64_sys_shutdown +49 common bind __x64_sys_bind +50 common listen 
__x64_sys_listen +51 common getsockname __x64_sys_getsockname +52 common getpeername __x64_sys_getpeername +53 common socketpair __x64_sys_socketpair +54 64 setsockopt __x64_sys_setsockopt +55 64 getsockopt __x64_sys_getsockopt +56 common clone __x64_sys_clone/ptregs +57 common fork __x64_sys_fork/ptregs +58 common vfork __x64_sys_vfork/ptregs +59 64 execve __x64_sys_execve/ptregs +60 common exit __x64_sys_exit +61 common wait4 __x64_sys_wait4 +62 common kill __x64_sys_kill +63 common uname __x64_sys_newuname +64 common semget __x64_sys_semget +65 common semop __x64_sys_semop +66 common semctl __x64_sys_semctl +67 common shmdt __x64_sys_shmdt +68 common msgget __x64_sys_msgget +69 common msgsnd __x64_sys_msgsnd +70 common msgrcv __x64_sys_msgrcv +71 common msgctl __x64_sys_msgctl +72 common fcntl __x64_sys_fcntl +73 common flock __x64_sys_flock +74 common fsync __x64_sys_fsync +75 common fdatasync __x64_sys_fdatasync +76 common truncate __x64_sys_truncate +77 common ftruncate __x64_sys_ftruncate +78 common getdents __x64_sys_getdents +79 common getcwd __x64_sys_getcwd +80 common chdir __x64_sys_chdir +81 common fchdir __x64_sys_fchdir +82 common rename __x64_sys_rename +83 common mkdir __x64_sys_mkdir +84 common rmdir __x64_sys_rmdir +85 common creat __x64_sys_creat +86 common link __x64_sys_link +87 common unlink __x64_sys_unlink +88 common symlink __x64_sys_symlink +89 common readlink __x64_sys_readlink +90 common chmod __x64_sys_chmod +91 common fchmod __x64_sys_fchmod +92 common chown __x64_sys_chown +93 common fchown __x64_sys_fchown +94 common lchown __x64_sys_lchown +95 common umask __x64_sys_umask +96 common gettimeofday __x64_sys_gettimeofday +97 common getrlimit __x64_sys_getrlimit +98 common getrusage __x64_sys_getrusage +99 common sysinfo __x64_sys_sysinfo +100 common times __x64_sys_times +101 64 ptrace __x64_sys_ptrace +102 common getuid __x64_sys_getuid +103 common syslog __x64_sys_syslog +104 common getgid __x64_sys_getgid +105 common setuid __x64_sys_setuid +106 common setgid __x64_sys_setgid +107 common geteuid __x64_sys_geteuid +108 common getegid __x64_sys_getegid +109 common setpgid __x64_sys_setpgid +110 common getppid __x64_sys_getppid +111 common getpgrp __x64_sys_getpgrp +112 common setsid __x64_sys_setsid +113 common setreuid __x64_sys_setreuid +114 common setregid __x64_sys_setregid +115 common getgroups __x64_sys_getgroups +116 common setgroups __x64_sys_setgroups +117 common setresuid __x64_sys_setresuid +118 common getresuid __x64_sys_getresuid +119 common setresgid __x64_sys_setresgid +120 common getresgid __x64_sys_getresgid +121 common getpgid __x64_sys_getpgid +122 common setfsuid __x64_sys_setfsuid +123 common setfsgid __x64_sys_setfsgid +124 common getsid __x64_sys_getsid +125 common capget __x64_sys_capget +126 common capset __x64_sys_capset +127 64 rt_sigpending __x64_sys_rt_sigpending +128 64 rt_sigtimedwait __x64_sys_rt_sigtimedwait +129 64 rt_sigqueueinfo __x64_sys_rt_sigqueueinfo +130 common rt_sigsuspend __x64_sys_rt_sigsuspend +131 64 sigaltstack __x64_sys_sigaltstack +132 common utime __x64_sys_utime +133 common mknod __x64_sys_mknod 134 64 uselib -135 common personality sys_personality -136 common ustat sys_ustat -137 common statfs sys_statfs -138 common fstatfs sys_fstatfs -139 common sysfs sys_sysfs -140 common getpriority sys_getpriority -141 common setpriority sys_setpriority -142 common sched_setparam sys_sched_setparam -143 common sched_getparam sys_sched_getparam -144 common sched_setscheduler sys_sched_setscheduler -145 common 
sched_getscheduler sys_sched_getscheduler -146 common sched_get_priority_max sys_sched_get_priority_max -147 common sched_get_priority_min sys_sched_get_priority_min -148 common sched_rr_get_interval sys_sched_rr_get_interval -149 common mlock sys_mlock -150 common munlock sys_munlock -151 common mlockall sys_mlockall -152 common munlockall sys_munlockall -153 common vhangup sys_vhangup -154 common modify_ldt sys_modify_ldt -155 common pivot_root sys_pivot_root -156 64 _sysctl sys_sysctl -157 common prctl sys_prctl -158 common arch_prctl sys_arch_prctl -159 common adjtimex sys_adjtimex -160 common setrlimit sys_setrlimit -161 common chroot sys_chroot -162 common sync sys_sync -163 common acct sys_acct -164 common settimeofday sys_settimeofday -165 common mount sys_mount -166 common umount2 sys_umount -167 common swapon sys_swapon -168 common swapoff sys_swapoff -169 common reboot sys_reboot -170 common sethostname sys_sethostname -171 common setdomainname sys_setdomainname -172 common iopl sys_iopl/ptregs -173 common ioperm sys_ioperm +135 common personality __x64_sys_personality +136 common ustat __x64_sys_ustat +137 common statfs __x64_sys_statfs +138 common fstatfs __x64_sys_fstatfs +139 common sysfs __x64_sys_sysfs +140 common getpriority __x64_sys_getpriority +141 common setpriority __x64_sys_setpriority +142 common sched_setparam __x64_sys_sched_setparam +143 common sched_getparam __x64_sys_sched_getparam +144 common sched_setscheduler __x64_sys_sched_setscheduler +145 common sched_getscheduler __x64_sys_sched_getscheduler +146 common sched_get_priority_max __x64_sys_sched_get_priority_max +147 common sched_get_priority_min __x64_sys_sched_get_priority_min +148 common sched_rr_get_interval __x64_sys_sched_rr_get_interval +149 common mlock __x64_sys_mlock +150 common munlock __x64_sys_munlock +151 common mlockall __x64_sys_mlockall +152 common munlockall __x64_sys_munlockall +153 common vhangup __x64_sys_vhangup +154 common modify_ldt __x64_sys_modify_ldt +155 common pivot_root __x64_sys_pivot_root +156 64 _sysctl __x64_sys_sysctl +157 common prctl __x64_sys_prctl +158 common arch_prctl __x64_sys_arch_prctl +159 common adjtimex __x64_sys_adjtimex +160 common setrlimit __x64_sys_setrlimit +161 common chroot __x64_sys_chroot +162 common sync __x64_sys_sync +163 common acct __x64_sys_acct +164 common settimeofday __x64_sys_settimeofday +165 common mount __x64_sys_mount +166 common umount2 __x64_sys_umount +167 common swapon __x64_sys_swapon +168 common swapoff __x64_sys_swapoff +169 common reboot __x64_sys_reboot +170 common sethostname __x64_sys_sethostname +171 common setdomainname __x64_sys_setdomainname +172 common iopl __x64_sys_iopl/ptregs +173 common ioperm __x64_sys_ioperm 174 64 create_module -175 common init_module sys_init_module -176 common delete_module sys_delete_module +175 common init_module __x64_sys_init_module +176 common delete_module __x64_sys_delete_module 177 64 get_kernel_syms 178 64 query_module -179 common quotactl sys_quotactl +179 common quotactl __x64_sys_quotactl 180 64 nfsservctl 181 common getpmsg 182 common putpmsg 183 common afs_syscall 184 common tuxcall 185 common security -186 common gettid sys_gettid -187 common readahead sys_readahead -188 common setxattr sys_setxattr -189 common lsetxattr sys_lsetxattr -190 common fsetxattr sys_fsetxattr -191 common getxattr sys_getxattr -192 common lgetxattr sys_lgetxattr -193 common fgetxattr sys_fgetxattr -194 common listxattr sys_listxattr -195 common llistxattr sys_llistxattr -196 common flistxattr 
sys_flistxattr -197 common removexattr sys_removexattr -198 common lremovexattr sys_lremovexattr -199 common fremovexattr sys_fremovexattr -200 common tkill sys_tkill -201 common time sys_time -202 common futex sys_futex -203 common sched_setaffinity sys_sched_setaffinity -204 common sched_getaffinity sys_sched_getaffinity +186 common gettid __x64_sys_gettid +187 common readahead __x64_sys_readahead +188 common setxattr __x64_sys_setxattr +189 common lsetxattr __x64_sys_lsetxattr +190 common fsetxattr __x64_sys_fsetxattr +191 common getxattr __x64_sys_getxattr +192 common lgetxattr __x64_sys_lgetxattr +193 common fgetxattr __x64_sys_fgetxattr +194 common listxattr __x64_sys_listxattr +195 common llistxattr __x64_sys_llistxattr +196 common flistxattr __x64_sys_flistxattr +197 common removexattr __x64_sys_removexattr +198 common lremovexattr __x64_sys_lremovexattr +199 common fremovexattr __x64_sys_fremovexattr +200 common tkill __x64_sys_tkill +201 common time __x64_sys_time +202 common futex __x64_sys_futex +203 common sched_setaffinity __x64_sys_sched_setaffinity +204 common sched_getaffinity __x64_sys_sched_getaffinity 205 64 set_thread_area -206 64 io_setup sys_io_setup -207 common io_destroy sys_io_destroy -208 common io_getevents sys_io_getevents -209 64 io_submit sys_io_submit -210 common io_cancel sys_io_cancel +206 64 io_setup __x64_sys_io_setup +207 common io_destroy __x64_sys_io_destroy +208 common io_getevents __x64_sys_io_getevents +209 64 io_submit __x64_sys_io_submit +210 common io_cancel __x64_sys_io_cancel 211 64 get_thread_area -212 common lookup_dcookie sys_lookup_dcookie -213 common epoll_create sys_epoll_create +212 common lookup_dcookie __x64_sys_lookup_dcookie +213 common epoll_create __x64_sys_epoll_create 214 64 epoll_ctl_old 215 64 epoll_wait_old -216 common remap_file_pages sys_remap_file_pages -217 common getdents64 sys_getdents64 -218 common set_tid_address sys_set_tid_address -219 common restart_syscall sys_restart_syscall -220 common semtimedop sys_semtimedop -221 common fadvise64 sys_fadvise64 -222 64 timer_create sys_timer_create -223 common timer_settime sys_timer_settime -224 common timer_gettime sys_timer_gettime -225 common timer_getoverrun sys_timer_getoverrun -226 common timer_delete sys_timer_delete -227 common clock_settime sys_clock_settime -228 common clock_gettime sys_clock_gettime -229 common clock_getres sys_clock_getres -230 common clock_nanosleep sys_clock_nanosleep -231 common exit_group sys_exit_group -232 common epoll_wait sys_epoll_wait -233 common epoll_ctl sys_epoll_ctl -234 common tgkill sys_tgkill -235 common utimes sys_utimes +216 common remap_file_pages __x64_sys_remap_file_pages +217 common getdents64 __x64_sys_getdents64 +218 common set_tid_address __x64_sys_set_tid_address +219 common restart_syscall __x64_sys_restart_syscall +220 common semtimedop __x64_sys_semtimedop +221 common fadvise64 __x64_sys_fadvise64 +222 64 timer_create __x64_sys_timer_create +223 common timer_settime __x64_sys_timer_settime +224 common timer_gettime __x64_sys_timer_gettime +225 common timer_getoverrun __x64_sys_timer_getoverrun +226 common timer_delete __x64_sys_timer_delete +227 common clock_settime __x64_sys_clock_settime +228 common clock_gettime __x64_sys_clock_gettime +229 common clock_getres __x64_sys_clock_getres +230 common clock_nanosleep __x64_sys_clock_nanosleep +231 common exit_group __x64_sys_exit_group +232 common epoll_wait __x64_sys_epoll_wait +233 common epoll_ctl __x64_sys_epoll_ctl +234 common tgkill __x64_sys_tgkill +235 common 
utimes __x64_sys_utimes 236 64 vserver -237 common mbind sys_mbind -238 common set_mempolicy sys_set_mempolicy -239 common get_mempolicy sys_get_mempolicy -240 common mq_open sys_mq_open -241 common mq_unlink sys_mq_unlink -242 common mq_timedsend sys_mq_timedsend -243 common mq_timedreceive sys_mq_timedreceive -244 64 mq_notify sys_mq_notify -245 common mq_getsetattr sys_mq_getsetattr -246 64 kexec_load sys_kexec_load -247 64 waitid sys_waitid -248 common add_key sys_add_key -249 common request_key sys_request_key -250 common keyctl sys_keyctl -251 common ioprio_set sys_ioprio_set -252 common ioprio_get sys_ioprio_get -253 common inotify_init sys_inotify_init -254 common inotify_add_watch sys_inotify_add_watch -255 common inotify_rm_watch sys_inotify_rm_watch -256 common migrate_pages sys_migrate_pages -257 common openat sys_openat -258 common mkdirat sys_mkdirat -259 common mknodat sys_mknodat -260 common fchownat sys_fchownat -261 common futimesat sys_futimesat -262 common newfstatat sys_newfstatat -263 common unlinkat sys_unlinkat -264 common renameat sys_renameat -265 common linkat sys_linkat -266 common symlinkat sys_symlinkat -267 common readlinkat sys_readlinkat -268 common fchmodat sys_fchmodat -269 common faccessat sys_faccessat -270 common pselect6 sys_pselect6 -271 common ppoll sys_ppoll -272 common unshare sys_unshare -273 64 set_robust_list sys_set_robust_list -274 64 get_robust_list sys_get_robust_list -275 common splice sys_splice -276 common tee sys_tee -277 common sync_file_range sys_sync_file_range -278 64 vmsplice sys_vmsplice -279 64 move_pages sys_move_pages -280 common utimensat sys_utimensat -281 common epoll_pwait sys_epoll_pwait -282 common signalfd sys_signalfd -283 common timerfd_create sys_timerfd_create -284 common eventfd sys_eventfd -285 common fallocate sys_fallocate -286 common timerfd_settime sys_timerfd_settime -287 common timerfd_gettime sys_timerfd_gettime -288 common accept4 sys_accept4 -289 common signalfd4 sys_signalfd4 -290 common eventfd2 sys_eventfd2 -291 common epoll_create1 sys_epoll_create1 -292 common dup3 sys_dup3 -293 common pipe2 sys_pipe2 -294 common inotify_init1 sys_inotify_init1 -295 64 preadv sys_preadv -296 64 pwritev sys_pwritev -297 64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo -298 common perf_event_open sys_perf_event_open -299 64 recvmmsg sys_recvmmsg -300 common fanotify_init sys_fanotify_init -301 common fanotify_mark sys_fanotify_mark -302 common prlimit64 sys_prlimit64 -303 common name_to_handle_at sys_name_to_handle_at -304 common open_by_handle_at sys_open_by_handle_at -305 common clock_adjtime sys_clock_adjtime -306 common syncfs sys_syncfs -307 64 sendmmsg sys_sendmmsg -308 common setns sys_setns -309 common getcpu sys_getcpu -310 64 process_vm_readv sys_process_vm_readv -311 64 process_vm_writev sys_process_vm_writev -312 common kcmp sys_kcmp -313 common finit_module sys_finit_module -314 common sched_setattr sys_sched_setattr -315 common sched_getattr sys_sched_getattr -316 common renameat2 sys_renameat2 -317 common seccomp sys_seccomp -318 common getrandom sys_getrandom -319 common memfd_create sys_memfd_create -320 common kexec_file_load sys_kexec_file_load -321 common bpf sys_bpf -322 64 execveat sys_execveat/ptregs -323 common userfaultfd sys_userfaultfd -324 common membarrier sys_membarrier -325 common mlock2 sys_mlock2 -326 common copy_file_range sys_copy_file_range -327 64 preadv2 sys_preadv2 -328 64 pwritev2 sys_pwritev2 -329 common pkey_mprotect sys_pkey_mprotect -330 common pkey_alloc sys_pkey_alloc -331 common 
pkey_free sys_pkey_free -332 common statx sys_statx +237 common mbind __x64_sys_mbind +238 common set_mempolicy __x64_sys_set_mempolicy +239 common get_mempolicy __x64_sys_get_mempolicy +240 common mq_open __x64_sys_mq_open +241 common mq_unlink __x64_sys_mq_unlink +242 common mq_timedsend __x64_sys_mq_timedsend +243 common mq_timedreceive __x64_sys_mq_timedreceive +244 64 mq_notify __x64_sys_mq_notify +245 common mq_getsetattr __x64_sys_mq_getsetattr +246 64 kexec_load __x64_sys_kexec_load +247 64 waitid __x64_sys_waitid +248 common add_key __x64_sys_add_key +249 common request_key __x64_sys_request_key +250 common keyctl __x64_sys_keyctl +251 common ioprio_set __x64_sys_ioprio_set +252 common ioprio_get __x64_sys_ioprio_get +253 common inotify_init __x64_sys_inotify_init +254 common inotify_add_watch __x64_sys_inotify_add_watch +255 common inotify_rm_watch __x64_sys_inotify_rm_watch +256 common migrate_pages __x64_sys_migrate_pages +257 common openat __x64_sys_openat +258 common mkdirat __x64_sys_mkdirat +259 common mknodat __x64_sys_mknodat +260 common fchownat __x64_sys_fchownat +261 common futimesat __x64_sys_futimesat +262 common newfstatat __x64_sys_newfstatat +263 common unlinkat __x64_sys_unlinkat +264 common renameat __x64_sys_renameat +265 common linkat __x64_sys_linkat +266 common symlinkat __x64_sys_symlinkat +267 common readlinkat __x64_sys_readlinkat +268 common fchmodat __x64_sys_fchmodat +269 common faccessat __x64_sys_faccessat +270 common pselect6 __x64_sys_pselect6 +271 common ppoll __x64_sys_ppoll +272 common unshare __x64_sys_unshare +273 64 set_robust_list __x64_sys_set_robust_list +274 64 get_robust_list __x64_sys_get_robust_list +275 common splice __x64_sys_splice +276 common tee __x64_sys_tee +277 common sync_file_range __x64_sys_sync_file_range +278 64 vmsplice __x64_sys_vmsplice +279 64 move_pages __x64_sys_move_pages +280 common utimensat __x64_sys_utimensat +281 common epoll_pwait __x64_sys_epoll_pwait +282 common signalfd __x64_sys_signalfd +283 common timerfd_create __x64_sys_timerfd_create +284 common eventfd __x64_sys_eventfd +285 common fallocate __x64_sys_fallocate +286 common timerfd_settime __x64_sys_timerfd_settime +287 common timerfd_gettime __x64_sys_timerfd_gettime +288 common accept4 __x64_sys_accept4 +289 common signalfd4 __x64_sys_signalfd4 +290 common eventfd2 __x64_sys_eventfd2 +291 common epoll_create1 __x64_sys_epoll_create1 +292 common dup3 __x64_sys_dup3 +293 common pipe2 __x64_sys_pipe2 +294 common inotify_init1 __x64_sys_inotify_init1 +295 64 preadv __x64_sys_preadv +296 64 pwritev __x64_sys_pwritev +297 64 rt_tgsigqueueinfo __x64_sys_rt_tgsigqueueinfo +298 common perf_event_open __x64_sys_perf_event_open +299 64 recvmmsg __x64_sys_recvmmsg +300 common fanotify_init __x64_sys_fanotify_init +301 common fanotify_mark __x64_sys_fanotify_mark +302 common prlimit64 __x64_sys_prlimit64 +303 common name_to_handle_at __x64_sys_name_to_handle_at +304 common open_by_handle_at __x64_sys_open_by_handle_at +305 common clock_adjtime __x64_sys_clock_adjtime +306 common syncfs __x64_sys_syncfs +307 64 sendmmsg __x64_sys_sendmmsg +308 common setns __x64_sys_setns +309 common getcpu __x64_sys_getcpu +310 64 process_vm_readv __x64_sys_process_vm_readv +311 64 process_vm_writev __x64_sys_process_vm_writev +312 common kcmp __x64_sys_kcmp +313 common finit_module __x64_sys_finit_module +314 common sched_setattr __x64_sys_sched_setattr +315 common sched_getattr __x64_sys_sched_getattr +316 common renameat2 __x64_sys_renameat2 +317 common seccomp 
__x64_sys_seccomp +318 common getrandom __x64_sys_getrandom +319 common memfd_create __x64_sys_memfd_create +320 common kexec_file_load __x64_sys_kexec_file_load +321 common bpf __x64_sys_bpf +322 64 execveat __x64_sys_execveat/ptregs +323 common userfaultfd __x64_sys_userfaultfd +324 common membarrier __x64_sys_membarrier +325 common mlock2 __x64_sys_mlock2 +326 common copy_file_range __x64_sys_copy_file_range +327 64 preadv2 __x64_sys_preadv2 +328 64 pwritev2 __x64_sys_pwritev2 +329 common pkey_mprotect __x64_sys_pkey_mprotect +330 common pkey_alloc __x64_sys_pkey_alloc +331 common pkey_free __x64_sys_pkey_free +332 common statx __x64_sys_statx # # x32-specific system call numbers start at 512 to avoid cache impact -# for native 64-bit operation. +# for native 64-bit operation. The __x32_compat_sys stubs are created +# on-the-fly for compat_sys_*() compatibility system calls if X86_X32 +# is defined. # -512 x32 rt_sigaction compat_sys_rt_sigaction +512 x32 rt_sigaction __x32_compat_sys_rt_sigaction 513 x32 rt_sigreturn sys32_x32_rt_sigreturn -514 x32 ioctl compat_sys_ioctl -515 x32 readv compat_sys_readv -516 x32 writev compat_sys_writev -517 x32 recvfrom compat_sys_recvfrom -518 x32 sendmsg compat_sys_sendmsg -519 x32 recvmsg compat_sys_recvmsg -520 x32 execve compat_sys_execve/ptregs -521 x32 ptrace compat_sys_ptrace -522 x32 rt_sigpending compat_sys_rt_sigpending -523 x32 rt_sigtimedwait compat_sys_rt_sigtimedwait -524 x32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo -525 x32 sigaltstack compat_sys_sigaltstack -526 x32 timer_create compat_sys_timer_create -527 x32 mq_notify compat_sys_mq_notify -528 x32 kexec_load compat_sys_kexec_load -529 x32 waitid compat_sys_waitid -530 x32 set_robust_list compat_sys_set_robust_list -531 x32 get_robust_list compat_sys_get_robust_list -532 x32 vmsplice compat_sys_vmsplice -533 x32 move_pages compat_sys_move_pages -534 x32 preadv compat_sys_preadv64 -535 x32 pwritev compat_sys_pwritev64 -536 x32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo -537 x32 recvmmsg compat_sys_recvmmsg -538 x32 sendmmsg compat_sys_sendmmsg -539 x32 process_vm_readv compat_sys_process_vm_readv -540 x32 process_vm_writev compat_sys_process_vm_writev -541 x32 setsockopt compat_sys_setsockopt -542 x32 getsockopt compat_sys_getsockopt -543 x32 io_setup compat_sys_io_setup -544 x32 io_submit compat_sys_io_submit -545 x32 execveat compat_sys_execveat/ptregs -546 x32 preadv2 compat_sys_preadv64v2 -547 x32 pwritev2 compat_sys_pwritev64v2 +514 x32 ioctl __x32_compat_sys_ioctl +515 x32 readv __x32_compat_sys_readv +516 x32 writev __x32_compat_sys_writev +517 x32 recvfrom __x32_compat_sys_recvfrom +518 x32 sendmsg __x32_compat_sys_sendmsg +519 x32 recvmsg __x32_compat_sys_recvmsg +520 x32 execve __x32_compat_sys_execve/ptregs +521 x32 ptrace __x32_compat_sys_ptrace +522 x32 rt_sigpending __x32_compat_sys_rt_sigpending +523 x32 rt_sigtimedwait __x32_compat_sys_rt_sigtimedwait +524 x32 rt_sigqueueinfo __x32_compat_sys_rt_sigqueueinfo +525 x32 sigaltstack __x32_compat_sys_sigaltstack +526 x32 timer_create __x32_compat_sys_timer_create +527 x32 mq_notify __x32_compat_sys_mq_notify +528 x32 kexec_load __x32_compat_sys_kexec_load +529 x32 waitid __x32_compat_sys_waitid +530 x32 set_robust_list __x32_compat_sys_set_robust_list +531 x32 get_robust_list __x32_compat_sys_get_robust_list +532 x32 vmsplice __x32_compat_sys_vmsplice +533 x32 move_pages __x32_compat_sys_move_pages +534 x32 preadv __x32_compat_sys_preadv64 +535 x32 pwritev __x32_compat_sys_pwritev64 +536 x32 rt_tgsigqueueinfo 
__x32_compat_sys_rt_tgsigqueueinfo +537 x32 recvmmsg __x32_compat_sys_recvmmsg +538 x32 sendmmsg __x32_compat_sys_sendmmsg +539 x32 process_vm_readv __x32_compat_sys_process_vm_readv +540 x32 process_vm_writev __x32_compat_sys_process_vm_writev +541 x32 setsockopt __x32_compat_sys_setsockopt +542 x32 getsockopt __x32_compat_sys_getsockopt +543 x32 io_setup __x32_compat_sys_io_setup +544 x32 io_submit __x32_compat_sys_io_submit +545 x32 execveat __x32_compat_sys_execveat/ptregs +546 x32 preadv2 __x32_compat_sys_preadv64v2 +547 x32 pwritev2 __x32_compat_sys_pwritev64v2 From 101592b4904ecf6b8ed2a4784d41d180319d95a1 Mon Sep 17 00:00:00 2001 From: Alexey Budankov Date: Mon, 9 Apr 2018 10:25:32 +0300 Subject: [PATCH 069/182] perf/core: Store context switch out type in PERF_RECORD_SWITCH[_CPU_WIDE] Store preempting context switch out event into Perf trace as a part of PERF_RECORD_SWITCH[_CPU_WIDE] record. Percentage of preempting and non-preempting context switches help understanding the nature of workloads (CPU or IO bound) that are running on a machine; The event is treated as preemption one when task->state value of the thread being switched out is TASK_RUNNING. Event type encoding is implemented using PERF_RECORD_MISC_SWITCH_OUT_PREEMPT bit; Signed-off-by: Alexey Budankov Acked-by: Peter Zijlstra Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Namhyung Kim Link: http://lkml.kernel.org/r/9ff84e83-a0ca-dd82-a6d0-cb951689be74@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- include/uapi/linux/perf_event.h | 18 +++++++++++++++--- kernel/events/core.c | 4 ++++ tools/include/uapi/linux/perf_event.h | 18 +++++++++++++++--- 3 files changed, 34 insertions(+), 6 deletions(-) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 912b85b52344..b8e288a1f740 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -650,11 +650,23 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) /* - * Indicates that the content of PERF_SAMPLE_IP points to - * the actual instruction that triggered the event. See also - * perf_event_attr::precise_ip. + * These PERF_RECORD_MISC_* flags below are safely reused + * for the following events: + * + * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events + * + * + * PERF_RECORD_MISC_EXACT_IP: + * Indicates that the content of PERF_SAMPLE_IP points to + * the actual instruction that triggered the event. See also + * perf_event_attr::precise_ip. + * + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: + * Indicates that thread was preempted in TASK_RUNNING state. 
*/ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) +#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) /* * Reserve the last bit to indicate some extended misc field */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 2d5fe26551f8..1bae80aaabfb 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7587,6 +7587,10 @@ static void perf_event_switch(struct task_struct *task, }, }; + if (!sched_in && task->state == TASK_RUNNING) + switch_event.event_id.header.misc |= + PERF_RECORD_MISC_SWITCH_OUT_PREEMPT; + perf_iterate_sb(perf_event_switch_output, &switch_event, NULL); diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 912b85b52344..b8e288a1f740 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -650,11 +650,23 @@ struct perf_event_mmap_page { #define PERF_RECORD_MISC_COMM_EXEC (1 << 13) #define PERF_RECORD_MISC_SWITCH_OUT (1 << 13) /* - * Indicates that the content of PERF_SAMPLE_IP points to - * the actual instruction that triggered the event. See also - * perf_event_attr::precise_ip. + * These PERF_RECORD_MISC_* flags below are safely reused + * for the following events: + * + * PERF_RECORD_MISC_EXACT_IP - PERF_RECORD_SAMPLE of precise events + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT - PERF_RECORD_SWITCH* events + * + * + * PERF_RECORD_MISC_EXACT_IP: + * Indicates that the content of PERF_SAMPLE_IP points to + * the actual instruction that triggered the event. See also + * perf_event_attr::precise_ip. + * + * PERF_RECORD_MISC_SWITCH_OUT_PREEMPT: + * Indicates that thread was preempted in TASK_RUNNING state. */ #define PERF_RECORD_MISC_EXACT_IP (1 << 14) +#define PERF_RECORD_MISC_SWITCH_OUT_PREEMPT (1 << 14) /* * Reserve the last bit to indicate some extended misc field */ From b3f35b5d5d36fba9311d1a965fcce2dd35614f2e Mon Sep 17 00:00:00 2001 From: Alexey Budankov Date: Mon, 9 Apr 2018 10:26:05 +0300 Subject: [PATCH 070/182] perf report: Extend raw dump (-D) out with switch out event type Print additional 'preempt' tag for PERF_RECORD_SWITCH[_CPU_WIDE] OUT records when event header misc field contains PERF_RECORD_MISC_SWITCH_OUT_PREEMPT bit set designating preemption context switch out event: tools/perf/perf report -D -i perf.data | grep _SWITCH 0 768361415226 0x27f076 [0x28]: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 8/8 4 768362216813 0x28f45e [0x28]: PERF_RECORD_SWITCH_CPU_WIDE OUT next pid/tid: 0/0 4 768362217824 0x28f486 [0x28]: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 4073/4073 0 768362414027 0x27f0ce [0x28]: PERF_RECORD_SWITCH_CPU_WIDE OUT preempt next pid/tid: 8/8 0 768362414367 0x27f0f6 [0x28]: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 0/0 Signed-off-by: Alexey Budankov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/6f5aebb9-b96c-f304-f08f-8f046d38de4f@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/event.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index f0a6cbd033cc..98ff3a6a3d50 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -1421,7 +1421,9 @@ size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp) size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp) { bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT; - const char *in_out = out ? "OUT" : "IN "; + const char *in_out = !out ? 
"IN " : + !(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ? + "OUT " : "OUT preempt"; if (event->header.type == PERF_RECORD_SWITCH) return fprintf(fp, " %s\n", in_out); From bf30cc1882d2c65aaf92842cc9bcf06565eab73c Mon Sep 17 00:00:00 2001 From: Alexey Budankov Date: Mon, 9 Apr 2018 10:26:46 +0300 Subject: [PATCH 071/182] perf script: Extend misc field decoding with switch out event type Append 'p' sign to 'S' tag designating the type of context switch out event so 'Sp' means preemption context switch. Documentation is extended to cover new presentation changes. $ perf script --show-switch-events -F +misc -I -i perf.data: hdparm 4073 [004] U 762.198265: 380194 cycles:ppp: 7faf727f5a23 strchr (/usr/lib64/ld-2.26.so) hdparm 4073 [004] K 762.198366: 441572 cycles:ppp: ffffffffb9218435 alloc_set_pte (/lib/modules/4.16.0-rc6+/build/vmlinux) hdparm 4073 [004] S 762.198391: PERF_RECORD_SWITCH_CPU_WIDE OUT next pid/tid: 0/0 swapper 0 [004] 762.198392: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 4073/4073 swapper 0 [004] Sp 762.198477: PERF_RECORD_SWITCH_CPU_WIDE OUT preempt next pid/tid: 4073/4073 hdparm 4073 [004] 762.198478: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 0/0 swapper 0 [007] K 762.198514: 2303073 cycles:ppp: ffffffffb98b0c66 intel_idle (/lib/modules/4.16.0-rc6+/build/vmlinux) swapper 0 [007] Sp 762.198561: PERF_RECORD_SWITCH_CPU_WIDE OUT preempt next pid/tid: 1134/1134 kworker/u16:18 1134 [007] 762.198562: PERF_RECORD_SWITCH_CPU_WIDE IN prev pid/tid: 0/0 kworker/u16:18 1134 [007] S 762.198567: PERF_RECORD_SWITCH_CPU_WIDE OUT next pid/tid: 0/0 Signed-off-by: Alexey Budankov Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/5fc65ce7-8ca5-53ae-8858-8ddd27290575@linux.intel.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-script.txt | 17 +++++++++-------- tools/perf/builtin-script.c | 5 ++++- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/tools/perf/Documentation/perf-script.txt b/tools/perf/Documentation/perf-script.txt index 36ec0257f8d3..afdafe2110a1 100644 --- a/tools/perf/Documentation/perf-script.txt +++ b/tools/perf/Documentation/perf-script.txt @@ -228,14 +228,15 @@ OPTIONS For sample events it's possible to display misc field with -F +misc option, following letters are displayed for each bit: - PERF_RECORD_MISC_KERNEL K - PERF_RECORD_MISC_USER U - PERF_RECORD_MISC_HYPERVISOR H - PERF_RECORD_MISC_GUEST_KERNEL G - PERF_RECORD_MISC_GUEST_USER g - PERF_RECORD_MISC_MMAP_DATA* M - PERF_RECORD_MISC_COMM_EXEC E - PERF_RECORD_MISC_SWITCH_OUT S + PERF_RECORD_MISC_KERNEL K + PERF_RECORD_MISC_USER U + PERF_RECORD_MISC_HYPERVISOR H + PERF_RECORD_MISC_GUEST_KERNEL G + PERF_RECORD_MISC_GUEST_USER g + PERF_RECORD_MISC_MMAP_DATA* M + PERF_RECORD_MISC_COMM_EXEC E + PERF_RECORD_MISC_SWITCH_OUT S + PERF_RECORD_MISC_SWITCH_OUT_PREEMPT Sp $ perf script -F +misc ... sched-messaging 1414 K 28690.636582: 4590 cycles ... 
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c index b17edbcd98cc..e0a9845b6cbc 100644 --- a/tools/perf/builtin-script.c +++ b/tools/perf/builtin-script.c @@ -657,8 +657,11 @@ static int perf_sample__fprintf_start(struct perf_sample *sample, break; case PERF_RECORD_SWITCH: case PERF_RECORD_SWITCH_CPU_WIDE: - if (has(SWITCH_OUT)) + if (has(SWITCH_OUT)) { ret += fprintf(fp, "S"); + if (sample->misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) + ret += fprintf(fp, "p"); + } default: break; } From 038586c34301578e538f6c5aa79ca82bce1b9152 Mon Sep 17 00:00:00 2001 From: Thomas Richter Date: Mon, 16 Apr 2018 15:23:14 +0200 Subject: [PATCH 072/182] perf list: Add s390 support for detailed/verbose PMU event description 'perf list' with flags -d and -v print a description (-d) or a very verbose explanation (-v) of CPU specific counter events. These descriptions are provided with the json files in directory pmu-events/arch/s390/*.json. Display of these descriptions on s390 requires the corresponding json files. On s390 this does not work because function is_pmu_core() does not detect the s390 directory name where the CPU specific events are listed. On x86 it is: /sys/bus/event_source/devices/cpu whereas on s390 it is: /sys/bus/event_source/devices/cpum_cf /sys/bus/event_source/devices/cpum_sf Fix this by adding s390 directory name testing to function is_pmu_core(). This is the same approach as taken for the ARM platform. Output before: [root@s35lp76 perf]# ./perf list -d pmu List of pre-defined events (to be used in -e): cpum_cf/AES_BLOCKED_CYCLES/ [Kernel PMU event] cpum_cf/AES_BLOCKED_FUNCTIONS/ [Kernel PMU event] cpum_cf/AES_CYCLES/ [Kernel PMU event] cpum_cf/AES_FUNCTIONS/ [Kernel PMU event] .... cpum_cf/TX_NC_TEND/ [Kernel PMU event] cpum_cf/VX_BCD_EXECUTION_SLOTS/ [Kernel PMU event] cpum_sf/SF_CYCLES_BASIC/ [Kernel PMU event] Output after: [root@s35lp76 perf]# ./perf list -d pmu List of pre-defined events (to be used in -e): cpum_cf/AES_BLOCKED_CYCLES/ [Kernel PMU event] cpum_cf/AES_BLOCKED_FUNCTIONS/ [Kernel PMU event] cpum_cf/AES_CYCLES/ [Kernel PMU event] cpum_cf/AES_FUNCTIONS/ [Kernel PMU event] .... cpum_cf/TX_NC_TEND/ [Kernel PMU event] cpum_cf/VX_BCD_EXECUTION_SLOTS/ [Kernel PMU event] cpum_sf/SF_CYCLES_BASIC/ [Kernel PMU event] 3906: bcd_dfp_execution_slots [BCD DFP Execution Slots] decimal_instructions [Decimal Instructions] dtlb2_gpage_writes [DTLB2 GPAGE Writes] dtlb2_hpage_writes [DTLB2 HPAGE Writes] dtlb2_misses [DTLB2 Misses] dtlb2_writes [DTLB2 Writes] itlb2_misses [ITLB2 Misses] itlb2_writes [ITLB2 Writes] l1c_tlb2_misses [L1C TLB2 Misses] ..... cfvn 3: cpu_cycles [CPU Cycles] instructions [Instructions] l1d_dir_writes [L1D Directory Writes] l1d_penalty_cycles [L1D Penalty Cycles] l1i_dir_writes [L1I Directory Writes] l1i_penalty_cycles [L1I Penalty Cycles] problem_state_cpu_cycles [Problem State CPU Cycles] problem_state_instructions [Problem State Instructions] .... csvn generic: aes_blocked_cycles [AES Blocked Cycles] aes_blocked_functions [AES Blocked Functions] aes_cycles [AES Cycles] aes_functions [AES Functions] dea_blocked_cycles [DEA Blocked Cycles] dea_blocked_functions [DEA Blocked Functions] .... 
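In userspace terms the added detection is just a sysfs probe plus a name-prefix test. A minimal standalone sketch of that idea (illustrative only; the helper name is hypothetical, and the actual change is the tools/perf/util/pmu.c hunk below):

    /* Sketch: does 'name' look like an s390 core PMU? Mirrors the probe
     * described above: the directory must exist under sysfs and the name
     * must carry the s390 "cpum_" prefix.
     */
    #include <stdio.h>
    #include <string.h>
    #include <limits.h>
    #include <sys/stat.h>

    static int looks_like_s390_core_pmu(const char *sysfs, const char *name)
    {
            char path[PATH_MAX];
            struct stat st;

            snprintf(path, sizeof(path), "%s/bus/event_source/devices/%s",
                     sysfs, name);
            return stat(path, &st) == 0 && !strncmp(name, "cpum_", 5);
    }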
Signed-off-by: Thomas Richter Reviewed-by: Hendrik Brueckner Acked-by: Mark Rutland Cc: Heiko Carstens Cc: Martin Schwidefsky Link: http://lkml.kernel.org/r/20180416132314.33249-1-tmricht@linux.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/pmu.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 064bdcb7bd78..61a5e5027338 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -562,6 +562,12 @@ static int is_pmu_core(const char *name) if (stat(path, &st) == 0) return 1; + /* Look for cpu sysfs (specific to s390) */ + scnprintf(path, PATH_MAX, "%s/bus/event_source/devices/%s", + sysfs, name); + if (stat(path, &st) == 0 && !strncmp(name, "cpum_", 5)) + return 1; + return 0; } From 78b562fbfa2cf0a9fcb23c3154756b690f4905c1 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sun, 15 Apr 2018 11:23:50 +0200 Subject: [PATCH 073/182] perf: Return proper values for user stack errors Return immediately when we find issue in the user stack checks. The error value could get overwritten by following check for PERF_SAMPLE_REGS_INTR. Signed-off-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: H. Peter Anvin Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: syzkaller-bugs@googlegroups.com Cc: x86@kernel.org Fixes: 60e2364e60e8 ("perf: Add ability to sample machine state on interrupt") Link: http://lkml.kernel.org/r/20180415092352.12403-1-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 1bae80aaabfb..67612ce359ad 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -10209,9 +10209,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr, * __u16 sample size limit. */ if (attr->sample_stack_user >= USHRT_MAX) - ret = -EINVAL; + return -EINVAL; else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64))) - ret = -EINVAL; + return -EINVAL; } if (!attr->sample_max_stack) From 5af44ca53d019de47efe6dbc4003dd518e5197ed Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sun, 15 Apr 2018 11:23:51 +0200 Subject: [PATCH 074/182] perf: Fix sample_max_stack maximum check The syzbot hit KASAN bug in perf_callchain_store having the entry stored behind the allocated bounds [1]. We miss the sample_max_stack check for the initial event that allocates callchain buffers. This missing check allows to create an event with sample_max_stack value bigger than the global sysctl maximum: # sysctl -a | grep perf_event_max_stack kernel.perf_event_max_stack = 127 # perf record -vv -C 1 -e cycles/max-stack=256/ kill ... perf_event_attr: size 112 ... sample_max_stack 256 ------------------------------------------------------------ sys_perf_event_open: pid -1 cpu 1 group_fd -1 flags 0x8 = 4 Note the '-C 1', which forces perf record to create just single event. Otherwise it opens event for every cpu, then the sample_max_stack check fails on the second event and all's fine. The fix is to run the sample_max_stack check also for the first event with callchains. [1] https://marc.info/?l=linux-kernel&m=152352732920874&w=2 Reported-by: syzbot+7c449856228b63ac951e@syzkaller.appspotmail.com Signed-off-by: Jiri Olsa Cc: Alexander Shishkin Cc: Andi Kleen Cc: H. 
Peter Anvin Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: syzkaller-bugs@googlegroups.com Cc: x86@kernel.org Fixes: 97c79a38cd45 ("perf core: Per event callchain limit") Link: http://lkml.kernel.org/r/20180415092352.12403-2-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/callchain.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 772a43fea825..73cc26e321de 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -119,19 +119,22 @@ int get_callchain_buffers(int event_max_stack) goto exit; } + /* + * If requesting per event more than the global cap, + * return a different error to help userspace figure + * this out. + * + * And also do it here so that we have &callchain_mutex held. + */ + if (event_max_stack > sysctl_perf_event_max_stack) { + err = -EOVERFLOW; + goto exit; + } + if (count > 1) { /* If the allocation failed, give up */ if (!callchain_cpus_entries) err = -ENOMEM; - /* - * If requesting per event more than the global cap, - * return a different error to help userspace figure - * this out. - * - * And also do it here so that we have &callchain_mutex held. - */ - if (event_max_stack > sysctl_perf_event_max_stack) - err = -EOVERFLOW; goto exit; } From bfb3d7b8b906b66551424d7636182126e1d134c8 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Sun, 15 Apr 2018 11:23:52 +0200 Subject: [PATCH 075/182] perf: Remove superfluous allocation error check If the get_callchain_buffers fails to allocate the buffer it will decrease the nr_callchain_events right away. There's no point of checking the allocation error for nr_callchain_events > 1. Removing that check. Signed-off-by: Jiri Olsa Tested-by: Arnaldo Carvalho de Melo Cc: Alexander Shishkin Cc: Andi Kleen Cc: H. Peter Anvin Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: syzkaller-bugs@googlegroups.com Cc: x86@kernel.org Link: http://lkml.kernel.org/r/20180415092352.12403-3-jolsa@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- kernel/events/callchain.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 73cc26e321de..c187aa3df3c8 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -131,14 +131,8 @@ int get_callchain_buffers(int event_max_stack) goto exit; } - if (count > 1) { - /* If the allocation failed, give up */ - if (!callchain_cpus_entries) - err = -ENOMEM; - goto exit; - } - - err = alloc_callchain_buffers(); + if (count == 1) + err = alloc_callchain_buffers(); exit: if (err) atomic_dec(&nr_callchain_events); From 1340ccfa9a9afefdbab90d7935d4ed19817e37c2 Mon Sep 17 00:00:00 2001 From: Alison Schofield Date: Fri, 6 Apr 2018 17:21:30 -0700 Subject: [PATCH 076/182] x86,sched: Allow topologies where NUMA nodes share an LLC Intel's Skylake Server CPUs have a different LLC topology than previous generations. When in Sub-NUMA-Clustering (SNC) mode, the package is divided into two "slices", each containing half the cores, half the LLC, and one memory controller and each slice is enumerated to Linux as a NUMA node. This is similar to how the cores and LLC were arranged for the Cluster-On-Die (CoD) feature. CoD allowed the same cache line to be present in each half of the LLC. But, with SNC, each line is only ever present in *one* slice. 
This means that the portion of the LLC *available* to a CPU depends on the data being accessed: Remote socket: entire package LLC is shared Local socket->local slice: data goes into local slice LLC Local socket->remote slice: data goes into remote-slice LLC. Slightly higher latency than local slice LLC. The biggest implication from this is that a process accessing all NUMA-local memory only sees half the LLC capacity. The CPU describes its cache hierarchy with the CPUID instruction. One of the CPUID leaves enumerates the "logical processors sharing this cache". This information is used for scheduling decisions so that tasks move more freely between CPUs sharing the cache. But, the CPUID for the SNC configuration discussed above enumerates the LLC as being shared by the entire package. This is not 100% precise because the entire cache is not usable by all accesses. But, it *is* the way the hardware enumerates itself, and this is not likely to change. The userspace visible impact of all the above is that the sysfs info reports the entire LLC as being available to the entire package. As noted above, this is not true for local socket accesses. This patch does not correct the sysfs info. It is the same, pre and post patch. The current code emits the following warning: sched: CPU #3's llc-sibling CPU #0 is not on the same node! [node: 1 != 0]. Ignoring dependency. The warning is coming from the topology_sane() check in smpboot.c because the topology is not matching the expectations of the model for obvious reasons. To fix this, add a vendor and model specific check to never call topology_sane() for these systems. Also, just like "Cluster-on-Die" disable the "coregroup" sched_domain_topology_level and use NUMA information from the SRAT alone. This is OK at least on the hardware we are immediately concerned about because the LLC sharing happens at both the slice and at the package level, which are also NUMA boundaries. Signed-off-by: Alison Schofield Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov Cc: Prarit Bhargava Cc: Tony Luck Cc: Peter Zijlstra (Intel) Cc: brice.goglin@gmail.com Cc: Dave Hansen Cc: Borislav Petkov Cc: David Rientjes Cc: Igor Mammedov Cc: "H. Peter Anvin" Cc: Tim Chen Link: https://lkml.kernel.org/r/20180407002130.GA18984@alison-desk.jf.intel.com --- arch/x86/kernel/smpboot.c | 45 ++++++++++++++++++++++++++++++++++----- 1 file changed, 40 insertions(+), 5 deletions(-) diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index ff99e2b6fc54..45175b81dd5b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c @@ -77,6 +77,8 @@ #include #include #include +#include +#include /* Number of siblings per CPU package */ int smp_num_siblings = 1; @@ -390,15 +392,47 @@ static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) return false; } +/* + * Define snc_cpu[] for SNC (Sub-NUMA Cluster) CPUs. + * + * These are Intel CPUs that enumerate an LLC that is shared by + * multiple NUMA nodes. The LLC on these systems is shared for + * off-package data access but private to the NUMA node (half + * of the package) for on-package access. + * + * CPUID (the source of the information about the LLC) can only + * enumerate the cache as being shared *or* unshared, but not + * this particular configuration. The CPU in this case enumerates + * the cache to be shared across the entire package (spanning both + * NUMA nodes). 
+ */ + +static const struct x86_cpu_id snc_cpu[] = { + { X86_VENDOR_INTEL, 6, INTEL_FAM6_SKYLAKE_X }, + {} +}; + static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) { int cpu1 = c->cpu_index, cpu2 = o->cpu_index; - if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID && - per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2)) - return topology_sane(c, o, "llc"); + /* Do not match if we do not have a valid APICID for cpu: */ + if (per_cpu(cpu_llc_id, cpu1) == BAD_APICID) + return false; - return false; + /* Do not match if LLC id does not match: */ + if (per_cpu(cpu_llc_id, cpu1) != per_cpu(cpu_llc_id, cpu2)) + return false; + + /* + * Allow the SNC topology without warning. Return of false + * means 'c' does not share the LLC of 'o'. This will be + * reflected to userspace. + */ + if (!topology_same_node(c, o) && x86_match_cpu(snc_cpu)) + return false; + + return topology_sane(c, o, "llc"); } /* @@ -456,7 +490,8 @@ static struct sched_domain_topology_level x86_topology[] = { /* * Set if a package/die has multiple NUMA nodes inside. - * AMD Magny-Cours and Intel Cluster-on-Die have this. + * AMD Magny-Cours, Intel Cluster-on-Die, and Intel + * Sub-NUMA Clustering have this. */ static bool x86_has_numa_in_package; From d6ef1f194b7569af8b8397876dc9ab07649d63cb Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Tue, 17 Apr 2018 15:27:16 +0200 Subject: [PATCH 077/182] x86/mm: Prevent kernel Oops in PTDUMP code with HIGHPTE=y The walk_pte_level() function just uses __va to get the virtual address of the PTE page, but that breaks when the PTE page is not in the direct mapping with HIGHPTE=y. The result is an unhandled kernel paging request at some random address when accessing the current_kernel or current_user file. Use the correct API to access PTE pages. Fixes: fe770bf0310d ('x86: clean up the page table dumper and add 32-bit support') Signed-off-by: Joerg Roedel Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Cc: jgross@suse.com Cc: JBeulich@suse.com Cc: hpa@zytor.com Cc: aryabinin@virtuozzo.com Cc: kirill.shutemov@linux.intel.com Link: https://lkml.kernel.org/r/1523971636-4137-1-git-send-email-joro@8bytes.org --- arch/x86/mm/dump_pagetables.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 62a7e9f65dec..cc7ff5957194 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -18,6 +18,7 @@ #include #include #include +#include #include @@ -334,16 +335,16 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr, pgprotval_t eff_in, unsigned long P) { int i; - pte_t *start; + pte_t *pte; pgprotval_t prot, eff; - start = (pte_t *)pmd_page_vaddr(addr); for (i = 0; i < PTRS_PER_PTE; i++) { - prot = pte_flags(*start); - eff = effective_prot(eff_in, prot); st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT); + pte = pte_offset_map(&addr, st->current_address); + prot = pte_flags(*pte); + eff = effective_prot(eff_in, prot); note_page(m, st, __pgprot(prot), eff, 5); - start++; + pte_unmap(pte); } } #ifdef CONFIG_KASAN From 800cb2e553d44541b83aa3ec45d9839385fe8ab6 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 16 Apr 2018 14:44:41 +0100 Subject: [PATCH 078/182] arm64: kasan: avoid pfn_to_nid() before page array is initialized In arm64's kasan_init(), we use pfn_to_nid() to find the NUMA node a span of memory is in, hoping to allocate shadow from the same NUMA node. 
However, at this point, the page array has not been initialized, and thus this is bogus. Since commit: f165b378bbdf6c8a ("mm: uninitialized struct page poisoning sanity") ... accessing fields of the page array results in a boot time Oops(), highlighting this problem: [ 0.000000] Unable to handle kernel paging request at virtual address dfff200000000000 [ 0.000000] Mem abort info: [ 0.000000] ESR = 0x96000004 [ 0.000000] Exception class = DABT (current EL), IL = 32 bits [ 0.000000] SET = 0, FnV = 0 [ 0.000000] EA = 0, S1PTW = 0 [ 0.000000] Data abort info: [ 0.000000] ISV = 0, ISS = 0x00000004 [ 0.000000] CM = 0, WnR = 0 [ 0.000000] [dfff200000000000] address between user and kernel address ranges [ 0.000000] Internal error: Oops: 96000004 [#1] PREEMPT SMP [ 0.000000] Modules linked in: [ 0.000000] CPU: 0 PID: 0 Comm: swapper Not tainted 4.16.0-07317-gf165b378bbdf #42 [ 0.000000] Hardware name: ARM Juno development board (r1) (DT) [ 0.000000] pstate: 80000085 (Nzcv daIf -PAN -UAO) [ 0.000000] pc : __asan_load8+0x8c/0xa8 [ 0.000000] lr : __dump_page+0x3c/0x3b8 [ 0.000000] sp : ffff2000099b7ca0 [ 0.000000] x29: ffff2000099b7ca0 x28: ffff20000a1762c0 [ 0.000000] x27: ffff7e0000000000 x26: ffff2000099dd000 [ 0.000000] x25: ffff200009a3f960 x24: ffff200008f9c38c [ 0.000000] x23: ffff20000a9d3000 x22: ffff200009735430 [ 0.000000] x21: fffffffffffffffe x20: ffff7e0001e50420 [ 0.000000] x19: ffff7e0001e50400 x18: 0000000000001840 [ 0.000000] x17: ffffffffffff8270 x16: 0000000000001840 [ 0.000000] x15: 0000000000001920 x14: 0000000000000004 [ 0.000000] x13: 0000000000000000 x12: 0000000000000800 [ 0.000000] x11: 1ffff0012d0f89ff x10: ffff10012d0f89ff [ 0.000000] x9 : 0000000000000000 x8 : ffff8009687c5000 [ 0.000000] x7 : 0000000000000000 x6 : ffff10000f282000 [ 0.000000] x5 : 0000000000000040 x4 : fffffffffffffffe [ 0.000000] x3 : 0000000000000000 x2 : dfff200000000000 [ 0.000000] x1 : 0000000000000005 x0 : 0000000000000000 [ 0.000000] Process swapper (pid: 0, stack limit = 0x (ptrval)) [ 0.000000] Call trace: [ 0.000000] __asan_load8+0x8c/0xa8 [ 0.000000] __dump_page+0x3c/0x3b8 [ 0.000000] dump_page+0xc/0x18 [ 0.000000] kasan_init+0x2e8/0x5a8 [ 0.000000] setup_arch+0x294/0x71c [ 0.000000] start_kernel+0xdc/0x500 [ 0.000000] Code: aa0403e0 9400063c 17ffffee d343fc00 (38e26800) [ 0.000000] ---[ end trace 67064f0e9c0cc338 ]--- [ 0.000000] Kernel panic - not syncing: Attempted to kill the idle task! [ 0.000000] ---[ end Kernel panic - not syncing: Attempted to kill the idle task! ]--- Let's fix this by using early_pfn_to_nid(), as other architectures do in their kasan init code. Note that early_pfn_to_nid acquires the nid from the memblock array, which we iterate over in kasan_init(), so this should be fine. 
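The safe lookup amounts to a one-liner. As a minimal sketch (the helper name early_nid_of() is hypothetical; only the early_pfn_to_nid()/virt_to_pfn() calls come from the patch below):

    /* Hypothetical early-boot helper: resolve the NUMA node of a kernel
     * virtual address. Before the struct page array is initialized, the
     * node id must come from the memblock-backed early_pfn_to_nid();
     * pfn_to_nid() would dereference uninitialized struct pages.
     */
    static int __init early_nid_of(void *addr)
    {
            return early_pfn_to_nid(virt_to_pfn(addr));
    }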
Signed-off-by: Mark Rutland Fixes: 39d114ddc6822302 ("arm64: add KASAN support") Cc: Will Deacon Signed-off-by: Catalin Marinas --- arch/arm64/mm/kasan_init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index dabfc1ecda3d..12145874c02b 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -204,7 +204,7 @@ void __init kasan_init(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); kasan_map_populate(kimg_shadow_start, kimg_shadow_end, - pfn_to_nid(virt_to_pfn(lm_alias(_text)))); + early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); kasan_populate_zero_shadow((void *)KASAN_SHADOW_START, (void *)mod_shadow_start); @@ -224,7 +224,7 @@ void __init kasan_init(void) kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), (unsigned long)kasan_mem_to_shadow(end), - pfn_to_nid(virt_to_pfn(start))); + early_pfn_to_nid(virt_to_pfn(start))); } /* From 4450dc0ae2c18a0ac6dce560215c7a1fa12122b5 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Thu, 5 Apr 2018 17:26:58 +0200 Subject: [PATCH 079/182] clockevents: Fix kernel messages split across multiple lines Convert the clockevents driver from old-style printk() to pr_info() and pr_cont(), to fix split kernel messages like below: Clockevents: could not switch to one-shot mode: dummy_timer is not functional. Signed-off-by: Geert Uytterhoeven Signed-off-by: Thomas Gleixner Cc: Frederic Weisbecker Link: https://lkml.kernel.org/r/1522942018-14471-1-git-send-email-geert%2Brenesas@glider.be --- kernel/time/tick-oneshot.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c index c1f518e7aa80..6fe615d57ebb 100644 --- a/kernel/time/tick-oneshot.c +++ b/kernel/time/tick-oneshot.c @@ -82,16 +82,15 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *)) if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT) || !tick_device_is_functional(dev)) { - printk(KERN_INFO "Clockevents: " - "could not switch to one-shot mode:"); + pr_info("Clockevents: could not switch to one-shot mode:"); if (!dev) { - printk(" no tick device\n"); + pr_cont(" no tick device\n"); } else { if (!tick_device_is_functional(dev)) - printk(" %s is not functional.\n", dev->name); + pr_cont(" %s is not functional.\n", dev->name); else - printk(" %s does not support one-shot mode.\n", - dev->name); + pr_cont(" %s does not support one-shot mode.\n", + dev->name); } return -EINVAL; } From f0ae6a0321222864ed8675a924cc8ee2cb042c31 Mon Sep 17 00:00:00 2001 From: "Liu, Changcheng" Date: Thu, 12 Apr 2018 15:57:01 +0800 Subject: [PATCH 080/182] timers: Remove stale struct tvec_base forward declaration struct tvec_base is a leftover of the original timer wheel implementation and not longer used. Remove the forward declaration. 
Signed-off-by: Liu Changcheng Signed-off-by: Thomas Gleixner Cc: akpm@linux-foundation.org Link: https://lkml.kernel.org/r/20180412075701.GA38952@sofia --- include/linux/timer.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/linux/timer.h b/include/linux/timer.h index 2448f9cc48a3..7b066fd38248 100644 --- a/include/linux/timer.h +++ b/include/linux/timer.h @@ -8,8 +8,6 @@ #include #include -struct tvec_base; - struct timer_list { /* * All fields that change during normal runtime grouped to the From e142aa09ed88be98395dde7acb96fb2263566b68 Mon Sep 17 00:00:00 2001 From: Baolin Wang Date: Fri, 13 Apr 2018 13:27:58 +0800 Subject: [PATCH 081/182] timekeeping: Remove __current_kernel_time() The __current_kernel_time() function based on 'struct timespec' is no longer recommended for new code, and the only user of this function has been replaced by commit 6909e29fdefb ("kdb: use __ktime_get_real_seconds instead of __current_kernel_time"). Remove the obsolete interface. Signed-off-by: Baolin Wang Signed-off-by: Thomas Gleixner Cc: arnd@arndb.de Cc: sboyd@kernel.org Cc: broonie@kernel.org Cc: john.stultz@linaro.org Link: https://lkml.kernel.org/r/1a9dbea7ee2cda7efe9ed330874075cf17fdbff6.1523596316.git.baolin.wang@linaro.org --- include/linux/timekeeping32.h | 3 --- kernel/time/timekeeping.c | 7 ------- 2 files changed, 10 deletions(-) diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h index af4114d5dc17..3616b4becb59 100644 --- a/include/linux/timekeeping32.h +++ b/include/linux/timekeeping32.h @@ -9,9 +9,6 @@ extern void do_gettimeofday(struct timeval *tv); unsigned long get_seconds(void); -/* does not take xtime_lock */ -struct timespec __current_kernel_time(void); - static inline struct timespec current_kernel_time(void) { struct timespec64 now = current_kernel_time64(); diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index ca90219a1e73..dcf7f20fcd12 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c @@ -2139,13 +2139,6 @@ unsigned long get_seconds(void) } EXPORT_SYMBOL(get_seconds); -struct timespec __current_kernel_time(void) -{ - struct timekeeper *tk = &tk_core.timekeeper; - - return timespec64_to_timespec(tk_xtime(tk)); -} - struct timespec64 current_kernel_time64(void) { struct timekeeper *tk = &tk_core.timekeeper; From 0d568cd34eb04acf05c26f360d1a0f071f0bb636 Mon Sep 17 00:00:00 2001 From: "Gustavo A. R. Silva" Date: Fri, 13 Apr 2018 10:13:29 -0500 Subject: [PATCH 082/182] cifs: smb2ops: Fix NULL check in smb2_query_symlink The current code null checks variable err_buf, which is always null when it is checked, hence utf16_path is free'd and the function returns -ENOENT everytime it is called, making it impossible for the execution path to reach the following code: err_buf = err_iov.iov_base; Fix this by null checking err_iov.iov_base instead of err_buf. Also, notice that err_buf no longer needs to be initialized to NULL. Addresses-Coverity-ID: 1467876 ("Logically dead code") Fixes: 2d636199e400 ("cifs: Change SMB2_open to return an iov for the error parameter") Signed-off-by: Gustavo A. R. 
Silva Signed-off-by: Steve French Reviewed-by: Pavel Shilovsky --- fs/cifs/smb2ops.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c index b4ae932ea134..38ebf3f357d2 100644 --- a/fs/cifs/smb2ops.c +++ b/fs/cifs/smb2ops.c @@ -1452,7 +1452,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_open_parms oparms; struct cifs_fid fid; struct kvec err_iov = {NULL, 0}; - struct smb2_err_rsp *err_buf = NULL; + struct smb2_err_rsp *err_buf; struct smb2_symlink_err_rsp *symlink; unsigned int sub_len; unsigned int sub_offset; @@ -1476,7 +1476,7 @@ smb2_query_symlink(const unsigned int xid, struct cifs_tcon *tcon, rc = SMB2_open(xid, &oparms, utf16_path, &oplock, NULL, &err_iov); - if (!rc || !err_buf) { + if (!rc || !err_iov.iov_base) { kfree(utf16_path); return -ENOENT; } From a5240cbde22c86c606c6462d32aea0648c21fdc3 Mon Sep 17 00:00:00 2001 From: Souptick Joarder Date: Sun, 15 Apr 2018 00:58:25 +0530 Subject: [PATCH 083/182] fs: cifs: Adding new return type vm_fault_t Use new return type vm_fault_t for page_mkwrite handler. Signed-off-by: Souptick Joarder Reviewed-by: Matthew Wilcox Signed-off-by: Steve French --- fs/cifs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 4bcd4e838b47..23fd430fe74a 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -3462,7 +3462,7 @@ cifs_read(struct file *file, char *read_data, size_t read_size, loff_t *offset) * If the page is mmap'ed into a process' page tables, then we need to make * sure that it doesn't change while being written back. */ -static int +static vm_fault_t cifs_page_mkwrite(struct vm_fault *vmf) { struct page *page = vmf->page; From 8bf24e8319613bbe950d4188682b3a0d9441b76b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 10 Apr 2018 16:20:53 +1000 Subject: [PATCH 084/182] selftests/filesystems: Don't run dnotify_test by default In commit ce290a19609d ("selftests: add devpts selftests"), the filesystems directory was added to the top-level selftests Makefile. That had the effect of causing the existing dnotify_test in the filesystems directory to now be run as part of the default selftests test-run. Unfortunately dnotify_test is actually an infinite loop. Fix it by moving dnotify_test to TEST_GEN_PROGS_EXTENDED, which says that it's a generated file (ie. built) but should not be run as part of the default test suite run (it's an "extended" test). While we're here cleanup a few other things, devpts_pts should be in TEST_GEN_PROGS to indicate that it's built, and with the above two changes we no longer need a custom all or clean rule. 
Fixes: ce290a19609d ("selftests: add devpts selftests") Signed-off-by: Michael Ellerman Acked-by: Christian Brauner Signed-off-by: Shuah Khan --- tools/testing/selftests/filesystems/Makefile | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/filesystems/Makefile b/tools/testing/selftests/filesystems/Makefile index 4e6d09fb166f..5c7d7001ad37 100644 --- a/tools/testing/selftests/filesystems/Makefile +++ b/tools/testing/selftests/filesystems/Makefile @@ -1,8 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 -TEST_PROGS := dnotify_test devpts_pts -all: $(TEST_PROGS) -include ../lib.mk +TEST_GEN_PROGS := devpts_pts +TEST_GEN_PROGS_EXTENDED := dnotify_test -clean: - rm -fr $(TEST_PROGS) +include ../lib.mk From fcf1fadf4c65eea6c519c773d2d9901e8ad94f5f Mon Sep 17 00:00:00 2001 From: Xidong Wang Date: Wed, 4 Apr 2018 10:38:24 +0100 Subject: [PATCH 085/182] drm/i915: Do not use kfree() to free a kmem_cache_alloc() return value Along the eb_lookup_vmas() error path, the return value from kmem_cache_alloc() was freed using kfree(). Fix it to use the proper kmem_cache_free() instead. Fixes: d1b48c1e7184 ("drm/i915: Replace execbuf vma ht with an idr") Signed-off-by: Xidong Wang Cc: Chris Wilson Cc: Tvrtko Ursulin Cc: # v4.14+ Reviewed-by: Chris Wilson Signed-off-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180404093824.9313-1-chris@chris-wilson.co.uk (cherry picked from commit 6be1187dbffa0027ea379c53f7ca0c782515c610) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_gem_execbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 8c170db8495d..0414228cd2b5 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -728,7 +728,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb) err = radix_tree_insert(handles_vma, handle, vma); if (unlikely(err)) { - kfree(lut); + kmem_cache_free(eb->i915->luts, lut); goto err_obj; } From e6be6bd85654dba55b97758f937c46835d961a44 Mon Sep 17 00:00:00 2001 From: Tvrtko Ursulin Date: Tue, 10 Apr 2018 12:27:04 +0100 Subject: [PATCH 086/182] drm/i915/pmu: Inspect runtime PM state more carefully while estimating RC6 While thinking about sporadic failures of perf_pmu/rc6-runtime-pm* tests on some CI machines I have concluded that: a) the PMU readout of RC6 can race against runtime PM transitions, and b) there are other reasons than being runtime suspended which can cause intel_runtime_pm_get_if_in_use to fail. Therefore when estimating RC6 the code needs to assert we are indeed in suspended state, and if not, the best we can do is return the last known RC6 value. Without this check we can calculate the estimated value based on un-initialized or inappropriate internal state, which can result in over-estimation, or in any case incorrect value being returned. v2: * Re-arrange the code a bit to avoid second unlock and return branch. (Chris Wilson) v3: * Insert some strategic blank lines and improve commit msg.
(Chris Wilson) Signed-off-by: Tvrtko Ursulin Fixes: 1fe699e30113 ("drm/i915/pmu: Fix sleep under atomic in RC6 readout") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105010 Cc: Tvrtko Ursulin Cc: Chris Wilson Cc: Imre Deak Reviewed-by: Chris Wilson Link: https://patchwork.freedesktop.org/patch/msgid/20180410112704.24462-1-tvrtko.ursulin@linux.intel.com (cherry picked from commit 2924bdee21edd6785a4df1b4d17fd3cb265fddd9) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_pmu.c | 37 ++++++++++++++++++++++++--------- 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index d8feb9053e0c..f0519e31543a 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -473,20 +473,37 @@ static u64 get_rc6(struct drm_i915_private *i915) spin_lock_irqsave(&i915->pmu.lock, flags); spin_lock(&kdev->power.lock); - if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) - i915->pmu.suspended_jiffies_last = - kdev->power.suspended_jiffies; + /* + * After the above branch intel_runtime_pm_get_if_in_use failed + * to get the runtime PM reference we cannot assume we are in + * runtime suspend since we can either: a) race with coming out + * of it before we took the power.lock, or b) there are other + * states than suspended which can bring us here. + * + * We need to double-check that we are indeed currently runtime + * suspended and if not we cannot do better than report the last + * known RC6 value. + */ + if (kdev->power.runtime_status == RPM_SUSPENDED) { + if (!i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) + i915->pmu.suspended_jiffies_last = + kdev->power.suspended_jiffies; - val = kdev->power.suspended_jiffies - - i915->pmu.suspended_jiffies_last; - val += jiffies - kdev->power.accounting_timestamp; + val = kdev->power.suspended_jiffies - + i915->pmu.suspended_jiffies_last; + val += jiffies - kdev->power.accounting_timestamp; - spin_unlock(&kdev->power.lock); + val = jiffies_to_nsecs(val); + val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - val = jiffies_to_nsecs(val); - val += i915->pmu.sample[__I915_SAMPLE_RC6].cur; - i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur = val; + } else if (i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur) { + val = i915->pmu.sample[__I915_SAMPLE_RC6_ESTIMATED].cur; + } else { + val = i915->pmu.sample[__I915_SAMPLE_RC6].cur; + } + spin_unlock(&kdev->power.lock); spin_unlock_irqrestore(&i915->pmu.lock, flags); } From a3520b8992e57bc94ab6ec9f95f09c6c932555fd Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 11 Apr 2018 16:15:18 +0300 Subject: [PATCH 087/182] drm/i915/bios: filter out invalid DDC pins from VBT child devices The VBT contains the DDC pin to use for specific ports. Alas, sometimes the field appears to contain bogus data, and while we check for it later on in intel_gmbus_get_adapter() we fail to check the returned NULL on errors. Oops results. The simplest approach seems to be to catch and ignore the bogus DDC pins already at the VBT parsing phase, reverting to fixed per port default pins. This doesn't guarantee display working, but at least it prevents the oops. And we continue to be fuzzed by VBT. One affected machine is Dell Latitude 5590 where a BIOS upgrade added invalid DDC pins. 
Typical backtrace: [ 35.461411] WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)) [ 35.461432] WARNING: CPU: 6 PID: 411 at drivers/gpu/drm/i915/intel_i2c.c:844 intel_gmbus_get_adapter+0x32/0x37 [i915] [ 35.461437] Modules linked in: i915 ahci libahci dm_snapshot dm_bufio dm_raid raid456 async_raid6_recov async_pq raid6_pq async_xor xor async_memcpy async_tx [ 35.461445] CPU: 6 PID: 411 Comm: kworker/u16:2 Not tainted 4.16.0-rc7.x64-g1cda370ffded #1 [ 35.461447] Hardware name: Dell Inc. Latitude 5590/0MM81M, BIOS 1.1.9 03/13/2018 [ 35.461450] Workqueue: events_unbound async_run_entry_fn [ 35.461465] RIP: 0010:intel_gmbus_get_adapter+0x32/0x37 [i915] [ 35.461467] RSP: 0018:ffff9b4e43d47c40 EFLAGS: 00010286 [ 35.461469] RAX: 0000000000000000 RBX: ffff98f90639f800 RCX: ffffffffae051960 [ 35.461471] RDX: 0000000000000001 RSI: 0000000000000092 RDI: 0000000000000246 [ 35.461472] RBP: ffff98f905410000 R08: 0000004d062a83f6 R09: 00000000000003bd [ 35.461474] R10: 0000000000000031 R11: ffffffffad4eda58 R12: ffff98f905410000 [ 35.461475] R13: ffff98f9064c1000 R14: ffff9b4e43d47cf0 R15: ffff98f905410000 [ 35.461477] FS: 0000000000000000(0000) GS:ffff98f92e580000(0000) knlGS:0000000000000000 [ 35.461479] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 35.461481] CR2: 00007f5682359008 CR3: 00000001b700c005 CR4: 00000000003606e0 [ 35.461483] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 35.461484] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 35.461486] Call Trace: [ 35.461501] intel_hdmi_set_edid+0x37/0x27f [i915] [ 35.461515] intel_hdmi_detect+0x7c/0x97 [i915] [ 35.461518] drm_helper_probe_single_connector_modes+0xe1/0x6c0 [ 35.461521] drm_setup_crtcs+0x129/0xa6a [ 35.461523] ? __switch_to_asm+0x34/0x70 [ 35.461525] ? __switch_to_asm+0x34/0x70 [ 35.461527] ? __switch_to_asm+0x40/0x70 [ 35.461528] ? __switch_to_asm+0x34/0x70 [ 35.461529] ? __switch_to_asm+0x40/0x70 [ 35.461531] ? __switch_to_asm+0x34/0x70 [ 35.461532] ? __switch_to_asm+0x40/0x70 [ 35.461534] ? __switch_to_asm+0x34/0x70 [ 35.461536] __drm_fb_helper_initial_config_and_unlock+0x34/0x46f [ 35.461538] ? __switch_to_asm+0x40/0x70 [ 35.461541] ? _cond_resched+0x10/0x33 [ 35.461557] intel_fbdev_initial_config+0xf/0x1c [i915] [ 35.461560] async_run_entry_fn+0x2e/0xf5 [ 35.461563] process_one_work+0x15b/0x364 [ 35.461565] worker_thread+0x2c/0x3a0 [ 35.461567] ? process_one_work+0x364/0x364 [ 35.461568] kthread+0x10c/0x122 [ 35.461570] ? _kthread_create_on_node+0x5d/0x5d [ 35.461572] ret_from_fork+0x35/0x40 [ 35.461574] Code: 74 16 89 f6 48 8d 04 b6 48 c1 e0 05 48 29 f0 48 8d 84 c7 e8 11 00 00 c3 48 c7 c6 b0 19 1e c0 48 c7 c7 64 8a 1c c0 e8 47 88 ed ec <0f> 0b 31 c0 c3 8b 87 a4 04 00 00 80 e4 fc 09 c6 89 b7 a4 04 00 [ 35.461604] WARNING: CPU: 6 PID: 411 at drivers/gpu/drm/i915/intel_i2c.c:844 intel_gmbus_get_adapter+0x32/0x37 [i915] [ 35.461606] ---[ end trace 4fe1e63e2dd93373 ]--- [ 35.461609] BUG: unable to handle kernel NULL pointer dereference at 0000000000000010 [ 35.461613] IP: i2c_transfer+0x4/0x86 [ 35.461614] PGD 0 P4D 0 [ 35.461616] Oops: 0000 [#1] SMP PTI [ 35.461618] Modules linked in: i915 ahci libahci dm_snapshot dm_bufio dm_raid raid456 async_raid6_recov async_pq raid6_pq async_xor xor async_memcpy async_tx [ 35.461624] CPU: 6 PID: 411 Comm: kworker/u16:2 Tainted: G W 4.16.0-rc7.x64-g1cda370ffded #1 [ 35.461625] Hardware name: Dell Inc. 
Latitude 5590/0MM81M, BIOS 1.1.9 03/13/2018 [ 35.461628] Workqueue: events_unbound async_run_entry_fn [ 35.461630] RIP: 0010:i2c_transfer+0x4/0x86 [ 35.461631] RSP: 0018:ffff9b4e43d47b30 EFLAGS: 00010246 [ 35.461633] RAX: ffff9b4e43d47b6e RBX: 0000000000000005 RCX: 0000000000000001 [ 35.461635] RDX: 0000000000000002 RSI: ffff9b4e43d47b80 RDI: 0000000000000000 [ 35.461636] RBP: ffff9b4e43d47bd8 R08: 0000004d062a83f6 R09: 00000000000003bd [ 35.461638] R10: 0000000000000031 R11: ffffffffad4eda58 R12: 0000000000000002 [ 35.461639] R13: 0000000000000001 R14: ffff9b4e43d47b6f R15: ffff9b4e43d47c07 [ 35.461641] FS: 0000000000000000(0000) GS:ffff98f92e580000(0000) knlGS:0000000000000000 [ 35.461643] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 35.461645] CR2: 0000000000000010 CR3: 00000001b700c005 CR4: 00000000003606e0 [ 35.461646] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 35.461647] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [ 35.461649] Call Trace: [ 35.461652] drm_do_probe_ddc_edid+0xb3/0x128 [ 35.461654] drm_get_edid+0xe5/0x38d [ 35.461669] intel_hdmi_set_edid+0x45/0x27f [i915] [ 35.461684] intel_hdmi_detect+0x7c/0x97 [i915] [ 35.461687] drm_helper_probe_single_connector_modes+0xe1/0x6c0 [ 35.461689] drm_setup_crtcs+0x129/0xa6a [ 35.461691] ? __switch_to_asm+0x34/0x70 [ 35.461693] ? __switch_to_asm+0x34/0x70 [ 35.461694] ? __switch_to_asm+0x40/0x70 [ 35.461696] ? __switch_to_asm+0x34/0x70 [ 35.461697] ? __switch_to_asm+0x40/0x70 [ 35.461698] ? __switch_to_asm+0x34/0x70 [ 35.461700] ? __switch_to_asm+0x40/0x70 [ 35.461701] ? __switch_to_asm+0x34/0x70 [ 35.461703] __drm_fb_helper_initial_config_and_unlock+0x34/0x46f [ 35.461705] ? __switch_to_asm+0x40/0x70 [ 35.461707] ? _cond_resched+0x10/0x33 [ 35.461724] intel_fbdev_initial_config+0xf/0x1c [i915] [ 35.461727] async_run_entry_fn+0x2e/0xf5 [ 35.461729] process_one_work+0x15b/0x364 [ 35.461731] worker_thread+0x2c/0x3a0 [ 35.461733] ? process_one_work+0x364/0x364 [ 35.461734] kthread+0x10c/0x122 [ 35.461736] ? _kthread_create_on_node+0x5d/0x5d [ 35.461738] ret_from_fork+0x35/0x40 [ 35.461739] Code: 5c fa e1 ad 48 89 df e8 ea fb ff ff e9 2a ff ff ff 0f 1f 44 00 00 31 c0 e9 43 fd ff ff 31 c0 45 31 e4 e9 c5 fd ff ff 41 54 55 53 <48> 8b 47 10 48 83 78 10 00 74 70 41 89 d4 48 89 f5 48 89 fb 65 [ 35.461756] RIP: i2c_transfer+0x4/0x86 RSP: ffff9b4e43d47b30 [ 35.461757] CR2: 0000000000000010 [ 35.461759] ---[ end trace 4fe1e63e2dd93374 ]--- Based on a patch by Fei Li. 
v2: s/reverting/sticking/ (Chris) Cc: stable@vger.kernel.org Cc: Fei Li Co-developed-by: Fei Li Reported-by: Pavel Nakonechnyi Reported-and-tested-by: Seweryn Kokot Reported-and-tested-by: Laszlo Valko Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105549 Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105961 Reviewed-by: Chris Wilson Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180411131519.9091-1-jani.nikula@intel.com (cherry picked from commit f212bf9abe5de9f938fecea7df07046e74052dde) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/intel_bios.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index c5c7530ba157..447b721c3be9 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c @@ -1256,7 +1256,6 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, return; aux_channel = child->aux_channel; - ddc_pin = child->ddc_pin; is_dvi = child->device_type & DEVICE_TYPE_TMDS_DVI_SIGNALING; is_dp = child->device_type & DEVICE_TYPE_DISPLAYPORT_OUTPUT; @@ -1303,9 +1302,15 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv, enum port port, DRM_DEBUG_KMS("Port %c is internal DP\n", port_name(port)); if (is_dvi) { - info->alternate_ddc_pin = map_ddc_pin(dev_priv, ddc_pin); - - sanitize_ddc_pin(dev_priv, port); + ddc_pin = map_ddc_pin(dev_priv, child->ddc_pin); + if (intel_gmbus_is_valid_pin(dev_priv, ddc_pin)) { + info->alternate_ddc_pin = ddc_pin; + sanitize_ddc_pin(dev_priv, port); + } else { + DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, " + "sticking to defaults\n", + port_name(port), ddc_pin); + } } if (is_dp) { From 4a0559ed99189f259e92ed34f60dd51688f1bc40 Mon Sep 17 00:00:00 2001 From: Chris Wilson Date: Sat, 14 Apr 2018 10:12:33 +0100 Subject: [PATCH 088/182] drm/i915: Call i915_perf_fini() on init_hw error unwind We have to cleanup after i915_perf_init(), even on the error path, as it passes a pointer into the module to the sysfs core. If we fail to unregister the sysctl table, we leave a dangling pointer which then may explode anytime later. Fixes: 9f9b2792b6d3 ("drm/i915/perf: reuse timestamp frequency from device info") Signed-off-by: Chris Wilson Cc: Lionel Landwerlin Cc: Matthew Auld Reviewed-by: Lionel Landwerlin Reviewed-by: Michal Wajdeczko Link: https://patchwork.freedesktop.org/patch/msgid/20180414091233.32224-1-chris@chris-wilson.co.uk (cherry picked from commit 9f172f6fbd243759c808d97bd83c95e49325b2c9) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/i915_drv.c | 27 +++++++++++++++------------ 1 file changed, 15 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 07c07d55398b..be8555049c93 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -1102,30 +1102,32 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = i915_ggtt_probe_hw(dev_priv); if (ret) - return ret; + goto err_perf; - /* WARNING: Apparently we must kick fbdev drivers before vgacon, - * otherwise the vga fbdev driver falls over. */ + /* + * WARNING: Apparently we must kick fbdev drivers before vgacon, + * otherwise the vga fbdev driver falls over. 
+ */ ret = i915_kick_out_firmware_fb(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); - goto out_ggtt; + goto err_ggtt; } ret = i915_kick_out_vgacon(dev_priv); if (ret) { DRM_ERROR("failed to remove conflicting VGA console\n"); - goto out_ggtt; + goto err_ggtt; } ret = i915_ggtt_init_hw(dev_priv); if (ret) - return ret; + goto err_ggtt; ret = i915_ggtt_enable_hw(dev_priv); if (ret) { DRM_ERROR("failed to enable GGTT\n"); - goto out_ggtt; + goto err_ggtt; } pci_set_master(pdev); @@ -1136,7 +1138,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto out_ggtt; + goto err_ggtt; } } @@ -1154,7 +1156,7 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) if (ret) { DRM_ERROR("failed to set DMA mask\n"); - goto out_ggtt; + goto err_ggtt; } } @@ -1187,13 +1189,14 @@ static int i915_driver_init_hw(struct drm_i915_private *dev_priv) ret = intel_gvt_init(dev_priv); if (ret) - goto out_ggtt; + goto err_ggtt; return 0; -out_ggtt: +err_ggtt: i915_ggtt_cleanup_hw(dev_priv); - +err_perf: + i915_perf_fini(dev_priv); return ret; } From b4615730530be85fc45ab4631c2ad6d8e2d0b97d Mon Sep 17 00:00:00 2001 From: Gaurav K Singh Date: Tue, 17 Apr 2018 23:52:18 +0530 Subject: [PATCH 089/182] drm/i915/audio: Fix audio detection issue on GLK On Geminilake, sometimes audio card is not getting detected after reboot. This is a spurious issue happening on Geminilake. HW codec and HD audio controller link was going out of sync for which there was a fix in i915 driver but was not getting invoked for GLK. Extending this fix to GLK as well. Tested by Du,Wenkai on GLK board. Bspec: 21829 v2: Instead of checking GEN9_BC, BXT and GLK macros, use IS_GEN9 macro (Jani N) Cc: # b651bd2a3ae3 ("drm/i915/audio: Fix audio enumeration issue on BXT") Cc: Signed-off-by: Gaurav K Singh Reviewed-by: Abhay Kumar Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1523989338-29677-1-git-send-email-gaurav.k.singh@intel.com (cherry picked from commit 8221229046e862977ae93ec9d34aa583fbd10397) Signed-off-by: Joonas Lahtinen --- drivers/gpu/drm/i915/intel_audio.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c index 709d6ca68074..3ea566f99450 100644 --- a/drivers/gpu/drm/i915/intel_audio.c +++ b/drivers/gpu/drm/i915/intel_audio.c @@ -729,7 +729,7 @@ static void i915_audio_component_codec_wake_override(struct device *kdev, struct drm_i915_private *dev_priv = kdev_to_i915(kdev); u32 tmp; - if (!IS_GEN9_BC(dev_priv)) + if (!IS_GEN9(dev_priv)) return; i915_audio_component_get_power(kdev); From 7eb2c4dd54ff841f2fe509a84973eb25fa20bda2 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Mon, 16 Apr 2018 18:53:09 +0300 Subject: [PATCH 090/182] drm/i915: Fix LSPCON TMDS output buffer enabling from low-power state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit LSPCON adapters in low-power state may ignore the first I2C write during TMDS output buffer enabling, resulting in a blank screen even with an otherwise enabled pipe. Fix this by reading back and validating the written value a few times. The problem was noticed on GLK machines with an onboard LSPCON adapter after entering/exiting DC5 power state. Doing an I2C read of the adapter ID as the first transaction - instead of the I2C write to enable the TMDS buffers - returns the correct value. 
Based on this we assume that the transaction itself is sent properly, it's only the adapter that is not ready for some reason to accept this first write after waking from low-power state. In my case the second I2C write attempt always succeeded. Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=105854 Cc: Clinton Taylor Cc: Ville Syrjälä Cc: stable@vger.kernel.org Signed-off-by: Imre Deak Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20180416155309.11100-1-imre.deak@intel.com --- drivers/gpu/drm/drm_dp_dual_mode_helper.c | 39 +++++++++++++++++++---- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c index 02a50929af67..e7f4fe2848a5 100644 --- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c @@ -350,19 +350,44 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type, { uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE; ssize_t ret; + int retry; if (type < DRM_DP_DUAL_MODE_TYPE2_DVI) return 0; - ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, - &tmds_oen, sizeof(tmds_oen)); - if (ret) { - DRM_DEBUG_KMS("Failed to %s TMDS output buffers\n", - enable ? "enable" : "disable"); - return ret; + /* + * LSPCON adapters in low-power state may ignore the first write, so + * read back and verify the written value a few times. + */ + for (retry = 0; retry < 3; retry++) { + uint8_t tmp; + + ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmds_oen, sizeof(tmds_oen)); + if (ret) { + DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n", + enable ? "enable" : "disable", + retry + 1); + return ret; + } + + ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN, + &tmp, sizeof(tmp)); + if (ret) { + DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n", + enable ? "enabling" : "disabling", + retry + 1); + return ret; + } + + if (tmp == tmds_oen) + return 0; } - return 0; + DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n", + enable ? "enabling" : "disabling"); + + return -EIO; } EXPORT_SYMBOL(drm_dp_dual_mode_set_tmds_output); From b2d71b3cda19831ec67f49d7c6ba0214d9367b29 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 16 Apr 2018 16:45:01 +0100 Subject: [PATCH 091/182] arm64: signal: don't force known signals to SIGKILL Since commit: a7e6f1ca90354a31 ("arm64: signal: Force SIGKILL for unknown signals in force_signal_inject") ... any signal which is not SIGKILL will be upgraded to a SIGKILL be force_signal_inject(). This includes signals we do expect, such as SIGILL triggered by do_undefinstr(). Fix the check to use a logical AND rather than a logical OR, permitting signals whose layout is SIL_FAULT. 
Fixes: a7e6f1ca90354a31 ("arm64: signal: Force SIGKILL for unknown signals in force_signal_inject") Cc: Will Deacon Reviewed-by: Dave Martin Signed-off-by: Mark Rutland Signed-off-by: Catalin Marinas --- arch/arm64/kernel/traps.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index ba964da31a25..1cb2749a72bf 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -366,7 +366,7 @@ void force_signal_inject(int signal, int code, unsigned long address) } /* Force signals we don't understand to SIGKILL */ - if (WARN_ON(signal != SIGKILL || + if (WARN_ON(signal != SIGKILL && siginfo_layout(signal, code) != SIL_FAULT)) { signal = SIGKILL; } From a514d63882c3d2063b21b865447266ebcb18b04c Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Fri, 22 Dec 2017 16:06:39 +0800 Subject: [PATCH 092/182] btrfs: qgroup: Commit transaction in advance to reduce early EDQUOT Unlike previous method that tries to commit transaction inside qgroup_reserve(), this time we will try to commit transaction using fs_info->transaction_kthread to avoid nested transaction and no need to worry about locking context. Since it's an asynchronous function call and we won't wait for transaction commit, unlike previous method, we must call it before we hit the qgroup limit. So this patch will use the ratio and size of qgroup meta_pertrans reservation as indicator to check if we should trigger a transaction commit. (meta_prealloc won't be cleaned in transaction committ, it's useless anyway) Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/ctree.h | 6 ++++++ fs/btrfs/disk-io.c | 1 + fs/btrfs/qgroup.c | 43 ++++++++++++++++++++++++++++++++++++++++-- fs/btrfs/transaction.c | 1 + fs/btrfs/transaction.h | 14 ++++++++++++++ 5 files changed, 63 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 5474ef14d6e6..ec84e2dabb04 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -714,6 +714,12 @@ struct btrfs_delayed_root; */ #define BTRFS_FS_EXCL_OP 16 +/* + * To info transaction_kthread we need an immediate commit so it doesn't + * need to wait for commit_interval + */ +#define BTRFS_FS_NEED_ASYNC_COMMIT 17 + struct btrfs_fs_info { u8 fsid[BTRFS_FSID_SIZE]; u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 4ac8b1d21baf..60caa68c3618 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1824,6 +1824,7 @@ static int transaction_kthread(void *arg) now = get_seconds(); if (cur->state < TRANS_STATE_BLOCKED && + !test_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags) && (now < cur->start_time || now - cur->start_time < fs_info->commit_interval)) { spin_unlock(&fs_info->trans_lock); diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 09c7e4fd550f..9fb758d5077a 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -11,6 +11,7 @@ #include #include #include +#include #include "ctree.h" #include "transaction.h" @@ -2375,8 +2376,21 @@ int btrfs_qgroup_inherit(struct btrfs_trans_handle *trans, return ret; } -static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) +/* + * Two limits to commit transaction in advance. + * + * For RATIO, it will be 1/RATIO of the remaining limit + * (excluding data and prealloc meta) as threshold. + * For SIZE, it will be in byte unit as threshold. 
+ */ +#define QGROUP_PERTRANS_RATIO 32 +#define QGROUP_PERTRANS_SIZE SZ_32M +static bool qgroup_check_limits(struct btrfs_fs_info *fs_info, + const struct btrfs_qgroup *qg, u64 num_bytes) { + u64 limit; + u64 threshold; + if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) && qgroup_rsv_total(qg) + (s64)qg->rfer + num_bytes > qg->max_rfer) return false; @@ -2385,6 +2399,31 @@ static bool qgroup_check_limits(const struct btrfs_qgroup *qg, u64 num_bytes) qgroup_rsv_total(qg) + (s64)qg->excl + num_bytes > qg->max_excl) return false; + /* + * Even if we passed the check, it's better to check if reservation + * for meta_pertrans is pushing us near limit. + * If there is too much pertrans reservation or it's near the limit, + * let's try commit transaction to free some, using transaction_kthread + */ + if ((qg->lim_flags & (BTRFS_QGROUP_LIMIT_MAX_RFER | + BTRFS_QGROUP_LIMIT_MAX_EXCL))) { + if (qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_EXCL) + limit = qg->max_excl; + else + limit = qg->max_rfer; + threshold = (limit - qg->rsv.values[BTRFS_QGROUP_RSV_DATA] - + qg->rsv.values[BTRFS_QGROUP_RSV_META_PREALLOC]) / + QGROUP_PERTRANS_RATIO; + threshold = min_t(u64, threshold, QGROUP_PERTRANS_SIZE); + + /* + * Use transaction_kthread to commit transaction, so we no + * longer need to bother nested transaction nor lock context. + */ + if (qg->rsv.values[BTRFS_QGROUP_RSV_META_PERTRANS] > threshold) + btrfs_commit_transaction_locksafe(fs_info); + } + return true; } @@ -2434,7 +2473,7 @@ static int qgroup_reserve(struct btrfs_root *root, u64 num_bytes, bool enforce, qg = unode_aux_to_qgroup(unode); - if (enforce && !qgroup_check_limits(qg, num_bytes)) { + if (enforce && !qgroup_check_limits(fs_info, qg, num_bytes)) { ret = -EDQUOT; goto out; } diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 63fdcab64b01..c944b4769e3c 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -2267,6 +2267,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) */ cur_trans->state = TRANS_STATE_COMPLETED; wake_up(&cur_trans->commit_wait); + clear_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); spin_lock(&fs_info->trans_lock); list_del_init(&cur_trans->list); diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index c88fccd80bc5..d8c0826bc2c7 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -199,6 +199,20 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root); int btrfs_commit_transaction(struct btrfs_trans_handle *trans); int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, int wait_for_unblock); + +/* + * Try to commit transaction asynchronously, so this is safe to call + * even holding a spinlock. + * + * It's done by informing transaction_kthread to commit transaction without + * waiting for commit interval. 
+ */ +static inline void btrfs_commit_transaction_locksafe( + struct btrfs_fs_info *fs_info) +{ + set_bit(BTRFS_FS_NEED_ASYNC_COMMIT, &fs_info->flags); + wake_up_process(fs_info->transaction_kthread); +} int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans); int btrfs_should_end_transaction(struct btrfs_trans_handle *trans); void btrfs_throttle(struct btrfs_fs_info *fs_info); From ff6bc37eb7f6e7b052e50c13a480e1080b3ec07a Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 21 Dec 2017 13:42:04 +0800 Subject: [PATCH 093/182] btrfs: qgroup: Use independent and accurate per inode qgroup rsv Unlike reservation calculation used in inode rsv for metadata, qgroup doesn't really need to care about things like csum size or extent usage for the whole tree COW. Qgroups care more about net change of the extent usage. That's to say, if we're going to insert one file extent, it will mostly find its place in COWed tree block, leaving no change in extent usage. Or causing a leaf split, resulting in one new net extent and increasing qgroup number by nodesize. Or in an even more rare case, increase the tree level, increasing qgroup number by 2 * nodesize. So here instead of using the complicated calculation for extent allocator, which cares more about accuracy and no error, qgroup doesn't need that over-estimated reservation. This patch will maintain 2 new members in btrfs_block_rsv structure for qgroup, using much smaller calculation for qgroup rsv, reducing false EDQUOT. Signed-off-by: David Sterba Signed-off-by: Qu Wenruo --- fs/btrfs/ctree.h | 19 ++++++++++++++ fs/btrfs/extent-tree.c | 57 ++++++++++++++++++++++++++++++++++-------- 2 files changed, 65 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index ec84e2dabb04..2771cc56a622 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -459,6 +459,25 @@ struct btrfs_block_rsv { unsigned short full; unsigned short type; unsigned short failfast; + + /* + * Qgroup equivalent for @size @reserved + * + * Unlike normal @size/@reserved for inode rsv, qgroup doesn't care + * about things like csum size nor how many tree blocks it will need to + * reserve. + * + * Qgroup cares more about net change of the extent usage. + * + * So for one newly inserted file extent, in worst case it will cause + * leaf split and level increase, nodesize for each file extent is + * already too much. + * + * In short, qgroup_size/reserved is the upper limit of possible needed + * qgroup metadata reservation. 
+ */ + u64 qgroup_rsv_size; + u64 qgroup_rsv_reserved; }; /* diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 5bdb5636d552..055494ddcace 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -5560,14 +5560,18 @@ static void space_info_add_new_bytes(struct btrfs_fs_info *fs_info, static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, - struct btrfs_block_rsv *dest, u64 num_bytes) + struct btrfs_block_rsv *dest, u64 num_bytes, + u64 *qgroup_to_release_ret) { struct btrfs_space_info *space_info = block_rsv->space_info; + u64 qgroup_to_release = 0; u64 ret; spin_lock(&block_rsv->lock); - if (num_bytes == (u64)-1) + if (num_bytes == (u64)-1) { num_bytes = block_rsv->size; + qgroup_to_release = block_rsv->qgroup_rsv_size; + } block_rsv->size -= num_bytes; if (block_rsv->reserved >= block_rsv->size) { num_bytes = block_rsv->reserved - block_rsv->size; @@ -5576,6 +5580,13 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, } else { num_bytes = 0; } + if (block_rsv->qgroup_rsv_reserved >= block_rsv->qgroup_rsv_size) { + qgroup_to_release = block_rsv->qgroup_rsv_reserved - + block_rsv->qgroup_rsv_size; + block_rsv->qgroup_rsv_reserved = block_rsv->qgroup_rsv_size; + } else { + qgroup_to_release = 0; + } spin_unlock(&block_rsv->lock); ret = num_bytes; @@ -5598,6 +5609,8 @@ static u64 block_rsv_release_bytes(struct btrfs_fs_info *fs_info, space_info_add_old_bytes(fs_info, space_info, num_bytes); } + if (qgroup_to_release_ret) + *qgroup_to_release_ret = qgroup_to_release; return ret; } @@ -5739,17 +5752,21 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode, struct btrfs_root *root = inode->root; struct btrfs_block_rsv *block_rsv = &inode->block_rsv; u64 num_bytes = 0; + u64 qgroup_num_bytes = 0; int ret = -ENOSPC; spin_lock(&block_rsv->lock); if (block_rsv->reserved < block_rsv->size) num_bytes = block_rsv->size - block_rsv->reserved; + if (block_rsv->qgroup_rsv_reserved < block_rsv->qgroup_rsv_size) + qgroup_num_bytes = block_rsv->qgroup_rsv_size - + block_rsv->qgroup_rsv_reserved; spin_unlock(&block_rsv->lock); if (num_bytes == 0) return 0; - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); + ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_num_bytes, true); if (ret) return ret; ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush); @@ -5757,7 +5774,13 @@ static int btrfs_inode_rsv_refill(struct btrfs_inode *inode, block_rsv_add_bytes(block_rsv, num_bytes, 0); trace_btrfs_space_reservation(root->fs_info, "delalloc", btrfs_ino(inode), num_bytes, 1); - } + + /* Don't forget to increase qgroup_rsv_reserved */ + spin_lock(&block_rsv->lock); + block_rsv->qgroup_rsv_reserved += qgroup_num_bytes; + spin_unlock(&block_rsv->lock); + } else + btrfs_qgroup_free_meta_prealloc(root, qgroup_num_bytes); return ret; } @@ -5778,20 +5801,23 @@ static void btrfs_inode_rsv_release(struct btrfs_inode *inode, bool qgroup_free) struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv; struct btrfs_block_rsv *block_rsv = &inode->block_rsv; u64 released = 0; + u64 qgroup_to_release = 0; /* * Since we statically set the block_rsv->size we just want to say we * are releasing 0 bytes, and then we'll just get the reservation over * the size free'd. 
*/ - released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0); + released = block_rsv_release_bytes(fs_info, block_rsv, global_rsv, 0, + &qgroup_to_release); if (released > 0) trace_btrfs_space_reservation(fs_info, "delalloc", btrfs_ino(inode), released, 0); if (qgroup_free) - btrfs_qgroup_free_meta_prealloc(inode->root, released); + btrfs_qgroup_free_meta_prealloc(inode->root, qgroup_to_release); else - btrfs_qgroup_convert_reserved_meta(inode->root, released); + btrfs_qgroup_convert_reserved_meta(inode->root, + qgroup_to_release); } void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, @@ -5803,7 +5829,7 @@ void btrfs_block_rsv_release(struct btrfs_fs_info *fs_info, if (global_rsv == block_rsv || block_rsv->space_info != global_rsv->space_info) global_rsv = NULL; - block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes); + block_rsv_release_bytes(fs_info, block_rsv, global_rsv, num_bytes, NULL); } static void update_global_block_rsv(struct btrfs_fs_info *fs_info) @@ -5883,7 +5909,7 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info) static void release_global_block_rsv(struct btrfs_fs_info *fs_info) { block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL, - (u64)-1); + (u64)-1, NULL); WARN_ON(fs_info->trans_block_rsv.size > 0); WARN_ON(fs_info->trans_block_rsv.reserved > 0); WARN_ON(fs_info->chunk_block_rsv.size > 0); @@ -5907,7 +5933,7 @@ void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans) WARN_ON_ONCE(!list_empty(&trans->new_bgs)); block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL, - trans->chunk_bytes_reserved); + trans->chunk_bytes_reserved, NULL); trans->chunk_bytes_reserved = 0; } @@ -6012,6 +6038,7 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, { struct btrfs_block_rsv *block_rsv = &inode->block_rsv; u64 reserve_size = 0; + u64 qgroup_rsv_size = 0; u64 csum_leaves; unsigned outstanding_extents; @@ -6024,9 +6051,17 @@ static void btrfs_calculate_inode_block_rsv_size(struct btrfs_fs_info *fs_info, inode->csum_bytes); reserve_size += btrfs_calc_trans_metadata_size(fs_info, csum_leaves); + /* + * For qgroup rsv, the calculation is very simple: + * account one nodesize for each outstanding extent + * + * This is overestimating in most cases. + */ + qgroup_rsv_size = outstanding_extents * fs_info->nodesize; spin_lock(&block_rsv->lock); block_rsv->size = reserve_size; + block_rsv->qgroup_rsv_size = qgroup_rsv_size; spin_unlock(&block_rsv->lock); } @@ -8405,7 +8440,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info, struct btrfs_block_rsv *block_rsv, u32 blocksize) { block_rsv_add_bytes(block_rsv, blocksize, 0); - block_rsv_release_bytes(fs_info, block_rsv, NULL, 0); + block_rsv_release_bytes(fs_info, block_rsv, NULL, 0, NULL); } /* From f218ea6c4792e0fabba0195f2f866d0a3b58431e Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 17 Apr 2018 16:52:45 +0800 Subject: [PATCH 094/182] btrfs: delayed-inode: Remove wrong qgroup meta reservation calls Commit 4f5427ccce5d ("btrfs: delayed-inode: Use new qgroup meta rsv for delayed inode and item") merged into mainline was not latest version submitted to the mail list in Dec 2017. Which lacks the following fixes: 1) Remove btrfs_qgroup_convert_reserved_meta() call in btrfs_delayed_item_release_metadata() 2) Remove btrfs_qgroup_reserve_meta_prealloc() call in btrfs_delayed_inode_reserve_metadata() Those fixes will resolve unexpected EDQUOT problems. 
Fixes: 4f5427ccce5d ("btrfs: delayed-inode: Use new qgroup meta rsv for delayed inode and item") Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/delayed-inode.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 06ec8ab6d9ba..a8d492dbd3e7 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -556,6 +556,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, dst_rsv = &fs_info->delayed_block_rsv; num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); + + /* + * Here we migrate space rsv from transaction rsv, since have already + * reserved space when starting a transaction. So no need to reserve + * qgroup space here. + */ ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes, 1); if (!ret) { trace_btrfs_space_reservation(fs_info, "delayed_item", @@ -577,7 +583,10 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, return; rsv = &fs_info->delayed_block_rsv; - btrfs_qgroup_convert_reserved_meta(root, item->bytes_reserved); + /* + * Check btrfs_delayed_item_reserve_metadata() to see why we don't need + * to release/reserve qgroup space. + */ trace_btrfs_space_reservation(fs_info, "delayed_item", item->key.objectid, item->bytes_reserved, 0); @@ -602,9 +611,6 @@ static int btrfs_delayed_inode_reserve_metadata( num_bytes = btrfs_calc_trans_metadata_size(fs_info, 1); - ret = btrfs_qgroup_reserve_meta_prealloc(root, num_bytes, true); - if (ret < 0) - return ret; /* * btrfs_dirty_inode will update the inode under btrfs_join_transaction * which doesn't reserve space for speed. This is a problem since we @@ -616,6 +622,10 @@ static int btrfs_delayed_inode_reserve_metadata( */ if (!src_rsv || (!trans->bytes_reserved && src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) { + ret = btrfs_qgroup_reserve_meta_prealloc(root, + fs_info->nodesize, true); + if (ret < 0) + return ret; ret = btrfs_block_rsv_add(root, dst_rsv, num_bytes, BTRFS_RESERVE_NO_FLUSH); /* @@ -634,6 +644,8 @@ static int btrfs_delayed_inode_reserve_metadata( "delayed_inode", btrfs_ino(inode), num_bytes, 1); + } else { + btrfs_qgroup_free_meta_prealloc(root, fs_info->nodesize); } return ret; } From 336a8bb8e36a273a802a54b2e673c777c9c62fb1 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Tue, 17 Apr 2018 18:43:58 +0800 Subject: [PATCH 095/182] btrfs: Fix wrong btrfs_delalloc_release_extents parameter Commit 43b18595d660 ("btrfs: qgroup: Use separate meta reservation type for delalloc") merged into mainline is not the latest version submitted to mail list in Dec 2017. It has a fatal wrong @qgroup_free parameter, which results increasing qgroup metadata pertrans reserved space, and causing a lot of early EDQUOT. Fix it by applying the correct diff on top of current branch. 
Fixes: 43b18595d660 ("btrfs: qgroup: Use separate meta reservation type for delalloc") Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 0167a9c97c9c..f660ba1e5e58 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c @@ -1748,7 +1748,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file, unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, &cached_state); btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, - (ret != 0)); + true); if (ret) { btrfs_drop_pages(pages, num_pages); break; From 4e5c01a7c746130033eb6e7153b346b8ca61e7a5 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Mon, 16 Apr 2018 16:39:38 -0300 Subject: [PATCH 096/182] perf trace: Support MAP_FIXED_NOREPLACE Introduced in a4ff8e8620d3 ("mm: introduce MAP_FIXED_NOREPLACE"), and now that we have that define in the just synchronized tools/arch/*/include/uapi/asm/mman.h files, add support for it. This should really transition to autogeneration of string tables as done for various other things: $ ls /tmp/build/perf/trace/beauty/generated/*.c arch_errno_name_array.c kcmp_type_array.c madvise_behavior_array.c pkey_alloc_access_rights_array.c prctl_option_array.c $ head /tmp/build/perf/trace/beauty/generated/madvise_behavior_array.c static const char *madvise_advices[] = { [0] = "NORMAL", [1] = "RANDOM", [2] = "SEQUENTIAL", [3] = "WILLNEED", [4] = "DONTNEED", [8] = "FREE", [9] = "REMOVE", [10] = "DONTFORK", [11] = "DOFORK", $ Till then, add support for this the old way. Also it has to be ifdef'ed, because arches like mips still don't define it. The proper solution will be to have per-arch tables for these values to support cross-analysis. Cc: Adrian Hunter Cc: David Ahern Cc: Jiri Olsa Cc: Linus Torvalds Cc: Michal Hocko Cc: Namhyung Kim Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-td9t5vhjltqnlzaurkkgq8cn@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/mmap.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/perf/trace/beauty/mmap.c b/tools/perf/trace/beauty/mmap.c index 417e3ecfe9d7..9f68077b241b 100644 --- a/tools/perf/trace/beauty/mmap.c +++ b/tools/perf/trace/beauty/mmap.c @@ -54,6 +54,9 @@ static size_t syscall_arg__scnprintf_mmap_flags(char *bf, size_t size, P_MMAP_FLAG(EXECUTABLE); P_MMAP_FLAG(FILE); P_MMAP_FLAG(FIXED); +#ifdef MAP_FIXED_NOREPLACE + P_MMAP_FLAG(FIXED_NOREPLACE); +#endif P_MMAP_FLAG(GROWSDOWN); P_MMAP_FLAG(HUGETLB); P_MMAP_FLAG(LOCKED); From a7e9eab3dbd35268c16244557a4155a2d9a641c3 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 6 Apr 2018 13:38:09 -0700 Subject: [PATCH 097/182] perf mem: Allow all record/report options For perf mem report / perf mem record, pass all unknown options through to the underlying report/record commands. This makes things like perf mem record -a sleep 1 work. Matches how c2c and other tools work.
Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lkml.kernel.org/r/20180406203812.3087-2-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/Documentation/perf-mem.txt | 3 +++ tools/perf/builtin-mem.c | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tools/perf/Documentation/perf-mem.txt b/tools/perf/Documentation/perf-mem.txt index b0211410969b..8806ed5f3802 100644 --- a/tools/perf/Documentation/perf-mem.txt +++ b/tools/perf/Documentation/perf-mem.txt @@ -67,6 +67,9 @@ OPTIONS --phys-data:: Record/Report sample physical addresses +In addition, for report all perf report options are valid, and for record +all perf record options. + SEE ALSO -------- linkperf:perf-record[1], linkperf:perf-report[1] diff --git a/tools/perf/builtin-mem.c b/tools/perf/builtin-mem.c index 506564651cda..57393e94d156 100644 --- a/tools/perf/builtin-mem.c +++ b/tools/perf/builtin-mem.c @@ -83,7 +83,7 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem) }; argc = parse_options(argc, argv, options, record_mem_usage, - PARSE_OPT_STOP_AT_NON_OPTION); + PARSE_OPT_KEEP_UNKNOWN); rec_argc = argc + 9; /* max number of arguments */ rec_argv = calloc(rec_argc + 1, sizeof(char *)); @@ -436,7 +436,7 @@ int cmd_mem(int argc, const char **argv) } argc = parse_options_subcommand(argc, argv, mem_options, mem_subcommands, - mem_usage, PARSE_OPT_STOP_AT_NON_OPTION); + mem_usage, PARSE_OPT_KEEP_UNKNOWN); if (!argc || !(strncmp(argv[0], "rec", 3) || mem.operation)) usage_with_options(mem_usage, mem_options); From 6a02f06edea5a5910c787fd6c49b0552e8080e5d Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 6 Apr 2018 13:38:10 -0700 Subject: [PATCH 098/182] perf hists browser: Clarify top/report browser help Clarify in the browser help that ESC in tui mode may go back to the previous screen instead of just exiting (was not clear to me) Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lkml.kernel.org/r/20180406203812.3087-3-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/ui/browsers/hists.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index 0eec06c105c6..e5f247247daa 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -2714,7 +2714,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, "h/?/F1 Show this window\n" \ "UP/DOWN/PGUP\n" \ "PGDN/SPACE Navigate\n" \ - "q/ESC/CTRL+C Exit browser\n\n" \ + "q/ESC/CTRL+C Exit browser or go back to previous screen\n\n" \ "For multiple event sessions:\n\n" \ "TAB/UNTAB Switch events\n\n" \ "For symbolic views (--sort has sym):\n\n" \ From ec3948451e0ba317e66873b48d6cc51d701d4eb0 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 6 Apr 2018 13:38:11 -0700 Subject: [PATCH 099/182] perf record: Remove misleading error suggestion When perf record encounters an error setting up an event it suggests to enable CONFIG_PERF_EVENTS. This is misleading because: - Usually it is enabled (it is really hard to disable on x86) - The problem is usually somewhere else, e.g. the CPU is not supported or an invalid configuration has been used. Remove the misleading suggestion. 
Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lkml.kernel.org/r/20180406203812.3087-4-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 1ac8d9236efd..66b62570c855 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2894,8 +2894,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, return scnprintf(msg, size, "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n" - "/bin/dmesg may provide additional information.\n" - "No CONFIG_PERF_EVENTS=y kernel support configured?", + "/bin/dmesg | grep -i perf may provide additional information.\n", err, str_error_r(err, sbuf, sizeof(sbuf)), perf_evsel__name(evsel)); } From ccbb6afe0890b09cc828373a9a5fffab40ec85df Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Fri, 6 Apr 2018 13:38:12 -0700 Subject: [PATCH 100/182] perf record: Remove suggestion to enable APIC 'perf record' suggests to enable the APIC on errors. APIC is practically always used today and the problem is usually somewhere else. Just remove the outdated suggestion. Signed-off-by: Andi Kleen Acked-by: Jiri Olsa Link: http://lkml.kernel.org/r/20180406203812.3087-5-andi@firstfloor.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/util/evsel.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 66b62570c855..3e87486c28fe 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2870,8 +2870,7 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target, #if defined(__i386__) || defined(__x86_64__) if (evsel->attr.type == PERF_TYPE_HARDWARE) return scnprintf(msg, size, "%s", - "No hardware sampling interrupt available.\n" - "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it."); + "No hardware sampling interrupt available.\n"); #endif break; case EBUSY: From 66f5a0779af2a4f28b1832f6c20bc9d3b1ab1886 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Tue, 17 Apr 2018 09:43:44 +0530 Subject: [PATCH 101/182] perf tools: Add '\n' at the end of parse-options error messages A few error messages do not have '\n' at the end, and thus the next prompt gets printed on the same line. Ex, linux~$ perf buildid-cache -verbose --add ./a.out Error: did you mean `--verbose` (with two dashes ?)linux~$ Fix it. Signed-off-by: Ravi Bangoria Reviewed-by: Masami Hiramatsu Cc: Alexander Shishkin Cc: Jiri Olsa Cc: Kate Stewart Cc: Krister Johansen Cc: Namhyung Kim Cc: Peter Zijlstra Cc: Philippe Ombredanne Cc: Sihyeon Jang Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20180417041346.5617-2-ravi.bangoria@linux.vnet.ibm.com Signed-off-by: Arnaldo Carvalho de Melo --- tools/lib/subcmd/parse-options.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c index f6a1babcbac4..cb7154eccbdc 100644 --- a/tools/lib/subcmd/parse-options.c +++ b/tools/lib/subcmd/parse-options.c @@ -433,7 +433,7 @@ static int parse_long_opt(struct parse_opt_ctx_t *p, const char *arg, if (ambiguous_option) { fprintf(stderr, - " Error: Ambiguous option: %s (could be --%s%s or --%s%s)", + " Error: Ambiguous option: %s (could be --%s%s or --%s%s)\n", arg, (ambiguous_flags & OPT_UNSET) ?
"no-" : "", ambiguous_option->long_name, @@ -458,7 +458,7 @@ static void check_typos(const char *arg, const struct option *options) return; if (strstarts(arg, "no-")) { - fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); + fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg); exit(129); } @@ -466,7 +466,7 @@ static void check_typos(const char *arg, const struct option *options) if (!options->long_name) continue; if (strstarts(options->long_name, arg)) { - fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)", arg); + fprintf(stderr, " Error: did you mean `--%s` (with two dashes ?)\n", arg); exit(129); } } From 518c6021e9b82696819b380cd173e76cac55a01e Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 17 Apr 2018 14:47:36 -0300 Subject: [PATCH 102/182] perf tests mmap: Show which tracepoint is failing In the 'perf test "mmap interface"' we try creating events for several tracepoints, but when perf_evsel__new() fails we're not showing which one is failing, fix that to help diagnosing problems, such as the syscall tracepoints ones being found and fixes in this merge window. Now the failing tests shows: # perf test -v "mmap interface" 4: Read samples using the mmap interface : --- start --- test child forked, pid 14311 perf_evsel__new(sys_enter_getppid) test child finished with -1 ---- end ---- Read samples using the mmap interface: FAILED! # Now to check why the syscalls:sys_enter_getppid is failing... # ls -la /sys/kernel/debug/tracing/events/syscalls/sys_enter_getppid ls: cannot access '/sys/kernel/debug/tracing/events/syscalls/sys_enter_getppid': No such file or directory # Cc: Adrian Hunter Cc: David Ahern Cc: Dominik Brodowski Cc: Jiri Olsa Cc: Namhyung Kim Cc: Steven Rostedt Cc: Wang Nan Link: https://lkml.kernel.org/n/tip-44xk0ycdzrfzx1o9rklf5itl@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/mmap-basic.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/mmap-basic.c b/tools/perf/tests/mmap-basic.c index bb8e6bcb0d96..0919b0793e5b 100644 --- a/tools/perf/tests/mmap-basic.c +++ b/tools/perf/tests/mmap-basic.c @@ -75,7 +75,7 @@ int test__basic_mmap(struct test *test __maybe_unused, int subtest __maybe_unuse snprintf(name, sizeof(name), "sys_enter_%s", syscall_names[i]); evsels[i] = perf_evsel__newtp("syscalls", name); if (IS_ERR(evsels[i])) { - pr_debug("perf_evsel__new\n"); + pr_debug("perf_evsel__new(%s)\n", name); goto out_delete_evlist; } From eccb1b936363c62544bccb5bbb75afec9536f7e3 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 18 Apr 2018 12:59:03 -0300 Subject: [PATCH 103/182] perf test BPF: Fixup BPF test using epoll_pwait syscall function probe Since e145242ea0df ("syscalls/core, syscalls/x86: Clean up syscall stub naming convention") changed the main syscall function for 'epoll_pwait' to something other than the expected 'SyS_epoll_pwait the' 'perf test BPF' entries started failing, fix it by using something called from the main syscall function instead, 'epoll_wait', which should keep this test working in older kernels too. Before: # perf test BPF 40: BPF filter : 40.1: Basic BPF filtering : FAILED! 40.2: BPF pinning : Skip 40.3: BPF prologue generation : Skip 40.4: BPF relocation checker : Skip If we use -v for that test we see the problem: Probe point 'SyS_epoll_pwait' not found. 
After: # perf test BPF 40: BPF filter : 40.1: Basic BPF filtering : Ok 40.2: BPF pinning : Ok 40.3: BPF prologue generation : Ok 40.4: BPF relocation checker : Ok # Cc: Adrian Hunter Cc: David Ahern Cc: Dominik Brodowski Cc: Jiri Olsa Cc: Namhyung Kim Cc: Steven Rostedt Cc: Wang Nan Link: https://lkml.kernel.org/r/tip-y24nmn70cs2am8jh4i344dng@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/tests/bpf-script-example.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/tests/bpf-script-example.c b/tools/perf/tests/bpf-script-example.c index e4123c1b0e88..1ca5106df5f1 100644 --- a/tools/perf/tests/bpf-script-example.c +++ b/tools/perf/tests/bpf-script-example.c @@ -31,7 +31,7 @@ struct bpf_map_def SEC("maps") flip_table = { .max_entries = 1, }; -SEC("func=SyS_epoll_pwait") +SEC("func=do_epoll_wait") int bpf_func__SyS_epoll_pwait(void *ctx) { int ind =0; From 0a0a7e00a250c117f0c7ad8e1184abd98e7c098a Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Wed, 18 Apr 2018 10:49:12 -0700 Subject: [PATCH 104/182] tools/bpf: fix test_sock and test_sock_addr.sh failure The bpf selftests test_sock and test_sock_addr.sh failed in my test machine. The failure looks like: $ ./test_sock Test case: bind4 load with invalid access: src_ip6 .. [PASS] Test case: bind4 load with invalid access: mark .. [PASS] Test case: bind6 load with invalid access: src_ip4 .. [PASS] Test case: sock_create load with invalid access: src_port .. [PASS] Test case: sock_create load w/o expected_attach_type (compat mode) .. [FAIL] Test case: sock_create load w/ expected_attach_type .. [FAIL] Test case: attach type mismatch bind4 vs bind6 .. [FAIL] ... Summary: 4 PASSED, 12 FAILED $ ./test_sock_addr.sh Wait for testing IPv4/IPv6 to become available ..... ERROR: Timeout waiting for test IP to become available. In test_sock, bpf program loads failed due to hitting memlock limits. In test_sock_addr.sh, my test machine is a ipv6 only test box and using "ping" without specifying address family for an ipv6 address does not work. This patch fixed the issue by including header bpf_rlimit.h in test_sock.c and test_sock_addr.c, and specifying address family for ping command. 
Cc: Andrey Ignatov Signed-off-by: Yonghong Song Acked-by: Andrey Ignatov Signed-off-by: Daniel Borkmann --- tools/testing/selftests/bpf/test_sock.c | 1 + tools/testing/selftests/bpf/test_sock_addr.c | 1 + tools/testing/selftests/bpf/test_sock_addr.sh | 4 ++-- 3 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/bpf/test_sock.c b/tools/testing/selftests/bpf/test_sock.c index 73bb20cfb9b7..f4d99fabc56d 100644 --- a/tools/testing/selftests/bpf/test_sock.c +++ b/tools/testing/selftests/bpf/test_sock.c @@ -13,6 +13,7 @@ #include #include "cgroup_helpers.h" +#include "bpf_rlimit.h" #ifndef ARRAY_SIZE # define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) diff --git a/tools/testing/selftests/bpf/test_sock_addr.c b/tools/testing/selftests/bpf/test_sock_addr.c index d488f20926e8..2950f80ba7fb 100644 --- a/tools/testing/selftests/bpf/test_sock_addr.c +++ b/tools/testing/selftests/bpf/test_sock_addr.c @@ -15,6 +15,7 @@ #include #include "cgroup_helpers.h" +#include "bpf_rlimit.h" #define CG_PATH "/foo" #define CONNECT4_PROG_PATH "./connect4_prog.o" diff --git a/tools/testing/selftests/bpf/test_sock_addr.sh b/tools/testing/selftests/bpf/test_sock_addr.sh index c6e1dcf992c4..9832a875a828 100755 --- a/tools/testing/selftests/bpf/test_sock_addr.sh +++ b/tools/testing/selftests/bpf/test_sock_addr.sh @@ -4,7 +4,7 @@ set -eu ping_once() { - ping -q -c 1 -W 1 ${1%%/*} >/dev/null 2>&1 + ping -${1} -q -c 1 -W 1 ${2%%/*} >/dev/null 2>&1 } wait_for_ip() @@ -13,7 +13,7 @@ wait_for_ip() echo -n "Wait for testing IPv4/IPv6 to become available " for _i in $(seq ${MAX_PING_TRIES}); do echo -n "." - if ping_once ${TEST_IPv4} && ping_once ${TEST_IPv6}; then + if ping_once 4 ${TEST_IPv4} && ping_once 6 ${TEST_IPv6}; then echo " OK" return fi From 92d32170847bfff2dd08af2c016085779f2fd2a1 Mon Sep 17 00:00:00 2001 From: David Sterba Date: Mon, 16 Apr 2018 21:10:14 +0200 Subject: [PATCH 105/182] btrfs: fix unaligned access in readdir MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The last update to readdir introduced a temporary buffer to store the emitted readdir data, but as there are file names of variable length, there's a lot of unaligned access. 
This was observed on a sparc64 machine: Kernel unaligned access at TPC[102f3080] btrfs_real_readdir+0x51c/0x718 [btrfs] Fixes: 23b5ec74943 ("btrfs: fix readdir deadlock with pagefault") CC: stable@vger.kernel.org # 4.14+ Reported-and-tested-by: René Rebe Reviewed-by: Liu Bo Signed-off-by: David Sterba --- fs/btrfs/inode.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e064c49c9a9a..d241285a0d2a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -31,6 +31,7 @@ #include #include #include +#include #include "ctree.h" #include "disk-io.h" #include "transaction.h" @@ -5905,11 +5906,13 @@ static int btrfs_filldir(void *addr, int entries, struct dir_context *ctx) struct dir_entry *entry = addr; char *name = (char *)(entry + 1); - ctx->pos = entry->offset; - if (!dir_emit(ctx, name, entry->name_len, entry->ino, - entry->type)) + ctx->pos = get_unaligned(&entry->offset); + if (!dir_emit(ctx, name, get_unaligned(&entry->name_len), + get_unaligned(&entry->ino), + get_unaligned(&entry->type))) return 1; - addr += sizeof(struct dir_entry) + entry->name_len; + addr += sizeof(struct dir_entry) + + get_unaligned(&entry->name_len); ctx->pos++; } return 0; @@ -5999,14 +6002,15 @@ static int btrfs_real_readdir(struct file *file, struct dir_context *ctx) } entry = addr; - entry->name_len = name_len; + put_unaligned(name_len, &entry->name_len); name_ptr = (char *)(entry + 1); read_extent_buffer(leaf, name_ptr, (unsigned long)(di + 1), name_len); - entry->type = btrfs_filetype_table[btrfs_dir_type(leaf, di)]; + put_unaligned(btrfs_filetype_table[btrfs_dir_type(leaf, di)], + &entry->type); btrfs_dir_item_key_to_cpu(leaf, di, &location); - entry->ino = location.objectid; - entry->offset = found_key.offset; + put_unaligned(location.objectid, &entry->ino); + put_unaligned(found_key.offset, &entry->offset); entries++; addr += sizeof(struct dir_entry) + name_len; total_len += sizeof(struct dir_entry) + name_len; From ab60ee7bf9a84954f50a66a3d835860e80f99b7f Mon Sep 17 00:00:00 2001 From: Long Li Date: Tue, 17 Apr 2018 12:17:05 -0700 Subject: [PATCH 106/182] cifs: smbd: Check for iov length on sending the last iov When sending the last iov that breaks into smaller buffers to fit the transfer size, it's necessary to check if this is the last iov. If this is the latest iov, stop and proceed to send pages. Signed-off-by: Long Li Cc: stable@vger.kernel.org Signed-off-by: Steve French Reviewed-by: Ronnie Sahlberg --- fs/cifs/smbdirect.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index 5008af546dd1..d611ed0537fd 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -2194,6 +2194,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) goto done; } i++; + if (i == rqst->rq_nvec) + break; } start = i; buflen = 0; From 3ce0d5aa265bcc0a4b281cb0cabf92491276101b Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Thu, 19 Apr 2018 13:29:04 +0800 Subject: [PATCH 107/182] ALSA: hda/realtek - set PINCFG_HEADSET_MIC to parse_flags Otherwise, the pin will be regarded as microphone, and the jack name is "Mic Phantom", it is always on in the pulseaudio even nothing is plugged into the jack. So the UI is confusing to users since the microphone always shows up in the UI even there is no microphone plugged. After adding this flag, the jack name is "Headset Mic Phantom", then the pulseaudio can handle its detection correctly. 
Fixes: f0ba9d699e5c ("ALSA: hda/realtek - Fix Dell headset Mic can't record") Cc: Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index aef1f52db7d9..c3b63b7a4ba4 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6370,6 +6370,8 @@ static const struct hda_fixup alc269_fixups[] = { { 0x19, 0x01a1913c }, /* use as headset mic, without its own jack detect */ { } }, + .chained = true, + .chain_id = ALC269_FIXUP_HEADSET_MIC }, }; From a3dafb2200bf3c13905a088e82ae11f1eb275a83 Mon Sep 17 00:00:00 2001 From: Hui Wang Date: Thu, 19 Apr 2018 13:29:05 +0800 Subject: [PATCH 108/182] ALSA: hda/realtek - adjust the location of one mic There are two front mics on this machine, if we don't adjust the location for one of them, they will have the same mixer name, pulseaudio can't handle this situation. After applying this FIXUP, they will have different mixer name, then pulseaudio can handle them correctly. Cc: Signed-off-by: Hui Wang Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index c3b63b7a4ba4..fc77bf7a1544 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -6575,6 +6575,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), + SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION), SND_PCI_QUIRK(0x17aa, 0x3112, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY), SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI), From 39f2ff0816e5421476c2bc538b68b4bb0708a78e Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 18 Apr 2018 12:23:39 +0200 Subject: [PATCH 109/182] netfilter: nf_tables: NAT chain and extensions require NF_TABLES Move these options inside the scope of the 'if' NF_TABLES and NF_TABLES_IPV6 dependencies. 
This patch fixes: net/ipv6/netfilter/nft_chain_nat_ipv6.o: In function `nft_nat_do_chain': >> net/ipv6/netfilter/nft_chain_nat_ipv6.c:37: undefined reference to `nft_do_chain' net/ipv6/netfilter/nft_chain_nat_ipv6.o: In function `nft_chain_nat_ipv6_exit': >> net/ipv6/netfilter/nft_chain_nat_ipv6.c:94: undefined reference to `nft_unregister_chain_type' net/ipv6/netfilter/nft_chain_nat_ipv6.o: In function `nft_chain_nat_ipv6_init': >> net/ipv6/netfilter/nft_chain_nat_ipv6.c:87: undefined reference to `nft_register_chain_type' that happens with: CONFIG_NF_TABLES=m CONFIG_NFT_CHAIN_NAT_IPV6=y Fixes: 02c7b25e5f54 ("netfilter: nf_tables: build-in filter chain type") Reported-by: kbuild test robot Signed-off-by: Pablo Neira Ayuso --- net/ipv6/netfilter/Kconfig | 55 +++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 27 deletions(-) diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index ccbfa83e4bb0..ce77bcc2490c 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -48,6 +48,34 @@ config NFT_CHAIN_ROUTE_IPV6 fields such as the source, destination, flowlabel, hop-limit and the packet mark. +if NF_NAT_IPV6 + +config NFT_CHAIN_NAT_IPV6 + tristate "IPv6 nf_tables nat chain support" + help + This option enables the "nat" chain for IPv6 in nf_tables. This + chain type is used to perform Network Address Translation (NAT) + packet transformations such as the source, destination address and + source and destination ports. + +config NFT_MASQ_IPV6 + tristate "IPv6 masquerade support for nf_tables" + depends on NFT_MASQ + select NF_NAT_MASQUERADE_IPV6 + help + This is the expression that provides IPv4 masquerading support for + nf_tables. + +config NFT_REDIR_IPV6 + tristate "IPv6 redirect support for nf_tables" + depends on NFT_REDIR + select NF_NAT_REDIRECT + help + This is the expression that provides IPv4 redirect support for + nf_tables. + +endif # NF_NAT_IPV6 + config NFT_REJECT_IPV6 select NF_REJECT_IPV6 default NFT_REJECT @@ -107,39 +135,12 @@ config NF_NAT_IPV6 if NF_NAT_IPV6 -config NFT_CHAIN_NAT_IPV6 - depends on NF_TABLES_IPV6 - tristate "IPv6 nf_tables nat chain support" - help - This option enables the "nat" chain for IPv6 in nf_tables. This - chain type is used to perform Network Address Translation (NAT) - packet transformations such as the source, destination address and - source and destination ports. - config NF_NAT_MASQUERADE_IPV6 tristate "IPv6 masquerade support" help This is the kernel functionality to provide NAT in the masquerade flavour (automatic source address selection) for IPv6. -config NFT_MASQ_IPV6 - tristate "IPv6 masquerade support for nf_tables" - depends on NF_TABLES_IPV6 - depends on NFT_MASQ - select NF_NAT_MASQUERADE_IPV6 - help - This is the expression that provides IPv4 masquerading support for - nf_tables. - -config NFT_REDIR_IPV6 - tristate "IPv6 redirect support for nf_tables" - depends on NF_TABLES_IPV6 - depends on NFT_REDIR - select NF_NAT_REDIRECT - help - This is the expression that provides IPv4 redirect support for - nf_tables. - endif # NF_NAT_IPV6 config IP6_NF_IPTABLES From d71efb599ad42ef1e564c652d8084252bdc85edf Mon Sep 17 00:00:00 2001 From: Taehee Yoo Date: Wed, 18 Apr 2018 23:35:34 +0900 Subject: [PATCH 110/182] netfilter: nf_tables: fix out-of-bounds in nft_chain_commit_update When chain name is changed, nft_chain_commit_update is called. In the nft_chain_commit_update, trans->ctx.chain->name has old chain name and nft_trans_chain_name(trans) has new chain name. 
If new chain name is longer than old chain name, KASAN warns slab-out-of-bounds. [ 175.015012] BUG: KASAN: slab-out-of-bounds in strcpy+0x9e/0xb0 [ 175.022735] Write of size 1 at addr ffff880114e022da by task iptables-compat/1458 [ 175.031353] CPU: 0 PID: 1458 Comm: iptables-compat Not tainted 4.16.0-rc7+ #146 [ 175.031353] Hardware name: To be filled by O.E.M. To be filled by O.E.M./Aptio CRB, BIOS 5.6.5 07/08/2015 [ 175.031353] Call Trace: [ 175.031353] dump_stack+0x68/0xa0 [ 175.031353] print_address_description+0xd0/0x260 [ 175.031353] ? strcpy+0x9e/0xb0 [ 175.031353] kasan_report+0x234/0x350 [ 175.031353] __asan_report_store1_noabort+0x1c/0x20 [ 175.031353] strcpy+0x9e/0xb0 [ 175.031353] nf_tables_commit+0x1ccc/0x2990 [ 175.031353] nfnetlink_rcv+0x141e/0x16c0 [ 175.031353] ? nfnetlink_net_init+0x150/0x150 [ 175.031353] ? lock_acquire+0x370/0x370 [ 175.031353] ? lock_acquire+0x370/0x370 [ 175.031353] netlink_unicast+0x444/0x640 [ 175.031353] ? netlink_attachskb+0x700/0x700 [ 175.031353] ? _copy_from_iter_full+0x180/0x740 [ 175.031353] ? kasan_check_write+0x14/0x20 [ 175.031353] ? _copy_from_user+0x9b/0xd0 [ 175.031353] netlink_sendmsg+0x845/0xc70 [ ... ] Steps to reproduce: iptables-compat -N 1 iptables-compat -E 1 aaaaaaaaa Signed-off-by: Taehee Yoo Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 102ad873acb4..04d4e3772584 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -5745,7 +5745,7 @@ static void nft_chain_commit_update(struct nft_trans *trans) struct nft_base_chain *basechain; if (nft_trans_chain_name(trans)) - strcpy(trans->ctx.chain->name, nft_trans_chain_name(trans)); + swap(trans->ctx.chain->name, nft_trans_chain_name(trans)); if (!nft_is_base_chain(trans->ctx.chain)) return; From c3bca5d450b620dd3d36e14b5e1f43639fd47d6b Mon Sep 17 00:00:00 2001 From: Laura Abbott Date: Tue, 17 Apr 2018 14:57:42 -0700 Subject: [PATCH 111/182] posix-cpu-timers: Ensure set_process_cpu_timer is always evaluated Commit a9445e47d897 ("posix-cpu-timers: Make set_process_cpu_timer() more robust") moved the check into the 'if' statement. Unfortunately, it did so on the right side of an && which means that it may get short circuited and never evaluated. This is easily reproduced with: $ cat loop.c void main() { struct rlimit res; /* set the CPU time limit */ getrlimit(RLIMIT_CPU,&res); res.rlim_cur = 2; res.rlim_max = 2; setrlimit(RLIMIT_CPU,&res); while (1); } Which will hang forever instead of being killed. Fix this by pulling the evaluation out of the if statement but checking the return value instead. Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1568337 Fixes: a9445e47d897 ("posix-cpu-timers: Make set_process_cpu_timer() more robust") Signed-off-by: Laura Abbott Signed-off-by: Thomas Gleixner Cc: stable@vger.kernel.org Cc: "Max R . P . 
Grossmann" Cc: John Stultz Link: https://lkml.kernel.org/r/20180417215742.2521-1-labbott@redhat.com --- kernel/time/posix-cpu-timers.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c index 2541bd89f20e..5a6251ac6f7a 100644 --- a/kernel/time/posix-cpu-timers.c +++ b/kernel/time/posix-cpu-timers.c @@ -1205,10 +1205,12 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, u64 *newval, u64 *oldval) { u64 now; + int ret; WARN_ON_ONCE(clock_idx == CPUCLOCK_SCHED); + ret = cpu_timer_sample_group(clock_idx, tsk, &now); - if (oldval && cpu_timer_sample_group(clock_idx, tsk, &now) != -EINVAL) { + if (oldval && ret != -EINVAL) { /* * We are setting itimer. The *oldval is absolute and we update * it to be relative, *newval argument is relative and we update From 7407188489c62a7b5694bc75a6db2b82af94c9a5 Mon Sep 17 00:00:00 2001 From: Anson Huang Date: Thu, 19 Apr 2018 14:04:43 +0800 Subject: [PATCH 112/182] clocksource/imx-tpm: Correct -ETIME return condition check The additional brakects added to tpm_set_next_event's return value computation causes (int) forced type conversion NOT taking effect, and the incorrect value return will cause various system timer issue, like RCU stall etc.. Remove the additional brackets to make sure tpm_set_next_event always returns correct value. Fixes: 059ab7b82eec ("clocksource/drivers/imx-tpm: Add imx tpm timer support") Signed-off-by: Anson Huang Signed-off-by: Thomas Gleixner Acked-by: Dong Aisheng Cc: stable@vger.kernel.org Cc: daniel.lezcano@linaro.org Cc: Linux-imx@nxp.com Link: https://lkml.kernel.org/r/1524117883-2484-1-git-send-email-Anson.Huang@nxp.com --- drivers/clocksource/timer-imx-tpm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/clocksource/timer-imx-tpm.c b/drivers/clocksource/timer-imx-tpm.c index 05d97a6871d8..6c8318470b48 100644 --- a/drivers/clocksource/timer-imx-tpm.c +++ b/drivers/clocksource/timer-imx-tpm.c @@ -114,7 +114,7 @@ static int tpm_set_next_event(unsigned long delta, * of writing CNT registers which may cause the min_delta event got * missed, so we need add a ETIME check here in case it happened. */ - return (int)((next - now) <= 0) ? -ETIME : 0; + return (int)(next - now) <= 0 ? -ETIME : 0; } static int tpm_set_state_oneshot(struct clock_event_device *evt) From 5a786232eb69a1f870ddc0cfd69d5bdef241a2ea Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 19 Apr 2018 16:17:14 +0200 Subject: [PATCH 113/182] netfilter: xt_connmark: do not cast xt_connmark_tginfo1 to xt_connmark_tginfo2 These structures have different layout, fill xt_connmark_tginfo2 with old fields in xt_connmark_tginfo1. Based on patch from Jack Ma. 
Fixes: 472a73e00757 ("netfilter: xt_conntrack: Support bit-shifting for CONNMARK & MARK targets.") Signed-off-by: Pablo Neira Ayuso --- net/netfilter/xt_connmark.c | 38 +++++++++++++++++++++---------------- 1 file changed, 22 insertions(+), 16 deletions(-) diff --git a/net/netfilter/xt_connmark.c b/net/netfilter/xt_connmark.c index 4b424e6caf3e..94df000abb92 100644 --- a/net/netfilter/xt_connmark.c +++ b/net/netfilter/xt_connmark.c @@ -36,9 +36,7 @@ MODULE_ALIAS("ipt_connmark"); MODULE_ALIAS("ip6t_connmark"); static unsigned int -connmark_tg_shift(struct sk_buff *skb, - const struct xt_connmark_tginfo1 *info, - u8 shift_bits, u8 shift_dir) +connmark_tg_shift(struct sk_buff *skb, const struct xt_connmark_tginfo2 *info) { enum ip_conntrack_info ctinfo; u_int32_t new_targetmark; @@ -52,10 +50,11 @@ connmark_tg_shift(struct sk_buff *skb, switch (info->mode) { case XT_CONNMARK_SET: newmark = (ct->mark & ~info->ctmask) ^ info->ctmark; - if (shift_dir == D_SHIFT_RIGHT) - newmark >>= shift_bits; + if (info->shift_dir == D_SHIFT_RIGHT) + newmark >>= info->shift_bits; else - newmark <<= shift_bits; + newmark <<= info->shift_bits; + if (ct->mark != newmark) { ct->mark = newmark; nf_conntrack_event_cache(IPCT_MARK, ct); @@ -63,10 +62,11 @@ connmark_tg_shift(struct sk_buff *skb, break; case XT_CONNMARK_SAVE: new_targetmark = (skb->mark & info->nfmask); - if (shift_dir == D_SHIFT_RIGHT) - new_targetmark >>= shift_bits; + if (info->shift_dir == D_SHIFT_RIGHT) + new_targetmark >>= info->shift_bits; else - new_targetmark <<= shift_bits; + new_targetmark <<= info->shift_bits; + newmark = (ct->mark & ~info->ctmask) ^ new_targetmark; if (ct->mark != newmark) { @@ -76,10 +76,11 @@ connmark_tg_shift(struct sk_buff *skb, break; case XT_CONNMARK_RESTORE: new_targetmark = (ct->mark & info->ctmask); - if (shift_dir == D_SHIFT_RIGHT) - new_targetmark >>= shift_bits; + if (info->shift_dir == D_SHIFT_RIGHT) + new_targetmark >>= info->shift_bits; else - new_targetmark <<= shift_bits; + new_targetmark <<= info->shift_bits; + newmark = (skb->mark & ~info->nfmask) ^ new_targetmark; skb->mark = newmark; @@ -92,8 +93,14 @@ static unsigned int connmark_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_connmark_tginfo1 *info = par->targinfo; - - return connmark_tg_shift(skb, info, 0, 0); + const struct xt_connmark_tginfo2 info2 = { + .ctmark = info->ctmark, + .ctmask = info->ctmask, + .nfmask = info->nfmask, + .mode = info->mode, + }; + + return connmark_tg_shift(skb, &info2); } static unsigned int @@ -101,8 +108,7 @@ connmark_tg_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_connmark_tginfo2 *info = par->targinfo; - return connmark_tg_shift(skb, (const struct xt_connmark_tginfo1 *)info, - info->shift_bits, info->shift_dir); + return connmark_tg_shift(skb, info); } static int connmark_tg_check(const struct xt_tgchk_param *par) From d78fd7255881645fc645e23145d469385227170d Mon Sep 17 00:00:00 2001 From: Harry Wentland Date: Thu, 12 Apr 2018 16:37:09 -0400 Subject: [PATCH 114/182] drm/amd/display: Don't program bypass on linear regamma LUT Even though this is required for degamma since DCE HW only supports a couple predefined LUTs we can just program the LUT directly for regamma. This fixes dark screens which occurs when we program regamma to bypass while degamma is using srgb LUT. 
Signed-off-by: Harry Wentland Reviewed-by: Leo Li Cc: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c index f6cb502c303f..25f064c01038 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -138,13 +138,6 @@ int amdgpu_dm_set_regamma_lut(struct dm_crtc_state *crtc) lut = (struct drm_color_lut *)blob->data; lut_size = blob->length / sizeof(struct drm_color_lut); - if (__is_lut_linear(lut, lut_size)) { - /* Set to bypass if lut is set to linear */ - stream->out_transfer_func->type = TF_TYPE_BYPASS; - stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR; - return 0; - } - gamma = dc_create_gamma(); if (!gamma) return -ENOMEM; From 84f8508f717268c333de8e472f351d6a7a487e51 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Tue, 17 Apr 2018 17:26:26 +0800 Subject: [PATCH 115/182] drm/amd/pp: Fix bug voltage can't be OD separately on VI Make sure to update the MCLK and SCLK flags when setting the VDDC flags due to dependencies. Reviewed-by: Alex Deucher Signed-off-by: Rex Zhu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c index add90675fd2a..26fbeafc3c96 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c @@ -4743,23 +4743,27 @@ static void smu7_check_dpm_table_updated(struct pp_hwmgr *hwmgr) for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK; + } dep_table = table_info->vdd_dep_on_sclk; odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dependency_on_sclk); for (i=0; i < dep_table->count; i++) { if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) { - data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC; - break; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK; + return; } } - if (i == dep_table->count) + if (i == dep_table->count && data->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_VDDC) { data->need_update_smu7_dpm_table &= ~DPMTABLE_OD_UPDATE_VDDC; + data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK; + } } static int smu7_odn_edit_dpm_table(struct pp_hwmgr *hwmgr, From cc9e992dfb5bb48f59f3fbc1268d3f38d2c86ef3 Mon Sep 17 00:00:00 2001 From: Kenneth Feng Date: Tue, 17 Apr 2018 21:49:51 +0800 Subject: [PATCH 116/182] drm/amd/powerplay: header file interface to SMU update update vega12 smu interface. 
Signed-off-by: Kenneth Feng Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h index fb696e3d06cf..2f8a3b983cce 100644 --- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h +++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h @@ -412,8 +412,10 @@ typedef struct { QuadraticInt_t ReservedEquation2; QuadraticInt_t ReservedEquation3; + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; - uint32_t Reserved[15]; + uint32_t Reserved[14]; From 8a9fd8323087e794f1d3cd4850b393ced048bc73 Mon Sep 17 00:00:00 2001 From: Mathieu Poirier Date: Wed, 18 Apr 2018 16:05:18 -0600 Subject: [PATCH 117/182] coresight: Move to SPDX identifier Move CoreSight headers to the SPDX identifier. Signed-off-by: Mathieu Poirier Cc: Alexander Shishkin Cc: Greg Kroah-Hartman Cc: Jiri Olsa Cc: Namhyung Kim Cc: Peter Zijlstra Cc: linux-arm-kernel@lists.infradead.org Link: http://lkml.kernel.org/r/1524089118-27595-1-git-send-email-mathieu.poirier@linaro.org Signed-off-by: Arnaldo Carvalho de Melo --- include/linux/coresight-pmu.h | 13 +------------ tools/include/linux/coresight-pmu.h | 13 +------------ tools/perf/arch/arm/util/auxtrace.c | 13 +------------ tools/perf/arch/arm/util/cs-etm.c | 13 +------------ tools/perf/arch/arm/util/cs-etm.h | 13 +------------ tools/perf/arch/arm/util/pmu.c | 13 +------------ tools/perf/util/cs-etm-decoder/cs-etm-decoder.c | 3 +-- tools/perf/util/cs-etm.c | 3 +-- tools/perf/util/cs-etm.h | 13 +------------ 9 files changed, 9 insertions(+), 88 deletions(-) diff --git a/include/linux/coresight-pmu.h b/include/linux/coresight-pmu.h index edfeaba95429..a1a959ba24ff 100644 --- a/include/linux/coresight-pmu.h +++ b/include/linux/coresight-pmu.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . */ #ifndef _LINUX_CORESIGHT_PMU_H diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h index edfeaba95429..a1a959ba24ff 100644 --- a/tools/include/linux/coresight-pmu.h +++ b/tools/include/linux/coresight-pmu.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. 
- * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . */ #ifndef _LINUX_CORESIGHT_PMU_H diff --git a/tools/perf/arch/arm/util/auxtrace.c b/tools/perf/arch/arm/util/auxtrace.c index fa639e3e52ac..1ce6bdbda561 100644 --- a/tools/perf/arch/arm/util/auxtrace.c +++ b/tools/perf/arch/arm/util/auxtrace.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . */ #include diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c index 5c655ad4621e..2f595cd73da6 100644 --- a/tools/perf/arch/arm/util/cs-etm.c +++ b/tools/perf/arch/arm/util/cs-etm.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . */ #include diff --git a/tools/perf/arch/arm/util/cs-etm.h b/tools/perf/arch/arm/util/cs-etm.h index 5256741be549..1a12e64f5127 100644 --- a/tools/perf/arch/arm/util/cs-etm.h +++ b/tools/perf/arch/arm/util/cs-etm.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . */ #ifndef INCLUDE__PERF_CS_ETM_H__ diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c index ac4dffc807b8..e047571e6080 100644 --- a/tools/perf/arch/arm/util/pmu.c +++ b/tools/perf/arch/arm/util/pmu.c @@ -1,18 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. 
- * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . */ #include diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c index 640af88331b4..c8b98fa22997 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c @@ -1,6 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * SPDX-License-Identifier: GPL-2.0 - * * Copyright(C) 2015-2018 Linaro Limited. * * Author: Tor Jeremiassen diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 1b0d422373be..40020b1ca54f 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -1,6 +1,5 @@ +// SPDX-License-Identifier: GPL-2.0 /* - * SPDX-License-Identifier: GPL-2.0 - * * Copyright(C) 2015-2018 Linaro Limited. * * Author: Tor Jeremiassen diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h index 5864d5dca616..37f8d48179ca 100644 --- a/tools/perf/util/cs-etm.h +++ b/tools/perf/util/cs-etm.h @@ -1,18 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright(C) 2015 Linaro Limited. All rights reserved. * Author: Mathieu Poirier - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License version 2 as published by - * the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but WITHOUT - * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or - * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for - * more details. - * - * You should have received a copy of the GNU General Public License along with - * this program. If not, see . */ #ifndef INCLUDE__UTIL_PERF_CS_ETM_H__ From 8a56ef4f3ffba9ebf4967b61ef600b0a7ba10f11 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 19 Apr 2018 18:16:15 +0200 Subject: [PATCH 118/182] ALSA: rawmidi: Fix missing input substream checks in compat ioctls Some rawmidi compat ioctls lack of the input substream checks (although they do check only for rfile->output). This many eventually lead to an Oops as NULL substream is passed to the rawmidi core functions. Fix it by adding the proper checks before each function call. The bug was spotted by syzkaller. 
Reported-by: syzbot+f7a0348affc3b67bc617@syzkaller.appspotmail.com Cc: Signed-off-by: Takashi Iwai --- sound/core/rawmidi_compat.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/sound/core/rawmidi_compat.c b/sound/core/rawmidi_compat.c index f69764d7cdd7..e30e30ba6e39 100644 --- a/sound/core/rawmidi_compat.c +++ b/sound/core/rawmidi_compat.c @@ -36,8 +36,6 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, struct snd_rawmidi_params params; unsigned int val; - if (rfile->output == NULL) - return -EINVAL; if (get_user(params.stream, &src->stream) || get_user(params.buffer_size, &src->buffer_size) || get_user(params.avail_min, &src->avail_min) || @@ -46,8 +44,12 @@ static int snd_rawmidi_ioctl_params_compat(struct snd_rawmidi_file *rfile, params.no_active_sensing = val; switch (params.stream) { case SNDRV_RAWMIDI_STREAM_OUTPUT: + if (!rfile->output) + return -EINVAL; return snd_rawmidi_output_params(rfile->output, ¶ms); case SNDRV_RAWMIDI_STREAM_INPUT: + if (!rfile->input) + return -EINVAL; return snd_rawmidi_input_params(rfile->input, ¶ms); } return -EINVAL; @@ -67,16 +69,18 @@ static int snd_rawmidi_ioctl_status_compat(struct snd_rawmidi_file *rfile, int err; struct snd_rawmidi_status status; - if (rfile->output == NULL) - return -EINVAL; if (get_user(status.stream, &src->stream)) return -EFAULT; switch (status.stream) { case SNDRV_RAWMIDI_STREAM_OUTPUT: + if (!rfile->output) + return -EINVAL; err = snd_rawmidi_output_status(rfile->output, &status); break; case SNDRV_RAWMIDI_STREAM_INPUT: + if (!rfile->input) + return -EINVAL; err = snd_rawmidi_input_status(rfile->input, &status); break; default: @@ -112,16 +116,18 @@ static int snd_rawmidi_ioctl_status_x32(struct snd_rawmidi_file *rfile, int err; struct snd_rawmidi_status status; - if (rfile->output == NULL) - return -EINVAL; if (get_user(status.stream, &src->stream)) return -EFAULT; switch (status.stream) { case SNDRV_RAWMIDI_STREAM_OUTPUT: + if (!rfile->output) + return -EINVAL; err = snd_rawmidi_output_status(rfile->output, &status); break; case SNDRV_RAWMIDI_STREAM_INPUT: + if (!rfile->input) + return -EINVAL; err = snd_rawmidi_input_status(rfile->input, &status); break; default: From df3f126482dba8e00cdbfc8fc44a05a5a35b1704 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Mon, 16 Apr 2018 11:58:16 -0500 Subject: [PATCH 119/182] libnvdimm, of_pmem: use dev_to_node() instead of of_node_to_nid() Remove the direct dependency on of_node_to_nid() by using dev_to_node() instead. Any DT platform device will have its NUMA node id set when the device is created. With this, commit 291717b6fbdb ("libnvdimm, of_pmem: workaround OF_NUMA=n build error") can be reverted. 
Fixes: 717197608952 ("libnvdimm: Add device-tree based driver") Cc: Dan Williams Cc: Oliver O'Halloran Cc: linux-nvdimm@lists.01.org Signed-off-by: Rob Herring Signed-off-by: Dan Williams --- drivers/nvdimm/of_pmem.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c index 85013bad35de..0a701837dfc0 100644 --- a/drivers/nvdimm/of_pmem.c +++ b/drivers/nvdimm/of_pmem.c @@ -67,7 +67,7 @@ static int of_pmem_region_probe(struct platform_device *pdev) */ memset(&ndr_desc, 0, sizeof(ndr_desc)); ndr_desc.attr_groups = region_attr_groups; - ndr_desc.numa_node = of_node_to_nid(np); + ndr_desc.numa_node = dev_to_node(&pdev->dev); ndr_desc.res = &pdev->resource[i]; ndr_desc.of_node = np; set_bit(ND_REGION_PAGEMAP, &ndr_desc.flags); From f22acf82746dd3d75e06d7c5801926d3b9c50169 Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 19 Apr 2018 15:07:42 -0700 Subject: [PATCH 120/182] Revert "libnvdimm, of_pmem: workaround OF_NUMA=n build error" With commit df3f126482db ("libnvdimm, of_pmem: use dev_to_node() instead of of_node_to_nid()") it is now possible to allow of_pmem to be built as a module as originally implemented. Signed-off-by: Dan Williams --- drivers/nvdimm/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/nvdimm/Kconfig b/drivers/nvdimm/Kconfig index 85997184e047..9d36473dc2a2 100644 --- a/drivers/nvdimm/Kconfig +++ b/drivers/nvdimm/Kconfig @@ -103,8 +103,7 @@ config NVDIMM_DAX Select Y if unsure config OF_PMEM - # FIXME: make tristate once OF_NUMA dependency removed - bool "Device-tree support for persistent memory regions" + tristate "Device-tree support for persistent memory regions" depends on OF default LIBNVDIMM help From ef8423022324cf79bd1b41d8707c766461e7e555 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Thu, 19 Apr 2018 13:39:43 -0700 Subject: [PATCH 121/182] device-dax: allow MAP_SYNC to succeed MAP_SYNC is a nop for device-dax. Allow MAP_SYNC to succeed on device-dax to eliminate special casing between device-dax and fs-dax as to when the flag can be specified. Device-dax users already implicitly assume that they do not need to call fsync(), and this enables them to explicitly check for this capability. Cc: Fixes: b6fb293f2497 ("mm: Define MAP_SYNC and VM_SYNC flags") Signed-off-by: Dave Jiang Reviewed-by: Dan Williams Signed-off-by: Dan Williams --- drivers/dax/device.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dax/device.c b/drivers/dax/device.c index be8606457f27..aff2c1594220 100644 --- a/drivers/dax/device.c +++ b/drivers/dax/device.c @@ -19,6 +19,7 @@ #include #include #include +#include #include "dax-private.h" #include "dax.h" @@ -540,6 +541,7 @@ static const struct file_operations dax_fops = { .release = dax_release, .get_unmapped_area = dax_get_unmapped_area, .mmap = dax_mmap, + .mmap_supported_flags = MAP_SYNC, }; static void dev_dax_release(struct device *dev) From c5794510d7b5f210f05531ff9e82432cf7244367 Mon Sep 17 00:00:00 2001 From: Dave Jiang Date: Fri, 13 Apr 2018 13:47:40 -0700 Subject: [PATCH 122/182] MAINTAINERS: Add backup maintainers for libnvdimm and DAX Adding additional maintainers to libnvdimm related code and DAX. 
Signed-off-by: Dave Jiang Acked-by: Ross Zwisler Acked-by: Vishal Verma Signed-off-by: Dan Williams --- MAINTAINERS | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 0a1410d5a621..60f89cdc565f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4245,6 +4245,9 @@ F: include/trace/events/fs_dax.h DEVICE DIRECT ACCESS (DAX) M: Dan Williams +M: Dave Jiang +M: Ross Zwisler +M: Vishal Verma L: linux-nvdimm@lists.01.org S: Supported F: drivers/dax/ @@ -8048,6 +8051,9 @@ F: tools/lib/lockdep/ LIBNVDIMM BLK: MMIO-APERTURE DRIVER M: Ross Zwisler +M: Dan Williams +M: Vishal Verma +M: Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported @@ -8056,6 +8062,9 @@ F: drivers/nvdimm/region_devs.c LIBNVDIMM BTT: BLOCK TRANSLATION TABLE M: Vishal Verma +M: Dan Williams +M: Ross Zwisler +M: Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported @@ -8063,6 +8072,9 @@ F: drivers/nvdimm/btt* LIBNVDIMM PMEM: PERSISTENT MEMORY DRIVER M: Ross Zwisler +M: Dan Williams +M: Vishal Verma +M: Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ S: Supported @@ -8078,6 +8090,9 @@ F: Documentation/devicetree/bindings/pmem/pmem-region.txt LIBNVDIMM: NON-VOLATILE MEMORY DEVICE SUBSYSTEM M: Dan Williams +M: Ross Zwisler +M: Vishal Verma +M: Dave Jiang L: linux-nvdimm@lists.01.org Q: https://patchwork.kernel.org/project/linux-nvdimm/list/ T: git git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm.git From 05189820da23fc87ee2a7d87c20257f298af27f4 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Thu, 19 Apr 2018 20:26:00 +0200 Subject: [PATCH 123/182] x86/power/64: Fix page-table setup for temporary text mapping On a system with 4-level page-tables there is no p4d, so the pud in the pgd should be mapped. The old code before commit fb43d6cb91ef already did that. The change from above commit causes an invalid page-table which causes undefined behavior. In one report it caused triple faults. Fix it by changing the p4d back to pud. 
Fixes: fb43d6cb91ef ('x86/mm: Do not auto-massage page protections') Reported-by: Borislav Petkov Signed-off-by: Joerg Roedel Signed-off-by: Thomas Gleixner Tested-by: Michal Kubecek Tested-by: Borislav Petkov Cc: linux-pm@vger.kernel.org Cc: rjw@rjwysocki.net Cc: pavel@ucw.cz Cc: hpa@zytor.com Cc: Dave Hansen Link: https://lkml.kernel.org/r/1524162360-26179-1-git-send-email-joro@8bytes.org --- arch/x86/power/hibernate_64.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c index 48b14b534897..ccf4a49bb065 100644 --- a/arch/x86/power/hibernate_64.c +++ b/arch/x86/power/hibernate_64.c @@ -98,7 +98,7 @@ static int set_up_temporary_text_mapping(pgd_t *pgd) set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); } else { /* No p4d for 4-level paging: point the pgd to the pud page table */ - pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); + pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot)); set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); } From d7717587ac6deae00e0b66c0113a046be2c6fb1c Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Fri, 23 Mar 2018 09:11:29 -0400 Subject: [PATCH 124/182] perf/x86/intel/uncore: Revert "Remove SBOX support for Broadwell server" This reverts commit 3b94a891667c ("perf/x86/intel/uncore: Remove SBOX support for Broadwell server") Revert because there exists a proper workaround for Broadwell-EP servers without SBOX now. Note that BDX-DE does not have a SBOX. Signed-off-by: Stephane Eranian Signed-off-by: Thomas Gleixner Reviewed-by: Kan Liang Acked-by: Peter Zijlstra Cc: ak@linux.intel.com Cc: osk@google.com Cc: mark@voidzero.net Link: https://lkml.kernel.org/r/1521810690-2576-1-git-send-email-kan.liang@linux.intel.com --- arch/x86/events/intel/uncore_snbep.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index c98b943e58b4..5bbbbee11879 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3028,10 +3028,27 @@ static struct intel_uncore_type bdx_uncore_cbox = { .format_group = &hswep_uncore_cbox_format_group, }; +static struct intel_uncore_type bdx_uncore_sbox = { + .name = "sbox", + .num_counters = 4, + .num_boxes = 4, + .perf_ctr_bits = 48, + .event_ctl = HSWEP_S0_MSR_PMON_CTL0, + .perf_ctr = HSWEP_S0_MSR_PMON_CTR0, + .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, + .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, + .msr_offset = HSWEP_SBOX_MSR_OFFSET, + .ops = &hswep_uncore_sbox_msr_ops, + .format_group = &hswep_uncore_sbox_format_group, +}; + +#define BDX_MSR_UNCORE_SBOX 3 + static struct intel_uncore_type *bdx_msr_uncores[] = { &bdx_uncore_ubox, &bdx_uncore_cbox, &hswep_uncore_pcu, + &bdx_uncore_sbox, NULL, }; @@ -3047,6 +3064,10 @@ void bdx_uncore_cpu_init(void) bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; uncore_msr_uncores = bdx_msr_uncores; + /* BDX-DE doesn't have SBOX */ + if (boot_cpu_data.x86_model == 86) + uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; + hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; } From 15a3e845b01ce2342cf187dc123c92c44c3c8170 Mon Sep 17 00:00:00 2001 From: Oskar Senft Date: Fri, 23 Mar 2018 09:11:30 -0400 Subject: [PATCH 125/182] perf/x86/intel/uncore: Fix SBOX support for Broadwell CPUs SBOX on some Broadwell CPUs is broken because it's enabled unconditionally despite the fact that there are no SBOXes available. 
Check the Power Control Unit CAPID4 register to determine the number of available SBOXes on the particular CPU before trying to enable them. If there are none, nullify the SBOX descriptor so it isn't tried to be initialized. Signed-off-by: Oskar Senft Signed-off-by: Thomas Gleixner Tested-by: Mark van Dijk Reviewed-by: Kan Liang Acked-by: Peter Zijlstra Cc: ak@linux.intel.com Cc: peterz@infradead.org Cc: eranian@google.com Link: https://lkml.kernel.org/r/1521810690-2576-2-git-send-email-kan.liang@linux.intel.com --- arch/x86/events/intel/uncore_snbep.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c index 5bbbbee11879..77076a102e34 100644 --- a/arch/x86/events/intel/uncore_snbep.c +++ b/arch/x86/events/intel/uncore_snbep.c @@ -3060,14 +3060,25 @@ static struct event_constraint bdx_uncore_pcu_constraints[] = { void bdx_uncore_cpu_init(void) { + int pkg = topology_phys_to_logical_pkg(0); + if (bdx_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) bdx_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; uncore_msr_uncores = bdx_msr_uncores; /* BDX-DE doesn't have SBOX */ - if (boot_cpu_data.x86_model == 86) + if (boot_cpu_data.x86_model == 86) { uncore_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; + /* Detect systems with no SBOXes */ + } else if (uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]) { + struct pci_dev *pdev; + u32 capid4; + pdev = uncore_extra_pci_dev[pkg].dev[HSWEP_PCI_PCU_3]; + pci_read_config_dword(pdev, 0x94, &capid4); + if (((capid4 >> 6) & 0x3) == 0) + bdx_msr_uncores[BDX_MSR_UNCORE_SBOX] = NULL; + } hswep_uncore_pcu.constraints = bdx_uncore_pcu_constraints; } @@ -3285,6 +3296,11 @@ static const struct pci_device_id bdx_uncore_pci_ids[] = { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6f46), .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, 2), }, + { /* PCU.3 (for Capability registers) */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x6fc0), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + HSWEP_PCI_PCU_3), + }, { /* end: all zeroes */ } }; From 5e388e95815408c27f3612190d089afc0774b870 Mon Sep 17 00:00:00 2001 From: Nikolay Borisov Date: Wed, 18 Apr 2018 09:41:54 +0300 Subject: [PATCH 126/182] btrfs: Fix race condition between delayed refs and blockgroup removal When the delayed refs for a head are all run, eventually cleanup_ref_head is called which (in case of deletion) obtains a reference for the relevant btrfs_space_info struct by querying the bg for the range. This is problematic because when the last extent of a bg is deleted a race window emerges between removal of that bg and the subsequent invocation of cleanup_ref_head. This can result in cache being null and either a null pointer dereference or assertion failure. 
task: ffff8d04d31ed080 task.stack: ffff9e5dc10cc000 RIP: 0010:assfail.constprop.78+0x18/0x1a [btrfs] RSP: 0018:ffff9e5dc10cfbe8 EFLAGS: 00010292 RAX: 0000000000000044 RBX: 0000000000000000 RCX: 0000000000000000 RDX: ffff8d04ffc1f868 RSI: ffff8d04ffc178c8 RDI: ffff8d04ffc178c8 RBP: ffff8d04d29e5ea0 R08: 00000000000001f0 R09: 0000000000000001 R10: ffff9e5dc0507d58 R11: 0000000000000001 R12: ffff8d04d29e5ea0 R13: ffff8d04d29e5f08 R14: ffff8d04efe29b40 R15: ffff8d04efe203e0 FS: 00007fbf58ead500(0000) GS:ffff8d04ffc00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007fe6c6975648 CR3: 0000000013b2a000 CR4: 00000000000006f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: __btrfs_run_delayed_refs+0x10e7/0x12c0 [btrfs] btrfs_run_delayed_refs+0x68/0x250 [btrfs] btrfs_should_end_transaction+0x42/0x60 [btrfs] btrfs_truncate_inode_items+0xaac/0xfc0 [btrfs] btrfs_evict_inode+0x4c6/0x5c0 [btrfs] evict+0xc6/0x190 do_unlinkat+0x19c/0x300 do_syscall_64+0x74/0x140 entry_SYSCALL_64_after_hwframe+0x3d/0xa2 RIP: 0033:0x7fbf589c57a7 To fix this, introduce a new flag "is_system" to head_ref structs, which is populated at insertion time. This allows to decouple the querying for the spaceinfo from querying the possibly deleted bg. Fixes: d7eae3403f46 ("Btrfs: rework delayed ref total_bytes_pinned accounting") CC: stable@vger.kernel.org # 4.14+ Suggested-by: Omar Sandoval Signed-off-by: Nikolay Borisov Reviewed-by: Omar Sandoval Signed-off-by: David Sterba --- fs/btrfs/delayed-ref.c | 19 ++++++++++++++----- fs/btrfs/delayed-ref.h | 1 + fs/btrfs/extent-tree.c | 16 +++++++++++----- 3 files changed, 26 insertions(+), 10 deletions(-) diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index 9e98295de7ce..e1b0651686f7 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c @@ -540,8 +540,10 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_head *head_ref, struct btrfs_qgroup_extent_record *qrecord, u64 bytenr, u64 num_bytes, u64 ref_root, u64 reserved, - int action, int is_data, int *qrecord_inserted_ret, + int action, int is_data, int is_system, + int *qrecord_inserted_ret, int *old_ref_mod, int *new_ref_mod) + { struct btrfs_delayed_ref_head *existing; struct btrfs_delayed_ref_root *delayed_refs; @@ -585,6 +587,7 @@ add_delayed_ref_head(struct btrfs_fs_info *fs_info, head_ref->ref_mod = count_mod; head_ref->must_insert_reserved = must_insert_reserved; head_ref->is_data = is_data; + head_ref->is_system = is_system; head_ref->ref_tree = RB_ROOT; INIT_LIST_HEAD(&head_ref->ref_add_list); RB_CLEAR_NODE(&head_ref->href_node); @@ -772,6 +775,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, struct btrfs_delayed_ref_root *delayed_refs; struct btrfs_qgroup_extent_record *record = NULL; int qrecord_inserted; + int is_system = (ref_root == BTRFS_CHUNK_TREE_OBJECTID); BUG_ON(extent_op && extent_op->is_data); ref = kmem_cache_alloc(btrfs_delayed_tree_ref_cachep, GFP_NOFS); @@ -800,8 +804,8 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info, */ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, bytenr, num_bytes, 0, 0, action, 0, - &qrecord_inserted, old_ref_mod, - new_ref_mod); + is_system, &qrecord_inserted, + old_ref_mod, new_ref_mod); add_delayed_tree_ref(fs_info, trans, head_ref, &ref->node, bytenr, num_bytes, parent, ref_root, level, action); @@ -868,7 +872,7 @@ int btrfs_add_delayed_data_ref(struct 
btrfs_fs_info *fs_info, */ head_ref = add_delayed_ref_head(fs_info, trans, head_ref, record, bytenr, num_bytes, ref_root, reserved, - action, 1, &qrecord_inserted, + action, 1, 0, &qrecord_inserted, old_ref_mod, new_ref_mod); add_delayed_data_ref(fs_info, trans, head_ref, &ref->node, bytenr, @@ -898,9 +902,14 @@ int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info, delayed_refs = &trans->transaction->delayed_refs; spin_lock(&delayed_refs->lock); + /* + * extent_ops just modify the flags of an extent and they don't result + * in ref count changes, hence it's safe to pass false/0 for is_system + * argument + */ add_delayed_ref_head(fs_info, trans, head_ref, NULL, bytenr, num_bytes, 0, 0, BTRFS_UPDATE_DELAYED_HEAD, - extent_op->is_data, NULL, NULL, NULL); + extent_op->is_data, 0, NULL, NULL, NULL); spin_unlock(&delayed_refs->lock); return 0; diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 741869dbc316..7f00db50bd24 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h @@ -127,6 +127,7 @@ struct btrfs_delayed_ref_head { */ unsigned int must_insert_reserved:1; unsigned int is_data:1; + unsigned int is_system:1; unsigned int processing:1; }; diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 055494ddcace..f99102063366 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -2601,13 +2601,19 @@ static int cleanup_ref_head(struct btrfs_trans_handle *trans, trace_run_delayed_ref_head(fs_info, head, 0); if (head->total_ref_mod < 0) { - struct btrfs_block_group_cache *cache; + struct btrfs_space_info *space_info; + u64 flags; - cache = btrfs_lookup_block_group(fs_info, head->bytenr); - ASSERT(cache); - percpu_counter_add(&cache->space_info->total_bytes_pinned, + if (head->is_data) + flags = BTRFS_BLOCK_GROUP_DATA; + else if (head->is_system) + flags = BTRFS_BLOCK_GROUP_SYSTEM; + else + flags = BTRFS_BLOCK_GROUP_METADATA; + space_info = __find_space_info(fs_info, flags); + ASSERT(space_info); + percpu_counter_add(&space_info->total_bytes_pinned, -head->num_bytes); - btrfs_put_block_group(cache); if (head->is_data) { spin_lock(&delayed_refs->lock); From c0872323746e11fc79344e3738b283a8cda86654 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Wed, 11 Apr 2018 17:08:12 +0800 Subject: [PATCH 127/182] btrfs: print-tree: debugging output enhancement This patch enhances the following things: - tree block header * add generation and owner output for node and leaf - node pointer generation output - allow btrfs_print_tree() to not follow nodes * just like btrfs-progs Please note that, although function btrfs_print_tree() is not called by anyone right now, it's still a pretty useful function to debug kernel. So that function is still kept for later use. 
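For illustration, the new boolean would be used from a debugging call site roughly like this; the wrapper below is made up, only btrfs_print_tree() and btrfs_print_leaf() come from this patch:

/* Hypothetical debugging helper, not part of the patch. */
static void dump_root(struct btrfs_root *root, bool follow)
{
        struct extent_buffer *eb = root->node;

        if (btrfs_header_level(eb) == 0)
                btrfs_print_leaf(eb);
        else
                btrfs_print_tree(eb, follow);   /* follow=false: this node only */
}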
Signed-off-by: Qu Wenruo Reviewed-by: Lu Fengqi Signed-off-by: David Sterba --- fs/btrfs/print-tree.c | 25 +++++++++++++++---------- fs/btrfs/print-tree.h | 2 +- 2 files changed, 16 insertions(+), 11 deletions(-) diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c index 124276bba8cf..21a831d3d087 100644 --- a/fs/btrfs/print-tree.c +++ b/fs/btrfs/print-tree.c @@ -189,9 +189,10 @@ void btrfs_print_leaf(struct extent_buffer *l) fs_info = l->fs_info; nr = btrfs_header_nritems(l); - btrfs_info(fs_info, "leaf %llu total ptrs %d free space %d", - btrfs_header_bytenr(l), nr, - btrfs_leaf_free_space(fs_info, l)); + btrfs_info(fs_info, + "leaf %llu gen %llu total ptrs %d free space %d owner %llu", + btrfs_header_bytenr(l), btrfs_header_generation(l), nr, + btrfs_leaf_free_space(fs_info, l), btrfs_header_owner(l)); for (i = 0 ; i < nr ; i++) { item = btrfs_item_nr(i); btrfs_item_key_to_cpu(l, &key, i); @@ -325,7 +326,7 @@ void btrfs_print_leaf(struct extent_buffer *l) } } -void btrfs_print_tree(struct extent_buffer *c) +void btrfs_print_tree(struct extent_buffer *c, bool follow) { struct btrfs_fs_info *fs_info; int i; u32 nr; @@ -342,15 +343,19 @@ void btrfs_print_tree(struct extent_buffer *c) return; } btrfs_info(fs_info, - "node %llu level %d total ptrs %d free spc %u", - btrfs_header_bytenr(c), level, nr, - (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr); + "node %llu level %d gen %llu total ptrs %d free spc %u owner %llu", + btrfs_header_bytenr(c), level, btrfs_header_generation(c), + nr, (u32)BTRFS_NODEPTRS_PER_BLOCK(fs_info) - nr, + btrfs_header_owner(c)); for (i = 0; i < nr; i++) { btrfs_node_key_to_cpu(c, &key, i); - pr_info("\tkey %d (%llu %u %llu) block %llu\n", + pr_info("\tkey %d (%llu %u %llu) block %llu gen %llu\n", i, key.objectid, key.type, key.offset, - btrfs_node_blockptr(c, i)); + btrfs_node_blockptr(c, i), + btrfs_node_ptr_generation(c, i)); } + if (!follow) + return; for (i = 0; i < nr; i++) { struct btrfs_key first_key; struct extent_buffer *next; @@ -372,7 +377,7 @@ void btrfs_print_tree(struct extent_buffer *c) if (btrfs_header_level(next) != level - 1) BUG(); - btrfs_print_tree(next); + btrfs_print_tree(next, follow); free_extent_buffer(next); } } diff --git a/fs/btrfs/print-tree.h b/fs/btrfs/print-tree.h index 4a98481688f4..e6bb38fd75ad 100644 --- a/fs/btrfs/print-tree.h +++ b/fs/btrfs/print-tree.h @@ -7,6 +7,6 @@ #define BTRFS_PRINT_TREE_H void btrfs_print_leaf(struct extent_buffer *l); -void btrfs_print_tree(struct extent_buffer *c); +void btrfs_print_tree(struct extent_buffer *c, bool follow); #endif From ff30b89e0ab71115cbad6ae10a58bd83fe18b41f Mon Sep 17 00:00:00 2001 From: Long Li Date: Tue, 17 Apr 2018 12:17:10 -0700 Subject: [PATCH 128/182] cifs: smbd: Dump SMB packet when configured When sending through SMB Direct, also dump the packet in SMB send path. Also fixed a typo in debug message. 
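The added dump loop simply walks the send request's iovec array; the same pattern in a stand-alone program, with a made-up hexdump() standing in for cifs' dump_smb():

#include <stdio.h>
#include <sys/uio.h>

/* Made-up stand-in for cifs' dump_smb(): hex-dump one buffer. */
static void hexdump(const void *buf, size_t len)
{
        const unsigned char *p = buf;

        for (size_t i = 0; i < len; i++)
                printf("%02x%c", p[i], (i % 16 == 15 || i + 1 == len) ? '\n' : ' ');
}

int main(void)
{
        char hdr[] = "smb2 header bytes", body[] = "payload bytes";
        struct iovec iov[] = {
                { .iov_base = hdr,  .iov_len = sizeof(hdr) - 1 },
                { .iov_base = body, .iov_len = sizeof(body) - 1 },
        };

        for (int i = 0; i < 2; i++)     /* same shape as the rq_nvec loop */
                hexdump(iov[i].iov_base, iov[i].iov_len);
        return 0;
}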
Signed-off-by: Long Li Cc: stable@vger.kernel.org Signed-off-by: Steve French Reviewed-by: Ronnie Sahlberg --- fs/cifs/smbdirect.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c index d611ed0537fd..87817ddcc096 100644 --- a/fs/cifs/smbdirect.c +++ b/fs/cifs/smbdirect.c @@ -1028,7 +1028,7 @@ static int smbd_post_send(struct smbd_connection *info, for (i = 0; i < request->num_sge; i++) { log_rdma_send(INFO, "rdma_request sge[%d] addr=%llu length=%u\n", - i, request->sge[0].addr, request->sge[0].length); + i, request->sge[i].addr, request->sge[i].length); ib_dma_sync_single_for_device( info->id->device, request->sge[i].addr, @@ -2139,6 +2139,10 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) goto done; } + cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen); + for (i = 0; i < rqst->rq_nvec-1; i++) + dump_smb(iov[i].iov_base, iov[i].iov_len); + remaining_data_length = buflen; log_write(INFO, "rqst->rq_nvec=%d rqst->rq_npages=%d rq_pagesz=%d " From 1d0cffa674cfa7d185a302c8c6850fc50b893bed Mon Sep 17 00:00:00 2001 From: Steve French Date: Fri, 20 Apr 2018 12:19:07 -0500 Subject: [PATCH 129/182] cifs: do not allow creating sockets except with SMB1 posix exensions RHBZ: 1453123 Since at least the 3.10 kernel and likely a lot earlier we have not been able to create unix domain sockets in a cifs share when mounted using the SFU mount option (except when mounted with the cifs unix extensions to Samba e.g.) Trying to create a socket, for example using the af_unix command from xfstests will cause : BUG: unable to handle kernel NULL pointer dereference at 00000000 00000040 Since no one uses or depends on being able to create unix domains sockets on a cifs share the easiest fix to stop this vulnerability is to simply not allow creation of any other special files than char or block devices when sfu is used. Added update to Ronnie's patch to handle a tcon link leak, and to address a buf leak noticed by Gustavo and Colin. Acked-by: Gustavo A. R. 
Silva CC: Colin Ian King Reviewed-by: Pavel Shilovsky Reported-by: Eryu Guan Signed-off-by: Ronnie Sahlberg Signed-off-by: Steve French Cc: stable@vger.kernel.org --- fs/cifs/dir.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 81ba6e0d88d8..925844343038 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c @@ -684,6 +684,9 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, goto mknod_out; } + if (!S_ISCHR(mode) && !S_ISBLK(mode)) + goto mknod_out; + if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) goto mknod_out; @@ -692,10 +695,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, buf = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (buf == NULL) { - kfree(full_path); rc = -ENOMEM; - free_xid(xid); - return rc; + goto mknod_out; } if (backup_cred(cifs_sb)) @@ -742,7 +743,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode, pdev->minor = cpu_to_le64(MINOR(device_number)); rc = tcon->ses->server->ops->sync_write(xid, &fid, &io_parms, &bytes_written, iov, 1); - } /* else if (S_ISFIFO) */ + } tcon->ses->server->ops->close(xid, tcon, &fid); d_drop(direntry); From 596632de0440baecaccc9d4347329c64661c400f Mon Sep 17 00:00:00 2001 From: Aurelien Aptel Date: Thu, 19 Apr 2018 10:44:20 +0200 Subject: [PATCH 130/182] CIFS: fix typo in cifs_dbg Signed-off-by: Aurelien Aptel Signed-off-by: Steve French Reported-by: Long Li --- fs/cifs/cifs_debug.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h index fe5567655662..0e74690d11bc 100644 --- a/fs/cifs/cifs_debug.h +++ b/fs/cifs/cifs_debug.h @@ -54,7 +54,7 @@ do { \ pr_debug_ ## ratefunc("%s: " \ fmt, __FILE__, ##__VA_ARGS__); \ } else if ((type) & VFS) { \ - pr_err_ ## ratefunc("CuIFS VFS: " \ + pr_err_ ## ratefunc("CIFS VFS: " \ fmt, ##__VA_ARGS__); \ } else if ((type) & NOISY && (NOISY != 0)) { \ pr_debug_ ## ratefunc(fmt, ##__VA_ARGS__); \ From 6ab690aa439803347743c0d899ac422774fdd5e7 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Fri, 20 Apr 2018 18:16:30 +0200 Subject: [PATCH 131/182] bpf: sockmap remove dead check Remove dead code that bails on `attr->value_size > KMALLOC_MAX_SIZE` - the previous check already bails on `attr->value_size != 4`. Signed-off-by: Jann Horn Signed-off-by: Daniel Borkmann --- kernel/bpf/sockmap.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c index 8dd9210d7db7..a3b21385e947 100644 --- a/kernel/bpf/sockmap.c +++ b/kernel/bpf/sockmap.c @@ -1442,9 +1442,6 @@ static struct bpf_map *sock_map_alloc(union bpf_attr *attr) attr->value_size != 4 || attr->map_flags & ~SOCK_CREATE_FLAG_MASK) return ERR_PTR(-EINVAL); - if (attr->value_size > KMALLOC_MAX_SIZE) - return ERR_PTR(-E2BIG); - err = bpf_tcp_ulp_register(); if (err && err != -EEXIST) return ERR_PTR(err); From e01e80634ecdde1dd113ac43b3adad21b47f3957 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Fri, 20 Apr 2018 14:55:31 -0700 Subject: [PATCH 132/182] fork: unconditionally clear stack on fork One of the classes of kernel stack content leaks[1] is exposing the contents of prior heap or stack contents when a new process stack is allocated. Normally, those stacks are not zeroed, and the old contents remain in place. In the face of stack content exposure flaws, those contents can leak to userspace. 
Fixing this will make the kernel no longer vulnerable to these flaws, as the stack will be wiped each time a stack is assigned to a new process. There's not a meaningful change in runtime performance; it almost looks like it provides a benefit. Performing back-to-back kernel builds before: Run times: 157.86 157.09 158.90 160.94 160.80 Mean: 159.12 Std Dev: 1.54 and after: Run times: 159.31 157.34 156.71 158.15 160.81 Mean: 158.46 Std Dev: 1.46 Instead of making this a build or runtime config, Andy Lutomirski recommended this just be enabled by default. [1] A noisy search for many kinds of stack content leaks can be seen here: https://cve.mitre.org/cgi-bin/cvekey.cgi?keyword=linux+kernel+stack+leak I did some more with perf and cycle counts on running 100,000 execs of /bin/true. before: Cycles: 218858861551 218853036130 214727610969 227656844122 224980542841 Mean: 221015379122.60 Std Dev: 4662486552.47 after: Cycles: 213868945060 213119275204 211820169456 224426673259 225489986348 Mean: 217745009865.40 Std Dev: 5935559279.99 It continues to look like it's faster, though the deviation is rather wide, but I'm not sure what I could do that would be less noisy. I'm open to ideas! Link: http://lkml.kernel.org/r/20180221021659.GA37073@beast Signed-off-by: Kees Cook Acked-by: Michal Hocko Reviewed-by: Andrew Morton Cc: Andy Lutomirski Cc: Laura Abbott Cc: Rasmus Villemoes Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/thread_info.h | 6 +----- kernel/fork.c | 3 +-- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index 34f053a150a9..cf2862bd134a 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h @@ -43,11 +43,7 @@ enum { #define THREAD_ALIGN THREAD_SIZE #endif -#if IS_ENABLED(CONFIG_DEBUG_STACK_USAGE) || IS_ENABLED(CONFIG_DEBUG_KMEMLEAK) -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) -#else -# define THREADINFO_GFP (GFP_KERNEL_ACCOUNT) -#endif +#define THREADINFO_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO) /* * flag set/clear/test wrappers diff --git a/kernel/fork.c b/kernel/fork.c index 242c8c93d285..a5d21c42acfc 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -216,10 +216,9 @@ static unsigned long *alloc_thread_stack_node(struct task_struct *tsk, int node) if (!s) continue; -#ifdef CONFIG_DEBUG_KMEMLEAK /* Clear stale pointers from reused stack. */ memset(s->addr, 0, THREAD_SIZE); -#endif + tsk->stack_vm_area = s; return s->addr; } From 8f175cf5c99dc0e3add2aac0ea1cd54e0f9ca87d Mon Sep 17 00:00:00 2001 From: Michal Hocko Date: Fri, 20 Apr 2018 14:55:35 -0700 Subject: [PATCH 133/182] mm: fix do_pages_move status handling Li Wang has reported that LTP move_pages04 test fails with the current tree: LTP move_pages04: TFAIL : move_pages04.c:143: status[1] is EPERM, expected EFAULT The test allocates an array of two pages, one is present while the other is not (resp. backed by zero page) and it expects EFAULT for the second page as the man page suggests. We are reporting EPERM which doesn't make any sense and this is a result of a bug from cf5f16b23ec9 ("mm: unclutter THP migration"). do_pages_move tries to handle as many pages in one batch as possible so we queue all pages with the same node target together and that corresponds to [start, i] range which is then used to update status array. add_page_for_migration will correctly notice the zero (resp. !present) page and returns with EFAULT which gets written to the status. 
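The LTP-style scenario can be approximated with a short move_pages(2) program along these lines (a sketch, not the LTP source; it assumes NUMA node 0 exists and that libnuma headers are installed):

#include <numaif.h>             /* move_pages(); link with -lnuma */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        char *buf = mmap(NULL, 2 * psz, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void *pages[2] = { buf, buf + psz };
        int nodes[2] = { 0, 0 };        /* assumes node 0 exists */
        int status[2];

        if (buf == MAP_FAILED)
                return 1;
        buf[0] = 1;     /* fault in page 0 only; page 1 stays not present */

        if (move_pages(0, 2, pages, nodes, status, MPOL_MF_MOVE))
                perror("move_pages");
        /* Expected: status[1] == -EFAULT, not -EPERM. */
        printf("status[0]=%d status[1]=%d\n", status[0], status[1]);
        return 0;
}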
But if this is the last page in the array we do not update start and so the last store_status after the loop will overwrite the range of the last batch with NUMA_NO_NODE (which corresponds to EPERM). Fix this by simply bailing out from the last flush if the pagelist is empty as there is clearly nothing more to do. Link: http://lkml.kernel.org/r/20180418121255.334-1-mhocko@kernel.org Fixes: cf5f16b23ec9 ("mm: unclutter THP migration") Signed-off-by: Michal Hocko Reported-by: Li Wang Tested-by: Li Wang Cc: Zi Yan Cc: "Kirill A. Shutemov" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/migrate.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/mm/migrate.c b/mm/migrate.c index f65dd69e1fd1..70ef794cccae 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1622,6 +1622,9 @@ static int do_pages_move(struct mm_struct *mm, nodemask_t task_nodes, current_node = NUMA_NO_NODE; } out_flush: + if (list_empty(&pagelist)) + return err; + /* Make sure we do not overwrite the existing error */ err1 = do_move_pages_to_node(mm, &pagelist, current_node); if (!err1) From 88c28f2469151b031f8cea9b28ed5be1b74a4172 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Fri, 20 Apr 2018 14:55:38 -0700 Subject: [PATCH 134/182] mm, pagemap: fix swap offset value for PMD migration entry The swap offset reported by /proc//pagemap may be not correct for PMD migration entries. If addr passed into pagemap_pmd_range() isn't aligned with PMD start address, the swap offset reported doesn't reflect this. And in the loop to report information of each sub-page, the swap offset isn't increased accordingly as that for PFN. This may happen after opening /proc//pagemap and seeking to a page whose address doesn't align with a PMD start address. I have verified this with a simple test program. BTW: migration swap entries have PFN information, do we need to restrict whether to show them? [akpm@linux-foundation.org: fix typo, per Huang, Ying] Link: http://lkml.kernel.org/r/20180408033737.10897-1-ying.huang@intel.com Signed-off-by: "Huang, Ying" Cc: Michal Hocko Cc: "Kirill A. 
Shutemov" Cc: Andrei Vagin Cc: Dan Williams Cc: "Jerome Glisse" Cc: Daniel Colascione Cc: Zi Yan Cc: Naoya Horiguchi Cc: Alexey Dobriyan Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/task_mmu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 65ae54659833..c486ad4b43f0 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c @@ -1310,9 +1310,11 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, #ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION else if (is_swap_pmd(pmd)) { swp_entry_t entry = pmd_to_swp_entry(pmd); + unsigned long offset = swp_offset(entry); + offset += (addr & ~PMD_MASK) >> PAGE_SHIFT; frame = swp_type(entry) | - (swp_offset(entry) << MAX_SWAPFILES_SHIFT); + (offset << MAX_SWAPFILES_SHIFT); flags |= PM_SWAP; if (pmd_swp_soft_dirty(pmd)) flags |= PM_SOFT_DIRTY; @@ -1332,6 +1334,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end, break; if (pm->show_pfn && (flags & PM_PRESENT)) frame++; + else if (flags & PM_SWAP) + frame += (1 << MAX_SWAPFILES_SHIFT); } spin_unlock(ptl); return err; From 2e898e4c0a3897ccd434adac5abb8330194f527b Mon Sep 17 00:00:00 2001 From: Greg Thelen Date: Fri, 20 Apr 2018 14:55:42 -0700 Subject: [PATCH 135/182] writeback: safer lock nesting lock_page_memcg()/unlock_page_memcg() use spin_lock_irqsave/restore() if the page's memcg is undergoing move accounting, which occurs when a process leaves its memcg for a new one that has memory.move_charge_at_immigrate set. unlocked_inode_to_wb_begin,end() use spin_lock_irq/spin_unlock_irq() if the given inode is switching writeback domains. Switches occur when enough writes are issued from a new domain. This existing pattern is thus suspicious: lock_page_memcg(page); unlocked_inode_to_wb_begin(inode, &locked); ... unlocked_inode_to_wb_end(inode, locked); unlock_page_memcg(page); If both inode switch and process memcg migration are both in-flight then unlocked_inode_to_wb_end() will unconditionally enable interrupts while still holding the lock_page_memcg() irq spinlock. This suggests the possibility of deadlock if an interrupt occurs before unlock_page_memcg(). truncate __cancel_dirty_page lock_page_memcg unlocked_inode_to_wb_begin unlocked_inode_to_wb_end end_page_writeback test_clear_page_writeback lock_page_memcg unlock_page_memcg Due to configuration limitations this deadlock is not currently possible because we don't mix cgroup writeback (a cgroupv2 feature) and memory.move_charge_at_immigrate (a cgroupv1 feature). If the kernel is hacked to always claim inode switching and memcg moving_account, then this script triggers lockup in less than a minute: cd /mnt/cgroup/memory mkdir a b echo 1 > a/memory.move_charge_at_immigrate echo 1 > b/memory.move_charge_at_immigrate ( echo $BASHPID > a/cgroup.procs while true; do dd if=/dev/zero of=/mnt/big bs=1M count=256 done ) & while true; do sync done & sleep 1h & SLEEP=$! while true; do echo $SLEEP > a/cgroup.procs echo $SLEEP > b/cgroup.procs done The deadlock does not seem possible, so it's debatable if there's any reason to modify the kernel. I suggest we should to prevent future surprises. And Wang Long said "this deadlock occurs three times in our environment", so there's more reason to apply this, even to stable. Stable 4.4 has minor conflicts applying this patch. 
For a clean 4.4 patch see "[PATCH for-4.4] writeback: safer lock nesting" https://lkml.org/lkml/2018/4/11/146 Wang Long said "this deadlock occurs three times in our environment" [gthelen@google.com: v4] Link: http://lkml.kernel.org/r/20180411084653.254724-1-gthelen@google.com [akpm@linux-foundation.org: comment tweaks, struct initialization simplification] Change-Id: Ibb773e8045852978f6207074491d262f1b3fb613 Link: http://lkml.kernel.org/r/20180410005908.167976-1-gthelen@google.com Fixes: 682aa8e1a6a1 ("writeback: implement unlocked_inode_to_wb transaction and use it for stat updates") Signed-off-by: Greg Thelen Reported-by: Wang Long Acked-by: Wang Long Acked-by: Michal Hocko Reviewed-by: Andrew Morton Cc: Johannes Weiner Cc: Tejun Heo Cc: Nicholas Piggin Cc: [v4.2+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/fs-writeback.c | 7 ++++--- include/linux/backing-dev-defs.h | 5 +++++ include/linux/backing-dev.h | 30 ++++++++++++++++-------------- mm/page-writeback.c | 18 +++++++++--------- 4 files changed, 34 insertions(+), 26 deletions(-) diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 4b12ba70a895..47d7c151fcba 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -745,11 +745,12 @@ int inode_congested(struct inode *inode, int cong_bits) */ if (inode && inode_to_wb_is_valid(inode)) { struct bdi_writeback *wb; - bool locked, congested; + struct wb_lock_cookie lock_cookie = {}; + bool congested; - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &lock_cookie); congested = wb_congested(wb, cong_bits); - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &lock_cookie); return congested; } diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index bfe86b54f6c1..0bd432a4d7bd 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h @@ -223,6 +223,11 @@ static inline void set_bdi_congested(struct backing_dev_info *bdi, int sync) set_wb_congested(bdi->wb.congested, sync); } +struct wb_lock_cookie { + bool locked; + unsigned long flags; +}; + #ifdef CONFIG_CGROUP_WRITEBACK /** diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index f6be4b0b6c18..72ca0f3d39f3 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h @@ -347,7 +347,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) /** * unlocked_inode_to_wb_begin - begin unlocked inode wb access transaction * @inode: target inode - * @lockedp: temp bool output param, to be passed to the end function + * @cookie: output param, to be passed to the end function * * The caller wants to access the wb associated with @inode but isn't * holding inode->i_lock, the i_pages lock or wb->list_lock. This @@ -355,12 +355,12 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) * association doesn't change until the transaction is finished with * unlocked_inode_to_wb_end(). * - * The caller must call unlocked_inode_to_wb_end() with *@lockdep - * afterwards and can't sleep during transaction. IRQ may or may not be - * disabled on return. + * The caller must call unlocked_inode_to_wb_end() with *@cookie afterwards and + * can't sleep during the transaction. IRQs may or may not be disabled on + * return. 
*/ static inline struct bdi_writeback * -unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { rcu_read_lock(); @@ -368,10 +368,10 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) * Paired with store_release in inode_switch_wb_work_fn() and * ensures that we see the new wb if we see cleared I_WB_SWITCH. */ - *lockedp = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; + cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; - if (unlikely(*lockedp)) - xa_lock_irq(&inode->i_mapping->i_pages); + if (unlikely(cookie->locked)) + xa_lock_irqsave(&inode->i_mapping->i_pages, cookie->flags); /* * Protected by either !I_WB_SWITCH + rcu_read_lock() or the i_pages @@ -383,12 +383,13 @@ unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) /** * unlocked_inode_to_wb_end - end inode wb access transaction * @inode: target inode - * @locked: *@lockedp from unlocked_inode_to_wb_begin() + * @cookie: @cookie from unlocked_inode_to_wb_begin() */ -static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) { - if (unlikely(locked)) - xa_unlock_irq(&inode->i_mapping->i_pages); + if (unlikely(cookie->locked)) + xa_unlock_irqrestore(&inode->i_mapping->i_pages, cookie->flags); rcu_read_unlock(); } @@ -435,12 +436,13 @@ static inline struct bdi_writeback *inode_to_wb(struct inode *inode) } static inline struct bdi_writeback * -unlocked_inode_to_wb_begin(struct inode *inode, bool *lockedp) +unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) { return inode_to_wb(inode); } -static inline void unlocked_inode_to_wb_end(struct inode *inode, bool locked) +static inline void unlocked_inode_to_wb_end(struct inode *inode, + struct wb_lock_cookie *cookie) { } diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 5c1a3279e63f..337c6afb3345 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -2502,13 +2502,13 @@ void account_page_redirty(struct page *page) if (mapping && mapping_cap_account_dirty(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; - bool locked; + struct wb_lock_cookie cookie = {}; - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); current->nr_dirtied--; dec_node_page_state(page, NR_DIRTIED); dec_wb_stat(wb, WB_DIRTIED); - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); } } EXPORT_SYMBOL(account_page_redirty); @@ -2614,15 +2614,15 @@ void __cancel_dirty_page(struct page *page) if (mapping_cap_account_dirty(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; - bool locked; + struct wb_lock_cookie cookie = {}; lock_page_memcg(page); - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); if (TestClearPageDirty(page)) account_page_cleaned(page, mapping, wb); - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); unlock_page_memcg(page); } else { ClearPageDirty(page); @@ -2654,7 +2654,7 @@ int clear_page_dirty_for_io(struct page *page) if (mapping && mapping_cap_account_dirty(mapping)) { struct inode *inode = mapping->host; struct bdi_writeback *wb; - bool locked; + struct wb_lock_cookie cookie = {}; /* * Yes, Virginia, this is indeed insane. 
@@ -2691,14 +2691,14 @@ int clear_page_dirty_for_io(struct page *page) * always locked coming in here, so we get the desired * exclusion. */ - wb = unlocked_inode_to_wb_begin(inode, &locked); + wb = unlocked_inode_to_wb_begin(inode, &cookie); if (TestClearPageDirty(page)) { dec_lruvec_page_state(page, NR_FILE_DIRTY); dec_zone_page_state(page, NR_ZONE_WRITE_PENDING); dec_wb_stat(wb, WB_RECLAIMABLE); ret = 1; } - unlocked_inode_to_wb_end(inode, locked); + unlocked_inode_to_wb_end(inode, &cookie); return ret; } return TestClearPageDirty(page); From e71769ae52609ea0044a9901709042e5634c2306 Mon Sep 17 00:00:00 2001 From: Naoya Horiguchi Date: Fri, 20 Apr 2018 14:55:45 -0700 Subject: [PATCH 136/182] mm: enable thp migration for shmem thp My testing for the latest kernel supporting thp migration showed an infinite loop in offlining the memory block that is filled with shmem thps. We can get out of the loop with a signal, but kernel should return with failure in this case. What happens in the loop is that scan_movable_pages() repeats returning the same pfn without any progress. That's because page migration always fails for shmem thps. In memory offline code, memory blocks containing unmovable pages should be prevented from being offline targets by has_unmovable_pages() inside start_isolate_page_range(). So it's possible to change migratability for non-anonymous thps to avoid the issue, but it introduces more complex and thp-specific handling in migration code, so it might not good. So this patch is suggesting to fix the issue by enabling thp migration for shmem thp. Both of anon/shmem thp are migratable so we don't need precheck about the type of thps. Link: http://lkml.kernel.org/r/20180406030706.GA2434@hori1.linux.bs1.fc.nec.co.jp Fixes: commit 72b39cfc4d75 ("mm, memory_hotplug: do not fail offlining too early") Signed-off-by: Naoya Horiguchi Acked-by: Kirill A. 
Shutemov Cc: Zi Yan Cc: Vlastimil Babka Cc: Michal Hocko Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/huge_memory.c | 5 ++++- mm/migrate.c | 19 ++++++++++++++++--- mm/rmap.c | 3 --- 3 files changed, 20 insertions(+), 7 deletions(-) diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 14ed6ee5e02f..a3a1815f8e11 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2925,7 +2925,10 @@ void remove_migration_pmd(struct page_vma_mapped_walk *pvmw, struct page *new) pmde = maybe_pmd_mkwrite(pmde, vma); flush_cache_range(vma, mmun_start, mmun_start + HPAGE_PMD_SIZE); - page_add_anon_rmap(new, vma, mmun_start, true); + if (PageAnon(new)) + page_add_anon_rmap(new, vma, mmun_start, true); + else + page_add_file_rmap(new, true); set_pmd_at(mm, mmun_start, pvmw->pmd, pmde); if (vma->vm_flags & VM_LOCKED) mlock_vma_page(new); diff --git a/mm/migrate.c b/mm/migrate.c index 70ef794cccae..568433023831 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping, pslot = radix_tree_lookup_slot(&mapping->i_pages, page_index(page)); - expected_count += 1 + page_has_private(page); + expected_count += hpage_nr_pages(page) + page_has_private(page); if (page_count(page) != expected_count || radix_tree_deref_slot_protected(pslot, &mapping->i_pages.xa_lock) != page) { @@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping, */ newpage->index = page->index; newpage->mapping = page->mapping; - get_page(newpage); /* add cache reference */ + page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */ if (PageSwapBacked(page)) { __SetPageSwapBacked(newpage); if (PageSwapCache(page)) { @@ -524,13 +524,26 @@ int migrate_page_move_mapping(struct address_space *mapping, } radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); + if (PageTransHuge(page)) { + int i; + int index = page_index(page); + + for (i = 0; i < HPAGE_PMD_NR; i++) { + pslot = radix_tree_lookup_slot(&mapping->i_pages, + index + i); + radix_tree_replace_slot(&mapping->i_pages, pslot, + newpage + i); + } + } else { + radix_tree_replace_slot(&mapping->i_pages, pslot, newpage); + } /* * Drop cache reference from old page by unfreezing * to one less reference. * We know this isn't the last reference. */ - page_ref_unfreeze(page, expected_count - 1); + page_ref_unfreeze(page, expected_count - hpage_nr_pages(page)); xa_unlock(&mapping->i_pages); /* Leave irq disabled to prevent preemption while updating stats */ diff --git a/mm/rmap.c b/mm/rmap.c index f0dd4e4565bc..8d5337fed37b 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1374,9 +1374,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, if (!pvmw.pte && (flags & TTU_MIGRATION)) { VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); - if (!PageAnon(page)) - continue; - set_pmd_migration_entry(&pvmw, page); continue; } From c5157b76869ba98c3a99a1982396437464e131a6 Mon Sep 17 00:00:00 2001 From: Ioan Nicu Date: Fri, 20 Apr 2018 14:55:49 -0700 Subject: [PATCH 137/182] rapidio: fix rio_dma_transfer error handling Some of the mport_dma_req structure members were initialized late inside the do_dma_request() function, just before submitting the request to the dma engine. But we have some error branches before that. In case of such an error, the code would return on the error path and trigger the calling of dma_req_free() with a req structure which is not completely initialized. This causes a NULL pointer dereference in dma_req_free(). 
This patch fixes these error branches by making sure that all necessary mport_dma_req structure members are initialized in rio_dma_transfer() immediately after the request structure gets allocated. Link: http://lkml.kernel.org/r/20180412150605.GA31409@nokia.com Fixes: bbd876adb8c72 ("rapidio: use a reference count for struct mport_dma_req") Signed-off-by: Ioan Nicu Tested-by: Alexander Sverdlin Acked-by: Alexandre Bounine Cc: Barry Wood Cc: Matt Porter Cc: Christophe JAILLET Cc: Logan Gunthorpe Cc: Chris Wilson Cc: Tvrtko Ursulin Cc: Frank Kunz Cc: [4.6+] Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- drivers/rapidio/devices/rio_mport_cdev.c | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c index 9d27016c899e..0434ab7b6497 100644 --- a/drivers/rapidio/devices/rio_mport_cdev.c +++ b/drivers/rapidio/devices/rio_mport_cdev.c @@ -740,10 +740,7 @@ static int do_dma_request(struct mport_dma_req *req, tx->callback = dma_xfer_callback; tx->callback_param = req; - req->dmach = chan; - req->sync = sync; req->status = DMA_IN_PROGRESS; - init_completion(&req->req_comp); kref_get(&req->refcount); cookie = dmaengine_submit(tx); @@ -831,13 +828,20 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, if (!req) return -ENOMEM; - kref_init(&req->refcount); - ret = get_dma_channel(priv); if (ret) { kfree(req); return ret; } + chan = priv->dmach; + + kref_init(&req->refcount); + init_completion(&req->req_comp); + req->dir = dir; + req->filp = filp; + req->priv = priv; + req->dmach = chan; + req->sync = sync; /* * If parameter loc_addr != NULL, we are transferring data from/to @@ -925,11 +929,6 @@ rio_dma_transfer(struct file *filp, u32 transfer_mode, xfer->offset, xfer->length); } - req->dir = dir; - req->filp = filp; - req->priv = priv; - chan = priv->dmach; - nents = dma_map_sg(chan->device->dev, req->sgt.sgl, req->sgt.nents, dir); if (nents == 0) { From 12c8f25a016dff69ee284aa3338bebfd2cfcba33 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Fri, 20 Apr 2018 14:55:52 -0700 Subject: [PATCH 138/182] kasan: add no_sanitize attribute for clang builds KASAN uses the __no_sanitize_address macro to disable instrumentation of particular functions. Right now it's defined only for GCC build, which causes false positives when clang is used. This patch adds a definition for clang. Note, that clang's revision 329612 or higher is required. 
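The same per-function opt-out can be exercised in an ordinary user-space build with clang's AddressSanitizer; a minimal sketch (the file and function names are made up):

/* Build with: clang -O1 -fsanitize=address asan_attr.c
 * The attributed function is left uninstrumented, which mirrors what
 * __no_sanitize_address now does for KASAN when building with clang. */
#include <stdio.h>
#include <stdlib.h>

__attribute__((no_sanitize("address")))
static int peek_past_end(const int *p)
{
        return p[4];    /* out of bounds, but not instrumented here */
}

int main(void)
{
        int *p = malloc(4 * sizeof(*p));

        if (!p)
                return 1;
        printf("%d\n", peek_past_end(p));       /* no ASan report expected */
        free(p);
        return 0;
}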
[andreyknvl@google.com: remove redundant #ifdef CONFIG_KASAN check] Link: http://lkml.kernel.org/r/c79aa31a2a2790f6131ed607c58b0dd45dd62a6c.1523967959.git.andreyknvl@google.com Link: http://lkml.kernel.org/r/4ad725cc903f8534f8c8a60f0daade5e3d674f8d.1523554166.git.andreyknvl@google.com Signed-off-by: Andrey Konovalov Acked-by: Andrey Ryabinin Cc: Alexander Potapenko Cc: Dmitry Vyukov Cc: David Rientjes Cc: Thomas Gleixner Cc: Ingo Molnar Cc: David Woodhouse Cc: Andrey Konovalov Cc: Will Deacon Cc: Greg Kroah-Hartman Cc: Paul Lawrence Cc: Sandipan Das Cc: Kees Cook Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/compiler-clang.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index ceb96ecab96e..7d98e263e048 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h @@ -25,6 +25,9 @@ #define __SANITIZE_ADDRESS__ #endif +#undef __no_sanitize_address +#define __no_sanitize_address __attribute__((no_sanitize("address"))) + /* Clang doesn't have a way to turn it off per-function, yet. */ #ifdef __noretpoline #undef __noretpoline From 1551cf740cffc62221a4f13da1892f15b0d36f1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Fri, 20 Apr 2018 14:55:56 -0700 Subject: [PATCH 139/182] MAINTAINERS: add personal addresses for Sascha and Uwe MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The idea behind using kernel@pengutronix.de (i.e. the mail alias for the kernel people at Pengutronix) as email address was to have a backup when a given developer is on vacation or run over by a bus. Make this more explicit by adding the alias as reviewer and use the personal address for Sascha and me. 
Link: http://lkml.kernel.org/r/20180413083312.11213-1-u.kleine-koenig@pengutronix.de Signed-off-by: Uwe Kleine-König Acked-by: Sascha Hauer Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- MAINTAINERS | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 5ae51d05c09b..b7ff5654b8b5 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1373,7 +1373,8 @@ F: arch/arm/mach-ebsa110/ F: drivers/net/ethernet/amd/am79c961a.* ARM/ENERGY MICRO (SILICON LABS) EFM32 SUPPORT -M: Uwe Kleine-König +M: Uwe Kleine-König +R: Pengutronix Kernel Team L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained N: efm32 @@ -1401,7 +1402,8 @@ F: arch/arm/mach-footbridge/ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE M: Shawn Guo -M: Sascha Hauer +M: Sascha Hauer +R: Pengutronix Kernel Team R: Fabio Estevam L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -1416,7 +1418,8 @@ F: include/soc/imx/ ARM/FREESCALE VYBRID ARM ARCHITECTURE M: Shawn Guo -M: Sascha Hauer +M: Sascha Hauer +R: Pengutronix Kernel Team R: Stefan Agner L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -5652,7 +5655,8 @@ F: drivers/net/ethernet/freescale/fec.h F: Documentation/devicetree/bindings/net/fsl-fec.txt FREESCALE IMX / MXC FRAMEBUFFER DRIVER -M: Sascha Hauer +M: Sascha Hauer +R: Pengutronix Kernel Team L: linux-fbdev@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -12825,7 +12829,8 @@ F: include/linux/siphash.h SIOX M: Gavin Schenk -M: Uwe Kleine-König +M: Uwe Kleine-König +R: Pengutronix Kernel Team S: Supported F: drivers/siox/* F: include/trace/events/siox.h From 1e6306652ba18723015d1b4967fe9de55f042499 Mon Sep 17 00:00:00 2001 From: Ian Kent Date: Fri, 20 Apr 2018 14:55:59 -0700 Subject: [PATCH 140/182] autofs: mount point create should honour passed in mode The autofs file system mkdir inode operation blindly sets the created directory mode to S_IFDIR | 0555, ingoring the passed in mode, which can cause selinux dac_override denials. But the function also checks if the caller is the daemon (as no-one else should be able to do anything here) so there's no point in not honouring the passed in mode, allowing the daemon to set appropriate mode when required. Link: http://lkml.kernel.org/r/152361593601.8051.14014139124905996173.stgit@pluto.themaw.net Signed-off-by: Ian Kent Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/autofs4/root.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index 82e8f6edfb48..b12e37f27530 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -749,7 +749,7 @@ static int autofs4_dir_mkdir(struct inode *dir, autofs4_del_active(dentry); - inode = autofs4_get_inode(dir->i_sb, S_IFDIR | 0555); + inode = autofs4_get_inode(dir->i_sb, S_IFDIR | mode); if (!inode) return -ENOMEM; d_add(dentry, inode); From 2e0ad552f5f8cd0fda02bc45fcd2b89821c62fd1 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Fri, 20 Apr 2018 14:56:03 -0700 Subject: [PATCH 141/182] proc: revalidate kernel thread inodes to root:root task_dump_owner() has the following code: mm = task->mm; if (mm) { if (get_dumpable(mm) != SUID_DUMP_USER) { uid = ... } } Check for ->mm is buggy -- kernel thread might be borrowing mm and inode will go to some random uid:gid pair. 
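The effect is easy to observe from user space by checking the ownership of a kernel thread's /proc directory; a small sketch, assuming PID 2 is kthreadd on the running system:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;

        /* PID 2 is normally kthreadd; adjust for the running system. */
        if (stat("/proc/2", &st)) {
                perror("stat /proc/2");
                return 1;
        }
        /* With the fix, kernel threads always report root:root here. */
        printf("uid=%u gid=%u\n", (unsigned)st.st_uid, (unsigned)st.st_gid);
        return 0;
}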
Link: http://lkml.kernel.org/r/20180412220109.GA20978@avx2 Signed-off-by: Alexey Dobriyan Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/proc/base.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/fs/proc/base.c b/fs/proc/base.c index eafa39a3a88c..1b2ede6abcdf 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1693,6 +1693,12 @@ void task_dump_owner(struct task_struct *task, umode_t mode, kuid_t uid; kgid_t gid; + if (unlikely(task->flags & PF_KTHREAD)) { + *ruid = GLOBAL_ROOT_UID; + *rgid = GLOBAL_ROOT_GID; + return; + } + /* Default to the tasks effective ownership */ rcu_read_lock(); cred = __task_cred(task); From 9a1015b32faa7cebfe19663c886b0cfe90be1d49 Mon Sep 17 00:00:00 2001 From: Alexey Dobriyan Date: Fri, 20 Apr 2018 14:56:06 -0700 Subject: [PATCH 142/182] proc: fix /proc/loadavg regression Commit 95846ecf9dac ("pid: replace pid bitmap implementation with IDR API") changed last field of /proc/loadavg (last pid allocated) to be off by one: # unshare -p -f --mount-proc cat /proc/loadavg 0.00 0.00 0.00 1/60 2 <=== It should be 1 after first fork into pid namespace. This is formally a regression but given how useless this field is I don't think anyone is affected. Bug was found by /proc testsuite! Link: http://lkml.kernel.org/r/20180413175408.GA27246@avx2 Fixes: 95846ecf9dac508 ("pid: replace pid bitmap implementation with IDR API") Signed-off-by: Alexey Dobriyan Cc: "Eric W. Biederman" Cc: Gargi Sharma Cc: Oleg Nesterov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/platforms/cell/spufs/sched.c | 2 +- fs/proc/loadavg.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 9033c8194eda..ccc421503363 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c @@ -1093,7 +1093,7 @@ static int show_spu_loadavg(struct seq_file *s, void *private) LOAD_INT(c), LOAD_FRAC(c), count_active_contexts(), atomic_read(&nr_spu_contexts), - idr_get_cursor(&task_active_pid_ns(current)->idr)); + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } diff --git a/fs/proc/loadavg.c b/fs/proc/loadavg.c index a000d7547479..b572cc865b92 100644 --- a/fs/proc/loadavg.c +++ b/fs/proc/loadavg.c @@ -24,7 +24,7 @@ static int loadavg_proc_show(struct seq_file *m, void *v) LOAD_INT(avnrun[1]), LOAD_FRAC(avnrun[1]), LOAD_INT(avnrun[2]), LOAD_FRAC(avnrun[2]), nr_running(), nr_threads, - idr_get_cursor(&task_active_pid_ns(current)->idr)); + idr_get_cursor(&task_active_pid_ns(current)->idr) - 1); return 0; } From a841aa83dff0af75c88aa846ba610a8af4c5ee21 Mon Sep 17 00:00:00 2001 From: Dave Young Date: Fri, 20 Apr 2018 14:56:10 -0700 Subject: [PATCH 143/182] kexec_file: do not add extra alignment to efi memmap Chun-Yi reported a kernel warning message below: WARNING: CPU: 0 PID: 0 at ../mm/early_ioremap.c:182 early_iounmap+0x4f/0x12c() early_iounmap(ffffffffff200180, 00000118) [0] size not consistent 00000120 The problem is x86 kexec_file_load adds extra alignment to the efi memmap: in bzImage64_load(): efi_map_sz = efi_get_runtime_map_size(); efi_map_sz = ALIGN(efi_map_sz, 16); And __efi_memmap_init maps with the size including the alignment bytes but efi_memmap_unmap use nr_maps * desc_size which does not include the extra bytes. The alignment in kexec code is only needed for the kexec buffer internal use Actually kexec should pass exact size of the efi memmap to 2nd kernel. 
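The warning boils down to one side rounding the memmap size up to 16 bytes while the other side uses nr_maps * desc_size; a toy illustration of the mismatch (the descriptor count and size below are assumptions, picked only so the numbers match the 0x118/0x120 pair in the report):

#include <stdio.h>

#define ALIGN16(x)      (((x) + 15UL) & ~15UL)

int main(void)
{
        unsigned long desc_size = 0x28, nr_maps = 7;    /* assumed values */
        unsigned long exact = nr_maps * desc_size;      /* 0x118 */
        unsigned long padded = ALIGN16(exact);          /* 0x120 */

        printf("mapped %#lx, unmapped %#lx: %s\n", padded, exact,
               padded == exact ? "consistent" : "size not consistent");
        return 0;
}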
Link: http://lkml.kernel.org/r/20180417083600.GA1972@dhcp-128-65.nay.redhat.com Signed-off-by: Dave Young Reported-by: joeyli Tested-by: Randy Wright Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/kernel/kexec-bzimage64.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c index 3182908b7e6c..7326078eaa7a 100644 --- a/arch/x86/kernel/kexec-bzimage64.c +++ b/arch/x86/kernel/kexec-bzimage64.c @@ -398,11 +398,10 @@ static void *bzImage64_load(struct kimage *image, char *kernel, * little bit simple */ efi_map_sz = efi_get_runtime_map_size(); - efi_map_sz = ALIGN(efi_map_sz, 16); params_cmdline_sz = sizeof(struct boot_params) + cmdline_len + MAX_ELFCOREHDR_STR_LEN; params_cmdline_sz = ALIGN(params_cmdline_sz, 16); - kbuf.bufsz = params_cmdline_sz + efi_map_sz + + kbuf.bufsz = params_cmdline_sz + ALIGN(efi_map_sz, 16) + sizeof(struct setup_data) + sizeof(struct efi_setup_data); @@ -410,7 +409,7 @@ static void *bzImage64_load(struct kimage *image, char *kernel, if (!params) return ERR_PTR(-ENOMEM); efi_map_offset = params_cmdline_sz; - efi_setup_data_offset = efi_map_offset + efi_map_sz; + efi_setup_data_offset = efi_map_offset + ALIGN(efi_map_sz, 16); /* Copy setup header onto bootparams. Documentation/x86/boot.txt */ setup_header_size = 0x0202 + kernel[0x0201] - setup_hdr_offset; From d23a61ee90af38bb4a65decf76e798e65b401482 Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Fri, 20 Apr 2018 14:56:13 -0700 Subject: [PATCH 144/182] fs, elf: don't complain MAP_FIXED_NOREPLACE unless -EEXIST error Commit 4ed28639519c ("fs, elf: drop MAP_FIXED usage from elf_map") is printing spurious messages under memory pressure due to map_addr == -ENOMEM. 9794 (a.out): Uhuuh, elf segment at 00007f2e34738000(fffffffffffffff4) requested but the memory is mapped already 14104 (a.out): Uhuuh, elf segment at 00007f34fd76c000(fffffffffffffff4) requested but the memory is mapped already 16843 (a.out): Uhuuh, elf segment at 00007f930ecc7000(fffffffffffffff4) requested but the memory is mapped already Complain only if -EEXIST, and use %px for printing the address. 
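The -EEXIST case the message now keys on is easy to hit from user space with MAP_FIXED_NOREPLACE itself; a minimal sketch (the fallback #define is an assumption for pre-4.17 userspace headers):

#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>

#ifndef MAP_FIXED_NOREPLACE
#define MAP_FIXED_NOREPLACE 0x100000    /* assumed fallback for older headers */
#endif

int main(void)
{
        size_t len = 4096;
        void *a = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        void *b = mmap(a, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);

        /* The second mapping collides with the first one. */
        if (b == MAP_FAILED && errno == EEXIST)
                printf("collision at %p reported as EEXIST, as expected\n", a);
        else
                printf("got %p, errno=%d\n", b, errno);
        return 0;
}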
Link: http://lkml.kernel.org/r/201804182307.FAC17665.SFMOFJVFtHOLOQ@I-love.SAKURA.ne.jp Fixes: 4ed28639519c7bad ("fs, elf: drop MAP_FIXED usage from elf_map") is Signed-off-by: Tetsuo Handa Acked-by: Michal Hocko Cc: Andrei Vagin Cc: Khalid Aziz Cc: Michael Ellerman Cc: Kees Cook Cc: Abdul Haleem Cc: Joel Stanley Cc: Anshuman Khandual Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/binfmt_elf.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c index 41e04183e4ce..4ad6f669fe34 100644 --- a/fs/binfmt_elf.c +++ b/fs/binfmt_elf.c @@ -377,10 +377,10 @@ static unsigned long elf_map(struct file *filep, unsigned long addr, } else map_addr = vm_mmap(filep, addr, size, prot, type, off); - if ((type & MAP_FIXED_NOREPLACE) && BAD_ADDR(map_addr)) - pr_info("%d (%s): Uhuuh, elf segment at %p requested but the memory is mapped already\n", - task_pid_nr(current), current->comm, - (void *)addr); + if ((type & MAP_FIXED_NOREPLACE) && + PTR_ERR((void *)map_addr) == -EEXIST) + pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n", + task_pid_nr(current), current->comm, (void *)addr); return(map_addr); } From c892fd82cc0632d425ae011a4dd75eb59e9f84ee Mon Sep 17 00:00:00 2001 From: Minchan Kim Date: Fri, 20 Apr 2018 14:56:17 -0700 Subject: [PATCH 145/182] mm: memcg: add __GFP_NOWARN in __memcg_schedule_kmem_cache_create() If there is heavy memory pressure, page allocation with __GFP_NOWAIT fails easily although it's order-0 request. I got below warning 9 times for normal boot. : page allocation failure: order:0, mode:0x2200000(GFP_NOWAIT|__GFP_NOTRACK) .. snip .. Call trace: dump_backtrace+0x0/0x4 dump_stack+0xa4/0xc0 warn_alloc+0xd4/0x15c __alloc_pages_nodemask+0xf88/0x10fc alloc_slab_page+0x40/0x18c new_slab+0x2b8/0x2e0 ___slab_alloc+0x25c/0x464 __kmalloc+0x394/0x498 memcg_kmem_get_cache+0x114/0x2b8 kmem_cache_alloc+0x98/0x3e8 mmap_region+0x3bc/0x8c0 do_mmap+0x40c/0x43c vm_mmap_pgoff+0x15c/0x1e4 sys_mmap+0xb0/0xc8 el0_svc_naked+0x24/0x28 Mem-Info: active_anon:17124 inactive_anon:193 isolated_anon:0 active_file:7898 inactive_file:712955 isolated_file:55 unevictable:0 dirty:27 writeback:18 unstable:0 slab_reclaimable:12250 slab_unreclaimable:23334 mapped:19310 shmem:212 pagetables:816 bounce:0 free:36561 free_pcp:1205 free_cma:35615 Node 0 active_anon:68496kB inactive_anon:772kB active_file:31592kB inactive_file:2851820kB unevictable:0kB isolated(anon):0kB isolated(file):220kB mapped:77240kB dirty:108kB writeback:72kB shmem:848kB writeback_tmp:0kB unstable:0kB all_unreclaimable? 
no DMA free:142188kB min:3056kB low:3820kB high:4584kB active_anon:10052kB inactive_anon:12kB active_file:312kB inactive_file:1412620kB unevictable:0kB writepending:0kB present:1781412kB managed:1604728kB mlocked:0kB slab_reclaimable:3592kB slab_unreclaimable:876kB kernel_stack:400kB pagetables:52kB bounce:0kB free_pcp:1436kB local_pcp:124kB free_cma:142492kB lowmem_reserve[]: 0 1842 1842 Normal free:4056kB min:4172kB low:5212kB high:6252kB active_anon:58376kB inactive_anon:760kB active_file:31348kB inactive_file:1439040kB unevictable:0kB writepending:180kB present:2000636kB managed:1923688kB mlocked:0kB slab_reclaimable:45408kB slab_unreclaimable:92460kB kernel_stack:9680kB pagetables:3212kB bounce:0kB free_pcp:3392kB local_pcp:688kB free_cma:0kB lowmem_reserve[]: 0 0 0 DMA: 0*4kB 0*8kB 1*16kB (C) 0*32kB 0*64kB 0*128kB 1*256kB (C) 1*512kB (C) 0*1024kB 1*2048kB (C) 34*4096kB (C) = 142096kB Normal: 228*4kB (UMEH) 172*8kB (UMH) 23*16kB (UH) 24*32kB (H) 5*64kB (H) 1*128kB (H) 0*256kB 0*512kB 0*1024kB 0*2048kB 0*4096kB = 3872kB 721350 total pagecache pages 0 pages in swap cache Swap cache stats: add 0, delete 0, find 0/0 Free swap = 0kB Total swap = 0kB 945512 pages RAM 0 pages HighMem/MovableOnly 63408 pages reserved 51200 pages cma reserved __memcg_schedule_kmem_cache_create() tries to create a shadow slab cache and the worker allocation failure is not really critical because we will retry on the next kmem charge. We might miss some charges but that shouldn't be critical. The excessive allocation failure report is not very helpful. [mhocko@kernel.org: changelog update] Link: http://lkml.kernel.org/r/20180418022912.248417-1-minchan@kernel.org Signed-off-by: Minchan Kim Acked-by: Johannes Weiner Reviewed-by: Andrew Morton Cc: Michal Hocko Cc: Vladimir Davydov Cc: Minchan Kim Cc: Matthew Wilcox Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/memcontrol.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/memcontrol.c b/mm/memcontrol.c index e074f7c637aa..2bd3df3d101a 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -2192,7 +2192,7 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg, { struct memcg_kmem_cache_create_work *cw; - cw = kmalloc(sizeof(*cw), GFP_NOWAIT); + cw = kmalloc(sizeof(*cw), GFP_NOWAIT | __GFP_NOWARN); if (!cw) return; From abc1be13fd113ddef5e2d807a466286b864caed3 Mon Sep 17 00:00:00 2001 From: Matthew Wilcox Date: Fri, 20 Apr 2018 14:56:20 -0700 Subject: [PATCH 146/182] mm/filemap.c: fix NULL pointer in page_cache_tree_insert() f2fs specifies the __GFP_ZERO flag for allocating some of its pages. Unfortunately, the page cache also uses the mapping's GFP flags for allocating radix tree nodes. It always masked off the __GFP_HIGHMEM flag, and masks off __GFP_ZERO in some paths, but not all. That causes radix tree nodes to be allocated with a NULL list_head, which causes backtraces like: __list_del_entry+0x30/0xd0 list_lru_del+0xac/0x1ac page_cache_tree_insert+0xd8/0x110 The __GFP_DMA and __GFP_DMA32 flags would also be able to sneak through if they are ever used. Fix them all by using GFP_RECLAIM_MASK at the innermost location, and remove it from earlier in the callchain. 
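For reference, the masking keeps only the reclaim-related bits of the per-mapping flags, so hints such as __GFP_ZERO or __GFP_HIGHMEM cannot reach the radix-tree node allocator; a kernel-style sketch of the idiom, with a made-up helper name (not the actual mm/filemap.c code):

/* Sketch only: shows the masking applied at the innermost allocation site. */
static int preload_for_mapping(struct address_space *mapping)
{
        gfp_t gfp = mapping_gfp_mask(mapping);  /* may carry __GFP_ZERO etc. */

        /* Keep only the reclaim behaviour bits before allocating tree nodes. */
        return radix_tree_maybe_preload(gfp & GFP_RECLAIM_MASK);
}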
Link: http://lkml.kernel.org/r/20180411060320.14458-2-willy@infradead.org Fixes: 449dd6984d0e ("mm: keep page cache radix tree nodes in check") Signed-off-by: Matthew Wilcox Reported-by: Chris Fries Debugged-by: Minchan Kim Acked-by: Johannes Weiner Acked-by: Michal Hocko Reviewed-by: Jan Kara Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- mm/filemap.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/mm/filemap.c b/mm/filemap.c index 9276bdb2343c..0604cb02e6f3 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -786,7 +786,7 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) VM_BUG_ON_PAGE(!PageLocked(new), new); VM_BUG_ON_PAGE(new->mapping, new); - error = radix_tree_preload(gfp_mask & ~__GFP_HIGHMEM); + error = radix_tree_preload(gfp_mask & GFP_RECLAIM_MASK); if (!error) { struct address_space *mapping = old->mapping; void (*freepage)(struct page *); @@ -842,7 +842,7 @@ static int __add_to_page_cache_locked(struct page *page, return error; } - error = radix_tree_maybe_preload(gfp_mask & ~__GFP_HIGHMEM); + error = radix_tree_maybe_preload(gfp_mask & GFP_RECLAIM_MASK); if (error) { if (!huge) mem_cgroup_cancel_charge(page, memcg, false); @@ -1585,8 +1585,7 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, if (fgp_flags & FGP_ACCESSED) __SetPageReferenced(page); - err = add_to_page_cache_lru(page, mapping, offset, - gfp_mask & GFP_RECLAIM_MASK); + err = add_to_page_cache_lru(page, mapping, offset, gfp_mask); if (unlikely(err)) { put_page(page); page = NULL; @@ -2387,7 +2386,7 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask) if (!page) return -ENOMEM; - ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask & GFP_KERNEL); + ret = add_to_page_cache_lru(page, mapping, offset, gfp_mask); if (ret == 0) ret = mapping->a_ops->readpage(file, page); else if (ret == -EEXIST) From 686c97ee29c886ee07d17987d0059874c5c3b5af Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:06 +0200 Subject: [PATCH 147/182] s390/qeth: fix error handling in adapter command callbacks Make sure to check both return code fields before(!) processing the command response. Otherwise we risk operating on invalid data. This matches an earlier fix for SETASSPARMS commands, see commit ad3cbf613329 ("s390/qeth: fix error handling in checksum cmd callback"). Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 85 ++++++++++++++----------------- 1 file changed, 37 insertions(+), 48 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 04fefa5bb08d..36bc94088de1 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -3033,28 +3033,23 @@ static int qeth_send_startlan(struct qeth_card *card) return rc; } -static int qeth_default_setadapterparms_cb(struct qeth_card *card, - struct qeth_reply *reply, unsigned long data) +static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd) { - struct qeth_ipa_cmd *cmd; - - QETH_CARD_TEXT(card, 4, "defadpcb"); - - cmd = (struct qeth_ipa_cmd *) data; - if (cmd->hdr.return_code == 0) + if (!cmd->hdr.return_code) cmd->hdr.return_code = cmd->data.setadapterparms.hdr.return_code; - return 0; + return cmd->hdr.return_code; } static int qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 3, "quyadpcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *) data; if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) { card->info.link_type = cmd->data.setadapterparms.data.query_cmds_supp.lan_type; @@ -3062,7 +3057,7 @@ static int qeth_query_setadapterparms_cb(struct qeth_card *card, } card->options.adp.supported_funcs = cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds; - return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); + return 0; } static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card, @@ -3154,22 +3149,20 @@ EXPORT_SYMBOL_GPL(qeth_query_ipassists); static int qeth_query_switch_attributes_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; - struct qeth_switch_info *sw_info; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_query_switch_attributes *attrs; + struct qeth_switch_info *sw_info; QETH_CARD_TEXT(card, 2, "qswiatcb"); - cmd = (struct qeth_ipa_cmd *) data; - sw_info = (struct qeth_switch_info *)reply->param; - if (cmd->data.setadapterparms.hdr.return_code == 0) { - attrs = &cmd->data.setadapterparms.data.query_switch_attributes; - sw_info->capabilities = attrs->capabilities; - sw_info->settings = attrs->settings; - QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, - sw_info->settings); - } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; + sw_info = (struct qeth_switch_info *)reply->param; + attrs = &cmd->data.setadapterparms.data.query_switch_attributes; + sw_info->capabilities = attrs->capabilities; + sw_info->settings = attrs->settings; + QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities, + sw_info->settings); return 0; } @@ -4207,16 +4200,13 @@ EXPORT_SYMBOL_GPL(qeth_do_send_packet); static int qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_ipacmd_setadpparms *setparms; QETH_CARD_TEXT(card, 4, "prmadpcb"); - cmd = (struct qeth_ipa_cmd *) data; setparms = &(cmd->data.setadapterparms); - - qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); - if (cmd->hdr.return_code) { + if (qeth_setadpparms_inspect_rc(cmd)) { QETH_CARD_TEXT_(card, 4, "prmrc%x", 
cmd->hdr.return_code); setparms->data.mode = SET_PROMISC_MODE_OFF; } @@ -4286,18 +4276,18 @@ EXPORT_SYMBOL_GPL(qeth_get_stats); static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; QETH_CARD_TEXT(card, 4, "chgmaccb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *) data; if (!card->options.layer2 || !(card->info.mac_bits & QETH_LAYER2_MAC_READ)) { ether_addr_copy(card->dev->dev_addr, cmd->data.setadapterparms.data.change_addr.addr); card->info.mac_bits |= QETH_LAYER2_MAC_READ; } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } @@ -4328,13 +4318,15 @@ EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data; struct qeth_set_access_ctrl *access_ctrl_req; int fallback = *(int *)reply->param; QETH_CARD_TEXT(card, 4, "setaccb"); + if (cmd->hdr.return_code) + return 0; + qeth_setadpparms_inspect_rc(cmd); - cmd = (struct qeth_ipa_cmd *) data; access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl; QETH_DBF_TEXT_(SETUP, 2, "setaccb"); QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name); @@ -4407,7 +4399,6 @@ static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, card->options.isolation = card->options.prev_isolation; break; } - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); return 0; } @@ -4695,14 +4686,15 @@ static int qeth_snmp_command(struct qeth_card *card, char __user *udata) static int qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_qoat_priv *priv; char *resdata; int resdatalen; QETH_CARD_TEXT(card, 3, "qoatcb"); + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - cmd = (struct qeth_ipa_cmd *)data; priv = (struct qeth_qoat_priv *)reply->param; resdatalen = cmd->data.setadapterparms.hdr.cmdlength; resdata = (char *)data + 28; @@ -4796,21 +4788,18 @@ static int qeth_query_oat_command(struct qeth_card *card, char __user *udata) static int qeth_query_card_info_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) { - struct qeth_ipa_cmd *cmd; + struct carrier_info *carrier_info = (struct carrier_info *)reply->param; + struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data; struct qeth_query_card_info *card_info; - struct carrier_info *carrier_info; QETH_CARD_TEXT(card, 2, "qcrdincb"); - carrier_info = (struct carrier_info *)reply->param; - cmd = (struct qeth_ipa_cmd *)data; - card_info = &cmd->data.setadapterparms.data.card_info; - if (cmd->data.setadapterparms.hdr.return_code == 0) { - carrier_info->card_type = card_info->card_type; - carrier_info->port_mode = card_info->port_mode; - carrier_info->port_speed = card_info->port_speed; - } + if (qeth_setadpparms_inspect_rc(cmd)) + return 0; - qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); + card_info = &cmd->data.setadapterparms.data.card_info; + carrier_info->card_type = card_info->card_type; + carrier_info->port_mode = card_info->port_mode; + carrier_info->port_speed = card_info->port_speed; return 0; } From 901e3f49facbd31b2b3d1786637b4a35e1022e9b Mon Sep 17 00:00:00 2001 From: Julian 
Wiedmann Date: Thu, 19 Apr 2018 12:52:07 +0200 Subject: [PATCH 148/182] s390/qeth: avoid control IO completion stalls For control IO, qeth currently tracks the index of the buffer that it expects to complete the next IO on each qeth_channel. If the channel presents an IRQ while this buffer has not yet completed, no completion processing for _any_ completed buffer takes place. So if the 'next buffer' is skipped for any sort of reason* (eg. when it is released due to error conditions, before the IO is started), the buffer obviously won't switch to PROCESSED until it is eventually allocated for a _different_ IO and completes. Until this happens, all completion processing on that channel stalls and pending requests possibly time out. As a fix, remove the whole 'next buffer' logic and simply process any IO buffer right when it completes. A channel will never have more than one IO pending, so there's no risk of processing out-of-sequence. *Note: currently just one location in the code really handles this problem, by advancing the 'next' index manually. Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core.h | 2 -- drivers/s390/net/qeth_core_main.c | 22 +++++----------------- 2 files changed, 5 insertions(+), 19 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index 4326715dc13e..78b98b3e7efa 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -557,7 +557,6 @@ enum qeth_prot_versions { enum qeth_cmd_buffer_state { BUF_STATE_FREE, BUF_STATE_LOCKED, - BUF_STATE_PROCESSED, }; enum qeth_cq { @@ -601,7 +600,6 @@ struct qeth_channel { struct qeth_cmd_buffer iob[QETH_CMD_BUFFER_NO]; atomic_t irq_pending; int io_buf_no; - int buf_no; }; /** diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 36bc94088de1..5ec47c6ebaa6 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -818,7 +818,6 @@ void qeth_clear_cmd_buffers(struct qeth_channel *channel) for (cnt = 0; cnt < QETH_CMD_BUFFER_NO; cnt++) qeth_release_buffer(channel, &channel->iob[cnt]); - channel->buf_no = 0; channel->io_buf_no = 0; } EXPORT_SYMBOL_GPL(qeth_clear_cmd_buffers); @@ -924,7 +923,6 @@ static int qeth_setup_channel(struct qeth_channel *channel) kfree(channel->iob[cnt].data); return -ENOMEM; } - channel->buf_no = 0; channel->io_buf_no = 0; atomic_set(&channel->irq_pending, 0); spin_lock_init(&channel->iob_lock); @@ -1100,11 +1098,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, { int rc; int cstat, dstat; - struct qeth_cmd_buffer *buffer; struct qeth_channel *channel; struct qeth_card *card; struct qeth_cmd_buffer *iob; - __u8 index; if (__qeth_check_irb_error(cdev, intparm, irb)) return; @@ -1182,25 +1178,18 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel->state = CH_STATE_RCD_DONE; goto out; } - if (intparm) { - buffer = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - buffer->state = BUF_STATE_PROCESSED; - } if (channel == &card->data) return; if (channel == &card->read && channel->state == CH_STATE_UP) __qeth_issue_next_read(card); - iob = channel->iob; - index = channel->buf_no; - while (iob[index].state == BUF_STATE_PROCESSED) { - if (iob[index].callback != NULL) - iob[index].callback(channel, iob + index); - - index = (index + 1) % QETH_CMD_BUFFER_NO; + if (intparm) { + iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + if (iob->callback) + iob->callback(iob->channel, iob); } - channel->buf_no = 
index; + out: wake_up(&card->wait_q); return; @@ -2214,7 +2203,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, error: atomic_set(&card->write.irq_pending, 0); qeth_release_buffer(iob->channel, iob); - card->write.buf_no = (card->write.buf_no + 1) % QETH_CMD_BUFFER_NO; rc = reply->rc; qeth_put_reply(reply); return rc; From a936b1ef37ce1e996533878f4b23944f9444dcdf Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:08 +0200 Subject: [PATCH 149/182] s390/qeth: handle failure on workqueue creation Creating the global workqueue during driver init may fail, deal with it. Also, destroy the created workqueue on any subsequent error. Fixes: 0f54761d167f ("qeth: Support VEPA mode") Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 5ec47c6ebaa6..9a08b545d018 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -6540,10 +6540,14 @@ static int __init qeth_core_init(void) mutex_init(&qeth_mod_mutex); qeth_wq = create_singlethread_workqueue("qeth_wq"); + if (!qeth_wq) { + rc = -ENOMEM; + goto out_err; + } rc = qeth_register_dbf_views(); if (rc) - goto out_err; + goto dbf_err; qeth_core_root_dev = root_device_register("qeth"); rc = PTR_ERR_OR_ZERO(qeth_core_root_dev); if (rc) @@ -6580,6 +6584,8 @@ static int __init qeth_core_init(void) root_device_unregister(qeth_core_root_dev); register_err: qeth_unregister_dbf_views(); +dbf_err: + destroy_workqueue(qeth_wq); out_err: pr_err("Initializing the qeth device driver failed\n"); return rc; From bcacfcbc82b4235d280ed9b067aa4567f4a0c756 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:09 +0200 Subject: [PATCH 150/182] s390/qeth: fix MAC address update sequence When changing the MAC address on a L2 qeth device, current code first unregisters the old address, then registers the new one. If HW rejects the new address (or the IO fails), the device ends up with no operable address at all. Re-order the code flow so that the old address only gets dropped if the new address was registered successfully. While at it, add logic to catch some corner-cases. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_l2_main.c | 55 +++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 24 deletions(-) diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 2ad6f12f3d49..36f9b74848fe 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -121,13 +121,10 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) QETH_CARD_TEXT(card, 2, "L2Setmac"); rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC); if (rc == 0) { - card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; - ether_addr_copy(card->dev->dev_addr, mac); dev_info(&card->gdev->dev, - "MAC address %pM successfully registered on device %s\n", - card->dev->dev_addr, card->dev->name); + "MAC address %pM successfully registered on device %s\n", + mac, card->dev->name); } else { - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; switch (rc) { case -EEXIST: dev_warn(&card->gdev->dev, @@ -142,19 +139,6 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac) return rc; } -static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac) -{ - int rc; - - QETH_CARD_TEXT(card, 2, "L2Delmac"); - if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) - return 0; - rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC); - if (rc == 0) - card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED; - return rc; -} - static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac) { enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ? @@ -519,6 +503,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) { struct sockaddr *addr = p; struct qeth_card *card = dev->ml_priv; + u8 old_addr[ETH_ALEN]; int rc = 0; QETH_CARD_TEXT(card, 3, "setmac"); @@ -530,14 +515,35 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) return -EOPNOTSUPP; } QETH_CARD_HEX(card, 3, addr->sa_data, ETH_ALEN); + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + if (qeth_wait_for_threads(card, QETH_RECOVER_THREAD)) { QETH_CARD_TEXT(card, 3, "setmcREC"); return -ERESTARTSYS; } - rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]); - if (!rc || (rc == -ENOENT)) - rc = qeth_l2_send_setmac(card, addr->sa_data); - return rc ? 
-EINVAL : 0; + + if (!qeth_card_hw_is_reachable(card)) { + ether_addr_copy(dev->dev_addr, addr->sa_data); + return 0; + } + + /* don't register the same address twice */ + if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) && + (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)) + return 0; + + /* add the new address, switch over, drop the old */ + rc = qeth_l2_send_setmac(card, addr->sa_data); + if (rc) + return rc; + ether_addr_copy(old_addr, dev->dev_addr); + ether_addr_copy(dev->dev_addr, addr->sa_data); + + if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED) + qeth_l2_remove_mac(card, old_addr); + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; + return 0; } static void qeth_promisc_to_bridge(struct qeth_card *card) @@ -1067,8 +1073,9 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) goto out_remove; } - if (card->info.type != QETH_CARD_TYPE_OSN) - qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); + if (card->info.type != QETH_CARD_TYPE_OSN && + !qeth_l2_send_setmac(card, card->dev->dev_addr)) + card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; if (qeth_is_diagass_supported(card, QETH_DIAGS_CMD_TRAP)) { if (card->info.hwtrap && From db71bbbd11a4d314f0fa3fbf3369b71cf33ce33c Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:10 +0200 Subject: [PATCH 151/182] s390/qeth: fix request-side race during cmd IO timeout Submitting a cmd IO request (usually on the WRITE device, but for IDX also on the READ device) is currently done with ccw_device_start() and a manual timeout in the caller. On timeout, the caller cleans up the related resources (eg. IO buffer). But 1) the IO might still be active and utilize those resources, and 2) when the IO completes, qeth_irq() will attempt to clean up the same resources again. Instead of introducing additional resource locking, switch to ccw_device_start_timeout() to ensure IO termination after timeout, and let the IRQ handler alone deal with cleaning up after a request. This also removes a stray write->irq_pending reset from clear_ipacmd_list(). The routine doesn't terminate any pending IO on the WRITE device, so this should be handled properly via IO timeout in the IRQ handler. Signed-off-by: Julian Wiedmann Signed-off-by: David S. 
Miller --- drivers/s390/net/qeth_core_main.c | 51 ++++++++++++++++--------------- drivers/s390/net/qeth_core_mpc.h | 12 ++++++++ drivers/s390/net/qeth_l2_main.c | 4 +-- 3 files changed, 40 insertions(+), 27 deletions(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9a08b545d018..9b22d5d496ae 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -706,7 +706,6 @@ void qeth_clear_ipacmd_list(struct qeth_card *card) qeth_put_reply(reply); } spin_unlock_irqrestore(&card->lock, flags); - atomic_set(&card->write.irq_pending, 0); } EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list); @@ -1098,14 +1097,9 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, { int rc; int cstat, dstat; + struct qeth_cmd_buffer *iob = NULL; struct qeth_channel *channel; struct qeth_card *card; - struct qeth_cmd_buffer *iob; - - if (__qeth_check_irb_error(cdev, intparm, irb)) - return; - cstat = irb->scsw.cmd.cstat; - dstat = irb->scsw.cmd.dstat; card = CARD_FROM_CDEV(cdev); if (!card) @@ -1123,6 +1117,19 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel = &card->data; QETH_CARD_TEXT(card, 5, "data"); } + + if (qeth_intparm_is_iob(intparm)) + iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); + + if (__qeth_check_irb_error(cdev, intparm, irb)) { + /* IO was terminated, free its resources. */ + if (iob) + qeth_release_buffer(iob->channel, iob); + atomic_set(&channel->irq_pending, 0); + wake_up(&card->wait_q); + return; + } + atomic_set(&channel->irq_pending, 0); if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC)) @@ -1146,6 +1153,10 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, /* we don't have to handle this further */ intparm = 0; } + + cstat = irb->scsw.cmd.cstat; + dstat = irb->scsw.cmd.dstat; + if ((dstat & DEV_STAT_UNIT_EXCEP) || (dstat & DEV_STAT_UNIT_CHECK) || (cstat)) { @@ -1184,11 +1195,8 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, channel->state == CH_STATE_UP) __qeth_issue_next_read(card); - if (intparm) { - iob = (struct qeth_cmd_buffer *) __va((addr_t)intparm); - if (iob->callback) - iob->callback(iob->channel, iob); - } + if (iob && iob->callback) + iob->callback(iob->channel, iob); out: wake_up(&card->wait_q); @@ -1859,8 +1867,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - &channel->ccw, (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { @@ -1877,7 +1885,6 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, if (channel->state != CH_STATE_UP) { rc = -ETIME; QETH_DBF_TEXT_(SETUP, 2, "3err%d", rc); - qeth_clear_cmd_buffers(channel); } else rc = 0; return rc; @@ -1931,8 +1938,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, atomic_cmpxchg(&channel->irq_pending, 0, 1) == 0); QETH_DBF_TEXT(SETUP, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); - rc = ccw_device_start(channel->ccwdev, - &channel->ccw, (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(channel->ccwdev, &channel->ccw, + (addr_t) iob, 0, 0, QETH_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); if (rc) { @@ -1953,7 
+1960,6 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", dev_name(&channel->ccwdev->dev)); QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); - qeth_clear_cmd_buffers(channel); return -ETIME; } return qeth_idx_activate_get_answer(channel, idx_reply_cb); @@ -2155,8 +2161,8 @@ int qeth_send_control_data(struct qeth_card *card, int len, QETH_CARD_TEXT(card, 6, "noirqpnd"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, - (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + (addr_t) iob, 0, 0, event_timeout); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " @@ -2188,8 +2194,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, } } - if (reply->rc == -EIO) - goto error; rc = reply->rc; qeth_put_reply(reply); return rc; @@ -2200,9 +2204,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, list_del_init(&reply->list); spin_unlock_irqrestore(&reply->card->lock, flags); atomic_inc(&reply->received); -error: - atomic_set(&card->write.irq_pending, 0); - qeth_release_buffer(iob->channel, iob); rc = reply->rc; qeth_put_reply(reply); return rc; diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h index 619f897b4bb0..f4d1ec0b8f5a 100644 --- a/drivers/s390/net/qeth_core_mpc.h +++ b/drivers/s390/net/qeth_core_mpc.h @@ -35,6 +35,18 @@ extern unsigned char IPA_PDU_HEADER[]; #define QETH_HALT_CHANNEL_PARM -11 #define QETH_RCD_PARM -12 +static inline bool qeth_intparm_is_iob(unsigned long intparm) +{ + switch (intparm) { + case QETH_CLEAR_CHANNEL_PARM: + case QETH_HALT_CHANNEL_PARM: + case QETH_RCD_PARM: + case 0: + return false; + } + return true; +} + /*****************************************************************************/ /* IP Assist related definitions */ /*****************************************************************************/ diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 36f9b74848fe..b8079f2a65b3 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -1345,8 +1345,8 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, qeth_prepare_control_data(card, len, iob); QETH_CARD_TEXT(card, 6, "osnoirqp"); spin_lock_irqsave(get_ccwdev_lock(card->write.ccwdev), flags); - rc = ccw_device_start(card->write.ccwdev, &card->write.ccw, - (addr_t) iob, 0, 0); + rc = ccw_device_start_timeout(CARD_WDEV(card), &card->write.ccw, + (addr_t) iob, 0, 0, QETH_IPA_TIMEOUT); spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); if (rc) { QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " From b7493e91c11a757cf0f8ab26989642ee4bb2c642 Mon Sep 17 00:00:00 2001 From: Julian Wiedmann Date: Thu, 19 Apr 2018 12:52:11 +0200 Subject: [PATCH 152/182] s390/qeth: use Read device to query hypervisor for MAC For z/VM NICs, qeth needs to consider which of the three CCW devices in an MPC group it uses for requesting a managed MAC address. On the Base device, the hypervisor returns a default MAC which is pre-assigned when creating the NIC (this MAC is also returned by the READ MAC primitive). Querying any other device results in the allocation of an additional MAC address. For consistency with READ MAC and to avoid using up more addresses than necessary, it is preferable to use the NIC's default MAC. 
So switch the diag26c over to using a NIC's Read device, which should always be identical to the Base device. Fixes: ec61bd2fd2a2 ("s390/qeth: use diag26c to get MAC address on L2") Signed-off-by: Julian Wiedmann Signed-off-by: David S. Miller --- drivers/s390/net/qeth_core_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 9b22d5d496ae..dffd820731f2 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -4835,7 +4835,7 @@ int qeth_vm_request_mac(struct qeth_card *card) goto out; } - ccw_device_get_id(CARD_DDEV(card), &id); + ccw_device_get_id(CARD_RDEV(card), &id); request->resp_buf_len = sizeof(*response); request->resp_version = DIAG26C_VERSION2; request->op_code = DIAG26C_GET_MAC; From 5411b6187adf62909e3b998ac782e722904c7487 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Thu, 19 Apr 2018 16:20:48 +0200 Subject: [PATCH 153/182] l2tp: fix {pppol2tp, l2tp_dfs}_seq_stop() in case of seq_file overflow Commit 0e0c3fee3a59 ("l2tp: hold reference on tunnels printed in pppol2tp proc file") assumed that if pppol2tp_seq_stop() was called with non-NULL private data (the 'v' pointer), then pppol2tp_seq_start() would not be called again. It turns out that this isn't guaranteed, and overflowing the seq_file's buffer in pppol2tp_seq_show() is a way to get into this situation. Therefore, pppol2tp_seq_stop() needs to reset pd->tunnel, so that pppol2tp_seq_start() won't drop a reference again if it gets called. We also have to clear pd->session, because the rest of the code expects a non-NULL tunnel when pd->session is set. The l2tp_debugfs module has the same issue. Fix it in the same way. Fixes: 0e0c3fee3a59 ("l2tp: hold reference on tunnels printed in pppol2tp proc file") Fixes: f726214d9b23 ("l2tp: hold reference on tunnels printed in l2tp/tunnels debugfs file") Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- net/l2tp/l2tp_debugfs.c | 5 ++++- net/l2tp/l2tp_ppp.c | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index b8f9d45bfeb1..7f1e842ef05a 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -106,8 +106,11 @@ static void l2tp_dfs_seq_stop(struct seq_file *p, void *v) return; /* Drop reference taken by last invocation of l2tp_dfs_next_tunnel() */ - if (pd->tunnel) + if (pd->tunnel) { l2tp_tunnel_dec_refcount(pd->tunnel); + pd->tunnel = NULL; + pd->session = NULL; + } } static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 7d0c963680e6..1404bc1c1bb7 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -1618,8 +1618,11 @@ static void pppol2tp_seq_stop(struct seq_file *p, void *v) return; /* Drop reference taken by last invocation of pppol2tp_next_tunnel() */ - if (pd->tunnel) + if (pd->tunnel) { l2tp_tunnel_dec_refcount(pd->tunnel); + pd->tunnel = NULL; + pd->session = NULL; + } } static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v) From b905ef9ab90115d001c1658259af4b1c65088779 Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 19 Apr 2018 12:25:38 -0700 Subject: [PATCH 154/182] llc: delete timers synchronously in llc_sk_free() The connection timers of an llc sock could be still flying after we delete them in llc_sk_free(), and even possibly after we free the sock. We could just wait synchronously here in case of troubles.
Note, I leave other call paths as they are, since they may not have to wait, at least we can change them to synchronously when needed. Also, move the code to net/llc/llc_conn.c, which is apparently a better place. Reported-by: Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- include/net/llc_conn.h | 1 + net/llc/llc_c_ac.c | 9 +-------- net/llc/llc_conn.c | 22 +++++++++++++++++++++- 3 files changed, 23 insertions(+), 9 deletions(-) diff --git a/include/net/llc_conn.h b/include/net/llc_conn.h index 5c40f118c0fa..df528a623548 100644 --- a/include/net/llc_conn.h +++ b/include/net/llc_conn.h @@ -97,6 +97,7 @@ static __inline__ char llc_backlog_type(struct sk_buff *skb) struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct proto *prot, int kern); +void llc_sk_stop_all_timers(struct sock *sk, bool sync); void llc_sk_free(struct sock *sk); void llc_sk_reset(struct sock *sk); diff --git a/net/llc/llc_c_ac.c b/net/llc/llc_c_ac.c index 163121192aca..4d78375f9872 100644 --- a/net/llc/llc_c_ac.c +++ b/net/llc/llc_c_ac.c @@ -1099,14 +1099,7 @@ int llc_conn_ac_inc_tx_win_size(struct sock *sk, struct sk_buff *skb) int llc_conn_ac_stop_all_timers(struct sock *sk, struct sk_buff *skb) { - struct llc_sock *llc = llc_sk(sk); - - del_timer(&llc->pf_cycle_timer.timer); - del_timer(&llc->ack_timer.timer); - del_timer(&llc->rej_sent_timer.timer); - del_timer(&llc->busy_state_timer.timer); - llc->ack_must_be_send = 0; - llc->ack_pf = 0; + llc_sk_stop_all_timers(sk, false); return 0; } diff --git a/net/llc/llc_conn.c b/net/llc/llc_conn.c index 110e32bcb399..c0ac522b48a1 100644 --- a/net/llc/llc_conn.c +++ b/net/llc/llc_conn.c @@ -961,6 +961,26 @@ struct sock *llc_sk_alloc(struct net *net, int family, gfp_t priority, struct pr return sk; } +void llc_sk_stop_all_timers(struct sock *sk, bool sync) +{ + struct llc_sock *llc = llc_sk(sk); + + if (sync) { + del_timer_sync(&llc->pf_cycle_timer.timer); + del_timer_sync(&llc->ack_timer.timer); + del_timer_sync(&llc->rej_sent_timer.timer); + del_timer_sync(&llc->busy_state_timer.timer); + } else { + del_timer(&llc->pf_cycle_timer.timer); + del_timer(&llc->ack_timer.timer); + del_timer(&llc->rej_sent_timer.timer); + del_timer(&llc->busy_state_timer.timer); + } + + llc->ack_must_be_send = 0; + llc->ack_pf = 0; +} + /** * llc_sk_free - Frees a LLC socket * @sk - socket to free @@ -973,7 +993,7 @@ void llc_sk_free(struct sock *sk) llc->state = LLC_CONN_OUT_OF_SVC; /* Stop all (possibly) running timers */ - llc_conn_ac_stop_all_timers(sk, NULL); + llc_sk_stop_all_timers(sk, true); #ifdef DEBUG_LLC_CONN_ALLOC printk(KERN_INFO "%s: unackq=%d, txq=%d\n", __func__, skb_queue_len(&llc->pdu_unack_q), From 5e391dc5a8d801a2410d0032ad4a428d1d61800c Mon Sep 17 00:00:00 2001 From: Ivan Khoronzhuk Date: Thu, 19 Apr 2018 22:49:09 +0300 Subject: [PATCH 155/182] net: ethernet: ti: cpsw: fix tx vlan priority mapping The CPDMA_TX_PRIORITY_MAP in real is vlan pcp field priority mapping register and basically replaces vlan pcp field for tagged packets. So, set it to be 1:1 mapping. Otherwise, it will cause unexpected change of egress vlan tagged packets, like prio 2 -> prio 5. Fixes: e05107e6b747 ("net: ethernet: ti: cpsw: add multi queue support") Reviewed-by: Grygorii Strashko Signed-off-by: Ivan Khoronzhuk Signed-off-by: David S. 
Miller --- drivers/net/ethernet/ti/cpsw.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 30371274409d..74f828412055 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -129,7 +129,7 @@ do { \ #define RX_PRIORITY_MAPPING 0x76543210 #define TX_PRIORITY_MAPPING 0x33221100 -#define CPDMA_TX_PRIORITY_MAP 0x01234567 +#define CPDMA_TX_PRIORITY_MAP 0x76543210 #define CPSW_VLAN_AWARE BIT(1) #define CPSW_RX_VLAN_ENCAP BIT(2) From 3a04ce7130a7e5dad4e78d45d50313747f8c830f Mon Sep 17 00:00:00 2001 From: Cong Wang Date: Thu, 19 Apr 2018 21:54:34 -0700 Subject: [PATCH 156/182] llc: fix NULL pointer deref for SOCK_ZAPPED For SOCK_ZAPPED socket, we don't need to care about llc->sap, so we should just skip these refcount functions in this case. Fixes: f7e43672683b ("llc: hold llc_sap before release_sock()") Reported-by: kernel test robot Signed-off-by: Cong Wang Signed-off-by: David S. Miller --- net/llc/af_llc.c | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 6d29b2b94e84..cb80ebb38311 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -189,7 +189,6 @@ static int llc_ui_release(struct socket *sock) { struct sock *sk = sock->sk; struct llc_sock *llc; - struct llc_sap *sap; if (unlikely(sk == NULL)) goto out; @@ -200,15 +199,19 @@ static int llc_ui_release(struct socket *sock) llc->laddr.lsap, llc->daddr.lsap); if (!llc_send_disc(sk)) llc_ui_wait_for_disc(sk, sk->sk_rcvtimeo); - sap = llc->sap; - /* Hold this for release_sock(), so that llc_backlog_rcv() could still - * use it. - */ - llc_sap_hold(sap); - if (!sock_flag(sk, SOCK_ZAPPED)) + if (!sock_flag(sk, SOCK_ZAPPED)) { + struct llc_sap *sap = llc->sap; + + /* Hold this for release_sock(), so that llc_backlog_rcv() + * could still use it. + */ + llc_sap_hold(sap); llc_sap_remove_socket(llc->sap, sk); - release_sock(sk); - llc_sap_put(sap); + release_sock(sk); + llc_sap_put(sap); + } else { + release_sock(sk); + } if (llc->dev) dev_put(llc->dev); sock_put(sk); From a957fa190aa9d9168b33d460a5241a6d088c6265 Mon Sep 17 00:00:00 2001 From: Ahmed Abdelsalam Date: Fri, 20 Apr 2018 15:58:05 +0200 Subject: [PATCH 157/182] ipv6: sr: fix NULL pointer dereference in seg6_do_srh_encap()- v4 pkts In case of seg6 in encap mode, seg6_do_srh_encap() calls set_tun_src() in order to set the src addr of outer IPv6 header. The net_device is required for set_tun_src(). However calling ip6_dst_idev() on dst_entry in case of IPv4 traffic results on the following bug. Using just dst->dev should fix this BUG. 
[ 196.242461] BUG: unable to handle kernel NULL pointer dereference at 0000000000000000 [ 196.242975] PGD 800000010f076067 P4D 800000010f076067 PUD 10f060067 PMD 0 [ 196.243329] Oops: 0000 [#1] SMP PTI [ 196.243468] Modules linked in: nfsd auth_rpcgss nfs_acl nfs lockd grace fscache sunrpc crct10dif_pclmul crc32_pclmul ghash_clmulni_intel pcbc aesni_intel aes_x86_64 crypto_simd cryptd input_leds glue_helper led_class pcspkr serio_raw mac_hid video autofs4 hid_generic usbhid hid e1000 i2c_piix4 ahci pata_acpi libahci [ 196.244362] CPU: 2 PID: 1089 Comm: ping Not tainted 4.16.0+ #1 [ 196.244606] Hardware name: innotek GmbH VirtualBox/VirtualBox, BIOS VirtualBox 12/01/2006 [ 196.244968] RIP: 0010:seg6_do_srh_encap+0x1ac/0x300 [ 196.245236] RSP: 0018:ffffb2ce00b23a60 EFLAGS: 00010202 [ 196.245464] RAX: 0000000000000000 RBX: ffff8c7f53eea300 RCX: 0000000000000000 [ 196.245742] RDX: 0000f10000000000 RSI: ffff8c7f52085a6c RDI: ffff8c7f41166850 [ 196.246018] RBP: ffffb2ce00b23aa8 R08: 00000000000261e0 R09: ffff8c7f41166800 [ 196.246294] R10: ffffdce5040ac780 R11: ffff8c7f41166828 R12: ffff8c7f41166808 [ 196.246570] R13: ffff8c7f52085a44 R14: ffffffffb73211c0 R15: ffff8c7e69e44200 [ 196.246846] FS: 00007fc448789700(0000) GS:ffff8c7f59d00000(0000) knlGS:0000000000000000 [ 196.247286] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 196.247526] CR2: 0000000000000000 CR3: 000000010f05a000 CR4: 00000000000406e0 [ 196.247804] Call Trace: [ 196.247972] seg6_do_srh+0x15b/0x1c0 [ 196.248156] seg6_output+0x3c/0x220 [ 196.248341] ? prandom_u32+0x14/0x20 [ 196.248526] ? ip_idents_reserve+0x6c/0x80 [ 196.248723] ? __ip_select_ident+0x90/0x100 [ 196.248923] ? ip_append_data.part.50+0x6c/0xd0 [ 196.249133] lwtunnel_output+0x44/0x70 [ 196.249328] ip_send_skb+0x15/0x40 [ 196.249515] raw_sendmsg+0x8c3/0xac0 [ 196.249701] ? _copy_from_user+0x2e/0x60 [ 196.249897] ? rw_copy_check_uvector+0x53/0x110 [ 196.250106] ? _copy_from_user+0x2e/0x60 [ 196.250299] ? copy_msghdr_from_user+0xce/0x140 [ 196.250508] sock_sendmsg+0x36/0x40 [ 196.250690] ___sys_sendmsg+0x292/0x2a0 [ 196.250881] ? _cond_resched+0x15/0x30 [ 196.251074] ? copy_termios+0x1e/0x70 [ 196.251261] ? _copy_to_user+0x22/0x30 [ 196.251575] ? tty_mode_ioctl+0x1c3/0x4e0 [ 196.251782] ? _cond_resched+0x15/0x30 [ 196.251972] ? mutex_lock+0xe/0x30 [ 196.252152] ? vvar_fault+0xd2/0x110 [ 196.252337] ? __do_fault+0x1f/0xc0 [ 196.252521] ? __handle_mm_fault+0xc1f/0x12d0 [ 196.252727] ? 
__sys_sendmsg+0x63/0xa0 [ 196.252919] __sys_sendmsg+0x63/0xa0 [ 196.253107] do_syscall_64+0x72/0x200 [ 196.253305] entry_SYSCALL_64_after_hwframe+0x3d/0xa2 [ 196.253530] RIP: 0033:0x7fc4480b0690 [ 196.253715] RSP: 002b:00007ffde9f252f8 EFLAGS: 00000246 ORIG_RAX: 000000000000002e [ 196.254053] RAX: ffffffffffffffda RBX: 0000000000000040 RCX: 00007fc4480b0690 [ 196.254331] RDX: 0000000000000000 RSI: 000000000060a360 RDI: 0000000000000003 [ 196.254608] RBP: 00007ffde9f253f0 R08: 00000000002d1e81 R09: 0000000000000002 [ 196.254884] R10: 00007ffde9f250c0 R11: 0000000000000246 R12: 0000000000b22070 [ 196.255205] R13: 20c49ba5e353f7cf R14: 431bde82d7b634db R15: 00007ffde9f278fe [ 196.255484] Code: a5 0f b6 45 c0 41 88 41 28 41 0f b6 41 2c 48 c1 e0 04 49 8b 54 01 38 49 8b 44 01 30 49 89 51 20 49 89 41 18 48 8b 83 b0 00 00 00 <48> 8b 30 49 8b 86 08 0b 00 00 48 8b 40 20 48 8b 50 08 48 0b 10 [ 196.256190] RIP: seg6_do_srh_encap+0x1ac/0x300 RSP: ffffb2ce00b23a60 [ 196.256445] CR2: 0000000000000000 [ 196.256676] ---[ end trace 71af7d093603885c ]--- Fixes: 8936ef7604c11 ("ipv6: sr: fix NULL pointer dereference when setting encap source address") Signed-off-by: Ahmed Abdelsalam Acked-by: David Lebrun Signed-off-by: David S. Miller --- net/ipv6/seg6_iptunnel.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c index f343e6f0fc95..5fe139484919 100644 --- a/net/ipv6/seg6_iptunnel.c +++ b/net/ipv6/seg6_iptunnel.c @@ -136,7 +136,7 @@ int seg6_do_srh_encap(struct sk_buff *skb, struct ipv6_sr_hdr *osrh, int proto) isrh->nexthdr = proto; hdr->daddr = isrh->segments[isrh->first_segment]; - set_tun_src(net, ip6_dst_idev(dst)->dev, &hdr->daddr, &hdr->saddr); + set_tun_src(net, dst->dev, &hdr->daddr, &hdr->saddr); #ifdef CONFIG_IPV6_SEG6_HMAC if (sr_has_hmac(isrh)) { From 7c5aba211dd61f41d737a2c51729eb9fdcd3edf4 Mon Sep 17 00:00:00 2001 From: Doron Roberts-Kedes Date: Fri, 20 Apr 2018 12:11:11 -0700 Subject: [PATCH 158/182] strparser: Do not call mod_delayed_work with a timeout of LONG_MAX struct sock's sk_rcvtimeo is initialized to LONG_MAX/MAX_SCHEDULE_TIMEOUT in sock_init_data. Calling mod_delayed_work with a timeout of LONG_MAX causes spurious execution of the work function. timer->expires is set equal to jiffies + LONG_MAX. When timer_base->clk falls behind the current value of jiffies, the delta between timer_base->clk and jiffies + LONG_MAX causes the expiration to be in the past. Returning early from strp_start_timer if timeo == LONG_MAX solves this problem. Found while testing net/tls_sw recv path. Fixes: 43a0c6751a322847 ("strparser: Stream parser for messages") Reviewed-by: Tejun Heo Signed-off-by: Doron Roberts-Kedes Signed-off-by: David S. Miller --- net/strparser/strparser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c index 805b139756db..092bebc70048 100644 --- a/net/strparser/strparser.c +++ b/net/strparser/strparser.c @@ -67,7 +67,7 @@ static void strp_abort_strp(struct strparser *strp, int err) static void strp_start_timer(struct strparser *strp, long timeo) { - if (timeo) + if (timeo && timeo != LONG_MAX) mod_delayed_work(strp_wq, &strp->msg_timer_work, timeo); } From f6cd14537ff9919081be19b9c53b9b19c0d3ea97 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 20 Apr 2018 15:15:03 -0400 Subject: [PATCH 159/182] net: sched: ife: signal not finding metaid We need to record stats for received metadata that we dont know how to process. 
Have find_decode_metaid() return -ENOENT to capture this. Signed-off-by: Alexander Aring Reviewed-by: Yotam Gigi Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/sched/act_ife.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index a5994cf0512b..49b8ab551fbe 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -652,7 +652,7 @@ static int find_decode_metaid(struct sk_buff *skb, struct tcf_ife_info *ife, } } - return 0; + return -ENOENT; } static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, From cc74eddd0ff325d57373cea99f642b787d7f76f5 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 20 Apr 2018 15:15:04 -0400 Subject: [PATCH 160/182] net: sched: ife: handle malformed tlv length There is currently no handling to check for an invalid tlv length. This patch adds such handling to avoid killing the kernel with a malformed ife packet. Signed-off-by: Alexander Aring Reviewed-by: Yotam Gigi Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- include/net/ife.h | 3 ++- net/ife/ife.c | 35 +++++++++++++++++++++++++++++++++-- net/sched/act_ife.c | 7 ++++++- 3 files changed, 41 insertions(+), 4 deletions(-) diff --git a/include/net/ife.h b/include/net/ife.h index 44b9c00f7223..e117617e3c34 100644 --- a/include/net/ife.h +++ b/include/net/ife.h @@ -12,7 +12,8 @@ void *ife_encode(struct sk_buff *skb, u16 metalen); void *ife_decode(struct sk_buff *skb, u16 *metalen); -void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen); +void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype, + u16 *dlen, u16 *totlen); int ife_tlv_meta_encode(void *skbdata, u16 attrtype, u16 dlen, const void *dval); diff --git a/net/ife/ife.c b/net/ife/ife.c index 7d1ec76e7f43..7fbe70a0af4b 100644 --- a/net/ife/ife.c +++ b/net/ife/ife.c @@ -92,12 +92,43 @@ struct meta_tlvhdr { __be16 len; }; +static bool __ife_tlv_meta_valid(const unsigned char *skbdata, + const unsigned char *ifehdr_end) +{ + const struct meta_tlvhdr *tlv; + u16 tlvlen; + + if (unlikely(skbdata + sizeof(*tlv) > ifehdr_end)) + return false; + + tlv = (const struct meta_tlvhdr *)skbdata; + tlvlen = ntohs(tlv->len); + + /* tlv length field is inc header, check on minimum */ + if (tlvlen < NLA_HDRLEN) + return false; + + /* overflow by NLA_ALIGN check */ + if (NLA_ALIGN(tlvlen) < tlvlen) + return false; + + if (unlikely(skbdata + NLA_ALIGN(tlvlen) > ifehdr_end)) + return false; + + return true; +} + /* Caller takes care of presenting data in network order */ -void *ife_tlv_meta_decode(void *skbdata, u16 *attrtype, u16 *dlen, u16 *totlen) +void *ife_tlv_meta_decode(void *skbdata, const void *ifehdr_end, u16 *attrtype, + u16 *dlen, u16 *totlen) { - struct meta_tlvhdr *tlv = (struct meta_tlvhdr *) skbdata; + struct meta_tlvhdr *tlv; + + if (!__ife_tlv_meta_valid(skbdata, ifehdr_end)) + return NULL; + tlv = (struct meta_tlvhdr *)skbdata; *dlen = ntohs(tlv->len) - NLA_HDRLEN; *attrtype = ntohs(tlv->type); diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index 49b8ab551fbe..8527cfdc446d 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -682,7 +682,12 @@ static int tcf_ife_decode(struct sk_buff *skb, const struct tc_action *a, u16 mtype; u16 dlen; - curr_data = ife_tlv_meta_decode(tlv_data, &mtype, &dlen, NULL); + curr_data = ife_tlv_meta_decode(tlv_data, ifehdr_end, &mtype, + &dlen, NULL); + if (!curr_data) { + qstats_drop_inc(this_cpu_ptr(ife->common.cpu_qstats)); + return TC_ACT_SHOT; + }
if (find_decode_metaid(skb, ife, mtype, dlen, curr_data)) { /* abuse overlimits to count when we receive metadata From d57493d6d1be26c8ac8516a4463bfe24956978eb Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 20 Apr 2018 15:15:05 -0400 Subject: [PATCH 161/182] net: sched: ife: check on metadata length This patch checks that enough skb data is available to dereference the ife header. If not, NULL is returned to signal a malformed ife packet. This avoids crashing the kernel from outside. Signed-off-by: Alexander Aring Reviewed-by: Yotam Gigi Acked-by: Jamal Hadi Salim Signed-off-by: David S. Miller --- net/ife/ife.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/ife/ife.c b/net/ife/ife.c index 7fbe70a0af4b..13bbf8cb6a39 100644 --- a/net/ife/ife.c +++ b/net/ife/ife.c @@ -69,6 +69,9 @@ void *ife_decode(struct sk_buff *skb, u16 *metalen) int total_pull; u16 ifehdrln; + if (!pskb_may_pull(skb, skb->dev->hard_header_len + IFE_METAHDRLEN)) + return NULL; + ifehdr = (struct ifeheadr *) (skb->data + skb->dev->hard_header_len); ifehdrln = ntohs(ifehdr->metalen); total_pull = skb->dev->hard_header_len + ifehdrln; From 660e309ddd6aa99bb4d2a859c4a0b56965e744ef Mon Sep 17 00:00:00 2001 From: Thomas Falcon Date: Fri, 20 Apr 2018 14:25:32 -0500 Subject: [PATCH 162/182] ibmvnic: Clean actual number of RX or TX pools Avoid using value stored in the login response buffer when cleaning TX and RX buffer pools since these could be inconsistent depending on the device state. Instead use the field in the driver's private data that tracks the number of active pools. Signed-off-by: Thomas Falcon Signed-off-by: David S. Miller --- drivers/net/ethernet/ibm/ibmvnic.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 2df01ad98df7..6e8d6a6f6aaf 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1128,7 +1128,7 @@ static void clean_rx_pools(struct ibmvnic_adapter *adapter) if (!adapter->rx_pool) return; - rx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs); + rx_scrqs = adapter->num_active_rx_pools; rx_entries = adapter->req_rx_add_entries_per_subcrq; /* Free any remaining skbs in the rx buffer pools */ @@ -1177,7 +1177,7 @@ static void clean_tx_pools(struct ibmvnic_adapter *adapter) if (!adapter->tx_pool || !adapter->tso_pool) return; - tx_scrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs); + tx_scrqs = adapter->num_active_tx_pools; /* Free any remaining skbs in the tx buffer pools */ for (i = 0; i < tx_scrqs; i++) { From 6d08b06e67cd117f6992c46611dfb4ce267cd71e Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 22 Apr 2018 19:20:09 -0700 Subject: [PATCH 163/182] Linux 4.17-rc2 --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index e811e0c509c5..83b6c541565a 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 4 PATCHLEVEL = 17 SUBLEVEL = 0 -EXTRAVERSION = -rc1 +EXTRAVERSION = -rc2 NAME = Fearless Coyote # *DOCUMENTATION* From 7e5a206ab686f098367b61aca989f5cdfa8114a3 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Fri, 20 Apr 2018 15:57:30 +0200 Subject: [PATCH 164/182] tcp: don't read out-of-bounds opsize The old code reads the "opsize" variable from out-of-bounds memory (first byte behind the segment) if a broken TCP segment ends directly after an opcode that is neither EOL nor NOP.
The result of the read isn't used for anything, so the worst thing that could theoretically happen is a pagefault; and since the physmap is usually mostly contiguous, even that seems pretty unlikely. The following C reproducer triggers the uninitialized read - however, you can't actually see anything happen unless you put something like a pr_warn() in tcp_parse_md5sig_option() to print the opsize. ==================================== #define _GNU_SOURCE #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include void systemf(const char *command, ...) { char *full_command; va_list ap; va_start(ap, command); if (vasprintf(&full_command, command, ap) == -1) err(1, "vasprintf"); va_end(ap); printf("systemf: <<<%s>>>\n", full_command); system(full_command); } char *devname; int tun_alloc(char *name) { int fd = open("/dev/net/tun", O_RDWR); if (fd == -1) err(1, "open tun dev"); static struct ifreq req = { .ifr_flags = IFF_TUN|IFF_NO_PI }; strcpy(req.ifr_name, name); if (ioctl(fd, TUNSETIFF, &req)) err(1, "TUNSETIFF"); devname = req.ifr_name; printf("device name: %s\n", devname); return fd; } #define IPADDR(a,b,c,d) (((a)<<0)+((b)<<8)+((c)<<16)+((d)<<24)) void sum_accumulate(unsigned int *sum, void *data, int len) { assert((len&2)==0); for (int i=0; i> 16) + (sum & 0xffff); sum = (sum >> 16) + (sum & 0xffff); return htons(~sum); } void fix_ip_sum(struct iphdr *ip) { unsigned int sum = 0; sum_accumulate(&sum, ip, sizeof(*ip)); ip->check = sum_final(sum); } void fix_tcp_sum(struct iphdr *ip, struct tcphdr *tcp) { unsigned int sum = 0; struct { unsigned int saddr; unsigned int daddr; unsigned char pad; unsigned char proto_num; unsigned short tcp_len; } fakehdr = { .saddr = ip->saddr, .daddr = ip->daddr, .proto_num = ip->protocol, .tcp_len = htons(ntohs(ip->tot_len) - ip->ihl*4) }; sum_accumulate(&sum, &fakehdr, sizeof(fakehdr)); sum_accumulate(&sum, tcp, tcp->doff*4); tcp->check = sum_final(sum); } int main(void) { int tun_fd = tun_alloc("inject_dev%d"); systemf("ip link set %s up", devname); systemf("ip addr add 192.168.42.1/24 dev %s", devname); struct { struct iphdr ip; struct tcphdr tcp; unsigned char tcp_opts[20]; } __attribute__((packed)) syn_packet = { .ip = { .ihl = sizeof(struct iphdr)/4, .version = 4, .tot_len = htons(sizeof(syn_packet)), .ttl = 30, .protocol = IPPROTO_TCP, /* FIXUP check */ .saddr = IPADDR(192,168,42,2), .daddr = IPADDR(192,168,42,1) }, .tcp = { .source = htons(1), .dest = htons(1337), .seq = 0x12345678, .doff = (sizeof(syn_packet.tcp)+sizeof(syn_packet.tcp_opts))/4, .syn = 1, .window = htons(64), .check = 0 /*FIXUP*/ }, .tcp_opts = { /* INVALID: trailing MD5SIG opcode after NOPs */ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 19 } }; fix_ip_sum(&syn_packet.ip); fix_tcp_sum(&syn_packet.ip, &syn_packet.tcp); while (1) { int write_res = write(tun_fd, &syn_packet, sizeof(syn_packet)); if (write_res != sizeof(syn_packet)) err(1, "packet write failed"); } } ==================================== Fixes: cfb6eeb4c860 ("[TCP]: MD5 Signature Option (RFC2385) support.") Signed-off-by: Jann Horn Signed-off-by: David S. 
Miller --- net/ipv4/tcp_input.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 367def6ddeda..e51c644484dc 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -3868,11 +3868,8 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th) int length = (th->doff << 2) - sizeof(*th); const u8 *ptr = (const u8 *)(th + 1); - /* If the TCP option is too short, we can short cut */ - if (length < TCPOLEN_MD5SIG) - return NULL; - - while (length > 0) { + /* If not enough data remaining, we can short cut */ + while (length >= TCPOLEN_MD5SIG) { int opcode = *ptr++; int opsize; From ddea788c63094f7c483783265563dd5b50052e28 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Sun, 22 Apr 2018 19:11:50 +0800 Subject: [PATCH 165/182] bonding: do not set slave_dev npinfo before slave_enable_netpoll in bond_enslave After Commit 8a8efa22f51b ("bonding: sync netpoll code with bridge"), it would set slave_dev npinfo in slave_enable_netpoll when enslaving a dev if bond->dev->npinfo was set. However now slave_dev npinfo is set with bond->dev->npinfo before calling slave_enable_netpoll. With slave_dev npinfo set, __netpoll_setup called in slave_enable_netpoll will not call slave dev's .ndo_netpoll_setup(). It causes that the lower dev of this slave dev can't set its npinfo. One way to reproduce it: # modprobe bonding # brctl addbr br0 # brctl addif br0 eth1 # ifconfig bond0 192.168.122.1/24 up # ifenslave bond0 eth2 # systemctl restart netconsole # ifenslave bond0 br0 # ifconfig eth2 down # systemctl restart netconsole The netpoll won't really work. This patch is to remove that slave_dev npinfo setting in bond_enslave(). Fixes: 8a8efa22f51b ("bonding: sync netpoll code with bridge") Signed-off-by: Xin Long Signed-off-by: David S. Miller --- drivers/net/bonding/bond_main.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index b7b113018853..718e4914e3a0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1660,8 +1660,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, } /* switch(bond_mode) */ #ifdef CONFIG_NET_POLL_CONTROLLER - slave_dev->npinfo = bond->dev->npinfo; - if (slave_dev->npinfo) { + if (bond->dev->npinfo) { if (slave_enable_netpoll(new_slave)) { netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n"); res = -EBUSY; From aa8f8778493c85fff480cdf8b349b1e1dcb5f243 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Sun, 22 Apr 2018 18:29:23 -0700 Subject: [PATCH 166/182] ipv6: add RTA_TABLE and RTA_PREFSRC to rtm_ipv6_policy KMSAN reported use of uninit-value that I tracked to lack of proper size check on RTA_TABLE attribute. I also believe RTA_PREFSRC lacks a similar check. Fixes: 86872cb57925 ("[IPv6] route: FIB6 configuration using struct fib6_config") Fixes: c3968a857a6b ("ipv6: RTA_PREFSRC support for ipv6 route source address selection") Signed-off-by: Eric Dumazet Reported-by: syzbot Acked-by: David Ahern Signed-off-by: David S. 
Miller --- net/ipv6/route.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 49b954d6d0fa..cde7d8251377 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -3975,6 +3975,7 @@ void rt6_mtu_change(struct net_device *dev, unsigned int mtu) static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) }, + [RTA_PREFSRC] = { .len = sizeof(struct in6_addr) }, [RTA_OIF] = { .type = NLA_U32 }, [RTA_IIF] = { .type = NLA_U32 }, [RTA_PRIORITY] = { .type = NLA_U32 }, @@ -3986,6 +3987,7 @@ static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = { [RTA_EXPIRES] = { .type = NLA_U32 }, [RTA_UID] = { .type = NLA_U32 }, [RTA_MARK] = { .type = NLA_U32 }, + [RTA_TABLE] = { .type = NLA_U32 }, }; static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, From b6a930fa88083b41d26ddf1cab95cbd740936c22 Mon Sep 17 00:00:00 2001 From: Jingju Hou Date: Mon, 23 Apr 2018 15:22:49 +0800 Subject: [PATCH 167/182] net: phy: marvell: clear wol event before setting it If WOL event happened once, the LED[2] interrupt pin will not be cleared unless we read the CSISR register. If interrupts are in use, the normal interrupt handling will clear the WOL event. Let's clear the WOL event before enabling it if !phy_interrupt_is_valid(). Signed-off-by: Jingju Hou Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/phy/marvell.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index c22e8e383247..25e2a099b71c 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -1393,6 +1393,15 @@ static int m88e1318_set_wol(struct phy_device *phydev, if (err < 0) goto error; + /* If WOL event happened once, the LED[2] interrupt pin + * will not be cleared unless we reading the interrupt status + * register. If interrupts are in use, the normal interrupt + * handling will clear the WOL event. Clear the WOL event + * before enabling it if !phy_interrupt_is_valid() + */ + if (!phy_interrupt_is_valid(phydev)) + phy_read(phydev, MII_M1011_IEVENT); + /* Enable the WOL interrupt */ err = __phy_modify(phydev, MII_88E1318S_PHY_CSIER, 0, MII_88E1318S_PHY_CSIER_WOL_EIE); From eb1c28c05894a4b1f6b56c5bf072205e64cfa280 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 23 Apr 2018 16:15:14 +0200 Subject: [PATCH 168/182] l2tp: check sockaddr length in pppol2tp_connect() Check sockaddr_len before dereferencing sp->sa_protocol, to ensure that it actually points to valid data. Fixes: fd558d186df2 ("l2tp: Split pppol2tp patch into separate l2tp and ppp parts") Reported-by: syzbot+a70ac890b23b1bf29f5c@syzkaller.appspotmail.com Signed-off-by: Guillaume Nault Signed-off-by: David S. 
Miller --- net/l2tp/l2tp_ppp.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 1404bc1c1bb7..1fd9e145076a 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -619,6 +619,13 @@ static int pppol2tp_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); error = -EINVAL; + + if (sockaddr_len != sizeof(struct sockaddr_pppol2tp) && + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3) && + sockaddr_len != sizeof(struct sockaddr_pppol2tpin6) && + sockaddr_len != sizeof(struct sockaddr_pppol2tpv3in6)) + goto end; + if (sp->sa_protocol != PX_PROTO_OL2TP) goto end; From a49e2f5d5fb141884452ddb428f551b123d436b5 Mon Sep 17 00:00:00 2001 From: Guillaume Nault Date: Mon, 23 Apr 2018 16:38:27 +0200 Subject: [PATCH 169/182] pppoe: check sockaddr length in pppoe_connect() We must validate sockaddr_len, otherwise userspace can pass fewer data than we expect and we end up accessing invalid data. Fixes: 224cf5ad14c0 ("ppp: Move the PPP drivers") Reported-by: syzbot+4f03bdf92fdf9ef5ddab@syzkaller.appspotmail.com Signed-off-by: Guillaume Nault Signed-off-by: David S. Miller --- drivers/net/ppp/pppoe.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c index 1483bc7b01e1..7df07337d69c 100644 --- a/drivers/net/ppp/pppoe.c +++ b/drivers/net/ppp/pppoe.c @@ -620,6 +620,10 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr, lock_sock(sk); error = -EINVAL; + + if (sockaddr_len != sizeof(struct sockaddr_pppox)) + goto end; + if (sp->sa_protocol != PX_PROTO_OE) goto end; From 4d945663a6a0acf3cbe45940503f2eb9584bfee7 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 23 Apr 2018 11:43:08 -0500 Subject: [PATCH 170/182] amd-xgbe: Add pre/post auto-negotiation phy hooks Add hooks to the driver auto-negotiation (AN) flow to allow the different phy implementations to perform any steps necessary to improve AN. Signed-off-by: Tom Lendacky Signed-off-by: David S. 
Miller --- drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 16 ++++++++++++++-- drivers/net/ethernet/amd/xgbe/xgbe.h | 5 +++++ 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index 072b9f664597..e3d361e242aa 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -437,6 +437,9 @@ static void xgbe_an73_disable(struct xgbe_prv_data *pdata) static void xgbe_an_restart(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_pre) + pdata->phy_if.phy_impl.an_pre(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -453,6 +456,9 @@ static void xgbe_an_restart(struct xgbe_prv_data *pdata) static void xgbe_an_disable(struct xgbe_prv_data *pdata) { + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + switch (pdata->an_mode) { case XGBE_AN_MODE_CL73: case XGBE_AN_MODE_CL73_REDRV: @@ -637,11 +643,11 @@ static enum xgbe_an xgbe_an73_incompat_link(struct xgbe_prv_data *pdata) return XGBE_AN_NO_LINK; } - xgbe_an73_disable(pdata); + xgbe_an_disable(pdata); xgbe_switch_mode(pdata); - xgbe_an73_restart(pdata); + xgbe_an_restart(pdata); return XGBE_AN_INCOMPAT_LINK; } @@ -820,6 +826,9 @@ static void xgbe_an37_state_machine(struct xgbe_prv_data *pdata) pdata->an_result = pdata->an_state; pdata->an_state = XGBE_AN_READY; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL37 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } @@ -903,6 +912,9 @@ static void xgbe_an73_state_machine(struct xgbe_prv_data *pdata) pdata->kx_state = XGBE_RX_BPA; pdata->an_start = 0; + if (pdata->phy_if.phy_impl.an_post) + pdata->phy_if.phy_impl.an_post(pdata); + netif_dbg(pdata, link, pdata->netdev, "CL73 AN result: %s\n", xgbe_state_as_string(pdata->an_result)); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index ad102c8bac7b..fa0b51ea1b95 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -833,6 +833,7 @@ struct xgbe_hw_if { /* This structure represents implementation specific routines for an * implementation of a PHY. All routines are required unless noted below. * Optional routines: + * an_pre, an_post * kr_training_pre, kr_training_post */ struct xgbe_phy_impl_if { @@ -875,6 +876,10 @@ struct xgbe_phy_impl_if { /* Process results of auto-negotiation */ enum xgbe_mode (*an_outcome)(struct xgbe_prv_data *); + /* Pre/Post auto-negotiation support */ + void (*an_pre)(struct xgbe_prv_data *); + void (*an_post)(struct xgbe_prv_data *); + /* Pre/Post KR training enablement support */ void (*kr_training_pre)(struct xgbe_prv_data *); void (*kr_training_post)(struct xgbe_prv_data *); From 96f4d430c507ed4856048c2dc9c1a2ea5b5e74e4 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 23 Apr 2018 11:43:17 -0500 Subject: [PATCH 171/182] amd-xgbe: Improve KR auto-negotiation and training Update xgbe-phy-v2.c to make use of the auto-negotiation (AN) phy hooks to improve the ability to successfully complete Clause 73 AN when running at 10gbps. Hardware can sometimes have issues with CDR lock when the AN DME page exchange is being performed. The AN and KR training hooks are used as follows: - The pre AN hook is used to disable CDR tracking in the PHY so that the DME page exchange can be successfully and consistently completed. 
- The post KR training hook is used to re-enable the CDR tracking so that KR training can successfully complete. - The post AN hook is used to check for an unsuccessful AN which will increase a CDR tracking enablement delay (up to a maximum value). Add two debugfs entries to allow control over use of the CDR tracking workaround. The debugfs entries allow the CDR tracking workaround to be disabled and determine whether to re-enable CDR tracking before or after link training has been initiated. Also, with these changes the receiver reset cycle that is performed during the link status check can be performed less often. Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-common.h | 8 ++ drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c | 16 +++ drivers/net/ethernet/amd/xgbe/xgbe-main.c | 1 + drivers/net/ethernet/amd/xgbe/xgbe-mdio.c | 8 +- drivers/net/ethernet/amd/xgbe/xgbe-pci.c | 2 + drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 125 ++++++++++++++++++- drivers/net/ethernet/amd/xgbe/xgbe.h | 4 + 7 files changed, 160 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h index 7ea72ef11a55..d272dc6984ac 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h @@ -1321,6 +1321,10 @@ #define MDIO_VEND2_AN_STAT 0x8002 #endif +#ifndef MDIO_VEND2_PMA_CDR_CONTROL +#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056 +#endif + #ifndef MDIO_CTRL1_SPEED1G #define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100) #endif @@ -1369,6 +1373,10 @@ #define XGBE_AN_CL37_TX_CONFIG_MASK 0x08 #define XGBE_AN_CL37_MII_CTRL_8BIT 0x0100 +#define XGBE_PMA_CDR_TRACK_EN_MASK 0x01 +#define XGBE_PMA_CDR_TRACK_EN_OFF 0x00 +#define XGBE_PMA_CDR_TRACK_EN_ON 0x01 + /* Bit setting and getting macros * The get macro will extract the current bit field value from within * the variable diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c index 7d128be61310..b91143947ed2 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c @@ -519,6 +519,22 @@ void xgbe_debugfs_init(struct xgbe_prv_data *pdata) "debugfs_create_file failed\n"); } + if (pdata->vdata->an_cdr_workaround) { + pfile = debugfs_create_bool("an_cdr_workaround", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_workaround); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + + pfile = debugfs_create_bool("an_cdr_track_early", 0600, + pdata->xgbe_debugfs, + &pdata->debugfs_an_cdr_track_early); + if (!pfile) + netdev_err(pdata->netdev, + "debugfs_create_bool failed\n"); + } + kfree(buf); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-main.c b/drivers/net/ethernet/amd/xgbe/xgbe-main.c index 795e556d4a3f..441d0973957b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-main.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-main.c @@ -349,6 +349,7 @@ int xgbe_config_netdev(struct xgbe_prv_data *pdata) XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1); /* Call MDIO/PHY initialization routine */ + pdata->debugfs_an_cdr_workaround = pdata->vdata->an_cdr_workaround; ret = pdata->phy_if.phy_init(pdata); if (ret) return ret; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c index e3d361e242aa..1b45cd73a258 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-mdio.c @@ -432,6 +432,8 @@ static void 
xgbe_an73_disable(struct xgbe_prv_data *pdata) xgbe_an73_set(pdata, false, false); xgbe_an73_disable_interrupts(pdata); + pdata->an_start = 0; + netif_dbg(pdata, link, pdata->netdev, "CL73 AN disabled\n"); } @@ -511,11 +513,11 @@ static enum xgbe_an xgbe_an73_tx_training(struct xgbe_prv_data *pdata, XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg); - if (pdata->phy_if.phy_impl.kr_training_post) - pdata->phy_if.phy_impl.kr_training_post(pdata); - netif_dbg(pdata, link, pdata->netdev, "KR training initiated\n"); + + if (pdata->phy_if.phy_impl.kr_training_post) + pdata->phy_if.phy_impl.kr_training_post(pdata); } return XGBE_AN_PAGE_RECEIVED; diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index eb23f9ba1a9a..82d1f416ee2a 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -456,6 +456,7 @@ static const struct xgbe_version_data xgbe_v2a = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct xgbe_version_data xgbe_v2b = { @@ -470,6 +471,7 @@ static const struct xgbe_version_data xgbe_v2b = { .irq_reissue_support = 1, .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, + .an_cdr_workaround = 1, }; static const struct pci_device_id xgbe_pci_table[] = { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 3304a291aa96..b48efc04c4da 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -147,6 +147,14 @@ /* Rate-change complete wait/retry count */ #define XGBE_RATECHANGE_COUNT 500 +/* CDR delay values for KR support (in usec) */ +#define XGBE_CDR_DELAY_INIT 10000 +#define XGBE_CDR_DELAY_INC 10000 +#define XGBE_CDR_DELAY_MAX 100000 + +/* RRC frequency during link status check */ +#define XGBE_RRC_FREQUENCY 10 + enum xgbe_port_mode { XGBE_PORT_MODE_RSVD = 0, XGBE_PORT_MODE_BACKPLANE, @@ -355,6 +363,10 @@ struct xgbe_phy_data { unsigned int redrv_addr; unsigned int redrv_lane; unsigned int redrv_model; + + /* KR AN support */ + unsigned int phy_cdr_notrack; + unsigned int phy_cdr_delay; }; /* I2C, MDIO and GPIO lines are muxed, so only one device at a time */ @@ -2361,7 +2373,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) return 1; /* No link, attempt a receiver reset cycle */ - if (phy_data->rrc_count++) { + if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { phy_data->rrc_count = 0; xgbe_phy_rrc(pdata); } @@ -2669,6 +2681,103 @@ static bool xgbe_phy_port_enabled(struct xgbe_prv_data *pdata) return true; } +static void xgbe_phy_cdr_track(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->debugfs_an_cdr_workaround) + return; + + if (!phy_data->phy_cdr_notrack) + return; + + usleep_range(phy_data->phy_cdr_delay, + phy_data->phy_cdr_delay + 500); + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_ON); + + phy_data->phy_cdr_notrack = 0; +} + +static void xgbe_phy_cdr_notrack(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + if (!pdata->debugfs_an_cdr_workaround) + return; + + if (phy_data->phy_cdr_notrack) + return; + + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL, + XGBE_PMA_CDR_TRACK_EN_MASK, + XGBE_PMA_CDR_TRACK_EN_OFF); + + xgbe_phy_rrc(pdata); + + phy_data->phy_cdr_notrack = 1; +} + +static void 
xgbe_phy_kr_training_post(struct xgbe_prv_data *pdata) +{ + if (!pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_kr_training_pre(struct xgbe_prv_data *pdata) +{ + if (pdata->debugfs_an_cdr_track_early) + xgbe_phy_cdr_track(pdata); +} + +static void xgbe_phy_an_post(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_track(pdata); + + switch (pdata->an_result) { + case XGBE_AN_READY: + case XGBE_AN_COMPLETE: + break; + default: + if (phy_data->phy_cdr_delay < XGBE_CDR_DELAY_MAX) + phy_data->phy_cdr_delay += XGBE_CDR_DELAY_INC; + else + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + break; + } + break; + default: + break; + } +} + +static void xgbe_phy_an_pre(struct xgbe_prv_data *pdata) +{ + struct xgbe_phy_data *phy_data = pdata->phy_data; + + switch (pdata->an_mode) { + case XGBE_AN_MODE_CL73: + case XGBE_AN_MODE_CL73_REDRV: + if (phy_data->cur_mode != XGBE_MODE_KR) + break; + + xgbe_phy_cdr_notrack(pdata); + break; + default: + break; + } +} + static void xgbe_phy_stop(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -2680,6 +2789,9 @@ static void xgbe_phy_stop(struct xgbe_prv_data *pdata) xgbe_phy_sfp_reset(phy_data); xgbe_phy_sfp_mod_absent(pdata); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* Power off the PHY */ xgbe_phy_power_off(pdata); @@ -2712,6 +2824,9 @@ static int xgbe_phy_start(struct xgbe_prv_data *pdata) /* Start in highest supported mode */ xgbe_phy_set_mode(pdata, phy_data->start_mode); + /* Reset CDR support */ + xgbe_phy_cdr_track(pdata); + /* After starting the I2C controller, we can check for an SFP */ switch (phy_data->port_mode) { case XGBE_PORT_MODE_SFP: @@ -3019,6 +3134,8 @@ static int xgbe_phy_init(struct xgbe_prv_data *pdata) } } + phy_data->phy_cdr_delay = XGBE_CDR_DELAY_INIT; + /* Register for driving external PHYs */ mii = devm_mdiobus_alloc(pdata->dev); if (!mii) { @@ -3071,4 +3188,10 @@ void xgbe_init_function_ptrs_phy_v2(struct xgbe_phy_if *phy_if) phy_impl->an_advertising = xgbe_phy_an_advertising; phy_impl->an_outcome = xgbe_phy_an_outcome; + + phy_impl->an_pre = xgbe_phy_an_pre; + phy_impl->an_post = xgbe_phy_an_post; + + phy_impl->kr_training_pre = xgbe_phy_kr_training_pre; + phy_impl->kr_training_post = xgbe_phy_kr_training_post; } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index fa0b51ea1b95..95d4b56448c6 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -994,6 +994,7 @@ struct xgbe_version_data { unsigned int irq_reissue_support; unsigned int tx_desc_prefetch; unsigned int rx_desc_prefetch; + unsigned int an_cdr_workaround; }; struct xgbe_vxlan_data { @@ -1262,6 +1263,9 @@ struct xgbe_prv_data { unsigned int debugfs_xprop_reg; unsigned int debugfs_xi2c_reg; + + bool debugfs_an_cdr_workaround; + bool debugfs_an_cdr_track_early; }; /* Function prototypes*/ From 117df655f8ed51adb6e6b163812a06ebeae9f453 Mon Sep 17 00:00:00 2001 From: Tom Lendacky Date: Mon, 23 Apr 2018 11:43:34 -0500 Subject: [PATCH 172/182] amd-xgbe: Only use the SFP supported transceiver signals The SFP eeprom indicates the transceiver signals (Rx LOS, Tx Fault, etc.) that it supports. Update the driver to include checking the eeprom data when deciding whether to use a transceiver signal. 
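A minimal standalone sketch of the three-way check this patch adds for a transceiver signal such as Rx LOS. The structure mirrors xgbe_phy_check_sfp_rx_los() in the diff below, but the type, constant, and function names here are simplified stand-ins, not the driver's identifiers:

#include <stdbool.h>
#include <stdint.h>

#define SFP_EXTD_OPT1        1          /* options byte within the extended EEPROM data */
#define SFP_EXTD_OPT1_RX_LOS (1u << 1)  /* module implements the Rx LOS signal          */

#define GPIO_NO_RX_LOS       (1u << 0)  /* board provides no GPIO line for Rx LOS       */

struct sfp_signal_state {
	uint8_t      eeprom_extd[64];   /* extended EEPROM page read from the module */
	unsigned int gpio_mask;         /* signals with no GPIO line on this board   */
	unsigned int gpio_inputs;       /* latest GPIO input readings                */
	unsigned int gpio_rx_los;       /* GPIO bit number wired to Rx LOS           */
};

static bool sfp_rx_los_asserted(const struct sfp_signal_state *s)
{
	/* The module must advertise the signal in its EEPROM options byte... */
	if (!(s->eeprom_extd[SFP_EXTD_OPT1] & SFP_EXTD_OPT1_RX_LOS))
		return false;
	/* ...the board must actually route a GPIO for it... */
	if (s->gpio_mask & GPIO_NO_RX_LOS)
		return false;
	/* ...and the GPIO must currently read as asserted. */
	return s->gpio_inputs & (1u << s->gpio_rx_los);
}
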
Signed-off-by: Tom Lendacky Signed-off-by: David S. Miller --- drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c | 71 ++++++++++++++++----- 1 file changed, 54 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index b48efc04c4da..aac884314000 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -253,6 +253,10 @@ enum xgbe_sfp_speed { #define XGBE_SFP_BASE_VENDOR_SN 4 #define XGBE_SFP_BASE_VENDOR_SN_LEN 16 +#define XGBE_SFP_EXTD_OPT1 1 +#define XGBE_SFP_EXTD_OPT1_RX_LOS BIT(1) +#define XGBE_SFP_EXTD_OPT1_TX_FAULT BIT(3) + #define XGBE_SFP_EXTD_DIAG 28 #define XGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2) @@ -332,6 +336,7 @@ struct xgbe_phy_data { unsigned int sfp_gpio_address; unsigned int sfp_gpio_mask; + unsigned int sfp_gpio_inputs; unsigned int sfp_gpio_rx_los; unsigned int sfp_gpio_tx_fault; unsigned int sfp_gpio_mod_absent; @@ -986,6 +991,49 @@ static void xgbe_phy_sfp_external_phy(struct xgbe_prv_data *pdata) phy_data->sfp_phy_avail = 1; } +static bool xgbe_phy_check_sfp_rx_los(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_RX_LOS)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_rx_los)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_tx_fault(struct xgbe_phy_data *phy_data) +{ + u8 *sfp_extd = phy_data->sfp_eeprom.extd; + + if (!(sfp_extd[XGBE_SFP_EXTD_OPT1] & XGBE_SFP_EXTD_OPT1_TX_FAULT)) + return false; + + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_TX_FAULT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_tx_fault)) + return true; + + return false; +} + +static bool xgbe_phy_check_sfp_mod_absent(struct xgbe_phy_data *phy_data) +{ + if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) + return false; + + if (phy_data->sfp_gpio_inputs & (1 << phy_data->sfp_gpio_mod_absent)) + return true; + + return false; +} + static bool xgbe_phy_belfuse_parse_quirks(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; @@ -1031,6 +1079,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) if (sfp_base[XGBE_SFP_BASE_EXT_ID] != XGBE_SFP_EXT_ID_SFP) return; + /* Update transceiver signals (eeprom extd/options) */ + phy_data->sfp_tx_fault = xgbe_phy_check_sfp_tx_fault(phy_data); + phy_data->sfp_rx_los = xgbe_phy_check_sfp_rx_los(phy_data); + if (xgbe_phy_sfp_parse_quirks(pdata)) return; @@ -1196,7 +1248,6 @@ static int xgbe_phy_sfp_read_eeprom(struct xgbe_prv_data *pdata) static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) { struct xgbe_phy_data *phy_data = pdata->phy_data; - unsigned int gpio_input; u8 gpio_reg, gpio_ports[2]; int ret; @@ -1211,23 +1262,9 @@ static void xgbe_phy_sfp_signals(struct xgbe_prv_data *pdata) return; } - gpio_input = (gpio_ports[1] << 8) | gpio_ports[0]; - - if (phy_data->sfp_gpio_mask & XGBE_GPIO_NO_MOD_ABSENT) { - /* No GPIO, just assume the module is present for now */ - phy_data->sfp_mod_absent = 0; - } else { - if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent))) - phy_data->sfp_mod_absent = 0; - } - - if (!(phy_data->sfp_gpio_mask & XGBE_GPIO_NO_RX_LOS) && - (gpio_input & (1 << phy_data->sfp_gpio_rx_los))) - phy_data->sfp_rx_los = 1; + phy_data->sfp_gpio_inputs = (gpio_ports[1] << 8) | gpio_ports[0]; - if (!(phy_data->sfp_gpio_mask & 
XGBE_GPIO_NO_TX_FAULT) && - (gpio_input & (1 << phy_data->sfp_gpio_tx_fault))) - phy_data->sfp_tx_fault = 1; + phy_data->sfp_mod_absent = xgbe_phy_check_sfp_mod_absent(phy_data); } static void xgbe_phy_sfp_mod_absent(struct xgbe_prv_data *pdata) From 9cf2f437ca5b39828984064fad213e68fc17ef11 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 24 Apr 2018 14:33:37 +0800 Subject: [PATCH 173/182] team: fix netconsole setup over team MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The same fix in Commit dbe173079ab5 ("bridge: fix netconsole setup over bridge") is also needed for team driver. While at it, remove the unnecessary parameter *team from team_port_enable_netpoll(). v1->v2: - fix it in a better way, as does bridge. Fixes: 0fb52a27a04a ("team: cleanup netpoll clode") Reported-by: João Avelino Bellomo Filho Signed-off-by: Xin Long Signed-off-by: David S. Miller --- drivers/net/team/team.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index acbe84967834..ddb6bf85a59c 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1072,14 +1072,11 @@ static void team_port_leave(struct team *team, struct team_port *port) } #ifdef CONFIG_NET_POLL_CONTROLLER -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int __team_port_enable_netpoll(struct team_port *port) { struct netpoll *np; int err; - if (!team->dev->npinfo) - return 0; - np = kzalloc(sizeof(*np), GFP_KERNEL); if (!np) return -ENOMEM; @@ -1093,6 +1090,14 @@ static int team_port_enable_netpoll(struct team *team, struct team_port *port) return err; } +static int team_port_enable_netpoll(struct team_port *port) +{ + if (!port->team->dev->npinfo) + return 0; + + return __team_port_enable_netpoll(port); +} + static void team_port_disable_netpoll(struct team_port *port) { struct netpoll *np = port->np; @@ -1107,7 +1112,7 @@ static void team_port_disable_netpoll(struct team_port *port) kfree(np); } #else -static int team_port_enable_netpoll(struct team *team, struct team_port *port) +static int team_port_enable_netpoll(struct team_port *port) { return 0; } @@ -1221,7 +1226,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, goto err_vids_add; } - err = team_port_enable_netpoll(team, port); + err = team_port_enable_netpoll(port); if (err) { netdev_err(dev, "Failed to enable netpoll on device %s\n", portname); @@ -1918,7 +1923,7 @@ static int team_netpoll_setup(struct net_device *dev, mutex_lock(&team->lock); list_for_each_entry(port, &team->port_list, list) { - err = team_port_enable_netpoll(team, port); + err = __team_port_enable_netpoll(port); if (err) { __team_netpoll_cleanup(team); break; From 39035bfdc3f18987aba04165060bfbfa10ffc1cd Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Tue, 27 Mar 2018 15:21:48 +0100 Subject: [PATCH 174/182] ixgbevf: ensure xdp_ring resources are free'd on error exit The current error handling for failed resource setup for xdp_ring data is a break out of the loop and returning 0 indicated everything was OK, when in fact it is not. Fix this by exiting via the error exit label err_setup_tx that will clean up the resources correctly and return and error status. 
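A generic userspace sketch of the goto-unwind idiom this fix relies on; the struct and helper names are hypothetical, not the ixgbevf code, but the shape matches: a failure inside the setup loop must jump to a cleanup label that frees what was already allocated and returns the real error, rather than breaking out of the loop and returning 0:

#include <stdlib.h>

/* Stand-ins for the driver's per-queue resources; purely illustrative. */
struct ring { void *desc; };

static int setup_one_ring(struct ring *r)
{
	r->desc = malloc(4096);
	return r->desc ? 0 : -12;            /* -ENOMEM */
}

static void free_one_ring(struct ring *r)
{
	free(r->desc);
	r->desc = NULL;
}

static int setup_all_rings(struct ring *rings, int count)
{
	int i, err;

	for (i = 0; i < count; i++) {
		err = setup_one_ring(&rings[i]);
		if (err)
			goto err_setup;      /* not "break" followed by "return 0" */
	}
	return 0;

err_setup:
	while (--i >= 0)
		free_one_ring(&rings[i]);    /* unwind the partial progress */
	return err;                          /* propagate the real error   */
}
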
Detected by CoverityScan, CID#1466879 ("Logically dead code") Fixes: 21092e9ce8b1 ("ixgbevf: Add support for XDP_TX action") Signed-off-by: Colin Ian King Tested-by: Andrew Bowers Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 3d9033f26eff..e3d04f226d57 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -3420,7 +3420,7 @@ static int ixgbevf_setup_all_tx_resources(struct ixgbevf_adapter *adapter) if (!err) continue; hw_dbg(&adapter->hw, "Allocation for XDP Queue %u failed\n", j); - break; + goto err_setup_tx; } return 0; From 2707df9773cd2cb8b0f35b8592431b301da9d352 Mon Sep 17 00:00:00 2001 From: Vinicius Costa Gomes Date: Fri, 30 Mar 2018 17:06:52 -0700 Subject: [PATCH 175/182] igb: Fix the transmission mode of queue 0 for Qav mode When Qav mode is enabled, queue 0 should be kept on Stream Reservation mode. From the i210 datasheet, section 8.12.19: "Note: Queue0 QueueMode must be set to 1b when TransmitMode is set to Qav." ("QueueMode 1b" represents the Stream Reservation mode) The solution is to give queue 0 the all the credits it might need, so it has priority over queue 1. A situation where this can happen is when cbs is "installed" only on queue 1, leaving queue 0 alone. For example: $ tc qdisc replace dev enp2s0 handle 100: parent root mqprio num_tc 3 \ map 2 2 1 0 2 2 2 2 2 2 2 2 2 2 2 2 queues 1@0 1@1 2@2 hw 0 $ tc qdisc replace dev enp2s0 parent 100:2 cbs locredit -1470 \ hicredit 30 sendslope -980000 idleslope 20000 offload 1 Signed-off-by: Vinicius Costa Gomes Tested-by: Aaron Brown Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/igb/igb_main.c | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index c1c0bc30a16d..cce7ada89255 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1700,7 +1700,22 @@ static void igb_configure_cbs(struct igb_adapter *adapter, int queue, WARN_ON(hw->mac.type != e1000_i210); WARN_ON(queue < 0 || queue > 1); - if (enable) { + if (enable || queue == 0) { + /* i210 does not allow the queue 0 to be in the Strict + * Priority mode while the Qav mode is enabled, so, + * instead of disabling strict priority mode, we give + * queue 0 the maximum of credits possible. + * + * See section 8.12.19 of the i210 datasheet, "Note: + * Queue0 QueueMode must be set to 1b when + * TransmitMode is set to Qav." + */ + if (queue == 0 && !enable) { + /* max "linkspeed" idleslope in kbps */ + idleslope = 1000000; + hicredit = ETH_FRAME_LEN; + } + set_tx_desc_fetch_prio(hw, queue, TX_QUEUE_PRIO_HIGH); set_queue_mode(hw, queue, QUEUE_MODE_STREAM_RESERVATION); From d332a38c9519e0208f04da465bc88427db3485f6 Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Tue, 10 Apr 2018 10:49:49 -0700 Subject: [PATCH 176/182] ice: Fix initialization for num_nodes_added ice_sched_add_nodes_to_layer is used recursively, and so we start with num_nodes_added being 0. This way, in case of an error or if num_nodes is NULL, the function just returns 0 to indicate that no nodes were added. 
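A small sketch of the output-parameter convention the fix enforces, with generic names rather than the ice code: the count is zeroed before any early return, so every caller, at every recursion level, reads a defined value even on the error and no-op paths:

static int add_nodes(int requested, int *added)
{
	*added = 0;                  /* must happen before any return            */

	if (!requested)
		return 0;            /* nothing to do; *added is already valid   */
	if (requested > 16)
		return -22;          /* -EINVAL; caller still sees *added == 0   */

	*added = requested;          /* success path updates the real count      */
	return 0;
}
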
Fixes: 5513b920a4f7 ("ice: Update Tx scheduler tree for VSI multi-Tx queue support") Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_sched.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index f16ff3e4a840..2e6c1d92cc88 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -751,14 +751,14 @@ ice_sched_add_nodes_to_layer(struct ice_port_info *pi, u16 num_added = 0; u32 temp; + *num_nodes_added = 0; + if (!num_nodes) return status; if (!parent || layer < hw->sw_entry_point_layer) return ICE_ERR_PARAM; - *num_nodes_added = 0; - /* max children per node per layer */ max_child_nodes = le16_to_cpu(hw->layer_info[parent->tx_sched_layer].max_children); From 34357a90d5ca8228df4f88b21197f970285b209b Mon Sep 17 00:00:00 2001 From: Anirudh Venkataramanan Date: Wed, 11 Apr 2018 10:41:47 -0700 Subject: [PATCH 177/182] ice: Fix incorrect comment for action type Action type 5 defines large action generic values. Fix comment to reflect that better. Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 5b13ca1bd85f..7dc5f045e969 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -586,7 +586,7 @@ struct ice_sw_rule_lg_act { #define ICE_LG_ACT_MIRROR_VSI_ID_S 3 #define ICE_LG_ACT_MIRROR_VSI_ID_M (0x3FF << ICE_LG_ACT_MIRROR_VSI_ID_S) - /* Action type = 5 - Large Action */ + /* Action type = 5 - Generic Value */ #define ICE_LG_ACT_GENERIC 0x5 #define ICE_LG_ACT_GENERIC_VALUE_S 3 #define ICE_LG_ACT_GENERIC_VALUE_M (0xFFFF << ICE_LG_ACT_GENERIC_VALUE_S) From 30d84397affb0fcb11beaf049caabfcb1dac65a6 Mon Sep 17 00:00:00 2001 From: Ben Shelton Date: Wed, 11 Apr 2018 12:21:33 -0700 Subject: [PATCH 178/182] ice: Do not check INTEVENT bit for OICR interrupts According to the hardware spec, checking the INTEVENT bit isn't a reliable way to detect if an OICR interrupt has occurred. This is because this bit can be cleared by the hardware/firmware before the interrupt service routine has run. So instead, just check for OICR events every time. 
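A rough sketch of the handler structure after this change, reduced to plain C with made-up cause bits (the real bit positions live in ice_hw_autogen.h): the cause register is decoded unconditionally on every invocation, with no short-circuit on a summary "interrupt event" bit that firmware may already have cleared:

#include <stdbool.h>
#include <stdint.h>

#define CAUSE_GRST (1u << 20)   /* illustrative bit positions only, */
#define CAUSE_MAL  (1u << 23)   /* not the device's real layout     */

/* oicr holds the freshly read cause register value. */
static bool misc_intr_causes(uint32_t oicr)
{
	bool handled = false;

	/* Note: no "if (!(oicr & INTEVENT)) return false;" gate here. */
	if (oicr & CAUSE_GRST) {
		/* ...handle the reset warning... */
		handled = true;
	}
	if (oicr & CAUSE_MAL) {
		/* ...handle the malicious-driver event... */
		handled = true;
	}
	return handled;
}
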
Fixes: 940b61af02f4 ("ice: Initialize PF and setup miscellaneous interrupt") Signed-off-by: Ben Shelton Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 2 -- drivers/net/ethernet/intel/ice/ice_main.c | 4 ---- 2 files changed, 6 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 1b9e2ef48a9d..499904874b3f 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -121,8 +121,6 @@ #define PFINT_FW_CTL_CAUSE_ENA_S 30 #define PFINT_FW_CTL_CAUSE_ENA_M BIT(PFINT_FW_CTL_CAUSE_ENA_S) #define PFINT_OICR 0x0016CA00 -#define PFINT_OICR_INTEVENT_S 0 -#define PFINT_OICR_INTEVENT_M BIT(PFINT_OICR_INTEVENT_S) #define PFINT_OICR_HLP_RDY_S 14 #define PFINT_OICR_HLP_RDY_M BIT(PFINT_OICR_HLP_RDY_S) #define PFINT_OICR_CPM_RDY_S 15 diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 210b7910f1cd..5299caf55a7f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -1722,9 +1722,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) oicr = rd32(hw, PFINT_OICR); ena_mask = rd32(hw, PFINT_OICR_ENA); - if (!(oicr & PFINT_OICR_INTEVENT_M)) - goto ena_intr; - if (oicr & PFINT_OICR_GRST_M) { u32 reset; /* we have a reset warning */ @@ -1782,7 +1779,6 @@ static irqreturn_t ice_misc_intr(int __always_unused irq, void *data) } ret = IRQ_HANDLED; -ena_intr: /* re-enable interrupt causes that are not handled during this pass */ wr32(hw, PFINT_OICR_ENA, ena_mask); if (!test_bit(__ICE_DOWN, pf->state)) { From a6361f0ca4b25460f2cdf3235ebe8115f622901e Mon Sep 17 00:00:00 2001 From: Willem de Bruijn Date: Mon, 23 Apr 2018 17:37:03 -0400 Subject: [PATCH 179/182] packet: fix bitfield update race Updates to the bitfields in struct packet_sock are not atomic. Serialize these read-modify-write cycles. Move po->running into a separate variable. Its writes are protected by po->bind_lock (except for one startup case at packet_create). Also replace a textual precondition warning with lockdep annotation. All others are set only in packet_setsockopt. Serialize these updates by holding the socket lock. Analogous to other field updates, also hold the lock when testing whether a ring is active (pg_vec). Fixes: 8dc419447415 ("[PACKET]: Add optional checksum computation for recvmsg") Reported-by: DaeRyong Jeong Reported-by: Byoungyoung Lee Signed-off-by: Willem de Bruijn Signed-off-by: David S. Miller --- net/packet/af_packet.c | 60 +++++++++++++++++++++++++++++++----------- net/packet/internal.h | 10 +++---- 2 files changed, 49 insertions(+), 21 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index c31b0687396a..01f3515cada0 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -329,11 +329,11 @@ static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb) skb_set_queue_mapping(skb, queue_index); } -/* register_prot_hook must be invoked with the po->bind_lock held, +/* __register_prot_hook must be invoked through register_prot_hook * or from a context in which asynchronous accesses to the packet * socket is not possible (packet_create()). 
*/ -static void register_prot_hook(struct sock *sk) +static void __register_prot_hook(struct sock *sk) { struct packet_sock *po = pkt_sk(sk); @@ -348,8 +348,13 @@ static void register_prot_hook(struct sock *sk) } } -/* {,__}unregister_prot_hook() must be invoked with the po->bind_lock - * held. If the sync parameter is true, we will temporarily drop +static void register_prot_hook(struct sock *sk) +{ + lockdep_assert_held_once(&pkt_sk(sk)->bind_lock); + __register_prot_hook(sk); +} + +/* If the sync parameter is true, we will temporarily drop * the po->bind_lock and do a synchronize_net to make sure no * asynchronous packet processing paths still refer to the elements * of po->prot_hook. If the sync parameter is false, it is the @@ -359,6 +364,8 @@ static void __unregister_prot_hook(struct sock *sk, bool sync) { struct packet_sock *po = pkt_sk(sk); + lockdep_assert_held_once(&po->bind_lock); + po->running = 0; if (po->fanout) @@ -3252,7 +3259,7 @@ static int packet_create(struct net *net, struct socket *sock, int protocol, if (proto) { po->prot_hook.type = proto; - register_prot_hook(sk); + __register_prot_hook(sk); } mutex_lock(&net->packet.sklist_lock); @@ -3732,12 +3739,18 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; - po->tp_loss = !!val; - return 0; + + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->tp_loss = !!val; + ret = 0; + } + release_sock(sk); + return ret; } case PACKET_AUXDATA: { @@ -3748,7 +3761,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; + lock_sock(sk); po->auxdata = !!val; + release_sock(sk); return 0; } case PACKET_ORIGDEV: @@ -3760,7 +3775,9 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; + lock_sock(sk); po->origdev = !!val; + release_sock(sk); return 0; } case PACKET_VNET_HDR: @@ -3769,15 +3786,20 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (sock->type != SOCK_RAW) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (optlen < sizeof(val)) return -EINVAL; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; - po->has_vnet_hdr = !!val; - return 0; + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->has_vnet_hdr = !!val; + ret = 0; + } + release_sock(sk); + return ret; } case PACKET_TIMESTAMP: { @@ -3815,11 +3837,17 @@ packet_setsockopt(struct socket *sock, int level, int optname, char __user *optv if (optlen != sizeof(val)) return -EINVAL; - if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) - return -EBUSY; if (copy_from_user(&val, optval, sizeof(val))) return -EFAULT; - po->tp_tx_has_off = !!val; + + lock_sock(sk); + if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) { + ret = -EBUSY; + } else { + po->tp_tx_has_off = !!val; + ret = 0; + } + release_sock(sk); return 0; } case PACKET_QDISC_BYPASS: diff --git a/net/packet/internal.h b/net/packet/internal.h index a1d2b2319ae9..3bb7c5fb3bff 100644 --- a/net/packet/internal.h +++ b/net/packet/internal.h @@ -112,10 +112,12 @@ struct packet_sock { int copy_thresh; spinlock_t bind_lock; struct mutex pg_vec_lock; - unsigned int running:1, /* prot_hook 
is attached*/ - auxdata:1, + unsigned int running; /* bind_lock must be held */ + unsigned int auxdata:1, /* writer must hold sock lock */ origdev:1, - has_vnet_hdr:1; + has_vnet_hdr:1, + tp_loss:1, + tp_tx_has_off:1; int pressure; int ifindex; /* bound device */ __be16 num; @@ -125,8 +127,6 @@ struct packet_sock { enum tpacket_versions tp_version; unsigned int tp_hdrlen; unsigned int tp_reserve; - unsigned int tp_loss:1; - unsigned int tp_tx_has_off:1; unsigned int tp_tstamp; struct net_device __rcu *cached_dev; int (*xmit)(struct sk_buff *skb); From d805c5209350ae725e3a1ee0204ba27d9e75ce3e Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Mon, 23 Apr 2018 15:51:38 -0700 Subject: [PATCH 180/182] net: ethtool: Add missing kernel doc for FEC parameters While adding support for ethtool::get_fecparam and set_fecparam, kernel doc for these functions was missed, add those. Fixes: 1a5f3da20bd9 ("net: ethtool: add support for forward error correction modes") Signed-off-by: Florian Fainelli Acked-by: Roopa Prabhu Signed-off-by: David S. Miller --- include/linux/ethtool.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index ebe41811ed34..b32cd2062f18 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -310,6 +310,8 @@ bool ethtool_convert_link_mode_to_legacy_u32(u32 *legacy_u32, * fields should be ignored (use %__ETHTOOL_LINK_MODE_MASK_NBITS * instead of the latter), any change to them will be overwritten * by kernel. Returns a negative error code or zero. + * @get_fecparam: Get the network device Forward Error Correction parameters. + * @set_fecparam: Set the network device Forward Error Correction parameters. * * All operations are optional (i.e. the function pointer may be set * to %NULL) and callers must take this into account. Callers must From f8d6203780b73c07dc49ee421fedae8edb76b6e4 Mon Sep 17 00:00:00 2001 From: Edward Cree Date: Tue, 24 Apr 2018 17:09:30 +0100 Subject: [PATCH 181/182] sfc: ARFS filter IDs Associate an arbitrary ID with each ARFS filter, allowing to properly query for expiry. The association is maintained in a hash table, which is protected by a spinlock. v3: fix build warnings when CONFIG_RFS_ACCEL is disabled (thanks lkp-robot). v2: fixed uninitialised variable (thanks davem and lkp-robot). Fixes: 3af0f34290f6 ("sfc: replace asynchronous filter operations") Signed-off-by: Edward Cree Signed-off-by: David S. 
Miller --- drivers/net/ethernet/sfc/ef10.c | 80 ++++++++------ drivers/net/ethernet/sfc/efx.c | 143 ++++++++++++++++++++++++++ drivers/net/ethernet/sfc/efx.h | 21 ++++ drivers/net/ethernet/sfc/farch.c | 41 ++++++-- drivers/net/ethernet/sfc/net_driver.h | 36 +++++++ drivers/net/ethernet/sfc/rx.c | 62 ++++++++++- 6 files changed, 337 insertions(+), 46 deletions(-) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 83ce229f4eb7..63036d9bf3e6 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3999,29 +3999,6 @@ static void efx_ef10_prepare_flr(struct efx_nic *efx) atomic_set(&efx->active_queues, 0); } -static bool efx_ef10_filter_equal(const struct efx_filter_spec *left, - const struct efx_filter_spec *right) -{ - if ((left->match_flags ^ right->match_flags) | - ((left->flags ^ right->flags) & - (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) - return false; - - return memcmp(&left->outer_vid, &right->outer_vid, - sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; -} - -static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec) -{ - BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, - (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, - 0); - /* XXX should we randomise the initval? */ -} - /* Decide whether a filter should be exclusive or else should allow * delivery to additional recipients. Currently we decide that * filters for specific local unicast MAC and IP addresses are @@ -4346,7 +4323,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, goto out_unlock; match_pri = rc; - hash = efx_ef10_filter_hash(spec); + hash = efx_filter_spec_hash(spec); is_mc_recip = efx_filter_is_mc_recipient(spec); if (is_mc_recip) bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT); @@ -4378,7 +4355,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx, if (!saved_spec) { if (ins_index < 0) ins_index = i; - } else if (efx_ef10_filter_equal(spec, saved_spec)) { + } else if (efx_filter_spec_equal(spec, saved_spec)) { if (spec->priority < saved_spec->priority && spec->priority != EFX_FILTER_PRI_AUTO) { rc = -EPERM; @@ -4762,27 +4739,62 @@ static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx, static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, unsigned int filter_idx) { + struct efx_filter_spec *spec, saved_spec; struct efx_ef10_filter_table *table; - struct efx_filter_spec *spec; - bool ret; + struct efx_arfs_rule *rule = NULL; + bool ret = true, force = false; + u16 arfs_id; down_read(&efx->filter_sem); table = efx->filter_state; down_write(&table->lock); spec = efx_ef10_filter_entry_spec(table, filter_idx); - if (!spec || spec->priority != EFX_FILTER_PRI_HINT) { - ret = true; + if (!spec || spec->priority != EFX_FILTER_PRI_HINT) goto out_unlock; - } - if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, 0)) { - ret = false; - goto out_unlock; + spin_lock_bh(&efx->rps_hash_lock); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always return 0 to ARFS. 
*/ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, spec); + if (!rule) + /* ARFS table doesn't know of this filter, so remove it */ + goto expire; + arfs_id = rule->arfs_id; + ret = efx_rps_check_rule(rule, filter_idx, &force); + if (force) + goto expire; + if (!ret) { + spin_unlock_bh(&efx->rps_hash_lock); + goto out_unlock; + } } - + if (!rps_may_expire_flow(efx->net_dev, spec->dmaq_id, flow_id, arfs_id)) + ret = false; + else if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; +expire: + saved_spec = *spec; /* remove operation will kfree spec */ + spin_unlock_bh(&efx->rps_hash_lock); + /* At this point (since we dropped the lock), another thread might queue + * up a fresh insertion request (but the actual insertion will be held + * up by our possession of the filter table lock). In that case, it + * will set rule->filter_id to EFX_ARFS_FILTER_ID_PENDING, meaning that + * the rule is not removed by efx_rps_hash_del() below. + */ ret = efx_ef10_filter_remove_internal(efx, 1U << spec->priority, filter_idx, true) == 0; + /* While we can't safely dereference rule (we dropped the lock), we can + * still test it for NULL. + */ + if (ret && rule) { + /* Expiring, so remove entry from ARFS table */ + spin_lock_bh(&efx->rps_hash_lock); + efx_rps_hash_del(efx, &saved_spec); + spin_unlock_bh(&efx->rps_hash_lock); + } out_unlock: up_write(&table->lock); up_read(&efx->filter_sem); diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 692dd729ee2a..a4ebd8715494 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -3027,6 +3027,10 @@ static int efx_init_struct(struct efx_nic *efx, mutex_init(&efx->mac_lock); #ifdef CONFIG_RFS_ACCEL mutex_init(&efx->rps_mutex); + spin_lock_init(&efx->rps_hash_lock); + /* Failure to allocate is not fatal, but may degrade ARFS performance */ + efx->rps_hash_table = kcalloc(EFX_ARFS_HASH_TABLE_SIZE, + sizeof(*efx->rps_hash_table), GFP_KERNEL); #endif efx->phy_op = &efx_dummy_phy_operations; efx->mdio.dev = net_dev; @@ -3070,6 +3074,10 @@ static void efx_fini_struct(struct efx_nic *efx) { int i; +#ifdef CONFIG_RFS_ACCEL + kfree(efx->rps_hash_table); +#endif + for (i = 0; i < EFX_MAX_CHANNELS; i++) kfree(efx->channel[i]); @@ -3092,6 +3100,141 @@ void efx_update_sw_stats(struct efx_nic *efx, u64 *stats) stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops); } +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right) +{ + if ((left->match_flags ^ right->match_flags) | + ((left->flags ^ right->flags) & + (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) + return false; + + return memcmp(&left->outer_vid, &right->outer_vid, + sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) == 0; +} + +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) +{ + BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); + return jhash2((const u32 *)&spec->outer_vid, + (sizeof(struct efx_filter_spec) - + offsetof(struct efx_filter_spec, outer_vid)) / 4, + 0); +} + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force) +{ + if (rule->filter_id == EFX_ARFS_FILTER_ID_PENDING) { + /* ARFS is currently updating this entry, leave it */ + return false; + } + if (rule->filter_id == EFX_ARFS_FILTER_ID_ERROR) { + /* ARFS tried and failed to update this, so it's probably out + * of date. Remove the filter and the ARFS rule entry. 
+ */ + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + *force = true; + return true; + } else if (WARN_ON(rule->filter_id != filter_idx)) { /* can't happen */ + /* ARFS has moved on, so old filter is not needed. Since we did + * not mark the rule with EFX_ARFS_FILTER_ID_REMOVING, it will + * not be removed by efx_rps_hash_del() subsequently. + */ + *force = true; + return true; + } + /* Remove it iff ARFS wants to. */ + return true; +} + +struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + u32 hash = efx_filter_spec_hash(spec); + + WARN_ON(!spin_is_locked(&efx->rps_hash_lock)); + if (!efx->rps_hash_table) + return NULL; + return &efx->rps_hash_table[hash % EFX_ARFS_HASH_TABLE_SIZE]; +} + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) + return rule; + } + return NULL; +} + +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (!head) + return NULL; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + *new = false; + return rule; + } + } + rule = kmalloc(sizeof(*rule), GFP_ATOMIC); + *new = true; + if (rule) { + memcpy(&rule->spec, spec, sizeof(rule->spec)); + hlist_add_head(&rule->node, head); + } + return rule; +} + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec) +{ + struct efx_arfs_rule *rule; + struct hlist_head *head; + struct hlist_node *node; + + head = efx_rps_hash_bucket(efx, spec); + if (WARN_ON(!head)) + return; + hlist_for_each(node, head) { + rule = container_of(node, struct efx_arfs_rule, node); + if (efx_filter_spec_equal(spec, &rule->spec)) { + /* Someone already reused the entry. We know that if + * this check doesn't fire (i.e. filter_id == REMOVING) + * then the REMOVING mark was put there by our caller, + * because caller is holding a lock on filter table and + * only holders of that lock set REMOVING. + */ + if (rule->filter_id != EFX_ARFS_FILTER_ID_REMOVING) + return; + hlist_del(node); + kfree(rule); + return; + } + } + /* We didn't find it. */ + WARN_ON(1); +} +#endif + /* RSS contexts. 
We're using linked lists and crappy O(n) algorithms, because * (a) this is an infrequent control-plane operation and (b) n is small (max 64) */ diff --git a/drivers/net/ethernet/sfc/efx.h b/drivers/net/ethernet/sfc/efx.h index a3140e16fcef..3f759ebdcf10 100644 --- a/drivers/net/ethernet/sfc/efx.h +++ b/drivers/net/ethernet/sfc/efx.h @@ -186,6 +186,27 @@ static inline void efx_filter_rfs_expire(struct work_struct *data) {} #endif bool efx_filter_is_mc_recipient(const struct efx_filter_spec *spec); +bool efx_filter_spec_equal(const struct efx_filter_spec *left, + const struct efx_filter_spec *right); +u32 efx_filter_spec_hash(const struct efx_filter_spec *spec); + +#ifdef CONFIG_RFS_ACCEL +bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx, + bool *force); + +struct efx_arfs_rule *efx_rps_hash_find(struct efx_nic *efx, + const struct efx_filter_spec *spec); + +/* @new is written to indicate if entry was newly added (true) or if an old + * entry was found and returned (false). + */ +struct efx_arfs_rule *efx_rps_hash_add(struct efx_nic *efx, + const struct efx_filter_spec *spec, + bool *new); + +void efx_rps_hash_del(struct efx_nic *efx, const struct efx_filter_spec *spec); +#endif + /* RSS contexts */ struct efx_rss_context *efx_alloc_rss_context_entry(struct efx_nic *efx); struct efx_rss_context *efx_find_rss_context_entry(struct efx_nic *efx, u32 id); diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c index 7174ef5e5c5e..c72adf8b52ea 100644 --- a/drivers/net/ethernet/sfc/farch.c +++ b/drivers/net/ethernet/sfc/farch.c @@ -2905,18 +2905,45 @@ bool efx_farch_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id, { struct efx_farch_filter_state *state = efx->filter_state; struct efx_farch_filter_table *table; - bool ret = false; + bool ret = false, force = false; + u16 arfs_id; down_write(&state->lock); + spin_lock_bh(&efx->rps_hash_lock); table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP]; if (test_bit(index, table->used_bitmap) && - table->spec[index].priority == EFX_FILTER_PRI_HINT && - rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id, - flow_id, 0)) { - efx_farch_filter_table_clear_entry(efx, table, index); - ret = true; + table->spec[index].priority == EFX_FILTER_PRI_HINT) { + struct efx_arfs_rule *rule = NULL; + struct efx_filter_spec spec; + + efx_farch_filter_to_gen_spec(&spec, &table->spec[index]); + if (!efx->rps_hash_table) { + /* In the absence of the table, we always returned 0 to + * ARFS, so use the same to query it. 
+ */ + arfs_id = 0; + } else { + rule = efx_rps_hash_find(efx, &spec); + if (!rule) { + /* ARFS table doesn't know of this filter, remove it */ + force = true; + } else { + arfs_id = rule->arfs_id; + if (!efx_rps_check_rule(rule, index, &force)) + goto out_unlock; + } + } + if (force || rps_may_expire_flow(efx->net_dev, spec.dmaq_id, + flow_id, arfs_id)) { + if (rule) + rule->filter_id = EFX_ARFS_FILTER_ID_REMOVING; + efx_rps_hash_del(efx, &spec); + efx_farch_filter_table_clear_entry(efx, table, index); + ret = true; + } } - +out_unlock: + spin_unlock_bh(&efx->rps_hash_lock); up_write(&state->lock); return ret; } diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h index eea3808b3f25..65568925c3ef 100644 --- a/drivers/net/ethernet/sfc/net_driver.h +++ b/drivers/net/ethernet/sfc/net_driver.h @@ -734,6 +734,35 @@ struct efx_rss_context { }; #ifdef CONFIG_RFS_ACCEL +/* Order of these is important, since filter_id >= %EFX_ARFS_FILTER_ID_PENDING + * is used to test if filter does or will exist. + */ +#define EFX_ARFS_FILTER_ID_PENDING -1 +#define EFX_ARFS_FILTER_ID_ERROR -2 +#define EFX_ARFS_FILTER_ID_REMOVING -3 +/** + * struct efx_arfs_rule - record of an ARFS filter and its IDs + * @node: linkage into hash table + * @spec: details of the filter (used as key for hash table). Use efx->type to + * determine which member to use. + * @rxq_index: channel to which the filter will steer traffic. + * @arfs_id: filter ID which was returned to ARFS + * @filter_id: index in software filter table. May be + * %EFX_ARFS_FILTER_ID_PENDING if filter was not inserted yet, + * %EFX_ARFS_FILTER_ID_ERROR if filter insertion failed, or + * %EFX_ARFS_FILTER_ID_REMOVING if expiry is currently removing the filter. + */ +struct efx_arfs_rule { + struct hlist_node node; + struct efx_filter_spec spec; + u16 rxq_index; + u16 arfs_id; + s32 filter_id; +}; + +/* Size chosen so that the table is one page (4kB) */ +#define EFX_ARFS_HASH_TABLE_SIZE 512 + /** * struct efx_async_filter_insertion - Request to asynchronously insert a filter * @net_dev: Reference to the netdevice @@ -873,6 +902,10 @@ struct efx_async_filter_insertion { * @rps_expire_channel's @rps_flow_id * @rps_slot_map: bitmap of in-flight entries in @rps_slot * @rps_slot: array of ARFS insertion requests for efx_filter_rfs_work() + * @rps_hash_lock: Protects ARFS filter mapping state (@rps_hash_table and + * @rps_next_id). + * @rps_hash_table: Mapping between ARFS filters and their various IDs + * @rps_next_id: next arfs_id for an ARFS filter * @active_queues: Count of RX and TX queues that haven't been flushed and drained. * @rxq_flush_pending: Count of number of receive queues that need to be flushed. * Decremented when the efx_flush_rx_queue() is called. 
@@ -1029,6 +1062,9 @@ struct efx_nic { unsigned int rps_expire_index; unsigned long rps_slot_map; struct efx_async_filter_insertion rps_slot[EFX_RPS_MAX_IN_FLIGHT]; + spinlock_t rps_hash_lock; + struct hlist_head *rps_hash_table; + u32 rps_next_id; #endif atomic_t active_queues; diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c index 9c593c661cbf..64a94f242027 100644 --- a/drivers/net/ethernet/sfc/rx.c +++ b/drivers/net/ethernet/sfc/rx.c @@ -834,9 +834,29 @@ static void efx_filter_rfs_work(struct work_struct *data) struct efx_nic *efx = netdev_priv(req->net_dev); struct efx_channel *channel = efx_get_channel(efx, req->rxq_index); int slot_idx = req - efx->rps_slot; + struct efx_arfs_rule *rule; + u16 arfs_id = 0; int rc; rc = efx->type->filter_insert(efx, &req->spec, true); + if (efx->rps_hash_table) { + spin_lock_bh(&efx->rps_hash_lock); + rule = efx_rps_hash_find(efx, &req->spec); + /* The rule might have already gone, if someone else's request + * for the same spec was already worked and then expired before + * we got around to our work. In that case we have nothing + * tying us to an arfs_id, meaning that as soon as the filter + * is considered for expiry it will be removed. + */ + if (rule) { + if (rc < 0) + rule->filter_id = EFX_ARFS_FILTER_ID_ERROR; + else + rule->filter_id = rc; + arfs_id = rule->arfs_id; + } + spin_unlock_bh(&efx->rps_hash_lock); + } if (rc >= 0) { /* Remember this so we can check whether to expire the filter * later. @@ -848,18 +868,18 @@ static void efx_filter_rfs_work(struct work_struct *data) if (req->spec.ether_type == htons(ETH_P_IP)) netif_info(efx, rx_status, efx->net_dev, - "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d]\n", + "steering %s %pI4:%u:%pI4:%u to queue %u [flow %u filter %d id %u]\n", (req->spec.ip_proto == IPPROTO_TCP) ? "TCP" : "UDP", req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc); + req->rxq_index, req->flow_id, rc, arfs_id); else netif_info(efx, rx_status, efx->net_dev, - "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d]\n", + "steering %s [%pI6]:%u:[%pI6]:%u to queue %u [flow %u filter %d id %u]\n", (req->spec.ip_proto == IPPROTO_TCP) ? 
"TCP" : "UDP", req->spec.rem_host, ntohs(req->spec.rem_port), req->spec.loc_host, ntohs(req->spec.loc_port), - req->rxq_index, req->flow_id, rc); + req->rxq_index, req->flow_id, rc, arfs_id); } /* Release references */ @@ -872,8 +892,10 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, { struct efx_nic *efx = netdev_priv(net_dev); struct efx_async_filter_insertion *req; + struct efx_arfs_rule *rule; struct flow_keys fk; int slot_idx; + bool new; int rc; /* find a free slot */ @@ -926,12 +948,42 @@ int efx_filter_rfs(struct net_device *net_dev, const struct sk_buff *skb, req->spec.rem_port = fk.ports.src; req->spec.loc_port = fk.ports.dst; + if (efx->rps_hash_table) { + /* Add it to ARFS hash table */ + spin_lock(&efx->rps_hash_lock); + rule = efx_rps_hash_add(efx, &req->spec, &new); + if (!rule) { + rc = -ENOMEM; + goto out_unlock; + } + if (new) + rule->arfs_id = efx->rps_next_id++ % RPS_NO_FILTER; + rc = rule->arfs_id; + /* Skip if existing or pending filter already does the right thing */ + if (!new && rule->rxq_index == rxq_index && + rule->filter_id >= EFX_ARFS_FILTER_ID_PENDING) + goto out_unlock; + rule->rxq_index = rxq_index; + rule->filter_id = EFX_ARFS_FILTER_ID_PENDING; + spin_unlock(&efx->rps_hash_lock); + } else { + /* Without an ARFS hash table, we just use arfs_id 0 for all + * filters. This means if multiple flows hash to the same + * flow_id, all but the most recently touched will be eligible + * for expiry. + */ + rc = 0; + } + + /* Queue the request */ dev_hold(req->net_dev = net_dev); INIT_WORK(&req->work, efx_filter_rfs_work); req->rxq_index = rxq_index; req->flow_id = flow_id; schedule_work(&req->work); - return 0; + return rc; +out_unlock: + spin_unlock(&efx->rps_hash_lock); out_clear: clear_bit(slot_idx, &efx->rps_slot_map); return rc; From d6fef10c750e64f248543d2eee7c86a4a019f7ec Mon Sep 17 00:00:00 2001 From: Md Fahad Iqbal Polash Date: Mon, 16 Apr 2018 10:07:03 -0700 Subject: [PATCH 182/182] ice: Fix insufficient memory issue in ice_aq_manage_mac_read For the MAC read operation, the device can return up to two (LAN and WoL) MAC addresses. Without access to adequate memory, the device will return an error. Fixed this by allocating the right amount of memory. Also, logic to detect and copy the LAN MAC address into the port_info structure has been added. Note that the WoL MAC address is ignored currently as the WoL feature isn't supported yet. 
Fixes: dc49c7723676 ("ice: Get MAC/PHY/link info and scheduler topology") Signed-off-by: Md Fahad Iqbal Polash Signed-off-by: Anirudh Venkataramanan Tested-by: Tony Brelinski Signed-off-by: Jeff Kirsher --- drivers/net/ethernet/intel/ice/ice_common.c | 22 ++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 21977ec984c4..71d032cc5fa7 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -78,6 +78,7 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, struct ice_aq_desc desc; enum ice_status status; u16 flags; + u8 i; cmd = &desc.params.mac_read; @@ -98,8 +99,16 @@ ice_aq_manage_mac_read(struct ice_hw *hw, void *buf, u16 buf_size, return ICE_ERR_CFG; } - ether_addr_copy(hw->port_info->mac.lan_addr, resp->mac_addr); - ether_addr_copy(hw->port_info->mac.perm_addr, resp->mac_addr); + /* A single port can report up to two (LAN and WoL) addresses */ + for (i = 0; i < cmd->num_addr; i++) + if (resp[i].addr_type == ICE_AQC_MAN_MAC_ADDR_TYPE_LAN) { + ether_addr_copy(hw->port_info->mac.lan_addr, + resp[i].mac_addr); + ether_addr_copy(hw->port_info->mac.perm_addr, + resp[i].mac_addr); + break; + } + return 0; } @@ -464,9 +473,12 @@ enum ice_status ice_init_hw(struct ice_hw *hw) if (status) goto err_unroll_sched; - /* Get port MAC information */ - mac_buf_len = sizeof(struct ice_aqc_manage_mac_read_resp); - mac_buf = devm_kzalloc(ice_hw_to_dev(hw), mac_buf_len, GFP_KERNEL); + /* Get MAC information */ + /* A single port can report up to two (LAN and WoL) addresses */ + mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2, + sizeof(struct ice_aqc_manage_mac_read_resp), + GFP_KERNEL); + mac_buf_len = 2 * sizeof(struct ice_aqc_manage_mac_read_resp); if (!mac_buf) { status = ICE_ERR_NO_MEMORY;