Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf tooling fixes from Ingo Molnar:
 "These are all perf tooling changes: most of them are fixes.

  Note that the large CPU count related fixes go beyond regression
  fixes, but the IPI-flood symptoms are severe enough that I think it
  justifies their inclusion"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (38 commits)
  perf vendor events s390: Remove name from L1D_RO_EXCL_WRITES description
  perf vendor events s390: Fix counter long description for DTLB1_GPAGE_WRITES
  libtraceevent: Allow custom libdir path
  perf header: Fix false warning when there are no duplicate cache entries
  perf metricgroup: Fix printing event names of metric group with multiple events
  perf/x86/pmu-events: Fix Kernel_Utilization metric
  perf top: Do not bail out when perf_env__read_cpuid() returns ENOSYS
  perf arch: Make the default get_cpuid() return compatible error
  tools headers kvm: Sync linux/kvm.h with the kernel sources
  tools headers UAPI: Update tools's copy of drm.h headers
  tools headers UAPI: Sync drm/i915_drm.h with the kernel sources
  perf inject: Fix processing of ID index for injected instruction tracing
  perf report: Bail out --mem-mode if mem info is not available
  perf report: Make -F more strict like -s
  perf report/top TUI: Replace pr_err() with ui__error()
  libtraceevent: Copy pkg-config file to output folder when using O=
  libtraceevent: Fix lib installation with O=
  perf kvm: Clarify the 'perf kvm' -i and -o command line options
  tools arch x86: Sync asm/cpufeatures.h with the kernel sources
  perf beauty: Add CLEAR_SIGHAND support for clone's flags arg
  ...
torvalds committed Dec 17, 2019
2 parents 9e8a0d5 + 57e04ee commit 89c683c
Showing 65 changed files with 976 additions and 289 deletions.
3 changes: 2 additions & 1 deletion tools/arch/arm/include/uapi/asm/kvm.h
@@ -131,8 +131,9 @@ struct kvm_vcpu_events {
 	struct {
 		__u8 serror_pending;
 		__u8 serror_has_esr;
+		__u8 ext_dabt_pending;
 		/* Align it to 8 bytes */
-		__u8 pad[6];
+		__u8 pad[5];
 		__u64 serror_esr;
 	} exception;
 	__u32 reserved[12];
5 changes: 4 additions & 1 deletion tools/arch/arm64/include/uapi/asm/kvm.h
@@ -164,8 +164,9 @@ struct kvm_vcpu_events {
 	struct {
 		__u8 serror_pending;
 		__u8 serror_has_esr;
+		__u8 ext_dabt_pending;
 		/* Align it to 8 bytes */
-		__u8 pad[6];
+		__u8 pad[5];
 		__u64 serror_esr;
 	} exception;
 	__u32 reserved[12];
@@ -323,6 +324,8 @@ struct kvm_vcpu_events {
 #define KVM_ARM_VCPU_TIMER_CTRL 1
 #define KVM_ARM_VCPU_TIMER_IRQ_VTIMER 0
 #define KVM_ARM_VCPU_TIMER_IRQ_PTIMER 1
+#define KVM_ARM_VCPU_PVTIME_CTRL 2
+#define KVM_ARM_VCPU_PVTIME_IPA 0
 
 /* KVM_IRQ_LINE irq field index values */
 #define KVM_ARM_IRQ_VCPU2_SHIFT 28
3 changes: 3 additions & 0 deletions tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -667,6 +667,8 @@ struct kvm_ppc_cpu_char {
 
 /* PPC64 eXternal Interrupt Controller Specification */
 #define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */
+#define KVM_DEV_XICS_GRP_CTRL 2
+#define KVM_DEV_XICS_NR_SERVERS 1
 
 /* Layout of 64-bit source attribute values */
 #define KVM_XICS_DESTINATION_SHIFT 0
@@ -683,6 +685,7 @@ struct kvm_ppc_cpu_char {
 #define KVM_DEV_XIVE_GRP_CTRL 1
 #define KVM_DEV_XIVE_RESET 1
 #define KVM_DEV_XIVE_EQ_SYNC 2
+#define KVM_DEV_XIVE_NR_SERVERS 3
 #define KVM_DEV_XIVE_GRP_SOURCE 2 /* 64-bit source identifier */
 #define KVM_DEV_XIVE_GRP_SOURCE_CONFIG 3 /* 64-bit source identifier */
 #define KVM_DEV_XIVE_GRP_EQ_CONFIG 4 /* 64-bit EQ identifier */
3 changes: 3 additions & 0 deletions tools/arch/x86/include/asm/cpufeatures.h
@@ -292,6 +292,7 @@
 #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
 #define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
 #define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_RDPRU (13*32+ 4) /* Read processor register at user level */
 #define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
 #define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
 #define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
@@ -399,5 +400,7 @@
 #define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
 #define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
 #define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
+#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
 
 #endif /* _ASM_X86_CPUFEATURES_H */
18 changes: 18 additions & 0 deletions tools/arch/x86/include/asm/msr-index.h
@@ -93,6 +93,18 @@
 				 * Microarchitectural Data
 				 * Sampling (MDS) vulnerabilities.
 				 */
+#define ARCH_CAP_PSCHANGE_MC_NO BIT(6) /*
+				 * The processor is not susceptible to a
+				 * machine check error due to modifying the
+				 * code page size along with either the
+				 * physical address or cache type
+				 * without TLB invalidation.
+				 */
+#define ARCH_CAP_TSX_CTRL_MSR BIT(7) /* MSR for TSX control is available. */
+#define ARCH_CAP_TAA_NO BIT(8) /*
+				 * Not susceptible to
+				 * TSX Async Abort (TAA) vulnerabilities.
+				 */
 
 #define MSR_IA32_FLUSH_CMD 0x0000010b
 #define L1D_FLUSH BIT(0) /*
@@ -103,6 +115,10 @@
 #define MSR_IA32_BBL_CR_CTL 0x00000119
 #define MSR_IA32_BBL_CR_CTL3 0x0000011e
 
+#define MSR_IA32_TSX_CTRL 0x00000122
+#define TSX_CTRL_RTM_DISABLE BIT(0) /* Disable RTM feature */
+#define TSX_CTRL_CPUID_CLEAR BIT(1) /* Disable TSX enumeration */
+
 #define MSR_IA32_SYSENTER_CS 0x00000174
 #define MSR_IA32_SYSENTER_ESP 0x00000175
 #define MSR_IA32_SYSENTER_EIP 0x00000176
@@ -393,6 +409,8 @@
 #define MSR_AMD_PSTATE_DEF_BASE 0xc0010064
 #define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
 #define MSR_AMD64_OSVW_STATUS 0xc0010141
+#define MSR_AMD_PPIN_CTL 0xc00102f0
+#define MSR_AMD_PPIN 0xc00102f1
 #define MSR_AMD64_LS_CFG 0xc0011020
 #define MSR_AMD64_DC_CFG 0xc0011022
 #define MSR_AMD64_BU_CFG2 0xc001102a
20 changes: 10 additions & 10 deletions tools/arch/x86/lib/memcpy_64.S
@@ -28,8 +28,8 @@
  * Output:
  * rax original destination
  */
-ENTRY(__memcpy)
-ENTRY(memcpy)
+SYM_FUNC_START_ALIAS(__memcpy)
+SYM_FUNC_START_LOCAL(memcpy)
 	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
 		      "jmp memcpy_erms", X86_FEATURE_ERMS
 
@@ -41,23 +41,23 @@ ENTRY(memcpy)
 	movl %edx, %ecx
 	rep movsb
 	ret
-ENDPROC(memcpy)
-ENDPROC(__memcpy)
+SYM_FUNC_END(memcpy)
+SYM_FUNC_END_ALIAS(__memcpy)
 EXPORT_SYMBOL(memcpy)
 EXPORT_SYMBOL(__memcpy)
 
 /*
  * memcpy_erms() - enhanced fast string memcpy. This is faster and
  * simpler than memcpy. Use memcpy_erms when possible.
  */
-ENTRY(memcpy_erms)
+SYM_FUNC_START(memcpy_erms)
 	movq %rdi, %rax
 	movq %rdx, %rcx
 	rep movsb
 	ret
-ENDPROC(memcpy_erms)
+SYM_FUNC_END(memcpy_erms)
 
-ENTRY(memcpy_orig)
+SYM_FUNC_START(memcpy_orig)
 	movq %rdi, %rax
 
 	cmpq $0x20, %rdx
@@ -182,7 +182,7 @@ ENTRY(memcpy_orig)
 
 .Lend:
 	retq
-ENDPROC(memcpy_orig)
+SYM_FUNC_END(memcpy_orig)
 
 #ifndef CONFIG_UML
 
@@ -193,7 +193,7 @@ MCSAFE_TEST_CTL
  * Note that we only catch machine checks when reading the source addresses.
  * Writes to target are posted and don't generate machine checks.
  */
-ENTRY(__memcpy_mcsafe)
+SYM_FUNC_START(__memcpy_mcsafe)
 	cmpl $8, %edx
 	/* Less than 8 bytes? Go to byte copy loop */
 	jb .L_no_whole_words
@@ -260,7 +260,7 @@ ENTRY(__memcpy_mcsafe)
 	xorl %eax, %eax
 .L_done:
 	ret
-ENDPROC(__memcpy_mcsafe)
+SYM_FUNC_END(__memcpy_mcsafe)
 EXPORT_SYMBOL_GPL(__memcpy_mcsafe)
 
 	.section .fixup, "ax"
16 changes: 8 additions & 8 deletions tools/arch/x86/lib/memset_64.S
@@ -18,8 +18,8 @@
  *
  * rax original destination
  */
-ENTRY(memset)
-ENTRY(__memset)
+SYM_FUNC_START_ALIAS(memset)
+SYM_FUNC_START(__memset)
 	/*
 	 * Some CPUs support enhanced REP MOVSB/STOSB feature. It is recommended
 	 * to use it when possible. If not available, use fast string instructions.
@@ -42,8 +42,8 @@ ENTRY(__memset)
 	rep stosb
 	movq %r9,%rax
 	ret
-ENDPROC(memset)
-ENDPROC(__memset)
+SYM_FUNC_END(__memset)
+SYM_FUNC_END_ALIAS(memset)
 
 /*
  * ISO C memset - set a memory block to a byte value. This function uses
@@ -56,16 +56,16 @@ ENDPROC(__memset)
  *
  * rax original destination
  */
-ENTRY(memset_erms)
+SYM_FUNC_START(memset_erms)
 	movq %rdi,%r9
 	movb %sil,%al
 	movq %rdx,%rcx
 	rep stosb
 	movq %r9,%rax
 	ret
-ENDPROC(memset_erms)
+SYM_FUNC_END(memset_erms)
 
-ENTRY(memset_orig)
+SYM_FUNC_START(memset_orig)
 	movq %rdi,%r10
 
 	/* expand byte value */
@@ -136,4 +136,4 @@ ENTRY(memset_orig)
 	subq %r8,%rdx
 	jmp .Lafter_bad_alignment
 .Lfinal:
-ENDPROC(memset_orig)
+SYM_FUNC_END(memset_orig)
3 changes: 2 additions & 1 deletion tools/include/uapi/drm/drm.h
@@ -778,11 +778,12 @@ struct drm_syncobj_array {
 	__u32 pad;
 };
 
+#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
 struct drm_syncobj_timeline_array {
 	__u64 handles;
 	__u64 points;
 	__u32 count_handles;
-	__u32 pad;
+	__u32 flags;
 };
 
...
