Merge branch 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 timer updates from Thomas Gleixner:
 "Early TSC based time stamping to allow better boot time analysis.

  This comes with a general cleanup of the TSC calibration code which
  grew warts and duct taping over the years and removes 250 lines of
  code. Initiated and mostly implemented by Pavel with help from various
  folks"

* 'x86-timers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (37 commits)
  x86/kvmclock: Mark kvm_get_preset_lpj() as __init
  x86/tsc: Consolidate init code
  sched/clock: Disable interrupts when calling generic_sched_clock_init()
  timekeeping: Prevent false warning when persistent clock is not available
  sched/clock: Close a hole in sched_clock_init()
  x86/tsc: Make use of tsc_calibrate_cpu_early()
  x86/tsc: Split native_calibrate_cpu() into early and late parts
  sched/clock: Use static key for sched_clock_running
  sched/clock: Enable sched clock early
  sched/clock: Move sched clock initialization and merge with generic clock
  x86/tsc: Use TSC as sched clock early
  x86/tsc: Initialize cyc2ns when tsc frequency is determined
  x86/tsc: Calibrate tsc only once
  ARM/time: Remove read_boot_clock64()
  s390/time: Remove read_boot_clock64()
  timekeeping: Default boot time offset to local_clock()
  timekeeping: Replace read_boot_clock64() with read_persistent_wall_and_boot_offset()
  s390/time: Add read_persistent_wall_and_boot_offset()
  x86/xen/time: Output xen sched_clock time from 0
  x86/xen/time: Initialize pv xen time in init_hypervisor_platform()
  ...
torvalds committed Aug 14, 2018
2 parents eac3411 + 1088c6e commit 13e091b
Showing 41 changed files with 508 additions and 754 deletions.
2 changes: 0 additions & 2 deletions Documentation/admin-guide/kernel-parameters.txt
@@ -2835,8 +2835,6 @@
 
 	nosync		[HW,M68K] Disables sync negotiation for all devices.
 
-	notsc		[BUGS=X86-32] Disable Time Stamp Counter
-
 	nowatchdog	[KNL] Disable both lockup detectors, i.e.
 			soft-lockup and NMI watchdog (hard-lockup).
 
4 changes: 1 addition & 3 deletions Documentation/x86/x86_64/boot-options.txt
@@ -92,9 +92,7 @@ APICs
 Timing
 
   notsc
-  Don't use the CPU time stamp counter to read the wall time.
-  This can be used to work around timing problems on multiprocessor systems
-  with not properly synchronized CPUs.
+  Deprecated, use tsc=unstable instead.
 
   nohpet
   Don't use the HPET timer.
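
Since notsc is now a deprecation stub, a machine that previously booted with it would pass the replacement parameter on its kernel command line instead; a hypothetical boot entry for illustration:

	linux /boot/vmlinuz root=/dev/sda1 ro tsc=unstable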
3 changes: 1 addition & 2 deletions arch/arm/include/asm/mach/time.h
@@ -13,7 +13,6 @@
 extern void timer_tick(void);
 
 typedef void (*clock_access_fn)(struct timespec64 *);
-extern int register_persistent_clock(clock_access_fn read_boot,
-				     clock_access_fn read_persistent);
+extern int register_persistent_clock(clock_access_fn read_persistent);
 
 #endif
15 changes: 2 additions & 13 deletions arch/arm/kernel/time.c
@@ -83,29 +83,18 @@ static void dummy_clock_access(struct timespec64 *ts)
 }
 
 static clock_access_fn __read_persistent_clock = dummy_clock_access;
-static clock_access_fn __read_boot_clock = dummy_clock_access;
 
 void read_persistent_clock64(struct timespec64 *ts)
 {
 	__read_persistent_clock(ts);
 }
 
-void read_boot_clock64(struct timespec64 *ts)
-{
-	__read_boot_clock(ts);
-}
-
-int __init register_persistent_clock(clock_access_fn read_boot,
-				     clock_access_fn read_persistent)
+int __init register_persistent_clock(clock_access_fn read_persistent)
 {
 	/* Only allow the clockaccess functions to be registered once */
-	if (__read_persistent_clock == dummy_clock_access &&
-	    __read_boot_clock == dummy_clock_access) {
-		if (read_boot)
-			__read_boot_clock = read_boot;
+	if (__read_persistent_clock == dummy_clock_access) {
 		if (read_persistent)
 			__read_persistent_clock = read_persistent;
-
 		return 0;
 	}
 
2 changes: 1 addition & 1 deletion arch/arm/plat-omap/counter_32k.c
@@ -110,7 +110,7 @@ int __init omap_init_clocksource_32k(void __iomem *vbase)
 	}
 
 	sched_clock_register(omap_32k_read_sched_clock, 32, 32768);
-	register_persistent_clock(NULL, omap_read_persistent_clock64);
+	register_persistent_clock(omap_read_persistent_clock64);
 	pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
 
 	return 0;
15 changes: 10 additions & 5 deletions arch/s390/kernel/time.c
@@ -221,17 +221,22 @@ void read_persistent_clock64(struct timespec64 *ts)
 	ext_to_timespec64(clk, ts);
 }
 
-void read_boot_clock64(struct timespec64 *ts)
+void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
+						 struct timespec64 *boot_offset)
 {
 	unsigned char clk[STORE_CLOCK_EXT_SIZE];
+	struct timespec64 boot_time;
 	__u64 delta;
 
 	delta = initial_leap_seconds + TOD_UNIX_EPOCH;
-	memcpy(clk, tod_clock_base, 16);
-	*(__u64 *) &clk[1] -= delta;
-	if (*(__u64 *) &clk[1] > delta)
+	memcpy(clk, tod_clock_base, STORE_CLOCK_EXT_SIZE);
+	*(__u64 *)&clk[1] -= delta;
+	if (*(__u64 *)&clk[1] > delta)
 		clk[0]--;
-	ext_to_timespec64(clk, ts);
+	ext_to_timespec64(clk, &boot_time);
+
+	read_persistent_clock64(wall_time);
+	*boot_offset = timespec64_sub(*wall_time, boot_time);
 }
 
 static u64 read_tod_clock(struct clocksource *cs)
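
The rewritten helper hands timekeeping both the current persistent wall time and the wall-minus-boot offset in one call. A small user-space sketch of the offset arithmetic (struct and function names invented for illustration; the kernel's timespec64_sub() performs the same normalized subtraction):

#include <stdio.h>

struct ts64 { long long tv_sec; long tv_nsec; };

/* Subtract b from a and renormalize the nanosecond field. */
static struct ts64 ts64_sub(struct ts64 a, struct ts64 b)
{
	struct ts64 r = { a.tv_sec - b.tv_sec, a.tv_nsec - b.tv_nsec };

	if (r.tv_nsec < 0) {
		r.tv_sec -= 1;
		r.tv_nsec += 1000000000L;
	}
	return r;
}

int main(void)
{
	struct ts64 wall = { 1534204800, 250000000 };	/* persistent clock now */
	struct ts64 boot = { 1534204795, 500000000 };	/* clock value at boot  */
	struct ts64 off  = ts64_sub(wall, boot);

	/* Prints "boot offset: 4.750000000 s" */
	printf("boot offset: %lld.%09ld s\n", off.tv_sec, off.tv_nsec);
	return 0;
}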
13 changes: 13 additions & 0 deletions arch/x86/include/asm/intel-family.h
@@ -76,4 +76,17 @@
 #define INTEL_FAM6_XEON_PHI_KNL		0x57 /* Knights Landing */
 #define INTEL_FAM6_XEON_PHI_KNM		0x85 /* Knights Mill */
 
+/* Useful macros */
+#define INTEL_CPU_FAM_ANY(_family, _model, _driver_data)	\
+{								\
+	.vendor		= X86_VENDOR_INTEL,			\
+	.family		= _family,				\
+	.model		= _model,				\
+	.feature	= X86_FEATURE_ANY,			\
+	.driver_data	= (kernel_ulong_t)&_driver_data		\
+}
+
+#define INTEL_CPU_FAM6(_model, _driver_data)			\
+	INTEL_CPU_FAM_ANY(6, INTEL_FAM6_##_model, _driver_data)
+
 #endif /* _ASM_X86_INTEL_FAMILY_H */
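
The new helpers shorten x86_cpu_id match tables for Intel family-6 parts. A hedged sketch of driver-side usage (the table, struct, and function names here are invented; x86_match_cpu() and the model define are from the kernel as of this series):

#include <asm/cpu_device_id.h>	/* struct x86_cpu_id, x86_match_cpu() */
#include <asm/intel-family.h>

struct example_freq_desc { unsigned long freq_khz; };

static struct example_freq_desc pnw_desc = { .freq_khz = 99840 };

static const struct x86_cpu_id example_ids[] = {
	INTEL_CPU_FAM6(ATOM_PENWELL, pnw_desc), /* INTEL_FAM6_ATOM_PENWELL */
	{}					/* terminating empty entry */
};

static unsigned long example_lookup_freq(void)
{
	const struct x86_cpu_id *id = x86_match_cpu(example_ids);

	return id ? ((struct example_freq_desc *)id->driver_data)->freq_khz : 0;
}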
43 changes: 0 additions & 43 deletions arch/x86/include/asm/intel-mid.h
@@ -80,35 +80,6 @@ enum intel_mid_cpu_type {
 
 extern enum intel_mid_cpu_type __intel_mid_cpu_chip;
 
-/**
- * struct intel_mid_ops - Interface between intel-mid & sub archs
- * @arch_setup: arch_setup function to re-initialize platform
- *             structures (x86_init, x86_platform_init)
- *
- * This structure can be extended if any new interface is required
- * between intel-mid & its sub arch files.
- */
-struct intel_mid_ops {
-	void (*arch_setup)(void);
-};
-
-/* Helper API's for INTEL_MID_OPS_INIT */
-#define DECLARE_INTEL_MID_OPS_INIT(cpuname, cpuid)	\
-	[cpuid] = get_##cpuname##_ops
-
-/* Maximum number of CPU ops */
-#define MAX_CPU_OPS(a) (sizeof(a)/sizeof(void *))
-
-/*
- * For every new cpu addition, a weak get_<cpuname>_ops() function needs be
- * declared in arch/x86/platform/intel_mid/intel_mid_weak_decls.h.
- */
-#define INTEL_MID_OPS_INIT {	\
-	DECLARE_INTEL_MID_OPS_INIT(penwell, INTEL_MID_CPU_CHIP_PENWELL), \
-	DECLARE_INTEL_MID_OPS_INIT(cloverview, INTEL_MID_CPU_CHIP_CLOVERVIEW), \
-	DECLARE_INTEL_MID_OPS_INIT(tangier, INTEL_MID_CPU_CHIP_TANGIER) \
-};
-
 #ifdef CONFIG_X86_INTEL_MID
 
 static inline enum intel_mid_cpu_type intel_mid_identify_cpu(void)
@@ -136,20 +107,6 @@ enum intel_mid_timer_options {
 
 extern enum intel_mid_timer_options intel_mid_timer_options;
 
-/*
- * Penwell uses spread spectrum clock, so the freq number is not exactly
- * the same as reported by MSR based on SDM.
- */
-#define FSB_FREQ_83SKU	83200
-#define FSB_FREQ_100SKU	99840
-#define FSB_FREQ_133SKU	133000
-
-#define FSB_FREQ_167SKU	167000
-#define FSB_FREQ_200SKU	200000
-#define FSB_FREQ_267SKU	267000
-#define FSB_FREQ_333SKU	333000
-#define FSB_FREQ_400SKU	400000
-
 /* Bus Select SoC Fuse value */
 #define BSEL_SOC_FUSE_MASK	0x7
 /* FSB 133MHz */
7 changes: 0 additions & 7 deletions arch/x86/include/asm/kvm_guest.h

This file was deleted.

1 change: 0 additions & 1 deletion arch/x86/include/asm/kvm_para.h
@@ -7,7 +7,6 @@
 #include <uapi/asm/kvm_para.h>
 
 extern void kvmclock_init(void);
-extern int kvm_register_clock(char *txt);
 
 #ifdef CONFIG_KVM_GUEST
 bool kvm_check_and_clear_guest_paused(void);
1 change: 1 addition & 0 deletions arch/x86/include/asm/text-patching.h
@@ -37,5 +37,6 @@ extern void *text_poke_early(void *addr, const void *opcode, size_t len);
 extern void *text_poke(void *addr, const void *opcode, size_t len);
 extern int poke_int3_handler(struct pt_regs *regs);
 extern void *text_poke_bp(void *addr, const void *opcode, size_t len, void *handler);
+extern int after_bootmem;
 
 #endif /* _ASM_X86_TEXT_PATCHING_H */
4 changes: 2 additions & 2 deletions arch/x86/include/asm/tsc.h
@@ -33,13 +33,13 @@ static inline cycles_t get_cycles(void)
 extern struct system_counterval_t convert_art_to_tsc(u64 art);
 extern struct system_counterval_t convert_art_ns_to_tsc(u64 art_ns);
 
-extern void tsc_early_delay_calibrate(void);
+extern void tsc_early_init(void);
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
 extern int unsynchronized_tsc(void);
 extern int check_tsc_unstable(void);
 extern void mark_tsc_async_resets(char *reason);
-extern unsigned long native_calibrate_cpu(void);
+extern unsigned long native_calibrate_cpu_early(void);
 extern unsigned long native_calibrate_tsc(void);
 extern unsigned long long native_sched_clock_from_tsc(u64 tsc);
 
7 changes: 7 additions & 0 deletions arch/x86/kernel/alternative.c
@@ -668,6 +668,7 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
 	local_irq_save(flags);
 	memcpy(addr, opcode, len);
 	local_irq_restore(flags);
+	sync_core();
 	/* Could also do a CLFLUSH here to speed up CPU recovery; but
 	   that causes hangs on some VIA CPUs. */
 	return addr;
@@ -693,6 +694,12 @@ void *text_poke(void *addr, const void *opcode, size_t len)
 	struct page *pages[2];
 	int i;
 
+	/*
+	 * While the boot memory allocator is running we cannot use struct
+	 * pages as they are not yet initialized.
+	 */
+	BUG_ON(!after_bootmem);
+
 	if (!core_kernel_text((unsigned long)addr)) {
 		pages[0] = vmalloc_to_page(addr);
 		pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13 changes: 8 additions & 5 deletions arch/x86/kernel/cpu/amd.c
@@ -232,8 +232,6 @@ static void init_amd_k7(struct cpuinfo_x86 *c)
 		}
 	}
 
-	set_cpu_cap(c, X86_FEATURE_K7);
-
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
 		return;
@@ -617,6 +615,14 @@ static void early_init_amd(struct cpuinfo_x86 *c)
 
 	early_init_amd_mc(c);
 
+#ifdef CONFIG_X86_32
+	if (c->x86 == 6)
+		set_cpu_cap(c, X86_FEATURE_K7);
+#endif
+
+	if (c->x86 >= 0xf)
+		set_cpu_cap(c, X86_FEATURE_K8);
+
 	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);
 
 	/*
@@ -863,9 +869,6 @@ static void init_amd(struct cpuinfo_x86 *c)
 
 	init_amd_cacheinfo(c);
 
-	if (c->x86 >= 0xf)
-		set_cpu_cap(c, X86_FEATURE_K8);
-
 	if (cpu_has(c, X86_FEATURE_XMM2)) {
 		unsigned long long val;
 		int ret;
40 changes: 20 additions & 20 deletions arch/x86/kernel/cpu/common.c
@@ -1018,6 +1018,24 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	setup_force_cpu_bug(X86_BUG_CPU_MELTDOWN);
 }
 
+/*
+ * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
+ * unfortunately, that's not true in practice because of early VIA
+ * chips and (more importantly) broken virtualizers that are not easy
+ * to detect. In the latter case it doesn't even *fail* reliably, so
+ * probing for it doesn't even work. Disable it completely on 32-bit
+ * unless we can find a reliable way to detect all the broken cases.
+ * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
+ */
+static void detect_nopl(void)
+{
+#ifdef CONFIG_X86_32
+	setup_clear_cpu_cap(X86_FEATURE_NOPL);
+#else
+	setup_force_cpu_cap(X86_FEATURE_NOPL);
+#endif
+}
+
 /*
  * Do minimum CPU detection early.
  * Fields really needed: vendor, cpuid_level, family, model, mask,
@@ -1092,6 +1110,8 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	 */
 	if (!pgtable_l5_enabled())
 		setup_clear_cpu_cap(X86_FEATURE_LA57);
+
+	detect_nopl();
 }
 
 void __init early_cpu_init(void)
@@ -1127,24 +1147,6 @@ void __init early_cpu_init(void)
 	early_identify_cpu(&boot_cpu_data);
 }
 
-/*
- * The NOPL instruction is supposed to exist on all CPUs of family >= 6;
- * unfortunately, that's not true in practice because of early VIA
- * chips and (more importantly) broken virtualizers that are not easy
- * to detect. In the latter case it doesn't even *fail* reliably, so
- * probing for it doesn't even work. Disable it completely on 32-bit
- * unless we can find a reliable way to detect all the broken cases.
- * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
- */
-static void detect_nopl(struct cpuinfo_x86 *c)
-{
-#ifdef CONFIG_X86_32
-	clear_cpu_cap(c, X86_FEATURE_NOPL);
-#else
-	set_cpu_cap(c, X86_FEATURE_NOPL);
-#endif
-}
-
 static void detect_null_seg_behavior(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
@@ -1207,8 +1209,6 @@ static void generic_identify(struct cpuinfo_x86 *c)
 
 	get_model_name(c); /* Default name */
 
-	detect_nopl(c);
-
 	detect_null_seg_behavior(c);
 
 	/*
11 changes: 7 additions & 4 deletions arch/x86/kernel/jump_label.c
@@ -37,15 +37,18 @@ static void bug_at(unsigned char *ip, int line)
 	BUG();
 }
 
-static void __jump_label_transform(struct jump_entry *entry,
-				   enum jump_label_type type,
-				   void *(*poker)(void *, const void *, size_t),
-				   int init)
+static void __ref __jump_label_transform(struct jump_entry *entry,
+					 enum jump_label_type type,
+					 void *(*poker)(void *, const void *, size_t),
+					 int init)
 {
 	union jump_code_union code;
 	const unsigned char default_nop[] = { STATIC_KEY_INIT_NOP };
 	const unsigned char *ideal_nop = ideal_nops[NOP_ATOMIC5];
 
+	if (early_boot_irqs_disabled)
+		poker = text_poke_early;
+
 	if (type == JUMP_LABEL_JMP) {
 		if (init) {
 			/*
14 changes: 1 addition & 13 deletions arch/x86/kernel/kvm.c
@@ -45,7 +45,6 @@
 #include <asm/apic.h>
 #include <asm/apicdef.h>
 #include <asm/hypervisor.h>
-#include <asm/kvm_guest.h>
 
 static int kvmapf = 1;
 
@@ -66,15 +65,6 @@ static int __init parse_no_stealacc(char *arg)
 
 early_param("no-steal-acc", parse_no_stealacc);
 
-static int kvmclock_vsyscall = 1;
-static int __init parse_no_kvmclock_vsyscall(char *arg)
-{
-	kvmclock_vsyscall = 0;
-	return 0;
-}
-
-early_param("no-kvmclock-vsyscall", parse_no_kvmclock_vsyscall);
-
 static DEFINE_PER_CPU_DECRYPTED(struct kvm_vcpu_pv_apf_data, apf_reason) __aligned(64);
 static DEFINE_PER_CPU_DECRYPTED(struct kvm_steal_time, steal_time) __aligned(64);
 static int has_steal_clock = 0;
@@ -560,9 +550,6 @@ static void __init kvm_guest_init(void)
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		apic_set_eoi_write(kvm_guest_apic_eoi_write);
 
-	if (kvmclock_vsyscall)
-		kvm_setup_vsyscall_timeinfo();
-
 #ifdef CONFIG_SMP
 	smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
 	smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
@@ -628,6 +615,7 @@ const __initconst struct hypervisor_x86 x86_hyper_kvm = {
 	.name			= "KVM",
 	.detect			= kvm_detect,
 	.type			= X86_HYPER_KVM,
+	.init.init_platform	= kvmclock_init,
 	.init.guest_late_init	= kvm_guest_init,
 	.init.x2apic_available	= kvm_para_available,
 };