merge upstream 3.4.111
nisenbeck committed Mar 25, 2016
1 parent c6ec500 commit 82c7ca4
Showing 125 changed files with 1,390 additions and 731 deletions.
Makefile: 2 changes (1 addition, 1 deletion)
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 4
-SUBLEVEL = 110
+SUBLEVEL = 111
 EXTRAVERSION =
 NAME = Saber-toothed Squirrel

arch/mips/include/asm/pgtable.h: 31 changes (31 additions, 0 deletions)
@@ -153,8 +153,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
		 * Make sure the buddy is global too (if it's !none,
		 * it better already be global)
		 */
+#ifdef CONFIG_SMP
+		/*
+		 * For SMP, multiple CPUs can race, so we need to do
+		 * this atomically.
+		 */
+#ifdef CONFIG_64BIT
+#define LL_INSN "lld"
+#define SC_INSN "scd"
+#else /* CONFIG_32BIT */
+#define LL_INSN "ll"
+#define SC_INSN "sc"
+#endif
+		unsigned long page_global = _PAGE_GLOBAL;
+		unsigned long tmp;
+
+		__asm__ __volatile__ (
+		"	.set	push\n"
+		"	.set	noreorder\n"
+		"1:	" LL_INSN "	%[tmp], %[buddy]\n"
+		"	bnez	%[tmp], 2f\n"
+		"	 or	%[tmp], %[tmp], %[global]\n"
+		"	" SC_INSN "	%[tmp], %[buddy]\n"
+		"	beqz	%[tmp], 1b\n"
+		"	 nop\n"
+		"2:\n"
+		"	.set pop"
+		: [buddy] "+m" (buddy->pte),
+		  [tmp] "=&r" (tmp)
+		: [global] "r" (page_global));
+#else /* !CONFIG_SMP */
		if (pte_none(*buddy))
			pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
+#endif /* CONFIG_SMP */
	}
#endif
}
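
The added loop is the MIPS load-linked/store-conditional idiom: ll/lld reads the buddy PTE and opens a reservation, bnez bails out if the entry is already populated, and sc/scd writes the or'ed value back only if no other CPU has touched the word in the meantime, retrying on failure. For readers unfamiliar with LL/SC, here is a minimal user-space analogue of the same "set the flag only while the word is still zero" update, sketched with GCC's __atomic builtins (the flag value and function name are illustrative, not kernel API):

#include <stdint.h>

#define PAGE_GLOBAL 0x1UL /* illustrative flag value, not the real _PAGE_GLOBAL */

/* Atomically set PAGE_GLOBAL in *buddy, but only while *buddy is still zero,
 * mirroring the ll/bnez/or/sc retry loop in the hunk above. */
static void set_global_if_none(uint64_t *buddy)
{
	uint64_t old = 0;

	while (!__atomic_compare_exchange_n(buddy, &old, old | PAGE_GLOBAL,
					    1 /* weak, like sc failing spuriously */,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED)) {
		if (old != 0)
			return; /* already populated: the bnez exit path */
		/* old is still 0 after a spurious failure, so just retry */
	}
}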
arch/mips/kernel/mips-mt-fpaff.c: 5 changes (3 additions, 2 deletions)
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
				      unsigned long __user *user_mask_ptr)
 {
	unsigned int real_len;
-	cpumask_t mask;
+	cpumask_t allowed, mask;
	int retval;
	struct task_struct *p;
 
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
	if (retval)
		goto out_unlock;
 
-	cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask);
+	cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
+	cpumask_and(&mask, &allowed, cpu_active_mask);
 
 out_unlock:
	read_unlock(&tasklist_lock);
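
With this change, getaffinity reports the union of the thread's FPU affinity mask and its scheduler mask, narrowed to CPUs that are actually active rather than merely possible. A toy illustration of the same set arithmetic on plain 8-bit bitmaps (the values and the main() wrapper are invented for the example; the kernel uses the cpumask API):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t user_cpus_allowed = 0x03; /* CPUs 0-1: FPU affinity */
	uint8_t cpus_allowed      = 0x0c; /* CPUs 2-3: scheduler mask */
	uint8_t cpu_active        = 0x07; /* CPUs 0-2 are active */

	uint8_t allowed = user_cpus_allowed | cpus_allowed; /* cpumask_or */
	uint8_t mask    = allowed & cpu_active;             /* cpumask_and */

	printf("reported affinity: 0x%02x\n", mask); /* prints 0x07 */
	return 0;
}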
arch/s390/kernel/process.c: 2 changes (1 addition, 1 deletion)
@@ -246,7 +246,7 @@ asmlinkage void execve_tail(void)
 {
	current->thread.fp_regs.fpc = 0;
	if (MACHINE_HAS_IEEE)
-		asm volatile("sfpc %0,%0" : : "d" (0));
+		asm volatile("sfpc %0" : : "d" (0));
 }
 
 /*
arch/s390/kernel/sclp.S: 4 changes (4 additions, 0 deletions)
@@ -270,6 +270,8 @@ ENTRY(_sclp_print_early)
	jno	.Lesa2
	ahi	%r15,-80
	stmh	%r6,%r15,96(%r15)	# store upper register halves
+	basr	%r13,0
+	lmh	%r0,%r15,.Lzeroes-.(%r13)	# clear upper register halves
 .Lesa2:
 #endif
	lr	%r10,%r2	# save string pointer
@@ -293,6 +295,8 @@ ENTRY(_sclp_print_early)
 #endif
	lm	%r6,%r15,120(%r15)	# restore registers
	br	%r14
+.Lzeroes:
+	.fill	64,4,0
 
 .LwritedataS4:
	.long	0x00760005	# SCLP command for write data
arch/tile/kernel/setup.c: 2 changes (1 addition, 1 deletion)
@@ -972,7 +972,7 @@ static void __init load_hv_initrd(void)
 
 void __init free_initrd_mem(unsigned long begin, unsigned long end)
 {
-	free_bootmem(__pa(begin), end - begin);
+	free_bootmem_late(__pa(begin), end - begin);
 }
 
 #else
arch/x86/include/asm/desc.h: 15 changes (0 additions, 15 deletions)
@@ -279,21 +279,6 @@ static inline void clear_LDT(void)
	set_ldt(NULL, 0);
 }
 
-/*
- * load one particular LDT into the current CPU
- */
-static inline void load_LDT_nolock(mm_context_t *pc)
-{
-	set_ldt(pc->ldt, pc->size);
-}
-
-static inline void load_LDT(mm_context_t *pc)
-{
-	preempt_disable();
-	load_LDT_nolock(pc);
-	preempt_enable();
-}
-
 static inline unsigned long get_desc_base(const struct desc_struct *desc)
 {
	return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
arch/x86/include/asm/kvm_host.h: 1 change (1 addition, 0 deletions)
@@ -87,6 +87,7 @@
 #define GP_VECTOR 13
 #define PF_VECTOR 14
 #define MF_VECTOR 16
+#define AC_VECTOR 17
 #define MC_VECTOR 18
 
 #define SELECTOR_TI_MASK (1 << 2)
arch/x86/include/asm/mmu.h: 3 changes (1 addition, 2 deletions)
@@ -9,8 +9,7 @@
  * we put the segment information here.
  */
 typedef struct {
-	void *ldt;
-	int size;
+	struct ldt_struct *ldt;
 
 #ifdef CONFIG_X86_64
	/* True if mm supports a task running in 32 bit compatibility mode. */
arch/x86/include/asm/mmu_context.h: 49 changes (47 additions, 2 deletions)
@@ -15,6 +15,51 @@ static inline void paravirt_activate_mm(struct mm_struct *prev,
 }
 #endif	/* !CONFIG_PARAVIRT */
 
+/*
+ * ldt_structs can be allocated, used, and freed, but they are never
+ * modified while live.
+ */
+struct ldt_struct {
+	/*
+	 * Xen requires page-aligned LDTs with special permissions. This is
+	 * needed to prevent us from installing evil descriptors such as
+	 * call gates. On native, we could merge the ldt_struct and LDT
+	 * allocations, but it's not worth trying to optimize.
+	 */
+	struct desc_struct *entries;
+	int size;
+};
+
+static inline void load_mm_ldt(struct mm_struct *mm)
+{
+	struct ldt_struct *ldt;
+
+	/* smp_read_barrier_depends synchronizes with barrier in install_ldt */
+	ldt = ACCESS_ONCE(mm->context.ldt);
+	smp_read_barrier_depends();
+
+	/*
+	 * Any change to mm->context.ldt is followed by an IPI to all
+	 * CPUs with the mm active. The LDT will not be freed until
+	 * after the IPI is handled by all such CPUs. This means that,
+	 * if the ldt_struct changes before we return, the values we see
+	 * will be safe, and the new values will be loaded before we run
+	 * any user code.
+	 *
+	 * NB: don't try to convert this to use RCU without extreme care.
+	 * We would still need IRQs off, because we don't want to change
+	 * the local LDT after an IPI loaded a newer value than the one
+	 * that we can see.
+	 */
+
+	if (unlikely(ldt))
+		set_ldt(ldt->entries, ldt->size);
+	else
+		clear_LDT();
+
+	DEBUG_LOCKS_WARN_ON(preemptible());
+}
+
 /*
  * Used for LDT copy/destruction.
  */
@@ -52,7 +97,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
		 * load the LDT, if the LDT is different:
		 */
		if (unlikely(prev->context.ldt != next->context.ldt))
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
	}
 #ifdef CONFIG_SMP
	else {
@@ -65,7 +110,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			 * to make sure to use no freed page tables.
			 */
			load_cr3(next->pgd);
-			load_LDT_nolock(&next->context);
+			load_mm_ldt(next);
		}
	}
 #endif
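
The new load_mm_ldt pairs a read-side dependency barrier with a write-side barrier in install_ldt (which is not part of this excerpt), so a CPU that observes the new pointer also observes fully initialized entries behind it. Below is a rough user-space analogue of that publish/consume pattern, sketched with C11 atomics; all names are illustrative, and memory_order_consume is typically promoted to acquire by current compilers:

#include <stdatomic.h>

struct ldt_struct {
	void *entries; /* descriptor table, opaque for this sketch */
	int size;
};

/* The published pointer; NULL plays the role of "no LDT". */
static _Atomic(struct ldt_struct *) current_ldt;

/* Writer: finish building the table, then publish it with release
 * semantics so no reader can see a half-initialized struct. The kernel
 * additionally sends an IPI and defers freeing the old table. */
static void install_ldt(struct ldt_struct *new_ldt)
{
	atomic_store_explicit(&current_ldt, new_ldt, memory_order_release);
}

/* Reader: consume-load the pointer, then act on the snapshot, like the
 * ACCESS_ONCE + smp_read_barrier_depends pair in the hunk above. */
static void load_ldt_snapshot(void)
{
	struct ldt_struct *ldt =
	    atomic_load_explicit(&current_ldt, memory_order_consume);

	if (ldt) {
		/* set_ldt(ldt->entries, ldt->size) equivalent */
	} else {
		/* clear_LDT() equivalent */
	}
}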
arch/x86/kernel/cpu/common.c: 4 changes (2 additions, 2 deletions)
@@ -1254,7 +1254,7 @@ void __cpuinit cpu_init(void)
	load_sp0(t, &current->thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);
 
	clear_all_debug_regs();
	dbg_restore_debug_regs();
@@ -1302,7 +1302,7 @@ void __cpuinit cpu_init(void)
	load_sp0(t, thread);
	set_tss_desc(cpu, t);
	load_TR_desc();
-	load_LDT(&init_mm.context);
+	load_mm_ldt(&init_mm);
 
	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);

