Merge tag 'arc-v3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc

Pull ARC changes from Vineet Gupta:

 - ARC MM changes:
    - preparation for MMUv4 (accommodate new PTE bits, new cmds)
    - Rework the ASID allocation algorithm to remove asid-mm reverse map
 - Boilerplate code consolidation in Exception Handlers
 - Disable FRAME_POINTER for ARC
 - Unaligned Access Emulation for Big-Endian from Noam
 - Bunch of fixes (udelay, missing accessors) from Mischa

* tag 'arc-v3.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
  ARC: fix new Section mismatches in build (post __cpuinit cleanup)
  Kconfig.debug: Add FRAME_POINTER anti-dependency for ARC
  ARC: Fix __udelay calculation
  ARC: remove console_verbose() from setup_arch()
  ARC: Add read*_relaxed to asm/io.h
  ARC: Handle un-aligned user space access in BE.
  ARC: [ASID] Track ASID allocation cycles/generations
  ARC: [ASID] activate_mm() == switch_mm()
  ARC: [ASID] get_new_mmu_context() to conditionally allocate new ASID
  ARC: [ASID] Refactor the TLB paranoid debug code
  ARC: [ASID] Remove legacy/unused debug code
  ARC: No need to flush the TLB in early boot
  ARC: MMUv4 preps/3 - Abstract out TLB Insert/Delete
  ARC: MMUv4 preps/2 - Reshuffle PTE bits
  ARC: MMUv4 preps/1 - Fold PTE K/U access flags
  ARC: Code cosmetics (Nothing semantical)
  ARC: Entry Handler tweaks: Optimize away redundant IRQ_DISABLE_SAVE
  ARC: Exception Handlers Code consolidation
  ARC: Add some .gitignore entries
torvalds committed Sep 9, 2013
2 parents 833ae40 + 07b9b65 commit 89c5a94
Showing 19 changed files with 352 additions and 454 deletions.
1 change: 1 addition & 0 deletions arch/arc/boot/.gitignore
@@ -0,0 +1 @@
*.dtb*
2 changes: 1 addition & 1 deletion arch/arc/include/asm/cache.h
@@ -57,7 +57,7 @@

extern void arc_cache_init(void);
extern char *arc_cache_mumbojumbo(int cpu_id, char *buf, int len);
extern void __init read_decode_cache_bcr(void);
extern void read_decode_cache_bcr(void);

#endif /* !__ASSEMBLY__ */

5 changes: 2 additions & 3 deletions arch/arc/include/asm/delay.h
@@ -53,11 +53,10 @@ static inline void __udelay(unsigned long usecs)
{
unsigned long loops;

/* (long long) cast ensures 64 bit MPY - real or emulated
/* (u64) cast ensures 64 bit MPY - real or emulated
* HZ * 4295 is pre-evaluated by gcc - hence only 2 mpy ops
*/
loops = ((long long)(usecs * 4295 * HZ) *
(long long)(loops_per_jiffy)) >> 32;
loops = ((u64) usecs * 4295 * HZ * loops_per_jiffy) >> 32;

__delay(loops);
}
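
The new formula widens usecs to 64 bits before the whole multiply chain; the old code did the usecs * 4295 * HZ multiply in 32-bit arithmetic and only cast afterwards, so larger delays could overflow. The constant 4295 is roughly 2^32 / 10^6, which makes the final >> 32 act as a divide by one million. Below is a minimal userspace sketch of the same fixed-point math; it is illustrative only, and the HZ and loops_per_jiffy values are made up, not taken from this commit:

/* Model of the __udelay scaling: loops ~= usecs * HZ * lpj / 10^6,
 * computed as ((u64)usecs * 4295 * HZ * lpj) >> 32 with no division.
 */
#include <stdio.h>
#include <stdint.h>

#define EX_HZ   100UL           /* hypothetical HZ */
#define EX_LPJ  500000UL        /* hypothetical loops_per_jiffy */

static unsigned long usecs_to_loops(unsigned long usecs)
{
	/* cast first, so the whole chain is a 64-bit multiply */
	return ((uint64_t)usecs * 4295 * EX_HZ * EX_LPJ) >> 32;
}

int main(void)
{
	/* 1000 us at 500000 loops/jiffy and HZ=100 -> ~50000 loops */
	printf("%lu\n", usecs_to_loops(1000));
	return 0;
}
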
24 changes: 23 additions & 1 deletion arch/arc/include/asm/entry.h
@@ -365,7 +365,7 @@
* it to memory (non-SMP case) or SCRATCH0 Aux Reg (SMP).
*
* Before saving the full regfile - this reg is restored back, only
* to be saved again on kernel mode stack, as part of ptregs.
* to be saved again on kernel mode stack, as part of pt_regs.
*-------------------------------------------------------------*/
.macro EXCPN_PROLOG_FREEUP_REG reg
#ifdef CONFIG_SMP
@@ -383,6 +383,28 @@
#endif
.endm

/*--------------------------------------------------------------
* Exception Entry prologue
* -Switches stack to K mode (if not already)
* -Saves the register file
*
* After this it is safe to call the "C" handlers
*-------------------------------------------------------------*/
.macro EXCEPTION_PROLOGUE

/* Need at least 1 reg to code the early exception prologue */
EXCPN_PROLOG_FREEUP_REG r9

/* U/K mode at time of exception (stack not switched if already K) */
lr r9, [erstatus]

/* ARC700 doesn't provide auto-stack switching */
SWITCH_TO_KERNEL_STK

/* save the regfile */
SAVE_ALL_SYS
.endm

/*--------------------------------------------------------------
* Save all registers used by Exceptions (TLB Miss, Prot-V, Mem err etc)
* Requires SP to be already switched to kernel mode Stack
4 changes: 4 additions & 0 deletions arch/arc/include/asm/io.h
@@ -100,6 +100,10 @@ static inline void __raw_writel(u32 w, volatile void __iomem *addr)

}

#define readb_relaxed readb
#define readw_relaxed readw
#define readl_relaxed readl

#include <asm-generic/io.h>

#endif /* _ASM_ARC_IO_H */
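
The three defines above simply alias the _relaxed accessors to the ordinary readb/readw/readl on ARC, so drivers written against the relaxed API now build and behave like the plain accessors. A hypothetical driver fragment showing the usage (the register name and offset are made up for illustration):

#include <linux/io.h>

#define EX_STATUS_REG	0x04	/* hypothetical device register offset */

static u32 example_read_status(void __iomem *base)
{
	/* on ARC this now expands to readl(base + EX_STATUS_REG) */
	return readl_relaxed(base + EX_STATUS_REG);
}
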
7 changes: 0 additions & 7 deletions arch/arc/include/asm/irqflags.h
@@ -157,13 +157,6 @@ static inline void arch_unmask_irq(unsigned int irq)
flag \scratch
.endm

.macro IRQ_DISABLE_SAVE scratch, save
lr \scratch, [status32]
mov \save, \scratch /* Make a copy */
bic \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
flag \scratch
.endm

.macro IRQ_ENABLE scratch
lr \scratch, [status32]
or \scratch, \scratch, (STATUS_E1_MASK | STATUS_E2_MASK)
11 changes: 5 additions & 6 deletions arch/arc/include/asm/mmu.h
@@ -32,6 +32,8 @@
/* Error code if probe fails */
#define TLB_LKUP_ERR 0x80000000

#define TLB_DUP_ERR (TLB_LKUP_ERR | 0x00000001)

/* TLB Commands */
#define TLBWrite 0x1
#define TLBRead 0x2
@@ -46,21 +48,18 @@
#ifndef __ASSEMBLY__

typedef struct {
unsigned long asid; /* Pvt Addr-Space ID for mm */
#ifdef CONFIG_ARC_TLB_DBG
struct task_struct *tsk;
#endif
unsigned long asid; /* 8 bit MMU PID + Generation cycle */
} mm_context_t;

#ifdef CONFIG_ARC_DBG_TLB_PARANOIA
void tlb_paranoid_check(unsigned int pid_sw, unsigned long address);
void tlb_paranoid_check(unsigned int mm_asid, unsigned long address);
#else
#define tlb_paranoid_check(a, b)
#endif

void arc_mmu_init(void);
extern char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len);
void __init read_decode_mmu_bcr(void);
void read_decode_mmu_bcr(void);

#endif /* !__ASSEMBLY__ */

161 changes: 49 additions & 112 deletions arch/arc/include/asm/mmu_context.h
@@ -34,95 +34,65 @@
* When it reaches max 255, the allocation cycle starts afresh by flushing
* the entire TLB and wrapping ASID back to zero.
*
* For book-keeping, Linux uses a couple of data-structures:
* -mm_struct has an @asid field to keep a note of task's ASID (needed at the
* time of say switch_mm( )
* -An array of mm structs @asid_mm_map[] for asid->mm the reverse mapping,
* given an ASID, finding the mm struct associated.
*
* The round-robin allocation algorithm allows for ASID stealing.
* If asid tracker is at "x-1", a new req will allocate "x", even if "x" was
* already assigned to another (switched-out) task. Obviously the prev owner
* is marked with an invalid ASID to make it request for a new ASID when it
* gets scheduled next time. However its TLB entries (with ASID "x") could
* exist, which must be cleared before the same ASID is used by the new owner.
* Flushing them would be a plausible but costly solution. Instead we force an
* allocation policy quirk, which ensures that a stolen ASID won't have any
* TLB entries associated, alleviating the need to flush.
* The quirk essentially is not allowing ASID allocated in prev cycle
* to be used past a roll-over in the next cycle.
* When this happens (i.e. task ASID > asid tracker), task needs to refresh
* its ASID, aligning it to current value of tracker. If the task doesn't get
* scheduled past a roll-over, hence its ASID is not yet realigned with
* tracker, such ASID is anyways safely reusable because it is
* guaranteed that TLB entries with that ASID won't exist.
* A new allocation cycle, post rollover, could potentially reassign an ASID
* to a different task. Thus the rule is to refresh the ASID in a new cycle.
* The 32 bit @asid_cache (and mm->asid) have 8 bits MMU PID and rest 24 bits
* serve as cycle/generation indicator and natural 32 bit unsigned math
* automagically increments the generation when lower 8 bits rollover.
*/

#define FIRST_ASID 0
#define MAX_ASID 255 /* 8 bit PID field in PID Aux reg */
#define NO_ASID (MAX_ASID + 1) /* ASID Not alloc to mmu ctxt */
#define NUM_ASID ((MAX_ASID - FIRST_ASID) + 1)
#define MM_CTXT_ASID_MASK 0x000000ff /* MMU PID reg :8 bit PID */
#define MM_CTXT_CYCLE_MASK (~MM_CTXT_ASID_MASK)

#define MM_CTXT_FIRST_CYCLE (MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID 0UL

/* ASID to mm struct mapping */
extern struct mm_struct *asid_mm_map[NUM_ASID + 1];
#define hw_pid(mm) (mm->context.asid & MM_CTXT_ASID_MASK)

extern int asid_cache;
extern unsigned int asid_cache;

/*
* Assign a new ASID to task. If the task already has an ASID, it is
* relinquished.
* Get a new ASID if task doesn't have a valid one (unalloc or from prev cycle)
* Also set the MMU PID register to existing/updated ASID
*/
static inline void get_new_mmu_context(struct mm_struct *mm)
{
struct mm_struct *prev_owner;
unsigned long flags;

local_irq_save(flags);

/*
* Relinquish the currently owned ASID (if any).
* Doing unconditionally saves a cmp-n-branch; for already unused
* ASID slot, the value was/remains NULL
* Move to new ASID if it was not from current alloc-cycle/generation.
* This is done by ensuring that the generation bits in both mm->ASID
* and cpu's ASID counter are exactly same.
*
* Note: Callers needing new ASID unconditionally, independent of
* generation, e.g. local_flush_tlb_mm() for forking parent,
* first need to destroy the context, setting it to invalid
* value.
*/
asid_mm_map[mm->context.asid] = (struct mm_struct *)NULL;
if (!((mm->context.asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
goto set_hw;

/* move to new ASID and handle rollover */
if (unlikely(!(++asid_cache & MM_CTXT_ASID_MASK))) {

/* move to new ASID */
if (++asid_cache > MAX_ASID) { /* ASID roll-over */
asid_cache = FIRST_ASID;
flush_tlb_all();
}

/*
* Is next ASID already owned by some-one else (we are stealing it).
* If so, let the orig owner be aware of this, so when it runs, it
* asks for a brand new ASID. This would only happen for a long-lived
* task with ASID from prev allocation cycle (before ASID roll-over).
*
* This might look wrong - if we are re-using some other task's ASID,
* won't we use its stale TLB entries too. Actually switch_mm( ) takes
* care of such a case: it ensures that task with ASID from prev alloc
* cycle, when scheduled will refresh its ASID: see switch_mm( ) below
* The stealing scenario described here will only happen if that task
* didn't get a chance to refresh its ASID - implying stale entries
* won't exist.
*/
prev_owner = asid_mm_map[asid_cache];
if (prev_owner)
prev_owner->context.asid = NO_ASID;
/*
* Above check is for rollover of 8 bit ASID in 32 bit container.
* If the container itself wrapped around, set it to a non zero
* "generation" to distinguish from no context
*/
if (!asid_cache)
asid_cache = MM_CTXT_FIRST_CYCLE;
}

/* Assign new ASID to tsk */
asid_mm_map[asid_cache] = mm;
mm->context.asid = asid_cache;

#ifdef CONFIG_ARC_TLB_DBG
pr_info("ARC_TLB_DBG: NewMM=0x%x OldMM=0x%x task_struct=0x%x Task: %s,"
" pid:%u, assigned asid:%lu\n",
(unsigned int)mm, (unsigned int)prev_owner,
(unsigned int)(mm->context.tsk), (mm->context.tsk)->comm,
(mm->context.tsk)->pid, mm->context.asid);
#endif

write_aux_reg(ARC_REG_PID, asid_cache | MMU_ENABLE);
set_hw:
write_aux_reg(ARC_REG_PID, hw_pid(mm) | MMU_ENABLE);

local_irq_restore(flags);
}
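
To make the generation scheme concrete, here is a small standalone (userspace) model of the arithmetic described in the header comment and implemented by get_new_mmu_context() above. The MM_CTXT_* values mirror this diff; get_new_ctx() and the sample values in main() are illustrative, not kernel code:

/* Low 8 bits are the hardware PID, upper 24 bits the allocation
 * cycle/generation; plain 32-bit unsigned increments roll the generation
 * automatically when the PID wraps.
 */
#include <stdio.h>

#define MM_CTXT_ASID_MASK	0x000000ffU
#define MM_CTXT_CYCLE_MASK	(~MM_CTXT_ASID_MASK)
#define MM_CTXT_FIRST_CYCLE	(MM_CTXT_ASID_MASK + 1)
#define MM_CTXT_NO_ASID		0U

static unsigned int asid_cache = MM_CTXT_FIRST_CYCLE;

static unsigned int get_new_ctx(unsigned int mm_asid)
{
	/* same generation as the tracker: current ASID is still valid */
	if (!((mm_asid ^ asid_cache) & MM_CTXT_CYCLE_MASK))
		return mm_asid;

	++asid_cache;
	if (!(asid_cache & MM_CTXT_ASID_MASK)) {
		/* low 8 bits wrapped: a new cycle starts (the kernel calls
		 * flush_tlb_all() here); if all 32 bits wrapped, skip the
		 * reserved "no context" generation 0 */
		if (!asid_cache)
			asid_cache = MM_CTXT_FIRST_CYCLE;
	}

	return asid_cache;
}

int main(void)
{
	unsigned int a = get_new_ctx(MM_CTXT_NO_ASID);	/* fresh mm -> 0x101 */
	unsigned int b = get_new_ctx(a);		/* same cycle -> unchanged */

	printf("%#x %#x, hw pid %u\n", a, b, a & MM_CTXT_ASID_MASK);
	return 0;
}
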
@@ -134,10 +104,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context.asid = NO_ASID;
#ifdef CONFIG_ARC_TLB_DBG
mm->context.tsk = tsk;
#endif
mm->context.asid = MM_CTXT_NO_ASID;
return 0;
}

@@ -152,40 +119,21 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

/*
* Get a new ASID if task doesn't have a valid one. Possible when
* -task never had an ASID (fresh after fork)
* -it's ASID was stolen - past an ASID roll-over.
* -There's a third obscure scenario (if this task is running for the
* first time afer an ASID rollover), where despite having a valid
* ASID, we force a get for new ASID - see comments at top.
*
* Both the non-alloc scenario and first-use-after-rollover can be
* detected using the single condition below: NO_ASID = 256
* while asid_cache is always a valid ASID value (0-255).
*/
if (next->context.asid > asid_cache) {
get_new_mmu_context(next);
} else {
/*
* XXX: This will never happen given the chks above
* BUG_ON(next->context.asid > MAX_ASID);
*/
write_aux_reg(ARC_REG_PID, next->context.asid | MMU_ENABLE);
}

get_new_mmu_context(next);
}

/*
* Called at the time of execve() to get a new ASID
* Note the subtlety here: get_new_mmu_context() behaves differently here
* vs. in switch_mm(). Here it always returns a new ASID, because mm has
* an unallocated "initial" value, while in the latter, it moves to a new ASID,
* only if it was unallocated
*/
#define activate_mm(prev, next) switch_mm(prev, next, NULL)

static inline void destroy_context(struct mm_struct *mm)
{
unsigned long flags;

local_irq_save(flags);

asid_mm_map[mm->context.asid] = NULL;
mm->context.asid = NO_ASID;

local_irq_restore(flags);
mm->context.asid = MM_CTXT_NO_ASID;
}

/* it seemed that deactivate_mm( ) is a reasonable place to do book-keeping
@@ -197,17 +145,6 @@ static inline void destroy_context(struct mm_struct *mm)
*/
#define deactivate_mm(tsk, mm) do { } while (0)

static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
#ifndef CONFIG_SMP
write_aux_reg(ARC_REG_SCRATCH_DATA0, next->pgd);
#endif

/* Unconditionally get a new ASID */
get_new_mmu_context(next);

}

#define enter_lazy_tlb(mm, tsk)

#endif /* __ASM_ARC_MMU_CONTEXT_H */
