Skip to content

Commit

Permalink
sparc32: centralize all mmu context handling in srmmu.c
Browse files Browse the repository at this point in the history
Signed-off-by: Sam Ravnborg <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
  • Loading branch information
sravnborg authored and davem330 committed Jul 26, 2012
1 parent 59b00c7 commit b585e85
Show file tree
Hide file tree
Showing 6 changed files with 63 additions and 71 deletions.
8 changes: 3 additions & 5 deletions arch/sparc/include/asm/mmu_context_32.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}

/*
* Initialize a new mmu context. This is invoked when a new
/* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated.
*/
#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
int init_new_context(struct task_struct *tsk, struct mm_struct *mm);

/*
* Destroy a dead context. This occurs when mmput drops the
/* Destroy a dead context. This occurs when mmput drops the
* mm_users count to zero, the mmaps have been released, and
* all the page tables have been flushed. Our job is to destroy
* any remaining processor-specific state.
Expand Down
32 changes: 0 additions & 32 deletions arch/sparc/include/asm/pgtable_32.h
Original file line number Diff line number Diff line change
Expand Up @@ -79,8 +79,6 @@ extern unsigned long ptr_in_current_pgd;
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED

extern int num_contexts;

/* First physical page can be anywhere, the following is needed so that
* va-->pa and vice versa conversions work properly without performance
* hit for all __pa()/__va() operations.
Expand Down Expand Up @@ -399,36 +397,6 @@ static inline pte_t pgoff_to_pte(unsigned long pgoff)
*/
#define PTE_FILE_MAX_BITS 24

/*
*/
struct ctx_list {
struct ctx_list *next;
struct ctx_list *prev;
unsigned int ctx_number;
struct mm_struct *ctx_mm;
};

extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
extern struct ctx_list ctx_free; /* Head of free list */
extern struct ctx_list ctx_used; /* Head of used contexts list */

#define NO_CONTEXT -1

static inline void remove_from_ctx_list(struct ctx_list *entry)
{
entry->next->prev = entry->prev;
entry->prev->next = entry->next;
}

static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
entry->next = head;
(entry->prev = head->prev)->next = entry;
head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)

static inline unsigned long
__get_phys (unsigned long addr)
{
Expand Down
1 change: 0 additions & 1 deletion arch/sparc/kernel/setup_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -371,7 +371,6 @@ void __init setup_arch(char **cmdline_p)
(*(linux_dbvec->teach_debugger))();
}

init_mm.context = (unsigned long) NO_CONTEXT;
init_task.thread.kregs = &fake_swapper_regs;

/* Run-time patch instructions to match the cpu model */
Expand Down
6 changes: 0 additions & 6 deletions arch/sparc/mm/fault_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,12 +32,6 @@

int show_unhandled_signals = 1;

/* At boot time we determine these two values necessary for setting
* up the segment maps and page table entries (pte's).
*/

int num_contexts;

/* Return how much physical memory we have. */
unsigned long probe_memory(void)
{
Expand Down
18 changes: 0 additions & 18 deletions arch/sparc/mm/init_32.c
Original file line number Diff line number Diff line change
Expand Up @@ -82,24 +82,6 @@ void show_mem(unsigned int filter)
#endif
}

void __init sparc_context_init(int numctx)
{
int ctx;

ctx_list_pool = __alloc_bootmem(numctx * sizeof(struct ctx_list), SMP_CACHE_BYTES, 0UL);

for(ctx = 0; ctx < numctx; ctx++) {
struct ctx_list *clist;

clist = (ctx_list_pool + ctx);
clist->ctx_number = ctx;
clist->ctx_mm = NULL;
}
ctx_free.next = ctx_free.prev = &ctx_free;
ctx_used.next = ctx_used.prev = &ctx_used;
for(ctx = 0; ctx < numctx; ctx++)
add_to_free_ctxlist(ctx_list_pool + ctx);
}

extern unsigned long cmdline_memory_size;
unsigned long last_valid_pfn;
Expand Down
69 changes: 60 additions & 9 deletions arch/sparc/mm/srmmu.c
Original file line number Diff line number Diff line change
Expand Up @@ -55,10 +55,6 @@ static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;
Expand Down Expand Up @@ -355,8 +351,39 @@ void pte_free(struct mm_struct *mm, pgtable_t pte)
srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
}

/*
*/
/* Context handling - contexts are tracked in a dynamically sized pool.
 * Each hardware MMU context is represented by one ctx_list entry that
 * lives on either the free list or the used list.
 */
#define NO_CONTEXT	-1

struct ctx_list {
	struct ctx_list *next;
	struct ctx_list *prev;
	unsigned int ctx_number;	/* hardware context number */
	struct mm_struct *ctx_mm;	/* mm currently owning this context */
};

static struct ctx_list *ctx_list_pool;	/* allocated at boot from bootmem */
static struct ctx_list ctx_free;	/* head of the free-context list */
static struct ctx_list ctx_used;	/* head of the in-use-context list */

/* At boot time we determine the number of contexts */
static int num_contexts;

/* Unlink @entry from whichever circular list it is on. */
static inline void remove_from_ctx_list(struct ctx_list *entry)
{
	struct ctx_list *p = entry->prev;
	struct ctx_list *n = entry->next;

	n->prev = p;
	p->next = n;
}

/* Insert @entry at the tail of the circular list headed by @head. */
static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
	struct ctx_list *tail = head->prev;

	entry->next = head;
	entry->prev = tail;
	tail->next = entry;
	head->prev = entry;
}
#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)


static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
struct ctx_list *ctxp;
Expand Down Expand Up @@ -392,6 +419,26 @@ static inline void free_context(int context)
add_to_free_ctxlist(ctx_old);
}

/* Build the pool of hardware contexts at boot.
 *
 * @numctx: number of contexts the MMU supports, determined at boot.
 *
 * Allocates one ctx_list entry per context from bootmem, numbers each
 * entry, resets both list heads to empty circular lists, and places
 * every entry on the free list.
 */
static void __init sparc_context_init(int numctx)
{
	unsigned long size = numctx * sizeof(struct ctx_list);
	int i;

	ctx_list_pool = __alloc_bootmem(size, SMP_CACHE_BYTES, 0UL);

	for (i = 0; i < numctx; i++) {
		ctx_list_pool[i].ctx_number = i;
		ctx_list_pool[i].ctx_mm = NULL;	/* not bound to any mm yet */
	}

	/* Both heads start as empty circular lists. */
	ctx_free.next = ctx_free.prev = &ctx_free;
	ctx_used.next = ctx_used.prev = &ctx_used;

	for (i = 0; i < numctx; i++)
		add_to_free_ctxlist(&ctx_list_pool[i]);
}

void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
struct task_struct *tsk)
Expand Down Expand Up @@ -799,9 +846,6 @@ static void __init map_kernel(void)
}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);
Expand All @@ -816,6 +860,7 @@ void __init srmmu_paging_init(void)
pte_t *pte;
unsigned long pages_avail;

init_mm.context = (unsigned long) NO_CONTEXT;
sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */

if (sparc_cpu_model == sun4d)
Expand Down Expand Up @@ -918,6 +963,12 @@ void mmu_info(struct seq_file *m)
srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

/* Initialize the mmu context of a freshly created address space.
 *
 * The mm starts out with no hardware context; a real context number is
 * presumably handed out later from the ctx pool via alloc_context()
 * when the mm is first switched to — confirm against switch_mm().
 * Always succeeds (returns 0).
 */
int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}

void destroy_context(struct mm_struct *mm)
{

Expand Down

0 comments on commit b585e85

Please sign in to comment.