#include <inc/lib.h>

/*
 * Simple malloc/free.
 *
 * Uses the address space to do most of the hard work.
 * The address space from mbegin to mend is scanned
 * in order.  Pages are allocated, used to fill successive
 * malloc requests, and then left alone.  Free decrements
 * a ref count maintained in the page; the page is freed
 * when the ref count hits zero.
 *
 * If we need to allocate a large amount (more than a page),
 * we can't put a ref count at the end of each page,
 * so we mark the PTE with the bit PTE_CONTINUED instead.
 */
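
/*
 * Layout of a page used for sub-page allocations:
 *
 *	page start                              page end
 *	| chunk | chunk | ... | (unused) | uint32_t ref |
 *
 * The ref word in the last 4 bytes counts the live chunks on
 * the page, plus one extra reference held on behalf of mptr
 * while the page is still being filled.
 */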
enum
{
	MAXMALLOC = 1024*1024	/* max size of one allocated chunk */
};

#define PTE_CONTINUED 0x200	/* software PTE bit: chunk spills onto the next page */

static uint8_t *mbegin = (uint8_t*) 0x08000000;	/* start of malloc arena */
static uint8_t *mend   = (uint8_t*) 0x10000000;	/* end of malloc arena */
static uint8_t *mptr;	/* scan pointer; 0 until the first call */

/*
 * Return 1 if [v, v+n) lies below mend and none of its pages is
 * mapped; uvpd and uvpt are the read-only views of the page
 * directory and page tables that JOS exposes to user code.
 */
static int
isfree(void *v, size_t n)
{
	uintptr_t va, end_va = (uintptr_t) v + n;

	for (va = (uintptr_t) v; va < end_va; va += PGSIZE)
		if (va >= (uintptr_t) mend
		    || ((uvpd[PDX(va)] & PTE_P) && (uvpt[PGNUM(va)] & PTE_P)))
			return 0;
	return 1;
}

void*
malloc(size_t n)
{
	int i, cont;
	int nwrap;
	uint32_t *ref;
	void *v;

	if (mptr == 0)
		mptr = mbegin;

	n = ROUNDUP(n, 4);
	if (n >= MAXMALLOC)
		return 0;

	if ((uintptr_t) mptr % PGSIZE) {
		/*
		 * we're in the middle of a partially
		 * allocated page - can we add this chunk?
		 * the +4 below is for the ref count.
		 */
		ref = (uint32_t*) (ROUNDUP(mptr, PGSIZE) - 4);
		if ((uintptr_t) mptr / PGSIZE == (uintptr_t) (mptr + n - 1 + 4) / PGSIZE) {
			(*ref)++;
			v = mptr;
			mptr += n;
			return v;
		}
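		/*
		 * The division check above asks whether the chunk plus the
		 * 4-byte ref word still fit on this page.  For example, with
		 * PGSIZE 4096 and mptr at page offset 4000: n = 92 ends at
		 * offset 4000+92-1+4 = 4095, still on this page, so it fits;
		 * n = 96 would reach offset 4099 on the next page, so we
		 * give up on this page below.
		 */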
		/*
		 * stop working on this page and move on.
		 */
		free(mptr);	/* drop reference to this page */
		mptr = ROUNDDOWN(mptr + PGSIZE, PGSIZE);
	}

	/*
	 * now we need to find some address space for this chunk.
	 * if it's less than a page we leave it open for allocation.
	 * runs of more than a page can't have ref counts so we
	 * flag the PTE entries instead.
	 */
	nwrap = 0;
	while (1) {
		if (isfree(mptr, n + 4))
			break;
		mptr += PGSIZE;
		if (mptr == mend) {
			mptr = mbegin;
			if (++nwrap == 2)
				return 0;	/* out of address space */
		}
	}
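
	/*
	 * For a multi-page request, e.g. n = 2*PGSIZE, the loop below
	 * maps three pages: the PTEs of the first two carry
	 * PTE_CONTINUED, and the third page holds only the ref word
	 * in its last 4 bytes.
	 */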
	/*
	 * allocate at mptr - the +4 makes sure we allocate a ref count.
	 */
	for (i = 0; i < n + 4; i += PGSIZE) {
		cont = (i + PGSIZE < n + 4) ? PTE_CONTINUED : 0;
		if (sys_page_alloc(0, mptr + i, PTE_P|PTE_U|PTE_W|cont) < 0) {
			for (; i >= 0; i -= PGSIZE)
				sys_page_unmap(0, mptr + i);
			return 0;	/* out of physical memory */
		}
	}

	ref = (uint32_t*) (mptr + i - 4);
	*ref = 2;	/* reference for mptr, reference for returned block */
	v = mptr;
	mptr += n;
	return v;
}

void
free(void *v)
{
	uint8_t *c;
	uint32_t *ref;

	if (v == 0)
		return;
	assert(mbegin <= (uint8_t*) v && (uint8_t*) v < mend);
	c = ROUNDDOWN(v, PGSIZE);

	/*
	 * multi-page chunk: unmap every page marked PTE_CONTINUED.
	 * the walk stops on the chunk's last page, which carries the
	 * ref count in its final 4 bytes.
	 */
	while (uvpt[PGNUM(c)] & PTE_CONTINUED) {
		sys_page_unmap(0, c);
		c += PGSIZE;
		assert(mbegin <= c && c < mend);
	}

	/*
	 * c is just a piece of this page, so dec the ref count
	 * and maybe free the page.
	 */
	ref = (uint32_t*) (c + PGSIZE - 4);
	if (--(*ref) == 0)
		sys_page_unmap(0, c);
}
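
#if 0
/*
 * Minimal usage sketch, not part of the library proper; it assumes
 * the standard JOS user entry point umain() plus cprintf() and
 * assert() from inc/lib.h.  Kept behind #if 0 so this file still
 * links into programs that define their own umain().
 */
void
umain(int argc, char **argv)
{
	char *small, *big;

	small = malloc(100);		/* packed into a shared page */
	big = malloc(2 * PGSIZE);	/* multi-page: PTEs get PTE_CONTINUED */
	assert(small && big);

	cprintf("small=%p big=%p\n", small, big);

	free(big);	/* unmaps the PTE_CONTINUED run */
	free(small);	/* decrements the page's ref count */
}
#endif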