Mostly a WIP, refined the task ref count stuff a bit, added some more to the memory ref count. Runs, but not for long on complex workloads
Mike Miller committed Dec 8, 2023
1 parent 6726072 commit 0f8681f
Showing 11 changed files with 60 additions and 73 deletions.
15 changes: 7 additions & 8 deletions emu/memory.c
@@ -47,21 +47,21 @@ void mem_init(struct mem *mem) {

void mem_destroy(struct mem *mem) {
write_lock(&mem->lock);
-while((mem_ref_cnt_val_get(mem)) && (current->pid > 1) ){
+while((mem_ref_cnt_get(mem)) && (current->pid > 1) ){
nanosleep(&lock_pause, NULL);
}
pt_unmap_always(mem, 0, MEM_PAGES);

#if ENGINE_JIT
-while((task_ref_cnt_get(current, 1)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks
+while((mem_ref_cnt_get(current)) && (current->pid > 1) ){ // Wait for now, task is in one or more critical sections, and/or has locks
nanosleep(&lock_pause, NULL);
}
jit_free(mem->mmu.jit);
#endif
for (int i = 0; i < MEM_PGDIR_SIZE; i++) {
do {
nanosleep(&lock_pause, NULL);
-} while(mem_ref_cnt_val_get(mem));
+} while(mem_ref_cnt_get(mem));


if (mem->pgdir[i] != NULL)
@@ -112,7 +112,7 @@ struct pt_entry *mem_pt(struct mem *mem, page_t page) {
static void mem_pt_del(struct mem *mem, page_t page) {
struct pt_entry *entry = mem_pt(mem, page);
if (entry != NULL) {
-while(task_ref_cnt_get(current, 0) > 4) { // mark
+while(mem_ref_cnt_get(mem)) { // Don't delete if memory is in use
nanosleep(&lock_pause, NULL);
}
entry->data = NULL;
@@ -255,7 +255,7 @@ int pt_set_flags(struct mem *mem, page_t start, pages_t pages, int flags) {
}

int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) {
-while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections
+while(task_ref_cnt_get(current, 0) > 1) { // Will be at least 1, anything higher means another thread is accessing
nanosleep(&lock_pause, NULL);
}
for (page_t page = start; page < start + pages; mem_next_page(src, &page)) {
@@ -272,7 +272,7 @@ int pt_copy_on_write(struct mem *src, struct mem *dst, page_t start, page_t pages) {
dst_entry->offset = entry->offset;
dst_entry->flags = entry->flags;
}
-while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections
+while(task_ref_cnt_get(current, 0) > 1) { // Wait for now, task is in one or more critical sections
nanosleep(&lock_pause, NULL);
}
mem_changed(src);
@@ -347,7 +347,6 @@ void *mem_ptr(struct mem *mem, addr_t addr, int type) {

// copy/paste from above
mem_ref_cnt_mod(mem, 1);
-//read_to_write_lock(&mem->lock);
memcpy(copy, data, PAGE_SIZE); //mkemkemke Crashes here a lot when running both the go and parallel make test. 01 June 2022
mem_ref_cnt_mod(mem, -1);
pt_map(mem, page, 1, copy, 0, entry->flags &~ P_COW);
@@ -438,7 +437,7 @@ void mem_ref_cnt_mod(struct mem *mem, int value) { // value Should only be -1 or 1. -mke
pthread_mutex_unlock(&mem->reference.lock);
}

-int mem_ref_cnt_val_get(struct mem *mem) {
+int mem_ref_cnt_get(struct mem *mem) {
pthread_mutex_lock(&mem->reference.lock);
int cnt = mem->reference.count;
pthread_mutex_unlock(&mem->reference.lock);
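The renamed mem_ref_cnt_get() and its partner mem_ref_cnt_mod() follow one pattern: a plain counter guarded by its own mutex, which destroyers poll until it drains. A minimal sketch of that pattern (not the file's actual code; field names mirror the reference struct added in util/ro_locks.h below):

#include <pthread.h>
#include <stdbool.h>

struct reference {
    pthread_mutex_t lock;
    int count;              // threads currently using the object
    bool ready_to_be_freed; // set once the owner wants it gone
};

static void ref_mod(struct reference *ref, int value) { // value is -1 or 1
    pthread_mutex_lock(&ref->lock);
    ref->count += value;
    if (ref->count < 0)
        ref->count = 0;     // defensive: never let the count go negative
    pthread_mutex_unlock(&ref->lock);
}

static int ref_get(struct reference *ref) {
    pthread_mutex_lock(&ref->lock);
    int cnt = ref->count;   // read under the lock, return a snapshot
    pthread_mutex_unlock(&ref->lock);
    return cnt;
}

mem_destroy() above is the consumer side: it spins on nanosleep(&lock_pause, NULL) until the count reaches zero before unmapping pages and freeing the JIT.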
2 changes: 1 addition & 1 deletion emu/memory.h
@@ -108,7 +108,7 @@ int mem_segv_reason(struct mem *mem, addr_t addr);

// Reference counting is important
void mem_ref_cnt_mod(struct mem *mem, int value);
-int mem_ref_cnt_val_get(struct mem *mem);
+int mem_ref_cnt_get(struct mem *mem);

extern size_t real_page_size;

31 changes: 3 additions & 28 deletions iSH-AOK.xcodeproj/xcshareddata/xcschemes/iSH.xcscheme
@@ -91,8 +91,10 @@
buildConfiguration = "Debug-ApplePleaseFixFB19282108"
selectedDebuggerIdentifier = "Xcode.DebuggerFoundation.Debugger.LLDB"
selectedLauncherIdentifier = "Xcode.DebuggerFoundation.Launcher.LLDB"
+enableAddressSanitizer = "YES"
+enableASanStackUseAfterReturn = "YES"
+enableUBSanitizer = "YES"
disableMainThreadChecker = "YES"
disablePerformanceAntipatternChecker = "YES"
launchStyle = "0"
useCustomWorkingDirectory = "NO"
ignoresPersistentStateOnLaunch = "NO"
@@ -110,33 +112,6 @@
ReferencedContainer = "container:iSH-AOK.xcodeproj">
</BuildableReference>
</BuildableProductRunnable>
-<AdditionalOptions>
-<AdditionalOption
-key = "MallocStackLogging"
-value = ""
-isEnabled = "YES">
-</AdditionalOption>
-<AdditionalOption
-key = "DYLD_INSERT_LIBRARIES"
-value = "/usr/lib/libgmalloc.dylib"
-isEnabled = "YES">
-</AdditionalOption>
-<AdditionalOption
-key = "PrefersMallocStackLoggingLite"
-value = ""
-isEnabled = "YES">
-</AdditionalOption>
-<AdditionalOption
-key = "MallocGuardEdges"
-value = ""
-isEnabled = "YES">
-</AdditionalOption>
-<AdditionalOption
-key = "MallocScribble"
-value = ""
-isEnabled = "YES">
-</AdditionalOption>
-</AdditionalOptions>
</LaunchAction>
<ProfileAction
buildConfiguration = "Release"
21 changes: 13 additions & 8 deletions kernel/exit.c
@@ -136,24 +136,27 @@ noreturn void do_exit(int status) {
complex_lockt(&pids_lock, 0);
// release the sighand
signal_pending = !!(current->pending & ~current->blocked);
-while((task_ref_cnt_get(current, 0) > 2) ||
+while((task_ref_cnt_get(current, 0) > 2) || // We added one to the task reference count above, thus the check is 2, in case any other thread is accessing.
(locks_held_count(current)) ||
-(signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks
+(signal_pending)) { // Wait for now, task is in one or more critical sections, and/or has locks, or signals in flight
nanosleep(&lock_pause, NULL);
signal_pending = !!(current->pending & ~current->blocked);
}

sighand_release(current->sighand);
current->sighand = NULL;
struct sigqueue *sigqueue, *sigqueue_tmp;
list_for_each_entry_safe(&current->queue, sigqueue, sigqueue_tmp, queue) {
list_remove(&sigqueue->queue);
free(sigqueue);
}

struct task *leader = current->group->leader;

// reparent children
struct task *new_parent = find_new_parent(current);
struct task *child, *tmp;

list_for_each_entry_safe(&current->children, child, tmp, siblings) {
child->parent = new_parent;
list_remove(&child->siblings);
@@ -179,7 +182,7 @@ noreturn void do_exit(int status) {
} else {
leader->zombie = true;
notify(&parent->group->child_exit);
-struct siginfo_ info = {
+struct siginfo_ info = { //mkemkemke This is interesting. Need to think about possibilities. TODO
.code = SI_KERNEL_,
.child.pid = current->pid,
.child.uid = current->uid,
@@ -191,25 +194,24 @@ noreturn void do_exit(int status) {
send_signal(parent, leader->exit_signal, info);
}


if (exit_hook != NULL)
exit_hook(current, status);

unlock(&parent->general_lock);
}

vfork_notify(current);
+task_ref_cnt_mod(current, -1);

if(current != leader) {
-task_ref_cnt_mod(current, -1);
task_destroy(current, 1);
} else {
unlock(&current->general_lock);
-task_ref_cnt_mod(current, -1);
}

unlock(&pids_lock);

-//atomic_l_unlockf();

EXIT:pthread_exit(NULL);
}

@@ -244,10 +246,13 @@ noreturn void do_exit_group(int status) {
}

unlock(&pids_lock);
-task_ref_cnt_mod(current, -1);
unlock(&group->lock);
-//if(current->pid <= MAX_PID) // abort if crazy. -mke
+task_ref_cnt_mod(current, -1);
+if(current->pid <= MAX_PID) // abort if crazy. -mke
do_exit(status);

+unlock(&pids_lock); // Shouldn't get here
pthread_exit(NULL);
}

// always called from init process. Intended to be called when the init process exits.
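The reworked loop at the top of do_exit() is the same drain idea applied to tasks: the exiting thread holds references of its own, so it waits until no other thread holds one, no locks are held, and no signal is in flight. Distilled into a helper (a sketch; the declarations are stand-ins for the real ones, task_signal_pending() is a hypothetical accessor for the current->pending & ~current->blocked test, and lock_pause's value is assumed):

#include <stdbool.h>
#include <time.h>

struct task;                                // opaque here
int task_ref_cnt_get(struct task *t, int);  // from kernel/task.c
int locks_held_count(struct task *t);       // from the locking utilities
bool task_signal_pending(struct task *t);   // hypothetical accessor

static const struct timespec lock_pause = { 0, 100000 }; // assumed value

// `expected` is how many references the caller itself holds; do_exit()
// checks against 2 because it took an extra reference earlier.
static void drain_task(struct task *t, int expected) {
    while (task_ref_cnt_get(t, 0) > expected ||
           locks_held_count(t) ||
           task_signal_pending(t))
        nanosleep(&lock_pause, NULL);
}

Moving the final task_ref_cnt_mod(current, -1) ahead of the current != leader branch also means the reference is dropped exactly once on both paths instead of being duplicated in each arm.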
7 changes: 4 additions & 3 deletions kernel/log.c
@@ -221,7 +221,7 @@ inline int current_pid(void) {
return -1;
}
}

+// This should never happen
task_ref_cnt_mod(current, -1);
return -1;
}
@@ -237,7 +237,7 @@ inline int current_uid(void) {
return -1;
}
}

+// This should never happen
task_ref_cnt_mod(current, -1);
return -1;
}
@@ -252,6 +252,7 @@ inline char * current_comm(void) {
task_ref_cnt_mod(current, -1);
return "";
}

if (current->exiting != true) {
task_ref_cnt_mod(current, -1);
return comm;
@@ -260,8 +261,8 @@ inline char * current_comm(void) {
return "";
}
}
-task_ref_cnt_mod(current, -1);

+task_ref_cnt_mod(current, -1);
return "";
}

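current_pid(), current_uid(), and current_comm() all follow one shape: pin current with a reference, read the field only if the task isn't exiting, then drop the reference and fall back to a sentinel. Roughly (a sketch; task_is_exiting() and task_pid() are hypothetical accessors standing in for the file's direct field reads):

#include <stdbool.h>

struct task;
extern struct task *current;
void task_ref_cnt_mod(struct task *t, int value); // from kernel/task.c
bool task_is_exiting(struct task *t);             // hypothetical accessor
int task_pid(struct task *t);                     // hypothetical accessor

static int current_pid_sketch(void) {
    task_ref_cnt_mod(current, 1);   // pin: the task can't be freed under us
    int pid = -1;                   // sentinel for the "should never happen" path
    if (!task_is_exiting(current))
        pid = task_pid(current);
    task_ref_cnt_mod(current, -1);  // unpin before returning either way
    return pid;
}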
5 changes: 3 additions & 2 deletions kernel/mmap.c
@@ -45,15 +45,16 @@ void mm_release(struct mm *mm) {
if (--mm->refcount == 0) {
if (mm->exefile != NULL)
fd_close(mm->exefile);
-while(task_ref_cnt_get(current, 1)) { // FIXME: Should be locking current->reference.lock and updating
+while(mem_ref_cnt_get(&mm->mem) ) { // FIXME: Should be locking current->reference.lock and updating
// current->reference.count before mem_destroy
nanosleep(&lock_pause, NULL);
}

mem_destroy(&mm->mem);
-while(task_ref_cnt_get(current, 1)) { //FIXME: Should now unlock after mem_destroy
+while(task_ref_cnt_get(current, 1) > 1) { //FIXME: Should now unlock after mem_destroy
nanosleep(&lock_pause, NULL);
}

free(mm);
}
}
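mm_release() now waits on the memory refcount before mem_destroy() and on the task refcount afterwards; both FIXME comments admit the ordering is still ad hoc. Reduced to its skeleton (a sketch of the shape above, not a proposed fix; declarations are stand-ins and lock_pause's value is assumed):

#include <time.h>

struct mem;
struct task;
extern struct task *current;
int mem_ref_cnt_get(struct mem *mem);       // from emu/memory.c
int task_ref_cnt_get(struct task *t, int);  // from kernel/task.c
void mem_destroy(struct mem *mem);          // from emu/memory.c

static const struct timespec lock_pause = { 0, 100000 }; // assumed value

static void release_mem(struct mem *mem) {
    while (mem_ref_cnt_get(mem))              // wait for in-flight users of the mem
        nanosleep(&lock_pause, NULL);
    mem_destroy(mem);
    while (task_ref_cnt_get(current, 1) > 1)  // > 1: the caller always holds one
        nanosleep(&lock_pause, NULL);
}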
2 changes: 1 addition & 1 deletion kernel/poll.c
@@ -198,7 +198,7 @@ dword_t sys_poll(addr_t fds, dword_t nfds, int_t timeout) {
TASK_MAY_BLOCK {
res = poll_wait(poll, poll_event_callback, &context, timeout < 0 ? NULL : &timeout_ts);
}
-while(task_ref_cnt_get(current, 0)) { // Wait for now, task is in one or more critical sections
+while(task_ref_cnt_get(current, 0) > 1) { // Wait for now, task is in one or more critical sections
nanosleep(&lock_pause, NULL);
}
poll_destroy(poll);
15 changes: 9 additions & 6 deletions kernel/task.c
@@ -79,7 +79,7 @@ struct pid *pid_get_last_allocated(void) {
}

dword_t get_count_of_blocked_tasks(void) {
-task_ref_cnt_mod(current, 1);
+// task_ref_cnt_mod(current, 1); // Not needed?
dword_t res = 0;
struct pid *pid_entry;
complex_lockt(&pids_lock, 0);
@@ -88,7 +88,7 @@ dword_t get_count_of_blocked_tasks(void) {
res++;
}
}
-task_ref_cnt_mod(current, -1);
+// task_ref_cnt_mod(current, -1);
unlock(&pids_lock);
return res;
}
@@ -201,7 +201,7 @@ void task_destroy(struct task *task, int caller) {
unlock(&pids_lock);
}

-if (task_ref_cnt_get(task, 1)) {
+if (task_ref_cnt_get(task, 1)) { // Check to see if another thread is accessing this process. If yes, note that and defer freeing it
struct task_pending_deletion *pd = malloc(sizeof(struct task_pending_deletion));
if (pd) {
task->reference.ready_to_be_freed = true;
@@ -212,7 +212,7 @@ void task_destroy(struct task *task, int caller) {
list_add(&tasks_pending_deletion_queue, &pd->list);
pthread_mutex_unlock(&tasks_pending_deletion_lock);
}
-// Lets cleanup any pending deletions here for now
+// Lets cleanup any expired pending deletions here for now
cleanup_pending_deletions();
return;
} else {
@@ -225,7 +225,7 @@ void cleanup_pending_deletions(void) {
pthread_mutex_lock(&tasks_pending_deletion_lock);
struct task_pending_deletion *pd, *tmp;
list_for_each_entry_safe(&tasks_pending_deletion_queue, pd, tmp, list) {
-if (difftime(time(NULL), pd->added_time) >= GRACE_PERIOD) { // Delete reaped tasks older than
+if ((difftime(time(NULL), pd->added_time) >= GRACE_PERIOD) && !! (!pd->task->reference.count)) { // Delete reaped tasks old and no longer referenced
if (task_ref_cnt_get(pd->task, 0) == 0) {
free(pd->task);
list_remove(&pd->list);
@@ -258,6 +258,7 @@ void task_run_current(void) {
tlb_refresh(&tlb, &current->mem->mmu);

while (true) {
+task_ref_cnt_mod(current, 1);
read_lock(&current->mem->lock);

if(!doEnableMulticore) {
@@ -279,6 +280,8 @@
} else {
handle_interrupt(interrupt);
}

+task_ref_cnt_mod(current, -1);
}
}

@@ -336,7 +339,7 @@ void update_thread_name(void) {

void task_ref_cnt_mod(struct task *task, int value) { // value Should only be -1 or 1. -mke
// Keep track of how many threads are referencing this task
-if(!doEnableExtraLocking) {// If they want to fly by the seat of their pants... -mke
+if(!doEnableExtraLocking) { // If they want to fly by the seat of their pants... -mke
return;
}

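task_destroy() and cleanup_pending_deletions() together form a small grace-period reaper: a task that is still referenced is queued with a timestamp instead of freed, and the queue is swept for entries that are both older than GRACE_PERIOD and no longer referenced. A self-contained sketch, with a singly linked list standing in for the list_* macros and an assumed GRACE_PERIOD value:

#include <stdlib.h>
#include <time.h>

#define GRACE_PERIOD 2.0 // seconds; assumed value

struct task;
int task_ref_cnt_get(struct task *t, int); // from kernel/task.c

struct pending_deletion {
    struct task *task;
    time_t added_time;             // when the task was queued
    struct pending_deletion *next;
};

static struct pending_deletion *pending_head; // guard with a mutex, as the file does

static void reap_expired(void) {
    struct pending_deletion **pp = &pending_head;
    while (*pp != NULL) {
        struct pending_deletion *pd = *pp;
        if (difftime(time(NULL), pd->added_time) >= GRACE_PERIOD &&
            task_ref_cnt_get(pd->task, 0) == 0) { // old *and* unreferenced
            *pp = pd->next;                       // unlink first, then free
            free(pd->task);
            free(pd);
        } else {
            pp = &pd->next;
        }
    }
}

The double condition is the point of this commit's change: age alone isn't enough, since a long-running thread could still be holding a reference when the grace period expires.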
4 changes: 2 additions & 2 deletions util/ro_locks.c
@@ -91,14 +91,14 @@ void mylock(lock_t *lock, int log_lock) {
unlock(lock);

if(!log_lock) {
-task_ref_cnt_mod_wrapper(1);
+// task_ref_cnt_mod_wrapper(1);
pthread_mutex_lock(&lock->m);
modify_locks_held_count_wrapper(1);
lock->owner = pthread_self();
lock->pid = current_pid();
lock->uid = current_uid();
strlcpy(lock->comm, current_comm(), 16);
-task_ref_cnt_mod_wrapper(-1);
+// task_ref_cnt_mod_wrapper(-1);
} else {
pthread_mutex_lock(&lock->m);
lock->owner = pthread_self();
5 changes: 5 additions & 0 deletions util/ro_locks.h
@@ -19,6 +19,11 @@ typedef struct {
char comm[16];
char lname[16];
bool wait4;
+struct {
+    pthread_mutex_t lock;
+    int count; // If positive, don't delete yet, wait_to_delete
+    bool ready_to_be_freed; // Should be false initially
+} reference;
#if LOCK_DEBUG
struct lock_debug {
const char *file;
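With the new reference block, every lock_t carries its own refcount, so the embedded mutex and fields need initializing before first use. A plausible init helper, not part of this commit:

#include <pthread.h>
#include <stdbool.h>

// Assumes lock_t is the struct declared above in this header.
static inline void lock_ref_init(lock_t *lock) {
    pthread_mutex_init(&lock->reference.lock, NULL);
    lock->reference.count = 0;                 // no users yet
    lock->reference.ready_to_be_freed = false; // false initially, per the comment above
}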