cpus-common: move CPU work item management to common code
Make CPU work core functions common between system and user-mode
emulation. User-mode does not use run_on_cpu, so do not implement it.

Signed-off-by: Sergey Fedorov <[email protected]>
Signed-off-by: Sergey Fedorov <[email protected]>
Reviewed-by: Alex Bennée <[email protected]>
Signed-off-by: Alex Bennée <[email protected]>
Message-Id: <[email protected]>
Reviewed-by: Richard Henderson <[email protected]>
Signed-off-by: Paolo Bonzini <[email protected]>
sergefdrv authored and bonzini committed Sep 27, 2016
1 parent 267f685 commit d148d90
Showing 5 changed files with 148 additions and 91 deletions.
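For orientation, a sketch of how the relocated work-item API is used from a system-emulation caller. This is illustration only, not part of the commit: set_vcpu_halted() and example_post_work() are invented names, and run_on_cpu() itself remains system-mode only (per the commit message, user-mode emulation does not implement it).

#include "qemu/osdep.h"
#include "qom/cpu.h"

/* Work function: runs in @cpu's own thread once that vCPU has left cpu_exec(),
 * or inline when the caller already is @cpu. */
static void set_vcpu_halted(CPUState *cpu, void *data)
{
    cpu->halted = ((uintptr_t)data != 0);
}

static void example_post_work(CPUState *cpu)
{
    /* Synchronous: queue a stack-allocated item, kick @cpu, then sleep on
     * qemu_work_cond (releasing qemu_global_mutex) until the item is done. */
    run_on_cpu(cpu, set_vcpu_halted, (void *)(uintptr_t)1);

    /* Asynchronous: the g_malloc0()ed item is run and freed later by
     * process_queued_cpu_work(); the caller does not wait. */
    async_run_on_cpu(cpu, set_vcpu_halted, (void *)(uintptr_t)0);
}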
11 changes: 9 additions & 2 deletions bsd-user/main.c
@@ -68,11 +68,11 @@ int cpu_get_pic_interrupt(CPUX86State *env)
 #endif
 
 /* These are no-ops because we are not threadsafe. */
-static inline void cpu_exec_start(CPUArchState *env)
+static inline void cpu_exec_start(CPUState *cpu)
 {
 }
 
-static inline void cpu_exec_end(CPUArchState *env)
+static inline void cpu_exec_end(CPUState *cpu)
 {
 }
 
@@ -164,7 +164,11 @@ void cpu_loop(CPUX86State *env)
     //target_siginfo_t info;
 
     for(;;) {
+        cpu_exec_start(cs);
         trapnr = cpu_exec(cs);
+        cpu_exec_end(cs);
+        process_queued_cpu_work(cs);
+
         switch(trapnr) {
         case 0x80:
             /* syscall from int $0x80 */
@@ -505,7 +509,10 @@ void cpu_loop(CPUSPARCState *env)
     //target_siginfo_t info;
 
     while (1) {
+        cpu_exec_start(cs);
         trapnr = cpu_exec(cs);
+        cpu_exec_end(cs);
+        process_queued_cpu_work(cs);
 
         switch (trapnr) {
 #ifndef TARGET_SPARC64
94 changes: 94 additions & 0 deletions cpus-common.c
@@ -23,10 +23,12 @@
 #include "sysemu/cpus.h"
 
 static QemuMutex qemu_cpu_list_lock;
+static QemuCond qemu_work_cond;
 
 void qemu_init_cpu_list(void)
 {
     qemu_mutex_init(&qemu_cpu_list_lock);
+    qemu_cond_init(&qemu_work_cond);
 }
 
 void cpu_list_lock(void)
@@ -81,3 +83,95 @@ void cpu_list_remove(CPUState *cpu)
     cpu->cpu_index = UNASSIGNED_CPU_INDEX;
     qemu_mutex_unlock(&qemu_cpu_list_lock);
 }
+
+struct qemu_work_item {
+    struct qemu_work_item *next;
+    run_on_cpu_func func;
+    void *data;
+    int done;
+    bool free;
+};
+
+static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
+{
+    qemu_mutex_lock(&cpu->work_mutex);
+    if (cpu->queued_work_first == NULL) {
+        cpu->queued_work_first = wi;
+    } else {
+        cpu->queued_work_last->next = wi;
+    }
+    cpu->queued_work_last = wi;
+    wi->next = NULL;
+    wi->done = false;
+    qemu_mutex_unlock(&cpu->work_mutex);
+
+    qemu_cpu_kick(cpu);
+}
+
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+                   QemuMutex *mutex)
+{
+    struct qemu_work_item wi;
+
+    if (qemu_cpu_is_self(cpu)) {
+        func(cpu, data);
+        return;
+    }
+
+    wi.func = func;
+    wi.data = data;
+    wi.free = false;
+
+    queue_work_on_cpu(cpu, &wi);
+    while (!atomic_mb_read(&wi.done)) {
+        CPUState *self_cpu = current_cpu;
+
+        qemu_cond_wait(&qemu_work_cond, mutex);
+        current_cpu = self_cpu;
+    }
+}
+
+void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
+{
+    struct qemu_work_item *wi;
+
+    if (qemu_cpu_is_self(cpu)) {
+        func(cpu, data);
+        return;
+    }
+
+    wi = g_malloc0(sizeof(struct qemu_work_item));
+    wi->func = func;
+    wi->data = data;
+    wi->free = true;
+
+    queue_work_on_cpu(cpu, wi);
+}
+
+void process_queued_cpu_work(CPUState *cpu)
+{
+    struct qemu_work_item *wi;
+
+    if (cpu->queued_work_first == NULL) {
+        return;
+    }
+
+    qemu_mutex_lock(&cpu->work_mutex);
+    while (cpu->queued_work_first != NULL) {
+        wi = cpu->queued_work_first;
+        cpu->queued_work_first = wi->next;
+        if (!cpu->queued_work_first) {
+            cpu->queued_work_last = NULL;
+        }
+        qemu_mutex_unlock(&cpu->work_mutex);
+        wi->func(cpu, wi->data);
+        qemu_mutex_lock(&cpu->work_mutex);
+        if (wi->free) {
+            g_free(wi);
+        } else {
+            atomic_mb_set(&wi->done, true);
+        }
+    }
+    qemu_mutex_unlock(&cpu->work_mutex);
+    qemu_cond_broadcast(&qemu_work_cond);
+}
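The hand-off implemented above is easy to lose in the diff: do_run_on_cpu() queues a stack-allocated item, kicks the target vCPU, and sleeps on qemu_work_cond while releasing the caller-supplied mutex; the vCPU thread drains its queue outside cpu_exec() and broadcasts once items are marked done. The following standalone plain-pthreads reduction of that handshake is illustration only — all names are invented, and it uses a single lock and LIFO order where the QEMU code uses cpu->work_mutex plus a caller mutex and a FIFO tail pointer.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

typedef void (*work_func)(void *data);

struct work_item {
    struct work_item *next;
    work_func func;
    void *data;
    bool done;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t work_cond = PTHREAD_COND_INITIALIZER;  /* cf. qemu_work_cond */
static struct work_item *queue;

/* Producer side: roughly do_run_on_cpu() minus the run-it-inline fast path. */
static void run_on_worker(work_func func, void *data)
{
    struct work_item wi = { .func = func, .data = data, .done = false };

    pthread_mutex_lock(&lock);
    wi.next = queue;                          /* QEMU appends at a tail pointer */
    queue = &wi;
    /* The real code calls qemu_cpu_kick() here so the vCPU leaves cpu_exec(). */
    while (!wi.done) {
        pthread_cond_wait(&work_cond, &lock); /* lock is dropped while sleeping */
    }
    pthread_mutex_unlock(&lock);
}

/* Consumer side: roughly process_queued_cpu_work(), run by the worker thread. */
static void process_queued_work(void)
{
    pthread_mutex_lock(&lock);
    while (queue) {
        struct work_item *wi = queue;
        queue = wi->next;
        pthread_mutex_unlock(&lock);          /* run the callback unlocked */
        wi->func(wi->data);
        pthread_mutex_lock(&lock);
        wi->done = true;
    }
    pthread_cond_broadcast(&work_cond);       /* wake every waiting producer */
    pthread_mutex_unlock(&lock);
}

static void hello(void *data)
{
    printf("ran: %s\n", (const char *)data);
}

static void *worker(void *arg)
{
    (void)arg;
    for (;;) {                                /* stand-in for the vCPU outer loop */
        process_queued_work();
        usleep(1000);
    }
    return NULL;
}

int main(void)
{
    pthread_t tid;
    pthread_create(&tid, NULL, worker, NULL);
    run_on_worker(hello, (void *)"queued work item"); /* blocks until hello() ran */
    printf("done flag observed\n");
    return 0;
}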
82 changes: 1 addition & 81 deletions cpus.c
@@ -902,73 +902,21 @@ static QemuThread io_thread;
 static QemuCond qemu_cpu_cond;
 /* system init */
 static QemuCond qemu_pause_cond;
-static QemuCond qemu_work_cond;
 
 void qemu_init_cpu_loop(void)
 {
     qemu_init_sigbus();
     qemu_cond_init(&qemu_cpu_cond);
     qemu_cond_init(&qemu_pause_cond);
-    qemu_cond_init(&qemu_work_cond);
     qemu_cond_init(&qemu_io_proceeded_cond);
     qemu_mutex_init(&qemu_global_mutex);
 
     qemu_thread_get_self(&io_thread);
 }
 
-static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
-{
-    qemu_mutex_lock(&cpu->work_mutex);
-    if (cpu->queued_work_first == NULL) {
-        cpu->queued_work_first = wi;
-    } else {
-        cpu->queued_work_last->next = wi;
-    }
-    cpu->queued_work_last = wi;
-    wi->next = NULL;
-    wi->done = false;
-    qemu_mutex_unlock(&cpu->work_mutex);
-
-    qemu_cpu_kick(cpu);
-}
-
 void run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
 {
-    struct qemu_work_item wi;
-
-    if (qemu_cpu_is_self(cpu)) {
-        func(cpu, data);
-        return;
-    }
-
-    wi.func = func;
-    wi.data = data;
-    wi.free = false;
-
-    queue_work_on_cpu(cpu, &wi);
-    while (!atomic_mb_read(&wi.done)) {
-        CPUState *self_cpu = current_cpu;
-
-        qemu_cond_wait(&qemu_work_cond, &qemu_global_mutex);
-        current_cpu = self_cpu;
-    }
-}
-
-void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
-{
-    struct qemu_work_item *wi;
-
-    if (qemu_cpu_is_self(cpu)) {
-        func(cpu, data);
-        return;
-    }
-
-    wi = g_malloc0(sizeof(struct qemu_work_item));
-    wi->func = func;
-    wi->data = data;
-    wi->free = true;
-
-    queue_work_on_cpu(cpu, wi);
+    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
 }
 
 static void qemu_kvm_destroy_vcpu(CPUState *cpu)
@@ -983,34 +931,6 @@ static void qemu_tcg_destroy_vcpu(CPUState *cpu)
 {
 }
 
-static void process_queued_cpu_work(CPUState *cpu)
-{
-    struct qemu_work_item *wi;
-
-    if (cpu->queued_work_first == NULL) {
-        return;
-    }
-
-    qemu_mutex_lock(&cpu->work_mutex);
-    while (cpu->queued_work_first != NULL) {
-        wi = cpu->queued_work_first;
-        cpu->queued_work_first = wi->next;
-        if (!cpu->queued_work_first) {
-            cpu->queued_work_last = NULL;
-        }
-        qemu_mutex_unlock(&cpu->work_mutex);
-        wi->func(cpu, wi->data);
-        qemu_mutex_lock(&cpu->work_mutex);
-        if (wi->free) {
-            g_free(wi);
-        } else {
-            atomic_mb_set(&wi->done, true);
-        }
-    }
-    qemu_mutex_unlock(&cpu->work_mutex);
-    qemu_cond_broadcast(&qemu_work_cond);
-}
-
 static void qemu_wait_io_event_common(CPUState *cpu)
 {
     if (cpu->stop) {
27 changes: 19 additions & 8 deletions include/qom/cpu.h
@@ -233,14 +233,7 @@ struct kvm_run;
 
 /* work queue */
 typedef void (*run_on_cpu_func)(CPUState *cpu, void *data);
-
-struct qemu_work_item {
-    struct qemu_work_item *next;
-    run_on_cpu_func func;
-    void *data;
-    int done;
-    bool free;
-};
+struct qemu_work_item;
 
 /**
  * CPUState:
@@ -629,6 +622,18 @@ void qemu_cpu_kick(CPUState *cpu);
  */
 bool cpu_is_stopped(CPUState *cpu);
 
+/**
+ * do_run_on_cpu:
+ * @cpu: The vCPU to run on.
+ * @func: The function to be executed.
+ * @data: Data to pass to the function.
+ * @mutex: Mutex to release while waiting for @func to run.
+ *
+ * Used internally in the implementation of run_on_cpu.
+ */
+void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
+                   QemuMutex *mutex);
+
 /**
  * run_on_cpu:
  * @cpu: The vCPU to run on.
@@ -807,6 +812,12 @@ void cpu_remove(CPUState *cpu);
  */
 void cpu_remove_sync(CPUState *cpu);
 
+/**
+ * process_queued_cpu_work() - process all items on CPU work queue
+ * @cpu: The CPU which work queue to process.
+ */
+void process_queued_cpu_work(CPUState *cpu);
+
 /**
  * qemu_init_vcpu:
  * @cpu: The vCPU to initialize.
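One design point from the header change above is worth spelling out: do_run_on_cpu() is passed the mutex it should drop while waiting, which is what lets the same helper back run_on_cpu() under qemu_global_mutex in cpus.c and, in principle, serve a front end that holds a different lock. A purely hypothetical wrapper (my_lock and my_run_on_cpu are invented names, not QEMU API) would look like:

#include "qemu/osdep.h"
#include "qemu/thread.h"
#include "qom/cpu.h"

static QemuMutex my_lock;   /* invented lock; qemu_mutex_init(&my_lock) at startup */

/* Hypothetical front-end wrapper: the caller holds my_lock, and do_run_on_cpu()
 * releases it inside qemu_cond_wait() until @func has run on @cpu. */
static void my_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
{
    do_run_on_cpu(cpu, func, data, &my_lock);
}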
25 changes: 25 additions & 0 deletions linux-user/main.c (diff not shown)
