/*
 * ksched.h - an interface to the ksched kernel module
 */

#pragma once

#include <sched.h>
#include <sys/ioctl.h>
#include <signal.h>

#include <base/stddef.h>
#include <base/atomic.h>
#include <base/limits.h>

#define __user
#include "../ksched/ksched.h"

extern int ksched_fd, ksched_count;
extern struct ksched_shm_cpu *ksched_shm;
extern cpu_set_t ksched_set;
extern unsigned int ksched_gens[NCPU];

/**
 * ksched_run - runs a kthread on a specific core
 * @core: the core to run a kthread on
 * @tid: the kthread's TID (or zero to idle the core)
 */
static inline void ksched_run(unsigned int core, pid_t tid)
{
	unsigned int gen = ++ksched_gens[core];

	ksched_shm[core].tid = tid;
	store_release(&ksched_shm[core].gen, gen);
}

/**
 * ksched_poll_run_done - determines if the last ksched_run() call finished
 * @core: the core on which ksched_run() was called
 *
 * Returns true if finished.
 */
static inline bool ksched_poll_run_done(unsigned int core)
{
	return load_acquire(&ksched_shm[core].last_gen) == ksched_gens[core];
}

/**
 * ksched_poll_idle - determines if a core is currently idle
 * @core: the core to check if it is idle
 *
 * Returns true if idle.
 */
static inline bool ksched_poll_idle(unsigned int core)
{
	return !load_acquire(&ksched_shm[core].busy);
}

/**
 * ksched_idle_hint - sets the mwait hint to use when idling a core
 * @core: the core to set the hint for
 * @hint: the architecture-specific mwait hint
 */
static inline void ksched_idle_hint(unsigned int core, unsigned int hint)
{
	ksched_shm[core].mwait_hint = hint;
}

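/*
 * Usage sketch (illustrative addition, not part of the original header): scan
 * the first @ncores cores and return the first idle one, or -1 if every core
 * is busy. @ncores is a hypothetical parameter for the example.
 */
static inline int ksched_find_idle_core_example(unsigned int ncores)
{
	unsigned int core;

	for (core = 0; core < ncores; core++) {
		if (ksched_poll_idle(core))
			return (int)core;
	}

	return -1;
}
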
enum {
	KSCHED_INTR_CEDE = 0,
	KSCHED_INTR_YIELD,
};

/**
 * ksched_enqueue_intr - enqueues an interrupt request on a core
 * @core: the core to interrupt
 * @type: the type of interrupt to enqueue
 *
 * The interrupt will not be sent until ksched_send_intrs(). This is done to
 * create an opportunity for batching interrupts. If ksched_run() is called on
 * the same core after ksched_enqueue_intr(), it may prevent interrupts still
 * pending for the last kthread from being delivered.
 */
static inline void ksched_enqueue_intr(unsigned int core, int type)
{
	unsigned int signum;

	switch (type) {
	case KSCHED_INTR_CEDE:
		signum = SIGUSR1;
		break;

	case KSCHED_INTR_YIELD:
		signum = SIGUSR2;
		break;

	default:
		WARN();
		return;
	}

	ksched_shm[core].signum = signum;
	store_release(&ksched_shm[core].sig, ksched_gens[core]);
	CPU_SET(core, &ksched_set);
	ksched_count++;
}

/**
 * ksched_enqueue_pmc - enqueues a performance counter request on a core
 * @core: the core to measure
 * @sel: the architecture-specific counter selector
 */
static inline void ksched_enqueue_pmc(unsigned int core, uint64_t sel)
{
	ksched_shm[core].pmcsel = sel;
	store_release(&ksched_shm[core].pmc, 1);
	CPU_SET(core, &ksched_set);
	ksched_count++;
}

/**
 * ksched_poll_pmc - polls for a performance counter result
 * @core: the core to poll
 * @val: a pointer to store the result
 * @tsc: a pointer to store the timestamp of the result
 *
 * Returns true if successful; otherwise the counter is still being measured.
 */
static inline bool ksched_poll_pmc(unsigned int core, uint64_t *val, uint64_t *tsc)
{
	if (load_acquire(&ksched_shm[core].pmc) != 0)
		return false;

	*val = ACCESS_ONCE(ksched_shm[core].pmcval);
	*tsc = ACCESS_ONCE(ksched_shm[core].pmctsc);
	return true;
}

/**
 * ksched_send_intrs - sends any pending interrupts
 */
static inline void ksched_send_intrs(void)
{
	struct ksched_intr_req req;
	int ret;

	if (ksched_count == 0)
		return;

	ksched_count = 0;
	req.len = sizeof(ksched_set);
	req.mask = &ksched_set;
	ret = ioctl(ksched_fd, KSCHED_IOC_INTR, &req);
	BUG_ON(ret);

	CPU_ZERO(&ksched_set);
}

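/*
 * Usage sketches (illustrative additions, not part of the original header).
 *
 * The first example batches cede interrupts for a contiguous range of cores
 * and delivers them with a single KSCHED_IOC_INTR ioctl. The second reads a
 * performance counter; it assumes the queued PMC request is delivered by the
 * same ksched_send_intrs() path, since ksched_enqueue_pmc() uses the same
 * ksched_set/ksched_count bookkeeping as ksched_enqueue_intr().
 */
static inline void ksched_cede_range_example(unsigned int first, unsigned int last)
{
	unsigned int core;

	/* queue one request per core; nothing is delivered yet */
	for (core = first; core <= last; core++)
		ksched_enqueue_intr(core, KSCHED_INTR_CEDE);

	/* a single ioctl covers every queued core */
	ksched_send_intrs();
}

static inline uint64_t ksched_read_pmc_example(unsigned int core, uint64_t sel)
{
	uint64_t val, tsc;

	ksched_enqueue_pmc(core, sel);
	ksched_send_intrs();

	/* spin until the kernel publishes the counter value and timestamp */
	while (!ksched_poll_pmc(core, &val, &tsc))
		;

	return val;
}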