ipm_ivshmem.c
/*
 * Copyright (c) 2023 Linaro.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#define DT_DRV_COMPAT linaro_ivshmem_ipm

#include <stdint.h>
#include <string.h>

#include <zephyr/device.h>
#include <zephyr/drivers/ipm.h>
#include <zephyr/drivers/virtualization/ivshmem.h>
#include <zephyr/logging/log.h>

LOG_MODULE_REGISTER(ipm_ivshmem, CONFIG_IPM_LOG_LEVEL);

K_THREAD_STACK_DEFINE(ivshmem_ev_loop_stack, CONFIG_IPM_IVSHMEM_EVENT_LOOP_STACK_SIZE);
static struct k_thread ivshmem_ev_loop_thread;

struct ivshmem_ipm_data {
	ipm_callback_t cb;
	void *user_data;
};

struct ivshmem_ipm_config {
	const struct device *ivshmem_dev;
};
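
/*
 * RX path: the event loop thread below registers a k_poll signal as the
 * ivshmem notification handler and, each time the peer raises a doorbell
 * interrupt, invokes the IPM callback registered by the application.
 */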
static void ivshmem_ipm_event_loop_thread(void *arg, void *p2, void *p3)
{
	ARG_UNUSED(p2);
	ARG_UNUSED(p3);

	unsigned int poll_signaled;
	int ivshmem_vector_rx;
	struct k_poll_signal sig;
	struct k_poll_event events[] = {
		K_POLL_EVENT_INITIALIZER(K_POLL_TYPE_SIGNAL,
					 K_POLL_MODE_NOTIFY_ONLY,
					 &sig),
	};

	const struct device *dev = (const struct device *)arg;
	struct ivshmem_ipm_data *dev_data = (struct ivshmem_ipm_data *)dev->data;
	struct ivshmem_ipm_config *dev_cfg = (struct ivshmem_ipm_config *)dev->config;

	k_poll_signal_init(&sig);

	int ret = ivshmem_register_handler(dev_cfg->ivshmem_dev, &sig, 0);

	if (ret < 0) {
		LOG_ERR("registering handlers must be supported: %d\n", ret);
		k_panic();
	}

	while (1) {
		LOG_DBG("%s: waiting interrupt from client...\n", __func__);

		ret = k_poll(events, ARRAY_SIZE(events), K_FOREVER);

		k_poll_signal_check(&sig, &poll_signaled, &ivshmem_vector_rx);
		/* get ready for next signal */
		k_poll_signal_reset(&sig);

		if (dev_data->cb) {
			dev_data->cb(dev, dev_data->user_data, 0, NULL);
		}
	}
}
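
/*
 * TX path: ivshmem-doorbell only carries a notification, so the data and
 * size arguments are ignored; the IPM "id" selects which peer to interrupt.
 */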
static int ivshmem_ipm_send(const struct device *dev, int wait, uint32_t id,
			    const void *data, int size)
{
	ARG_UNUSED(wait);
	ARG_UNUSED(data);
	ARG_UNUSED(size);

	struct ivshmem_ipm_config *dev_cfg = (struct ivshmem_ipm_config *)dev->config;

	LOG_DBG("sending notification to the peer id 0x%x\n", id);

	return ivshmem_int_peer(dev_cfg->ivshmem_dev, id, 0);
}

static void ivshmem_ipm_register_callback(const struct device *dev,
					  ipm_callback_t cb,
					  void *user_data)
{
	struct ivshmem_ipm_data *dev_data = (struct ivshmem_ipm_data *)dev->data;

	dev_data->cb = cb;
	dev_data->user_data = user_data;
}

static int ivshmem_ipm_set_enabled(const struct device *dev, int enable)
{
	/* Some subsystems need this minimal hook to exist; just return success here. */
	ARG_UNUSED(dev);
	ARG_UNUSED(enable);

	return 0;
}

static int ivshmem_ipm_init(const struct device *dev)
{
	k_thread_create(&ivshmem_ev_loop_thread,
			ivshmem_ev_loop_stack,
			CONFIG_IPM_IVSHMEM_EVENT_LOOP_STACK_SIZE,
			ivshmem_ipm_event_loop_thread,
			(void *)dev, NULL, NULL,
			CONFIG_IPM_IVSHMEM_EVENT_LOOP_PRIO,
			0, K_NO_WAIT);

	return 0;
}

static const struct ipm_driver_api ivshmem_ipm_driver_api = {
	.send = ivshmem_ipm_send,
	.register_callback = ivshmem_ipm_register_callback,
	.set_enabled = ivshmem_ipm_set_enabled
};
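
/*
 * One device instance is created per enabled devicetree node matching
 * "linaro,ivshmem-ipm"; its "ivshmem" phandle points at the underlying
 * ivshmem-doorbell device used for the notifications.
 */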
#define IPM_IVSHMEM_INIT(inst) \
	static const struct ivshmem_ipm_config ivshmem_ipm_cfg_##inst = { \
		.ivshmem_dev = DEVICE_DT_GET(DT_INST_PHANDLE(inst, ivshmem)) \
	}; \
	static struct ivshmem_ipm_data ivshmem_ipm_data_##inst = { \
		.cb = NULL, \
		.user_data = NULL, \
	}; \
	DEVICE_DT_INST_DEFINE(inst, \
			      ivshmem_ipm_init, \
			      NULL, \
			      &ivshmem_ipm_data_##inst, &ivshmem_ipm_cfg_##inst, \
			      POST_KERNEL, CONFIG_APPLICATION_INIT_PRIORITY, \
			      &ivshmem_ipm_driver_api);

DT_INST_FOREACH_STATUS_OKAY(IPM_IVSHMEM_INIT);
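
/*
 * Illustrative application-side sketch (not part of this driver, kept out of
 * the build with #if 0): how the generic Zephyr IPM API could be used on top
 * of this device. The node label "ipm_ivshmem0" and the peer ID 2 are
 * assumptions for the example, not values defined by this file.
 */
#if 0
#include <zephyr/device.h>
#include <zephyr/drivers/ipm.h>

static void app_ipm_rx(const struct device *dev, void *user_data,
		       uint32_t id, volatile void *data)
{
	/* A doorbell arrived from the peer; any payload is exchanged
	 * separately, e.g. through the ivshmem shared-memory region.
	 */
}

void app_ipm_setup(void)
{
	const struct device *ipm = DEVICE_DT_GET(DT_NODELABEL(ipm_ivshmem0));

	ipm_register_callback(ipm, app_ipm_rx, NULL);
	ipm_set_enabled(ipm, 1);

	/* Notify peer 2: only the doorbell is sent, no payload. */
	ipm_send(ipm, 0, 2, NULL, 0);
}
#endif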