@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_TIMER		0
-#define BFIN_IPI_RESCHEDULE	1
-#define BFIN_IPI_CALL_FUNC	2
-#define BFIN_IPI_CPU_STOP	3
+enum ipi_message_type {
+	BFIN_IPI_TIMER,
+	BFIN_IPI_RESCHEDULE,
+	BFIN_IPI_CALL_FUNC,
+	BFIN_IPI_CALL_FUNC_SINGLE,
+	BFIN_IPI_CPU_STOP,
+};
 
 struct blackfin_flush_data {
 	unsigned long start;
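
Since the IPI types become bit numbers in a per-cpu pending word (see the handler change further down) rather than values stored in a queue entry, the whole enum has to stay below BITS_PER_LONG. A hypothetical guard one could add at the start of bfin_ipi_init() (not part of this patch):

	/* hypothetical sanity check: the highest IPI type must fit in one long */
	BUILD_BUG_ON(BFIN_IPI_CPU_STOP >= BITS_PER_LONG);
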
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
 
 void *secondary_stack;
 
-
-struct smp_call_struct {
-	void (*func)(void *info);
-	void *info;
-	int wait;
-	cpumask_t *waitmask;
-};
-
 static struct blackfin_flush_data smp_flush_data;
 
 static DEFINE_SPINLOCK(stop_lock);
 
-struct ipi_message {
-	unsigned long type;
-	struct smp_call_struct call_struct;
-};
-
 /* A magic number - stress test shows this is safe for common cases */
 #define BFIN_IPI_MSGQ_LEN 5
 
 /* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-	spinlock_t lock;
+struct ipi_data {
 	unsigned long count;
-	unsigned long head; /* head of the queue */
-	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+	unsigned long bits;
 };
 
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
 
 static void ipi_cpu_stop(unsigned int cpu)
 {
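
With this change each CPU's mailbox is a single word of pending-IPI bits plus a count that is only bumped for statistics; the locked FIFO of typed messages above is gone. Posting the same IPI type twice before the target services it simply leaves the bit set, so duplicate IPIs coalesce into one dispatch. A minimal user-space sketch of the same mailbox idea, using GCC atomic builtins as stand-ins for the kernel's set_bit()/xchg() (all names here are illustrative, not kernel code):

	#include <stdio.h>

	static unsigned long pending_bits;	/* stands in for ipi_data.bits */

	enum { IPI_RESCHEDULE, IPI_CALL_FUNC };

	static void post(int msg)		/* sender: atomically set one bit */
	{
		__atomic_fetch_or(&pending_bits, 1UL << msg, __ATOMIC_SEQ_CST);
	}

	static void handle(void)		/* receiver: grab and clear the whole word */
	{
		unsigned long pending = __atomic_exchange_n(&pending_bits, 0UL, __ATOMIC_SEQ_CST);

		for (int msg = 0; msg < (int)(8 * sizeof(pending)); msg++)
			if (pending & (1UL << msg))
				printf("dispatch IPI %d\n", msg);
	}

	int main(void)
	{
		post(IPI_RESCHEDULE);
		post(IPI_RESCHEDULE);		/* coalesced: still one dispatch */
		post(IPI_CALL_FUNC);
		handle();
		return 0;
	}
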
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
 	blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-	int wait;
-	void (*func)(void *info);
-	void *info;
-	func = msg->call_struct.func;
-	info = msg->call_struct.info;
-	wait = msg->call_struct.wait;
-	func(info);
-	if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * 'wait' usually means synchronization between CPUs.
-		 * Invalidate D cache in case shared data was changed
-		 * by func() to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-		cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-	}
-}
-
 /* Use IRQ_SUPPLE_0 to request reschedule.
  * When returning from interrupt to user space,
  * there is chance to reschedule */
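
The removed ipi_call_function() duplicated bookkeeping (func/info/wait plus a shared waitmask) that the generic cross-call code in kernel/smp.c already provides; once the interrupt handler calls generic_smp_call_function_interrupt() (see the next hunk), callers keep the usual API and the completion handling moves out of arch code. A caller-side sketch under that assumption (flush_one()/flush_everywhere() are invented names, not part of this file):

	/* illustrative only: ordinary smp_call_function() users are unaffected */
	static void flush_one(void *info)
	{
		struct blackfin_flush_data *fdata = info;

		blackfin_icache_flush_range(fdata->start, fdata->end);
	}

	static void flush_everywhere(struct blackfin_flush_data *fdata)
	{
		/* wait == 1: kernel/smp.c, not arch code, now waits for completion */
		smp_call_function(flush_one, fdata, 1);
	}
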
@@ -172,152 +138,95 @@ void ipi_timer(void)
 
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
-	struct ipi_message *msg;
-	struct ipi_message_queue *msg_queue;
+	struct ipi_data *bfin_ipi_data;
 	unsigned int cpu = smp_processor_id();
-	unsigned long flags;
+	unsigned long pending;
+	unsigned long msg;
 
 	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
-	msg_queue = &__get_cpu_var(ipi_msg_queue);
-
-	spin_lock_irqsave(&msg_queue->lock, flags);
-
-	while (msg_queue->count) {
-		msg = &msg_queue->ipi_message[msg_queue->head];
-		switch (msg->type) {
-		case BFIN_IPI_TIMER:
-			ipi_timer();
-			break;
-		case BFIN_IPI_RESCHEDULE:
-			scheduler_ipi();
-			break;
-		case BFIN_IPI_CALL_FUNC:
-			ipi_call_function(cpu, msg);
-			break;
-		case BFIN_IPI_CPU_STOP:
-			ipi_cpu_stop(cpu);
-			break;
-		default:
-			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-			       cpu, msg->type);
-			break;
-		}
-		msg_queue->head++;
-		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-		msg_queue->count--;
+	bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+		msg = 0;
+		do {
+			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+			switch (msg) {
+			case BFIN_IPI_TIMER:
+				ipi_timer();
+				break;
+			case BFIN_IPI_RESCHEDULE:
+				scheduler_ipi();
+				break;
+			case BFIN_IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+
+			case BFIN_IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
+
+			case BFIN_IPI_CPU_STOP:
+				ipi_cpu_stop(cpu);
+				break;
+			}
+		} while (msg < BITS_PER_LONG);
+
+		smp_mb();
 	}
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	return IRQ_HANDLED;
 }
 
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
 {
 	unsigned int cpu;
-	struct ipi_message_queue *msg_queue;
+	struct ipi_data *bfin_ipi_data;
 	for_each_possible_cpu(cpu) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_init(&msg_queue->lock);
-		msg_queue->count = 0;
-		msg_queue->head = 0;
+		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+		bfin_ipi_data->bits = 0;
+		bfin_ipi_data->count = 0;
 	}
 }
 
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-				void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 {
 	unsigned int cpu;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
-	unsigned long flags, next_msg;
-	cpumask_t waitmask; /* waitmask is shared by all cpus */
-
-	cpumask_copy(&waitmask, &callmap);
-	for_each_cpu(cpu, &callmap) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_irqsave(&msg_queue->lock, flags);
-		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-			next_msg = (msg_queue->head + msg_queue->count)
-					% BFIN_IPI_MSGQ_LEN;
-			msg = &msg_queue->ipi_message[next_msg];
-			msg->type = type;
-			if (type == BFIN_IPI_CALL_FUNC) {
-				msg->call_struct.func = func;
-				msg->call_struct.info = info;
-				msg->call_struct.wait = wait;
-				msg->call_struct.waitmask = &waitmask;
-			}
-			msg_queue->count++;
-		} else
-			panic("IPI message queue overflow\n");
-		spin_unlock_irqrestore(&msg_queue->lock, flags);
+	struct ipi_data *bfin_ipi_data;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_cpu(cpu, cpumask) {
+		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+		smp_mb();
+		set_bit(msg, &bfin_ipi_data->bits);
+		bfin_ipi_data->count++;
 		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 	}
 
-	if (wait) {
-		while (!cpumask_empty(&waitmask))
-			blackfin_dcache_invalidate_range(
-				(unsigned long)(&waitmask),
-				(unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * Invalidate D cache in case shared data was changed by
-		 * other processors to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-	}
+	local_irq_restore(flags);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	cpumask_t callmap;
-
-	preempt_disable();
-	cpumask_copy(&callmap, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), &callmap);
-	if (!cpumask_empty(&callmap))
-		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-	preempt_enable();
-
-	return 0;
+	send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-				int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	unsigned int cpu = cpuid;
-	cpumask_t callmap;
-
-	if (cpu_is_offline(cpu))
-		return 0;
-	cpumask_clear(&callmap);
-	cpumask_set_cpu(cpu, &callmap);
-
-	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-	return 0;
+	send_ipi(mask, BFIN_IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-	cpumask_t callmap;
-	/* simply trigger an ipi */
-
-	cpumask_clear(&callmap);
-	cpumask_set_cpu(cpu, &callmap);
-
-	smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+	send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
 
 	return;
 }
 
 void smp_send_msg(const struct cpumask *mask, unsigned long type)
 {
-	smp_send_message(*mask, type, NULL, NULL, 0);
+	send_ipi(mask, type);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
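
The send and receive sides now pair up without a lock: send_ipi() sets a bit in the target's pending word (preceded by smp_mb(), presumably so prior stores are visible before the bit is seen) and raises IRQ_SUPPLE_1, while the handler claims the whole word with xchg(..., 0) and dispatches each set bit. One thing worth noting: find_next_bit() is called with msg + 1, so a scan that starts at msg = 0 never inspects bit 0, and a pending BFIN_IPI_TIMER (value 0 in the new enum) would be dropped; later Blackfin kernels appear to avoid this by reserving value 0 as a BFIN_IPI_NONE placeholder. A sketch of a scan that also covers bit 0 (an alternative shown for illustration, not what this patch does):

	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
		unsigned long msg;

		/* for_each_set_bit() starts at find_first_bit(), so bit 0 is visited too */
		for_each_set_bit(msg, &pending, BITS_PER_LONG) {
			/* ... same switch (msg) dispatch as in the hunk above ... */
		}
		smp_mb();
	}
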
@@ -333,7 +242,7 @@ void smp_send_stop(void)
 	cpumask_copy(&callmap, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &callmap);
 	if (!cpumask_empty(&callmap))
-		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+		send_ipi(&callmap, BFIN_IPI_CPU_STOP);
 
 	preempt_enable();
 
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	platform_prepare_cpus(max_cpus);
-	ipi_queue_init();
+	bfin_ipi_init();
 	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
 	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }