VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c @ 55784

Last change on this file since 55784 was 54663, checked in by vboxsync, 10 years ago

IPRT: RTMpOnPair fix for Linux < 2.6.27

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 15.0 KB
/* $Id: mp-r0drv-linux.c 54663 2015-03-06 10:14:06Z vboxsync $ */
/** @file
 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
 */

/*
 * Copyright (C) 2008-2014 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */


/*******************************************************************************
*   Header Files                                                               *
*******************************************************************************/
#include "the-linux-kernel.h"
#include "internal/iprt.h"

#include <iprt/mp.h>
#include <iprt/cpuset.h>
#include <iprt/err.h>
#include <iprt/asm.h>
#include <iprt/thread.h>
#include "r0drv/mp-r0drv.h"


RTDECL(RTCPUID) RTMpCpuId(void)
{
    return smp_processor_id();
}
RT_EXPORT_SYMBOL(RTMpCpuId);


RTDECL(int) RTMpCurSetIndex(void)
{
    return smp_processor_id();
}
RT_EXPORT_SYMBOL(RTMpCurSetIndex);


RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
{
    return *pidCpu = smp_processor_id();
}
RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId);
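
/* Note: RTMpCpuId, RTMpCurSetIndex and RTMpCurSetIndexAndId all boil down to
 * smp_processor_id(), so the returned value is only stable while the caller
 * has preemption disabled or is otherwise pinned to a CPU; kernels built with
 * CONFIG_DEBUG_PREEMPT warn when smp_processor_id() is used from preemptible
 * context. */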


RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
{
    return idCpu < RTCPUSET_MAX_CPUS && idCpu < NR_CPUS ? (int)idCpu : -1;
}
RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);


RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
{
    return iCpu < NR_CPUS ? (RTCPUID)iCpu : NIL_RTCPUID;
}
RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);


RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
{
    return NR_CPUS - 1; //???
}
RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);


RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
    if (RT_UNLIKELY(idCpu >= NR_CPUS))
        return false;

# if defined(cpu_possible)
    return cpu_possible(idCpu);
# else /* < 2.5.29 */
    return idCpu < (RTCPUID)smp_num_cpus;
# endif
#else
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuPossible);


RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
{
    RTCPUID idCpu;

    RTCpuSetEmpty(pSet);
    idCpu = RTMpGetMaxCpuId();
    do
    {
        if (RTMpIsCpuPossible(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    } while (idCpu-- > 0);
    return pSet;
}
RT_EXPORT_SYMBOL(RTMpGetSet);


RTDECL(RTCPUID) RTMpGetCount(void)
{
#ifdef CONFIG_SMP
# if defined(CONFIG_HOTPLUG_CPU) /* introduced & uses cpu_present */
    return num_present_cpus();
# elif defined(num_possible_cpus)
    return num_possible_cpus();
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
    return smp_num_cpus;
# else
    RTCPUSET Set;
    RTMpGetSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetCount);


RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
#ifdef CONFIG_SMP
    if (RT_UNLIKELY(idCpu >= NR_CPUS))
        return false;
# ifdef cpu_online
    return cpu_online(idCpu);
# else /* 2.4: */
    return cpu_online_map & RT_BIT_64(idCpu);
# endif
#else
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuOnline);


RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
{
#ifdef CONFIG_SMP
    RTCPUID idCpu;

    RTCpuSetEmpty(pSet);
    idCpu = RTMpGetMaxCpuId();
    do
    {
        if (RTMpIsCpuOnline(idCpu))
            RTCpuSetAdd(pSet, idCpu);
    } while (idCpu-- > 0);
#else
    RTCpuSetEmpty(pSet);
    RTCpuSetAdd(pSet, RTMpCpuId());
#endif
    return pSet;
}
RT_EXPORT_SYMBOL(RTMpGetOnlineSet);
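
/* Note: the set returned by RTMpGetOnlineSet is only a snapshot; CPUs may go
 * on- or offline afterwards unless the caller disables preemption and copes
 * with hotplug itself, as RTMpOnAll below does by periodically pruning its
 * wait set against a freshly read online set. */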


RTDECL(RTCPUID) RTMpGetOnlineCount(void)
{
#ifdef CONFIG_SMP
# if defined(num_online_cpus)
    return num_online_cpus();
# else
    RTCPUSET Set;
    RTMpGetOnlineSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetOnlineCount);


RTDECL(bool) RTMpIsCpuWorkPending(void)
{
    /** @todo (not used on non-Windows platforms yet). */
    return false;
}
RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);


/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    ASMAtomicIncU32(&pArgs->cHits);
    pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
}


/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER which
 * increments the hit count only after the worker has returned, so a waiter
 * polling cHits knows the worker has completed.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxWrapperPostInc(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
    ASMAtomicIncU32(&pArgs->cHits);
}


/**
 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtmpLinuxAllWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    PRTCPUSET pWorkerSet = pArgs->pWorkerSet;
    RTCPUID idCpu = RTMpCpuId();
    Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));

    if (RTCpuSetIsMember(pWorkerSet, idCpu))
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        RTCpuSetDel(pWorkerSet, idCpu);
    }
}


RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID idCpu;
    uint32_t cLoops;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
#else
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
#endif
        Assert(!rc); NOREF(rc);
    }

    /* Fire the function on this CPU. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

    /* Wait for all of them to finish. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check whether any CPU in the wait set has gone offline and, if so, update the wait set. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);

            cLoops = 64000;
        }

        ASMNopPause();
    }

    RTThreadPreemptRestore(&PreemptState);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnAll);
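
/* Illustrative usage sketch of RTMpOnAll: a worker that counts how many CPUs
 * executed it.  The worker and counter names are hypothetical, not IPRT API;
 * only RTMpOnAll, PFNRTMPWORKER and ASMAtomicIncU32 are used as above. */
#if 0 /* example only */
static DECLCALLBACK(void) exampleCountWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    uint32_t volatile *pcCalls = (uint32_t volatile *)pvUser1;
    NOREF(idCpu); NOREF(pvUser2);
    ASMAtomicIncU32(pcCalls);           /* runs once on every CPU in the worker set */
}

static int exampleRunOnAllCpus(void)
{
    uint32_t volatile cCalls = 0;
    int rc = RTMpOnAll(exampleCountWorker, (void *)&cCalls, NULL);
    /* On success, cCalls equals the number of online CPUs that ran the worker. */
    return rc;
}
#endif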


RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    RTThreadPreemptDisable(&PreemptState);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
#else /* older kernels */
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
    RTThreadPreemptRestore(&PreemptState);

    Assert(rc == 0); NOREF(rc);
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnOthers);


#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
 * employed by RTMpOnPair on older kernels that lack smp_call_function_many.
 *
 * @param   pvInfo      Pointer to the RTMPARGS package.
 */
static void rtMpLinuxOnPairWrapper(void *pvInfo)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
    RTCPUID idCpu = RTMpCpuId();

    if (   idCpu == pArgs->idCpu
        || idCpu == pArgs->idCpu2)
    {
        pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
        ASMAtomicIncU32(&pArgs->cHits);
    }
}
#endif


RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    int rc;
    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
    AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);

    /*
     * Check that both CPUs are online before doing the broadcast call.
     */
    RTThreadPreemptDisable(&PreemptState);
    if (   RTMpIsCpuOnline(idCpu1)
        && RTMpIsCpuOnline(idCpu2))
    {
        /*
         * Use the smp_call_function variant taking a cpu mask where available,
373 * falling back on broadcast with filter. Slight snag if one of the
374 * CPUs is the one we're running on, we must do the call and the post
375 * call wait ourselves.
         */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        cpumask_t DstCpuMask;
#endif
        RTCPUID idCpuSelf = RTMpCpuId();
        bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
        RTMPARGS Args;
        Args.pfnWorker = pfnWorker;
        Args.pvUser1 = pvUser1;
        Args.pvUser2 = pvUser2;
        Args.idCpu = idCpu1;
        Args.idCpu2 = idCpu2;
        Args.cHits = 0;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        cpumask_clear(&DstCpuMask);
        cpumask_set_cpu(idCpu1, &DstCpuMask);
        cpumask_set_cpu(idCpu2, &DstCpuMask);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        cpus_clear(DstCpuMask);
        cpu_set(idCpu1, DstCpuMask);
        cpu_set(idCpu2, DstCpuMask);
#endif

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
        smp_call_function_many(&DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
        rc = 0;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
        rc = smp_call_function_many(&DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
#else /* older kernels */
        rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
#endif /* older kernels */
        Assert(rc == 0);

        /* Call ourselves if necessary and wait for the other party to be done. */
        if (fCallSelf)
        {
            uint32_t cLoops = 0;
            rtmpLinuxWrapper(&Args);
            while (ASMAtomicReadU32(&Args.cHits) < 2)
            {
                if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
                    break;
                cLoops++;
                ASMNopPause();
            }
        }

        Assert(Args.cHits <= 2);
        if (Args.cHits == 2)
            rc = VINF_SUCCESS;
        else if (Args.cHits == 1)
            rc = VERR_NOT_ALL_CPUS_SHOWED;
        else if (Args.cHits == 0)
            rc = VERR_CPU_OFFLINE;
        else
            rc = VERR_CPU_IPE_1;
    }
    /*
     * A CPU must be present to be considered just offline.
     */
    else if (   RTMpIsCpuPresent(idCpu1)
             && RTMpIsCpuPresent(idCpu2))
        rc = VERR_CPU_OFFLINE;
    else
        rc = VERR_CPU_NOT_FOUND;
    RTThreadPreemptRestore(&PreemptState);
    return rc;
}
RT_EXPORT_SYMBOL(RTMpOnPair);
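
/* Illustrative usage sketch of RTMpOnPair with a hypothetical worker; the
 * worker name and the choice of CPU ids 0 and 1 are made up for illustration
 * (the two ids must differ and both CPUs must be online). */
#if 0 /* example only */
static DECLCALLBACK(void) examplePairWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    NOREF(idCpu); NOREF(pvUser1); NOREF(pvUser2);
    /* Runs on both requested CPUs; preemption is disabled while it runs. */
}

static int exampleRunOnPair(void)
{
    int rc = RTMpOnPair(0 /* idCpu1 */, 1 /* idCpu2 */, 0 /* fFlags */, examplePairWorker, NULL, NULL);
    /* VERR_CPU_OFFLINE / VERR_NOT_ALL_CPUS_SHOWED mean a CPU was or went
       offline; VERR_CPU_NOT_FOUND means a CPU is not even present. */
    return rc;
}
#endif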


RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
{
    return true;
}
RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);


#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
/**
 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
 * employed by RTMpOnSpecific on older kernels that lack smp_call_function_single.
459 *
460 * @param pvInfo Pointer to the RTMPARGS package.
461 */
462static void rtmpOnSpecificLinuxWrapper(void *pvInfo)
463{
464 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
465 RTCPUID idCpu = RTMpCpuId();
466
467 if (idCpu == pArgs->idCpu)
468 {
469 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
470 ASMAtomicIncU32(&pArgs->cHits);
471 }
472}
473#endif
474
475
476RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
477{
478 int rc;
479 RTMPARGS Args;
480
481 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
482 Args.pfnWorker = pfnWorker;
483 Args.pvUser1 = pvUser1;
484 Args.pvUser2 = pvUser2;
485 Args.idCpu = idCpu;
486 Args.cHits = 0;
487
488 if (!RTMpIsCpuPossible(idCpu))
489 return VERR_CPU_NOT_FOUND;
490
491 RTThreadPreemptDisable(&PreemptState);
492 if (idCpu != RTMpCpuId())
493 {
494 if (RTMpIsCpuOnline(idCpu))
495 {
496#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
497 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
498#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
499 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
500#else /* older kernels */
501 rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
502#endif /* older kernels */
503 Assert(rc == 0);
504 rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
505 }
506 else
507 rc = VERR_CPU_OFFLINE;
508 }
509 else
510 {
511 rtmpLinuxWrapper(&Args);
512 rc = VINF_SUCCESS;
513 }
    RTThreadPreemptRestore(&PreemptState);

    NOREF(rc);
    return rc;
}
RT_EXPORT_SYMBOL(RTMpOnSpecific);
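
/* Illustrative usage sketch of RTMpOnSpecific: running a hypothetical worker
 * on one given CPU and recording where it ran.  Names are made up, not IPRT API. */
#if 0 /* example only */
static DECLCALLBACK(void) exampleSpecificWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    RTCPUID *pidRanOn = (RTCPUID *)pvUser1;
    NOREF(pvUser2);
    *pidRanOn = idCpu;                  /* executes on the requested CPU only */
}

static int exampleRunOnCpu(RTCPUID idCpu)
{
    RTCPUID idRanOn = NIL_RTCPUID;
    int rc = RTMpOnSpecific(idCpu, exampleSpecificWorker, &idRanOn, NULL);
    /* rc is VERR_CPU_NOT_FOUND for impossible ids and VERR_CPU_OFFLINE for
       offline ones; on VINF_SUCCESS, idRanOn equals idCpu. */
    return rc;
}
#endif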


#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
/**
 * Dummy callback used by RTMpPokeCpu.
 *
 * @param   pvInfo      Ignored.
 */
static void rtmpLinuxPokeCpuCallback(void *pvInfo)
{
    NOREF(pvInfo);
}
#endif


RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    int rc;

    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;
    if (!RTMpIsCpuOnline(idCpu))
        return VERR_CPU_OFFLINE;

# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
# else /* older kernels */
#  error oops
# endif /* older kernels */
    NOREF(rc);
    Assert(rc == 0);
    return VINF_SUCCESS;

#else /* older kernels */
    /* no unicast here? */
    return VERR_NOT_SUPPORTED;
#endif /* older kernels */
}
RT_EXPORT_SYMBOL(RTMpPokeCpu);
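
/* Note: RTMpPokeCpu works by firing the no-op rtmpLinuxPokeCpuCallback on the
 * target CPU via smp_call_function_single() without waiting, i.e. it simply
 * sends an IPI to kick the target CPU out of whatever it is currently running. */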


RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
{
    return true;
}
RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
