VirtualBox

source: vbox/trunk/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c@ 73265

Last change on this file since 73265 was 70698, checked in by vboxsync, 7 years ago

Linux drivers: fix a burn due to improved error checking.
bugref:4567: Linux kernel driver maintenance
Back-port candidate to 5.2.

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 16.1 KB
Line 
1/* $Id: mp-r0drv-linux.c 70698 2018-01-23 07:58:40Z vboxsync $ */
2/** @file
3 * IPRT - Multiprocessor, Ring-0 Driver, Linux.
4 */
5
6/*
7 * Copyright (C) 2008-2017 Oracle Corporation
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * The contents of this file may alternatively be used under the terms
18 * of the Common Development and Distribution License Version 1.0
19 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
20 * VirtualBox OSE distribution, in which case the provisions of the
21 * CDDL are applicable instead of those of the GPL.
22 *
23 * You may elect to license modified versions of this file under the
24 * terms and conditions of either the GPL or the CDDL or both.
25 */
26
27
28/*********************************************************************************************************************************
29* Header Files *
30*********************************************************************************************************************************/
31#include "the-linux-kernel.h"
32#include "internal/iprt.h"
33
34#include <iprt/mp.h>
35#include <iprt/cpuset.h>
36#include <iprt/err.h>
37#include <iprt/asm.h>
38#include <iprt/thread.h>
39#include "r0drv/mp-r0drv.h"
40
/* Number of valid bits in a kernel cpumask: use nr_cpumask_bits where the
   kernel provides it, otherwise fall back on the compile-time NR_CPUS. */
#ifdef nr_cpumask_bits
# define VBOX_NR_CPUMASK_BITS nr_cpumask_bits
#else
# define VBOX_NR_CPUMASK_BITS NR_CPUS
#endif
46
47RTDECL(RTCPUID) RTMpCpuId(void)
48{
49 return smp_processor_id();
50}
51RT_EXPORT_SYMBOL(RTMpCpuId);
52
53
54RTDECL(int) RTMpCurSetIndex(void)
55{
56 return smp_processor_id();
57}
58RT_EXPORT_SYMBOL(RTMpCurSetIndex);
59
60
61RTDECL(int) RTMpCurSetIndexAndId(PRTCPUID pidCpu)
62{
63 return *pidCpu = smp_processor_id();
64}
65RT_EXPORT_SYMBOL(RTMpCurSetIndexAndId);
66
67
68RTDECL(int) RTMpCpuIdToSetIndex(RTCPUID idCpu)
69{
70 return idCpu < RTCPUSET_MAX_CPUS && idCpu < VBOX_NR_CPUMASK_BITS ? (int)idCpu : -1;
71}
72RT_EXPORT_SYMBOL(RTMpCpuIdToSetIndex);
73
74
75RTDECL(RTCPUID) RTMpCpuIdFromSetIndex(int iCpu)
76{
77 return iCpu < VBOX_NR_CPUMASK_BITS ? (RTCPUID)iCpu : NIL_RTCPUID;
78}
79RT_EXPORT_SYMBOL(RTMpCpuIdFromSetIndex);
80
81
82RTDECL(RTCPUID) RTMpGetMaxCpuId(void)
83{
84 return VBOX_NR_CPUMASK_BITS - 1; //???
85}
86RT_EXPORT_SYMBOL(RTMpGetMaxCpuId);
87
88
/**
 * Checks whether the given CPU id denotes a CPU that can possibly exist in
 * the system.
 *
 * @returns true if the CPU may exist, false otherwise.
 * @param   idCpu       The CPU identifier (Linux processor number).
 */
RTDECL(bool) RTMpIsCpuPossible(RTCPUID idCpu)
{
#if defined(CONFIG_SMP)
    /* Ids beyond the cpumask width can never be valid. */
    if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
        return false;

# if defined(cpu_possible)
    return cpu_possible(idCpu);
# else /* < 2.5.29 */
    /* Ancient kernels numbered the CPUs 0..smp_num_cpus-1. */
    return idCpu < (RTCPUID)smp_num_cpus;
# endif
#else
    /* Non-SMP kernel: only the current (sole) CPU exists. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuPossible);
105
106
107RTDECL(PRTCPUSET) RTMpGetSet(PRTCPUSET pSet)
108{
109 RTCPUID idCpu;
110
111 RTCpuSetEmpty(pSet);
112 idCpu = RTMpGetMaxCpuId();
113 do
114 {
115 if (RTMpIsCpuPossible(idCpu))
116 RTCpuSetAdd(pSet, idCpu);
117 } while (idCpu-- > 0);
118 return pSet;
119}
120RT_EXPORT_SYMBOL(RTMpGetSet);
121
122
/**
 * Gets the number of CPUs in the system, using the best primitive the
 * kernel configuration offers.
 *
 * @returns The CPU count; 1 on non-SMP kernels.
 */
RTDECL(RTCPUID) RTMpGetCount(void)
{
#ifdef CONFIG_SMP
# if defined(CONFIG_HOTPLUG_CPU) /* introduced & uses cpu_present */
    /* Hotplug-capable kernels track present CPUs explicitly. */
    return num_present_cpus();
# elif defined(num_possible_cpus)
    return num_possible_cpus();
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0)
    return smp_num_cpus;
# else
    /* Fallback: count the possible CPUs ourselves via RTMpGetSet. */
    RTCPUSET Set;
    RTMpGetSet(&Set);
    return RTCpuSetCount(&Set);
# endif
#else
    return 1;
#endif
}
RT_EXPORT_SYMBOL(RTMpGetCount);
142
143
/**
 * Checks whether the given CPU is currently online.
 *
 * @returns true if online, false if offline or out of range.
 * @param   idCpu       The CPU identifier (Linux processor number).
 */
RTDECL(bool) RTMpIsCpuOnline(RTCPUID idCpu)
{
#ifdef CONFIG_SMP
    /* Ids beyond the cpumask width can never be valid. */
    if (RT_UNLIKELY(idCpu >= VBOX_NR_CPUMASK_BITS))
        return false;
# ifdef cpu_online
    return cpu_online(idCpu);
# else /* 2.4: */
    /* 2.4 kernels exposed the online CPUs as a plain bit mask. */
    return cpu_online_map & RT_BIT_64(idCpu);
# endif
#else
    /* Non-SMP kernel: only the current (sole) CPU is online. */
    return idCpu == RTMpCpuId();
#endif
}
RT_EXPORT_SYMBOL(RTMpIsCpuOnline);
159
160
161RTDECL(PRTCPUSET) RTMpGetOnlineSet(PRTCPUSET pSet)
162{
163#ifdef CONFIG_SMP
164 RTCPUID idCpu;
165
166 RTCpuSetEmpty(pSet);
167 idCpu = RTMpGetMaxCpuId();
168 do
169 {
170 if (RTMpIsCpuOnline(idCpu))
171 RTCpuSetAdd(pSet, idCpu);
172 } while (idCpu-- > 0);
173#else
174 RTCpuSetEmpty(pSet);
175 RTCpuSetAdd(pSet, RTMpCpuId());
176#endif
177 return pSet;
178}
179RT_EXPORT_SYMBOL(RTMpGetOnlineSet);
180
181
182RTDECL(RTCPUID) RTMpGetOnlineCount(void)
183{
184#ifdef CONFIG_SMP
185# if defined(num_online_cpus)
186 return num_online_cpus();
187# else
188 RTCPUSET Set;
189 RTMpGetOnlineSet(&Set);
190 return RTCpuSetCount(&Set);
191# endif
192#else
193 return 1;
194#endif
195}
196RT_EXPORT_SYMBOL(RTMpGetOnlineCount);
197
198
199RTDECL(bool) RTMpIsCpuWorkPending(void)
200{
201 /** @todo (not used on non-Windows platforms yet). */
202 return false;
203}
204RT_EXPORT_SYMBOL(RTMpIsCpuWorkPending);
205
206
207/**
208 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER.
209 *
210 * @param pvInfo Pointer to the RTMPARGS package.
211 */
212static void rtmpLinuxWrapper(void *pvInfo)
213{
214 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
215 ASMAtomicIncU32(&pArgs->cHits);
216 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
217}
218
219
220#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
221/**
222 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER, does hit
223 * increment after calling the worker.
224 *
225 * @param pvInfo Pointer to the RTMPARGS package.
226 */
227static void rtmpLinuxWrapperPostInc(void *pvInfo)
228{
229 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
230 pArgs->pfnWorker(RTMpCpuId(), pArgs->pvUser1, pArgs->pvUser2);
231 ASMAtomicIncU32(&pArgs->cHits);
232}
233#endif
234
235
236/**
237 * Wrapper between the native linux all-cpu callbacks and PFNRTWORKER.
238 *
239 * @param pvInfo Pointer to the RTMPARGS package.
240 */
241static void rtmpLinuxAllWrapper(void *pvInfo)
242{
243 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
244 PRTCPUSET pWorkerSet = pArgs->pWorkerSet;
245 RTCPUID idCpu = RTMpCpuId();
246 Assert(!RTThreadPreemptIsEnabled(NIL_RTTHREAD));
247
248 if (RTCpuSetIsMember(pWorkerSet, idCpu))
249 {
250 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
251 RTCpuSetDel(pWorkerSet, idCpu);
252 }
253}
254
255
/**
 * Executes pfnWorker on every online CPU, including the current one, and
 * waits until all of them have run it.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker function to run on each CPU.
 * @param   pvUser1     First user argument passed to the worker.
 * @param   pvUser2     Second user argument passed to the worker.
 */
RTDECL(int) RTMpOnAll(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;
    RTCPUSET OnlineSet;
    RTCPUID idCpu;
    uint32_t cLoops;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Disable preemption so the online set stays accurate for us and we
       keep running on the same CPU until everyone is done. */
    RTThreadPreemptDisable(&PreemptState);
    RTMpGetOnlineSet(&OnlineSet);
    Args.pWorkerSet = &OnlineSet;
    idCpu = RTMpCpuId();

    if (RTCpuSetCount(&OnlineSet) > 1)
    {
        /* Fire the function on all other CPUs without waiting for completion. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
#else
        rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
#endif
        Assert(!rc); NOREF(rc);
    }

    /* Fire the function on this CPU and take it out of the pending set. */
    Args.pfnWorker(idCpu, Args.pvUser1, Args.pvUser2);
    RTCpuSetDel(Args.pWorkerSet, idCpu);

    /* Spin until every CPU has removed itself from the pending set. */
    cLoops = 64000;
    while (!RTCpuSetIsEmpty(Args.pWorkerSet))
    {
        /* Periodically check if any CPU in the wait set has gone offline, if so update the wait set. */
        if (!cLoops--)
        {
            RTCPUSET OnlineSetNow;
            RTMpGetOnlineSet(&OnlineSetNow);
            /* Drop CPUs that went offline; they will never report back. */
            RTCpuSetAnd(Args.pWorkerSet, &OnlineSetNow);

            cLoops = 64000;
        }

        ASMNopPause();
    }

    RTThreadPreemptRestore(&PreemptState);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnAll);
315
316
/**
 * Executes pfnWorker on every online CPU except the current one, waiting for
 * all of them to complete before returning.
 *
 * @returns VINF_SUCCESS.
 * @param   pfnWorker   The worker function to run on each other CPU.
 * @param   pvUser1     First user argument passed to the worker.
 * @param   pvUser2     Second user argument passed to the worker.
 */
RTDECL(int) RTMpOnOthers(PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    IPRT_LINUX_SAVE_EFL_AC();
    int rc;
    RTMPARGS Args;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    Args.pfnWorker = pfnWorker;
    Args.pvUser1 = pvUser1;
    Args.pvUser2 = pvUser2;
    Args.idCpu = NIL_RTCPUID;
    Args.cHits = 0;

    /* Disable preemption so "others" stays well-defined during the call. */
    RTThreadPreemptDisable(&PreemptState);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
#else /* older kernels */
    rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
#endif /* older kernels */
    RTThreadPreemptRestore(&PreemptState);

    Assert(rc == 0); NOREF(rc);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTMpOnOthers);
343
344
345#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
346/**
347 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
348 * employed by RTMpOnPair on older kernels that lacks smp_call_function_many.
349 *
350 * @param pvInfo Pointer to the RTMPARGS package.
351 */
352static void rtMpLinuxOnPairWrapper(void *pvInfo)
353{
354 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
355 RTCPUID idCpu = RTMpCpuId();
356
357 if ( idCpu == pArgs->idCpu
358 || idCpu == pArgs->idCpu2)
359 {
360 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
361 ASMAtomicIncU32(&pArgs->cHits);
362 }
363}
364#endif
365
366
367RTDECL(int) RTMpOnPair(RTCPUID idCpu1, RTCPUID idCpu2, uint32_t fFlags, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
368{
369 IPRT_LINUX_SAVE_EFL_AC();
370 int rc;
371 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
372
373 AssertReturn(idCpu1 != idCpu2, VERR_INVALID_PARAMETER);
374 AssertReturn(!(fFlags & RTMPON_F_VALID_MASK), VERR_INVALID_FLAGS);
375
376 /*
377 * Check that both CPUs are online before doing the broadcast call.
378 */
379 RTThreadPreemptDisable(&PreemptState);
380 if ( RTMpIsCpuOnline(idCpu1)
381 && RTMpIsCpuOnline(idCpu2))
382 {
383 /*
384 * Use the smp_call_function variant taking a cpu mask where available,
385 * falling back on broadcast with filter. Slight snag if one of the
386 * CPUs is the one we're running on, we must do the call and the post
387 * call wait ourselves.
388 */
389#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
390 /* 2.6.28 introduces CONFIG_CPUMASK_OFFSTACK */
391 cpumask_var_t DstCpuMask;
392#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
393 cpumask_t DstCpuMask;
394#endif
395 RTCPUID idCpuSelf = RTMpCpuId();
396 bool const fCallSelf = idCpuSelf == idCpu1 || idCpuSelf == idCpu2;
397 RTMPARGS Args;
398 Args.pfnWorker = pfnWorker;
399 Args.pvUser1 = pvUser1;
400 Args.pvUser2 = pvUser2;
401 Args.idCpu = idCpu1;
402 Args.idCpu2 = idCpu2;
403 Args.cHits = 0;
404
405#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30)
406 if (!zalloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
407 return VERR_NO_MEMORY;
408 cpumask_set_cpu(idCpu1, DstCpuMask);
409 cpumask_set_cpu(idCpu2, DstCpuMask);
410#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
411 if (!alloc_cpumask_var(&DstCpuMask, GFP_KERNEL))
412 return VERR_NO_MEMORY;
413 cpumask_clear(DstCpuMask);
414 cpumask_set_cpu(idCpu1, DstCpuMask);
415 cpumask_set_cpu(idCpu2, DstCpuMask);
416#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
417 cpus_clear(DstCpuMask);
418 cpu_set(idCpu1, DstCpuMask);
419 cpu_set(idCpu2, DstCpuMask);
420#endif
421
422#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
423 smp_call_function_many(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
424 rc = 0;
425#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
426 rc = smp_call_function_mask(DstCpuMask, rtmpLinuxWrapperPostInc, &Args, !fCallSelf /* wait */);
427#else /* older kernels */
428 rc = smp_call_function(rtMpLinuxOnPairWrapper, &Args, 0 /* retry */, !fCallSelf /* wait */);
429#endif /* older kernels */
430 Assert(rc == 0);
431
432 /* Call ourselves if necessary and wait for the other party to be done. */
433 if (fCallSelf)
434 {
435 uint32_t cLoops = 0;
436 rtmpLinuxWrapper(&Args);
437 while (ASMAtomicReadU32(&Args.cHits) < 2)
438 {
439 if ((cLoops & 0x1ff) == 0 && !RTMpIsCpuOnline(idCpuSelf == idCpu1 ? idCpu2 : idCpu1))
440 break;
441 cLoops++;
442 ASMNopPause();
443 }
444 }
445
446 Assert(Args.cHits <= 2);
447 if (Args.cHits == 2)
448 rc = VINF_SUCCESS;
449 else if (Args.cHits == 1)
450 rc = VERR_NOT_ALL_CPUS_SHOWED;
451 else if (Args.cHits == 0)
452 rc = VERR_CPU_OFFLINE;
453 else
454 rc = VERR_CPU_IPE_1;
455
456#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
457 free_cpumask_var(DstCpuMask);
458#endif
459 }
460 /*
461 * A CPU must be present to be considered just offline.
462 */
463 else if ( RTMpIsCpuPresent(idCpu1)
464 && RTMpIsCpuPresent(idCpu2))
465 rc = VERR_CPU_OFFLINE;
466 else
467 rc = VERR_CPU_NOT_FOUND;
468 RTThreadPreemptRestore(&PreemptState);;
469 IPRT_LINUX_RESTORE_EFL_AC();
470 return rc;
471}
472RT_EXPORT_SYMBOL(RTMpOnPair);
473
474
475RTDECL(bool) RTMpOnPairIsConcurrentExecSupported(void)
476{
477 return true;
478}
479RT_EXPORT_SYMBOL(RTMpOnPairIsConcurrentExecSupported);
480
481
482#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
483/**
484 * Wrapper between the native linux per-cpu callbacks and PFNRTWORKER
485 * employed by RTMpOnSpecific on older kernels that lacks smp_call_function_single.
486 *
487 * @param pvInfo Pointer to the RTMPARGS package.
488 */
489static void rtmpOnSpecificLinuxWrapper(void *pvInfo)
490{
491 PRTMPARGS pArgs = (PRTMPARGS)pvInfo;
492 RTCPUID idCpu = RTMpCpuId();
493
494 if (idCpu == pArgs->idCpu)
495 {
496 pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
497 ASMAtomicIncU32(&pArgs->cHits);
498 }
499}
500#endif
501
502
503RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
504{
505 IPRT_LINUX_SAVE_EFL_AC();
506 int rc;
507 RTMPARGS Args;
508
509 RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
510 Args.pfnWorker = pfnWorker;
511 Args.pvUser1 = pvUser1;
512 Args.pvUser2 = pvUser2;
513 Args.idCpu = idCpu;
514 Args.cHits = 0;
515
516 if (!RTMpIsCpuPossible(idCpu))
517 return VERR_CPU_NOT_FOUND;
518
519 RTThreadPreemptDisable(&PreemptState);
520 if (idCpu != RTMpCpuId())
521 {
522 if (RTMpIsCpuOnline(idCpu))
523 {
524#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
525 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 1 /* wait */);
526#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
527 rc = smp_call_function_single(idCpu, rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
528#else /* older kernels */
529 rc = smp_call_function(rtmpOnSpecificLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
530#endif /* older kernels */
531 Assert(rc == 0);
532 rc = Args.cHits ? VINF_SUCCESS : VERR_CPU_OFFLINE;
533 }
534 else
535 rc = VERR_CPU_OFFLINE;
536 }
537 else
538 {
539 rtmpLinuxWrapper(&Args);
540 rc = VINF_SUCCESS;
541 }
542 RTThreadPreemptRestore(&PreemptState);;
543
544 NOREF(rc);
545 IPRT_LINUX_RESTORE_EFL_AC();
546 return rc;
547}
548RT_EXPORT_SYMBOL(RTMpOnSpecific);
549
550
551#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
552/**
553 * Dummy callback used by RTMpPokeCpu.
554 *
555 * @param pvInfo Ignored.
556 */
557static void rtmpLinuxPokeCpuCallback(void *pvInfo)
558{
559 NOREF(pvInfo);
560}
561#endif
562
563
/**
 * Pokes the given CPU with a no-op cross-call IPI, without waiting for the
 * call to be serviced.
 *
 * @returns VINF_SUCCESS on success, VERR_CPU_NOT_FOUND / VERR_CPU_OFFLINE
 *          for bad targets, VERR_NOT_SUPPORTED on kernels older than 2.6.19
 *          which lack a unicast call API.
 * @param   idCpu       The CPU to poke.
 */
RTDECL(int) RTMpPokeCpu(RTCPUID idCpu)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    int rc;
    IPRT_LINUX_SAVE_EFL_AC();

    /* NOTE(review): these early returns skip IPRT_LINUX_RESTORE_EFL_AC();
       presumably harmless since nothing has clobbered EFL.AC yet -- confirm. */
    if (!RTMpIsCpuPossible(idCpu))
        return VERR_CPU_NOT_FOUND;
    if (!RTMpIsCpuOnline(idCpu))
        return VERR_CPU_OFFLINE;

    /* wait=0: fire the IPI and return immediately. */
# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* wait */);
# elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
    rc = smp_call_function_single(idCpu, rtmpLinuxPokeCpuCallback, NULL, 0 /* retry */, 0 /* wait */);
# else /* older kernels */
# error oops
# endif /* older kernels */
    NOREF(rc);
    Assert(rc == 0);
    IPRT_LINUX_RESTORE_EFL_AC();
    return VINF_SUCCESS;

#else /* older kernels */
    /* No single-CPU cross-call API here. */
    return VERR_NOT_SUPPORTED;
#endif /* older kernels */
}
RT_EXPORT_SYMBOL(RTMpPokeCpu);
593
594
595RTDECL(bool) RTMpOnAllIsConcurrentSafe(void)
596{
597 return true;
598}
599RT_EXPORT_SYMBOL(RTMpOnAllIsConcurrentSafe);
600
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette