--- a/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c.ORIG	2019-09-03 11:44:20.000000000 +0200
+++ b/src/VBox/Runtime/r0drv/linux/mp-r0drv-linux.c	2019-09-17 13:29:19.953980946 +0200
@@ -283,12 +283,16 @@
     if (RTCpuSetCount(&OnlineSet) > 1)
     {
         /* Fire the function on all other CPUs without waiting for completion. */
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
+        smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
         int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* wait */);
-# else
+#else
         int rc = smp_call_function(rtmpLinuxAllWrapper, &Args, 0 /* retry */, 0 /* wait */);
-# endif
+#endif
+# if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
         Assert(!rc); NOREF(rc);
+#endif
     }
 #endif
 
@@ -326,7 +330,9 @@
 {
 #ifdef CONFIG_SMP
     IPRT_LINUX_SAVE_EFL_AC();
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
     int rc;
+#endif
     RTMPARGS Args;
     RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
 
@@ -337,14 +343,18 @@
     Args.cHits = 0;
 
     RTThreadPreemptDisable(&PreemptState);
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
+    smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
     rc = smp_call_function(rtmpLinuxWrapper, &Args, 1 /* wait */);
-# else /* older kernels */
+#else /* older kernels */
     rc = smp_call_function(rtmpLinuxWrapper, &Args, 0 /* retry */, 1 /* wait */);
-# endif /* older kernels */
+#endif /* older kernels */
     RTThreadPreemptRestore(&PreemptState);
 
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
     Assert(rc == 0); NOREF(rc);
+#endif
     IPRT_LINUX_RESTORE_EFL_AC();
 #else
     RT_NOREF(pfnWorker, pvUser1, pvUser2);
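
Note (not part of the patch): the change accounts for smp_call_function() losing its int return value in Linux 5.3, so the rc declaration and Assert/NOREF checks are compiled only on older kernels. Below is a minimal standalone sketch of the same version gate; my_ipi_handler and my_cross_call are hypothetical names used for illustration, not identifiers from the VirtualBox sources.

/* Illustrative sketch only; not taken from mp-r0drv-linux.c. */
#include <linux/version.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/bug.h>

static void my_ipi_handler(void *info)
{
    /* Runs on every other online CPU in IPI context; keep it short and non-blocking. */
    (void)info;
}

static void my_cross_call(void *info)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
    int rc;                                  /* only needed where the call reports a status */
#endif

    /* Pin the caller to one CPU for the duration of the cross call,
       mirroring the RTThreadPreemptDisable/Restore pair in the patch. */
    preempt_disable();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 3, 0)
    smp_call_function(my_ipi_handler, info, 1 /* wait */);          /* returns void since 5.3 */
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
    rc = smp_call_function(my_ipi_handler, info, 1 /* wait */);     /* returns int */
#else
    rc = smp_call_function(my_ipi_handler, info, 0 /* retry */, 1 /* wait */);
#endif
    preempt_enable();

#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0)
    WARN_ON(rc != 0);                        /* older kernels report a status; newer ones cannot fail here */
#endif
}

The structure deliberately follows the patch: the status variable and its check live under the same LINUX_VERSION_CODE < KERNEL_VERSION(5, 3, 0) guard as each other, so nothing is left unused on 5.3+ kernels and no return value is read from a now-void function on older code paths.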