VirtualBox

source: vbox/trunk/src/VBox/Runtime/r3/freebsd/fileaio-freebsd.cpp@ 98032

Last change on this file since 98032 was 96407, checked in by vboxsync, 2 years ago

scm copyright and license note update

  • Property svn:eol-style set to native
  • Property svn:keywords set to Author Date Id Revision
File size: 22.8 KB
Line 
1/* $Id: fileaio-freebsd.cpp 96407 2022-08-22 17:43:14Z vboxsync $ */
2/** @file
3 * IPRT - File async I/O, native implementation for the FreeBSD host platform.
4 */
5
6/*
7 * Copyright (C) 2006-2022 Oracle and/or its affiliates.
8 *
9 * This file is part of VirtualBox base platform packages, as
10 * available from https://www.virtualbox.org.
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation, in version 3 of the
15 * License.
16 *
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, see <https://www.gnu.org/licenses>.
24 *
25 * The contents of this file may alternatively be used under the terms
26 * of the Common Development and Distribution License Version 1.0
27 * (CDDL), a copy of it is provided in the "COPYING.CDDL" file included
28 * in the VirtualBox distribution, in which case the provisions of the
29 * CDDL are applicable instead of those of the GPL.
30 *
31 * You may elect to license modified versions of this file under the
32 * terms and conditions of either the GPL or the CDDL or both.
33 *
34 * SPDX-License-Identifier: GPL-3.0-only OR CDDL-1.0
35 */
36
37
38/*********************************************************************************************************************************
39* Header Files *
40*********************************************************************************************************************************/
41#define LOG_GROUP RTLOGGROUP_FILE
42#include <iprt/asm.h>
43#include <iprt/file.h>
44#include <iprt/mem.h>
45#include <iprt/assert.h>
46#include <iprt/string.h>
47#include <iprt/err.h>
48#include <iprt/log.h>
49#include <iprt/thread.h>
50#include "internal/fileaio.h"
51
52#include <sys/types.h>
53#include <sys/event.h>
54#include <sys/time.h>
55#include <sys/sysctl.h>
56#include <aio.h>
57#include <errno.h>
58#include <unistd.h>
59#include <fcntl.h>
60
61
62/*********************************************************************************************************************************
63* Structures and Typedefs *
64*********************************************************************************************************************************/
/**
 * Async I/O completion context state.
 *
 * A context wraps a kqueue descriptor; completed requests are delivered to it
 * as kevents (SIGEV_KEVENT) and harvested by RTFileAioCtxWait().
 */
typedef struct RTFILEAIOCTXINTERNAL
{
    /** Handle to the kernel queue (kqueue file descriptor). */
    int               iKQueue;
    /** Current number of requests active on this context.
     * Incremented on submit, decremented on completion/cancellation. */
    volatile int32_t  cRequests;
    /** The ID of the thread which is currently waiting for requests,
     * NIL_RTTHREAD when nobody is waiting.  Used by RTFileAioCtxWakeup(). */
    volatile RTTHREAD hThreadWait;
    /** Flag whether the thread was woken up (makes the waiter return
     * VERR_INTERRUPTED). */
    volatile bool     fWokenUp;
    /** Flag whether the thread is currently blocked in the kevent() syscall. */
    volatile bool     fWaiting;
    /** Flags given during creation (RTFILEAIOCTX_FLAGS_XXX). */
    uint32_t          fFlags;
    /** Magic value (RTFILEAIOCTX_MAGIC). */
    uint32_t          u32Magic;
} RTFILEAIOCTXINTERNAL;
/** Pointer to an internal context structure. */
typedef RTFILEAIOCTXINTERNAL *PRTFILEAIOCTXINTERNAL;
87
/**
 * Async I/O request state.
 */
typedef struct RTFILEAIOREQINTERNAL
{
    /** The aio control block.  Must be the FIRST element because the
     * public request handle is cast directly to aiocb** for lio_listio(). */
    struct aiocb          AioCB;
    /** Current state the request is in (PREPARED/SUBMITTED/COMPLETED). */
    RTFILEAIOREQSTATE     enmState;
    /** Flag whether this is a flush request (submitted via aio_fsync()
     * instead of lio_listio()). */
    bool                  fFlush;
    /** Opaque user data returned by RTFileAioReqGetUser(). */
    void                 *pvUser;
    /** Completion context we are assigned to, NULL while not submitted. */
    PRTFILEAIOCTXINTERNAL pCtxInt;
    /** Number of bytes actually transferred (valid when COMPLETED). */
    size_t                cbTransfered;
    /** Status code of the completed request. */
    int                   Rc;
    /** Magic value (RTFILEAIOREQ_MAGIC). */
    uint32_t              u32Magic;
} RTFILEAIOREQINTERNAL;
/** Pointer to an internal request structure. */
typedef RTFILEAIOREQINTERNAL *PRTFILEAIOREQINTERNAL;
113
114
115/*********************************************************************************************************************************
116* Defined Constants And Macros *
117*********************************************************************************************************************************/
118/** The max number of events to get in one call. */
119#define AIO_MAXIMUM_REQUESTS_PER_CONTEXT 64
120
RTR3DECL(int) RTFileAioGetLimits(PRTFILEAIOLIMITS pAioLimits)
{
    AssertPtrReturn(pAioLimits, VERR_INVALID_POINTER);

    /*
     * The FreeBSD AIO API lives in a kernel module which is not loaded by
     * default; its sysctl nodes only exist once the module is present.
     */
    int    cMaxReqsPerProc = 0;
    size_t cbOldValue      = sizeof(cMaxReqsPerProc);
    int rcBSD = sysctlbyname("vfs.aio.max_aio_per_proc", /* name */
                             &cMaxReqsPerProc,           /* old value buffer */
                             &cbOldValue,                /* old value size */
                             NULL,                       /* no new value */
                             0);                         /* no new value size */
    if (rcBSD == -1)
    {
        /* ENOENT: the sysctl is unknown, i.e. the aio module is not loaded. */
        return errno == ENOENT ? VERR_NOT_SUPPORTED : RTErrConvertFromErrno(errno);
    }

    pAioLimits->cReqsOutstandingMax = cMaxReqsPerProc;
    pAioLimits->cbBufferAlignment   = 0;
    return VINF_SUCCESS;
}
153
154RTR3DECL(int) RTFileAioReqCreate(PRTFILEAIOREQ phReq)
155{
156 AssertPtrReturn(phReq, VERR_INVALID_POINTER);
157
158 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOREQINTERNAL));
159 if (RT_UNLIKELY(!pReqInt))
160 return VERR_NO_MEMORY;
161
162 /* Ininitialize static parts. */
163 pReqInt->AioCB.aio_sigevent.sigev_notify = SIGEV_KEVENT;
164 pReqInt->AioCB.aio_sigevent.sigev_value.sival_ptr = pReqInt;
165 pReqInt->pCtxInt = NULL;
166 pReqInt->u32Magic = RTFILEAIOREQ_MAGIC;
167 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
168
169 *phReq = (RTFILEAIOREQ)pReqInt;
170
171 return VINF_SUCCESS;
172}
173
174RTDECL(int) RTFileAioReqDestroy(RTFILEAIOREQ hReq)
175{
176 /*
177 * Validate the handle and ignore nil.
178 */
179 if (hReq == NIL_RTFILEAIOREQ)
180 return VINF_SUCCESS;
181 PRTFILEAIOREQINTERNAL pReqInt = hReq;
182 RTFILEAIOREQ_VALID_RETURN(pReqInt);
183 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
184
185 /*
186 * Trash the magic and free it.
187 */
188 ASMAtomicUoWriteU32(&pReqInt->u32Magic, ~RTFILEAIOREQ_MAGIC);
189 RTMemFree(pReqInt);
190 return VINF_SUCCESS;
191}
192
193/**
194 * Worker setting up the request.
195 */
196DECLINLINE(int) rtFileAioReqPrepareTransfer(RTFILEAIOREQ hReq, RTFILE hFile,
197 unsigned uTransferDirection,
198 RTFOFF off, void *pvBuf, size_t cbTransfer,
199 void *pvUser)
200{
201 /*
202 * Validate the input.
203 */
204 PRTFILEAIOREQINTERNAL pReqInt = hReq;
205 RTFILEAIOREQ_VALID_RETURN(pReqInt);
206 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
207 Assert(hFile != NIL_RTFILE);
208 AssertPtr(pvBuf);
209 Assert(off >= 0);
210 Assert(cbTransfer > 0);
211
212 pReqInt->AioCB.aio_sigevent.sigev_notify = SIGEV_KEVENT;
213 pReqInt->AioCB.aio_sigevent.sigev_value.sival_ptr = pReqInt;
214 pReqInt->AioCB.aio_lio_opcode = uTransferDirection;
215 pReqInt->AioCB.aio_fildes = RTFileToNative(hFile);
216 pReqInt->AioCB.aio_offset = off;
217 pReqInt->AioCB.aio_nbytes = cbTransfer;
218 pReqInt->AioCB.aio_buf = pvBuf;
219 pReqInt->fFlush = false;
220 pReqInt->pvUser = pvUser;
221 pReqInt->pCtxInt = NULL;
222 pReqInt->Rc = VERR_FILE_AIO_IN_PROGRESS;
223 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
224
225 return VINF_SUCCESS;
226}
227
228RTDECL(int) RTFileAioReqPrepareRead(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
229 void *pvBuf, size_t cbRead, void *pvUser)
230{
231 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_READ,
232 off, pvBuf, cbRead, pvUser);
233}
234
235RTDECL(int) RTFileAioReqPrepareWrite(RTFILEAIOREQ hReq, RTFILE hFile, RTFOFF off,
236 void const *pvBuf, size_t cbWrite, void *pvUser)
237{
238 return rtFileAioReqPrepareTransfer(hReq, hFile, LIO_WRITE,
239 off, (void *)pvBuf, cbWrite, pvUser);
240}
241
242RTDECL(int) RTFileAioReqPrepareFlush(RTFILEAIOREQ hReq, RTFILE hFile, void *pvUser)
243{
244 PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)hReq;
245
246 RTFILEAIOREQ_VALID_RETURN(pReqInt);
247 Assert(hFile != NIL_RTFILE);
248 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
249
250 pReqInt->fFlush = true;
251 pReqInt->AioCB.aio_fildes = RTFileToNative(hFile);
252 pReqInt->AioCB.aio_offset = 0;
253 pReqInt->AioCB.aio_nbytes = 0;
254 pReqInt->AioCB.aio_buf = NULL;
255 pReqInt->pvUser = pvUser;
256 RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
257
258 return VINF_SUCCESS;
259}
260
261RTDECL(void *) RTFileAioReqGetUser(RTFILEAIOREQ hReq)
262{
263 PRTFILEAIOREQINTERNAL pReqInt = hReq;
264 RTFILEAIOREQ_VALID_RETURN_RC(pReqInt, NULL);
265
266 return pReqInt->pvUser;
267}
268
269RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
270{
271 PRTFILEAIOREQINTERNAL pReqInt = hReq;
272 RTFILEAIOREQ_VALID_RETURN(pReqInt);
273 RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);
274
275
276 int rcBSD = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);
277
278 if (rcBSD == AIO_CANCELED)
279 {
280 /*
281 * Decrement request count because the request will never arrive at the
282 * completion port.
283 */
284 AssertMsg(RT_VALID_PTR(pReqInt->pCtxInt),
285 ("Invalid state. Request was canceled but wasn't submitted\n"));
286
287 ASMAtomicDecS32(&pReqInt->pCtxInt->cRequests);
288 pReqInt->Rc = VERR_FILE_AIO_CANCELED;
289 RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
290 return VINF_SUCCESS;
291 }
292 else if (rcBSD == AIO_ALLDONE)
293 return VERR_FILE_AIO_COMPLETED;
294 else if (rcBSD == AIO_NOTCANCELED)
295 return VERR_FILE_AIO_IN_PROGRESS;
296 else
297 return RTErrConvertFromErrno(errno);
298}
299
300RTDECL(int) RTFileAioReqGetRC(RTFILEAIOREQ hReq, size_t *pcbTransfered)
301{
302 PRTFILEAIOREQINTERNAL pReqInt = hReq;
303 RTFILEAIOREQ_VALID_RETURN(pReqInt);
304 AssertPtrNull(pcbTransfered);
305 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_IN_PROGRESS);
306 RTFILEAIOREQ_NOT_STATE_RETURN_RC(pReqInt, PREPARED, VERR_FILE_AIO_NOT_SUBMITTED);
307
308 if ( (RT_SUCCESS(pReqInt->Rc))
309 && (pcbTransfered))
310 *pcbTransfered = pReqInt->cbTransfered;
311
312 return pReqInt->Rc;
313}
314
RTDECL(int) RTFileAioCtxCreate(PRTFILEAIOCTX phAioCtx, uint32_t cAioReqsMax,
                               uint32_t fFlags)
{
    /*
     * Validate input.  Note: cAioReqsMax is not used by this backend; the
     * context imposes no per-context limit (see RTFileAioCtxGetMaxReqCount
     * returning RTFILEAIO_UNLIMITED_REQS).
     */
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt;
    AssertPtrReturn(phAioCtx, VERR_INVALID_POINTER);
    AssertReturn(!(fFlags & ~RTFILEAIOCTX_FLAGS_VALID_MASK), VERR_INVALID_PARAMETER);

    pCtxInt = (PRTFILEAIOCTXINTERNAL)RTMemAllocZ(sizeof(RTFILEAIOCTXINTERNAL));
    if (RT_UNLIKELY(!pCtxInt))
        return VERR_NO_MEMORY;

    /* Init the event handle. */
    pCtxInt->iKQueue = kqueue();
    if (RT_LIKELY(pCtxInt->iKQueue >= 0)) /* >= 0: 0 is a valid file descriptor. */
    {
        pCtxInt->fFlags   = fFlags;
        pCtxInt->u32Magic = RTFILEAIOCTX_MAGIC;
        *phAioCtx = (RTFILEAIOCTX)pCtxInt;
    }
    else
    {
        RTMemFree(pCtxInt);
        rc = RTErrConvertFromErrno(errno);
    }

    return rc;
}
343
344RTDECL(int) RTFileAioCtxDestroy(RTFILEAIOCTX hAioCtx)
345{
346 /* Validate the handle and ignore nil. */
347 if (hAioCtx == NIL_RTFILEAIOCTX)
348 return VINF_SUCCESS;
349 PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
350 RTFILEAIOCTX_VALID_RETURN(pCtxInt);
351
352 /* Cannot destroy a busy context. */
353 if (RT_UNLIKELY(pCtxInt->cRequests))
354 return VERR_FILE_AIO_BUSY;
355
356 close(pCtxInt->iKQueue);
357 ASMAtomicUoWriteU32(&pCtxInt->u32Magic, RTFILEAIOCTX_MAGIC_DEAD);
358 RTMemFree(pCtxInt);
359
360 return VINF_SUCCESS;
361}
362
363RTDECL(uint32_t) RTFileAioCtxGetMaxReqCount(RTFILEAIOCTX hAioCtx)
364{
365 return RTFILEAIO_UNLIMITED_REQS;
366}
367
368RTDECL(int) RTFileAioCtxAssociateWithFile(RTFILEAIOCTX hAioCtx, RTFILE hFile)
369{
370 return VINF_SUCCESS;
371}
372
/**
 * Submits a batch of prepared requests to a completion context.
 *
 * Regular read/write requests are handed to the kernel in chunks of up to
 * AIO_LISTIO_MAX via lio_listio(); flush requests cannot go through
 * lio_listio() and are submitted one at a time with aio_fsync().
 */
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    /*
     * Parameter validation.
     */
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertReturn(cReqs > 0, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);

    do
    {
        int rcBSD = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        /*
         * Gather a run of non-flush requests, wiring each one to this
         * context's kqueue and marking it SUBMITTED.  Stop at the first
         * flush request or at the lio_listio() batch limit.
         */
        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;
                    pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = 0;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->AioCB.aio_sigevent.sigev_notify_kqueue = pCtxInt->iKQueue;
            pReqInt->pCtxInt                                = pCtxInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            if (pReqInt->fFlush)
                break;

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            /* The request handle points at the aiocb (first struct member),
             * hence the direct cast of the handle array. */
            rcBSD = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcBSD < 0))
            {
                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which requests got actually submitted and which not.
                 * NOTE(review): this scans all cReqs entries, not just the
                 * cReqsSubmit handed to lio_listio(); the aio_error() EINVAL
                 * branch handles the never-submitted ones — confirm intended. */
                for (i = 0; i < cReqs; i++)
                {
                    pReqInt = pahReqs[i];
                    rcBSD = aio_error(&pReqInt->AioCB);
                    if (   rcBSD == -1
                        && errno == EINVAL)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                    }
                    else if (rcBSD != EINPROGRESS)
                    {
                        /* The request encountered an error. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = RTErrConvertFromErrno(rcBSD);
                        pReqInt->pCtxInt = NULL;
                        pReqInt->cbTransfered = 0;
                    }
                }
                break;
            }

            /* Batch accepted: account for it and advance past it. */
            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /* Check if we have a flush request now. */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            RTFILEAIOREQ_VALID_RETURN(pReqInt);

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcBSD = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcBSD < 0))
                {
                    /* NOTE(review): aio_fsync() reports errors via errno;
                     * comparing the -1 return against EAGAIN looks suspect —
                     * verify whether this should test errno instead. */
                    if (rcBSD == EAGAIN)
                    {
                        /* Was not submitted. */
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        pReqInt->pCtxInt = NULL;
                        return VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                    }
                    else
                    {
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = RTErrConvertFromErrno(errno);
                        pReqInt->cbTransfered = 0;
                        return pReqInt->Rc;
                    }
                }

                ASMAtomicIncS32(&pCtxInt->cRequests);
                cReqs--;
                pahReqs++;
            }
        }
    } while (cReqs);

    return rc;
}
499
/**
 * Waits for completed requests on the context's kqueue.
 *
 * Blocks in kevent() until at least cMinReqs requests have completed, the
 * timeout expires, or RTFileAioCtxWakeup() pokes the thread.  Completed
 * request handles are stored in pahReqs and their count in *pcReqs.
 */
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;

    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    if (   RT_UNLIKELY(ASMAtomicReadS32(&pCtxInt->cRequests) == 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec *pTimeout = NULL;
    struct timespec  Timeout = {0,0};
    uint64_t         StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    while (   cMinReqs
           && RT_SUCCESS_NP(rc))
    {
        struct kevent aKEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];

        /* Bound the event count by the array size.  The old code tested
         * cMinReqs but passed cReqs, so cReqs > AIO_MAXIMUM_REQUESTS_PER_CONTEXT
         * let kevent() write past the end of aKEvents. */
        int cRequestsToWait = (int)RT_MIN(cReqs, AIO_MAXIMUM_REQUESTS_PER_CONTEXT);
        int rcBSD;

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rcBSD = kevent(pCtxInt->iKQueue, NULL, 0, aKEvents, cRequestsToWait, pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);

        if (RT_UNLIKELY(rcBSD < 0))
        {
            rc = RTErrConvertFromErrno(errno);
            break;
        }

        uint32_t const cDone = rcBSD;

        /* Process received events. */
        for (uint32_t i = 0; i < cDone; i++)
        {
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aKEvents[i].udata;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /*
             * Retrieve the status code here already because the
             * user may omit the RTFileAioReqGetRC() call and
             * we will leak kernel resources then.
             * This will result in errors during submission
             * of other requests as soon as the max_aio_queue_per_proc
             * limit is reached.
             */
            ssize_t cbTransfered = aio_return(&pReqInt->AioCB);
            if (cbTransfered < 0)
            {
                /* aio_return() returns -1 and reports the error via errno;
                 * converting the -1 itself (as the old code did) yields a
                 * bogus status. */
                pReqInt->Rc = RTErrConvertFromErrno(errno);
                pReqInt->cbTransfered = 0;
            }
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = cbTransfered;
            }
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done yet? If not advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (   pCtxInt->fWokenUp
        && RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
639
/**
 * Interrupts a thread blocked in RTFileAioCtxWait() on this context,
 * making it return VERR_INTERRUPTED.
 */
RTDECL(int) RTFileAioCtxWakeup(RTFILEAIOCTX hAioCtx)
{
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);

    /** @todo r=bird: Define the protocol for how to resume work after calling
     * this function. */

    /* Set the flag first; the previous value tells us whether a wakeup is
     * already pending. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, true);

    /*
     * Read the thread handle before the status flag.
     * If we read the handle after the flag we might
     * end up with an invalid handle because the thread
     * waiting in RTFileAioCtxWait() might get scheduled
     * before we read the flag and returns.
     * We can ensure that the handle is valid if fWaiting is true
     * when reading the handle before the status flag.
     */
    RTTHREAD hThread;
    ASMAtomicReadHandle(&pCtxInt->hThreadWait, &hThread);
    bool fWaiting = ASMAtomicReadBool(&pCtxInt->fWaiting);
    if (   !fWokenUp
        && fWaiting)
    {
        /*
         * If a thread waits the handle must be valid.
         * It is possible that the thread returns from
         * kevent() before the signal is send.
         * This is no problem because we already set fWokenUp
         * to true which will let the thread return VERR_INTERRUPTED
         * and the next call to RTFileAioCtxWait() will not
         * return VERR_INTERRUPTED because signals are not saved
         * and will simply vanish if the destination thread can't
         * receive it.
         */
        Assert(hThread != NIL_RTTHREAD);
        RTThreadPoke(hThread);
    }

    return VINF_SUCCESS;
}
682
Note: See TracBrowser for help on using the repository browser.

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy Automated Access Etiquette