aio: add non-blocking variant of aio_wait
This will be used when polling the GSource attached to an AioContext.

Reviewed-by: Anthony Liguori <aliguori@us.ibm.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 4231c88d27
commit 7c0628b20e
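The commit message only hints at the use case, so here is a rough, non-authoritative sketch of how a GSource built around an AioContext might call the new non-blocking mode from its dispatch callback. The AioGSource wrapper and the aio_gsource_* names are invented for illustration; the actual GSource integration arrives later in this series and differs in detail.

#include <glib.h>
#include "qemu-aio.h"

/* Illustrative wrapper only: the GSource is the first member so the
 * GSource * passed to the callbacks can be cast back to it. */
typedef struct {
    GSource source;
    AioContext *ctx;
} AioGSource;

static gboolean aio_gsource_prepare(GSource *source, gint *timeout)
{
    *timeout = -1;      /* no timeout of our own; wait on the polled fds */
    return FALSE;
}

static gboolean aio_gsource_check(GSource *source)
{
    /* Placeholder: a real implementation would inspect the GPollFDs it
     * registered with g_source_add_poll(). */
    return TRUE;
}

static gboolean aio_gsource_dispatch(GSource *source, GSourceFunc cb,
                                     gpointer user_data)
{
    AioGSource *s = (AioGSource *)source;

    /* The point of this commit: run whatever handlers and bottom halves
     * are ready, without ever blocking the GLib main loop. */
    aio_poll(s->ctx, false);
    return TRUE;        /* keep the source attached */
}

static GSourceFuncs aio_gsource_funcs = {
    .prepare  = aio_gsource_prepare,
    .check    = aio_gsource_check,
    .dispatch = aio_gsource_dispatch,
};

Such a source would be created with g_source_new(&aio_gsource_funcs, sizeof(AioGSource)) and attached with g_source_attach(); registering the context's file descriptors via g_source_add_poll() and computing a real timeout in prepare() is deliberately left out of this sketch.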
aio.c | 20
@@ -93,13 +93,16 @@ void aio_set_event_notifier(AioContext *ctx,
                        (AioFlushHandler *)io_flush, notifier);
 }
 
-bool aio_wait(AioContext *ctx)
+bool aio_poll(AioContext *ctx, bool blocking)
 {
+    static struct timeval tv0;
     AioHandler *node;
     fd_set rdfds, wrfds;
     int max_fd = -1;
     int ret;
-    bool busy;
+    bool busy, progress;
+
+    progress = false;
 
     /*
      * If there are callbacks left that have been queued, we need to call then.
@@ -107,6 +110,11 @@ bool aio_wait(AioContext *ctx)
      * does not need a complete flush (as is the case for qemu_aio_wait loops).
      */
     if (aio_bh_poll(ctx)) {
+        blocking = false;
+        progress = true;
+    }
+
+    if (progress && !blocking) {
         return true;
     }
 
@@ -142,11 +150,11 @@ bool aio_wait(AioContext *ctx)
 
     /* No AIO operations? Get us out of here */
     if (!busy) {
-        return false;
+        return progress;
     }
 
     /* wait until next event */
-    ret = select(max_fd, &rdfds, &wrfds, NULL, NULL);
+    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);
 
     /* if we have any readable fds, dispatch event */
     if (ret > 0) {
@@ -161,11 +169,13 @@ bool aio_wait(AioContext *ctx)
             if (!node->deleted &&
                 FD_ISSET(node->fd, &rdfds) &&
                 node->io_read) {
+                progress = true;
                 node->io_read(node->opaque);
             }
             if (!node->deleted &&
                 FD_ISSET(node->fd, &wrfds) &&
                 node->io_write) {
+                progress = true;
                 node->io_write(node->opaque);
             }
 
@@ -181,5 +191,5 @@ bool aio_wait(AioContext *ctx)
         }
     }
 
-    return true;
+    return progress;
 }
async.c | 2
@@ -144,5 +144,5 @@ AioContext *aio_context_new(void)
 
 void aio_flush(AioContext *ctx)
 {
-    while (aio_wait(ctx));
+    while (aio_poll(ctx, true));
 }
@@ -534,7 +534,7 @@ void qemu_aio_flush(void)
 
 bool qemu_aio_wait(void)
 {
-    return aio_wait(qemu_aio_context);
+    return aio_poll(qemu_aio_context, true);
 }
 
 void qemu_aio_set_fd_handler(int fd,
qemu-aio.h | 21
@@ -133,13 +133,22 @@ void qemu_bh_delete(QEMUBH *bh);
  * outstanding AIO operations have been completed or cancelled. */
void aio_flush(AioContext *ctx);
 
-/* Wait for a single AIO completion to occur. This function will wait
- * until a single AIO event has completed and it will ensure something
- * has moved before returning. This can issue new pending aio as
- * result of executing I/O completion or bh callbacks.
+/* Progress in completing AIO work to occur. This can issue new pending
+ * aio as a result of executing I/O completion or bh callbacks.
  *
- * Return whether there is still any pending AIO operation. */
-bool aio_wait(AioContext *ctx);
+ * If there is no pending AIO operation or completion (bottom half),
+ * return false. If there are pending bottom halves, return true.
+ *
+ * If there are no pending bottom halves, but there are pending AIO
+ * operations, it may not be possible to make any progress without
+ * blocking. If @blocking is true, this function will wait until one
+ * or more AIO events have completed, to ensure something has moved
+ * before returning.
+ *
+ * If @blocking is false, this function will also return false if the
+ * function cannot make any progress without blocking.
+ */
+bool aio_poll(AioContext *ctx, bool blocking);
 
 #ifdef CONFIG_POSIX
 /* Returns 1 if there are still outstanding AIO requests; 0 otherwise */
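Read together, the new header comment and aio_flush() suggest two calling patterns. The sketch below only illustrates the documented return values: drain_aio_requests and pump_aio_once are made-up names, and the blocking loop simply restates aio_flush() from this commit.

#include <stdbool.h>
#include "qemu-aio.h"

/* Blocking mode: loop until aio_poll() reports that no more progress
 * can be made -- the same loop aio_flush() uses above. */
static void drain_aio_requests(AioContext *ctx)
{
    while (aio_poll(ctx, true)) {
        /* each iteration ran at least one bottom half or I/O handler */
    }
}

/* Non-blocking mode: do whatever is ready right now and report whether
 * anything happened; false means progress would have required blocking. */
static bool pump_aio_once(AioContext *ctx)
{
    return aio_poll(ctx, false);
}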