/*
 * QEMU Block driver for NBD
 *
 * Copyright (C) 2016 Red Hat, Inc.
 * Copyright (C) 2008 Bull S.A.S.
 *     Author: Laurent Vivier <Laurent.Vivier@bull.net>
 *
 * Some parts:
 *     Copyright (C) 2007 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "nbd-client.h"

#define HANDLE_TO_INDEX(bs, handle) ((handle) ^ (uint64_t)(intptr_t)(bs))
#define INDEX_TO_HANDLE(bs, index)  ((index)  ^ (uint64_t)(intptr_t)(bs))
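
/* nbd_recv_coroutines_wake_all
 * Wake every request coroutine that is currently parked in
 * nbd_co_do_receive_one_chunk() waiting for a reply, so that each of
 * them can fail its request instead of hanging.
 */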
static void nbd_recv_coroutines_wake_all(NBDClientSession *s)
{
    int i;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        NBDClientRequest *req = &s->requests[i];

        if (req->coroutine && req->receiving) {
            aio_co_wake(req->coroutine);
        }
    }
}
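
/* nbd_teardown_connection
 * Shut down the channel, wait for nbd_read_reply_entry() to finish, and
 * drop the references to the socket and I/O channel objects.
 */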
static void nbd_teardown_connection(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);

    if (!client->ioc) { /* Already closed */
        return;
    }

    /* finish any pending coroutines */
    qio_channel_shutdown(client->ioc,
                         QIO_CHANNEL_SHUTDOWN_BOTH,
                         NULL);
    BDRV_POLL_WHILE(bs, client->read_reply_co);

    nbd_client_detach_aio_context(bs);
    object_unref(OBJECT(client->sioc));
    client->sioc = NULL;
    object_unref(OBJECT(client->ioc));
    client->ioc = NULL;
}
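
/* nbd_read_reply_entry
 * Connection-wide reader coroutine: it receives one reply header at a
 * time, wakes the coroutine owning the matching request, then yields
 * until that request is done with s->reply.  On any error it marks the
 * client broken and wakes all waiting requests.
 */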
static coroutine_fn void nbd_read_reply_entry(void *opaque)
{
    NBDClientSession *s = opaque;
    uint64_t i;
    int ret = 0;
    Error *local_err = NULL;

    while (!s->quit) {
        assert(s->reply.handle == 0);
        ret = nbd_receive_reply(s->ioc, &s->reply, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
        }
        if (ret <= 0) {
            break;
        }

        /* There's no need for a mutex on the receive side, because the
         * handler acts as a synchronization point and ensures that only
         * one coroutine is called until the reply finishes.
         */
        i = HANDLE_TO_INDEX(s, s->reply.handle);
        if (i >= MAX_NBD_REQUESTS ||
            !s->requests[i].coroutine ||
            !s->requests[i].receiving ||
            (nbd_reply_is_structured(&s->reply) && !s->info.structured_reply))
        {
            break;
        }

        /* We're woken up again by the request itself.  Note that there
         * is no race between yielding and reentering read_reply_co.  This
         * is because:
         *
         * - if the request runs on the same AioContext, it is only
         *   entered after we yield
         *
         * - if the request runs on a different AioContext, reentering
         *   read_reply_co happens through a bottom half, which can only
         *   run after we yield.
         */
        aio_co_wake(s->requests[i].coroutine);
        qemu_coroutine_yield();
    }
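
    /* Once we are out of sync or otherwise cannot trust the server, any
     * further incoming data is unreliable: fail all pending requests with
     * EIO and stop sending new commands. */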
    s->quit = true;
    nbd_recv_coroutines_wake_all(s);
    s->read_reply_co = NULL;
}
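
/* nbd_co_send_request
 * Reserve a request slot, register the calling coroutine in it and send
 * the request header (plus the payload in @qiov for writes).  Returns 0
 * on success or a negative errno; on failure the slot is released again
 * and the client is marked broken.
 */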
static int nbd_co_send_request(BlockDriverState *bs,
                               NBDRequest *request,
                               QEMUIOVector *qiov)
{
    NBDClientSession *s = nbd_get_client_session(bs);
    int rc, i;

    qemu_co_mutex_lock(&s->send_mutex);
    while (s->in_flight == MAX_NBD_REQUESTS) {
        qemu_co_queue_wait(&s->free_sema, &s->send_mutex);
    }
    s->in_flight++;

    for (i = 0; i < MAX_NBD_REQUESTS; i++) {
        if (s->requests[i].coroutine == NULL) {
            break;
        }
    }

    g_assert(qemu_in_coroutine());
    assert(i < MAX_NBD_REQUESTS);
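
    /* Claim the slot.  ->receiving stays false until the request actually
     * waits for its reply, so nbd_recv_coroutines_wake_all() will not
     * re-enter a coroutine that is merely queued on the send mutex. */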
    s->requests[i].coroutine = qemu_coroutine_self();
    s->requests[i].offset = request->from;
    s->requests[i].receiving = false;

    request->handle = INDEX_TO_HANDLE(s, i);

    if (s->quit) {
        rc = -EIO;
        goto err;
    }
    if (!s->ioc) {
        rc = -EPIPE;
        goto err;
    }

    if (qiov) {
        qio_channel_set_cork(s->ioc, true);
        rc = nbd_send_request(s->ioc, request);
        if (rc >= 0 && !s->quit) {
            if (qio_channel_writev_all(s->ioc, qiov->iov, qiov->niov,
                                       NULL) < 0) {
                rc = -EIO;
            }
        } else if (rc >= 0) {
            rc = -EIO;
        }
        qio_channel_set_cork(s->ioc, false);
    } else {
        rc = nbd_send_request(s->ioc, request);
    }

err:
    if (rc < 0) {
        s->quit = true;
        s->requests[i].coroutine = NULL;
        s->in_flight--;
        qemu_co_queue_next(&s->free_sema);
    }
    qemu_co_mutex_unlock(&s->send_mutex);
    return rc;
}
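
/* Read a big-endian integer out of *payload and advance the pointer past
 * the bytes consumed. */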
static inline uint16_t payload_advance16(uint8_t **payload)
{
    *payload += 2;
    return lduw_be_p(*payload - 2);
}

static inline uint32_t payload_advance32(uint8_t **payload)
{
    *payload += 4;
    return ldl_be_p(*payload - 4);
}

static inline uint64_t payload_advance64(uint8_t **payload)
{
    *payload += 8;
    return ldq_be_p(*payload - 8);
}
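
/* nbd_parse_offset_hole_payload
 * Zero out the part of @qiov that an NBD_REPLY_TYPE_OFFSET_HOLE chunk
 * describes, after checking that the hole fits within the requested
 * region.
 */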
static int nbd_parse_offset_hole_payload(NBDStructuredReplyChunk *chunk,
                                         uint8_t *payload, uint64_t orig_offset,
                                         QEMUIOVector *qiov, Error **errp)
{
    uint64_t offset;
    uint32_t hole_size;

    if (chunk->length != sizeof(offset) + sizeof(hole_size)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_HOLE");
        return -EINVAL;
    }

    offset = payload_advance64(&payload);
    hole_size = payload_advance32(&payload);

    if (offset < orig_offset || hole_size > qiov->size ||
        offset > orig_offset + qiov->size - hole_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }

    qemu_iovec_memset(qiov, offset - orig_offset, 0, hole_size);

    return 0;
}

/* nbd_parse_error_payload
 * On success, @request_ret is set to the negative errno corresponding to
 * the server's error reply; @errp is set only when the error chunk itself
 * is malformed.
 */
static int nbd_parse_error_payload(NBDStructuredReplyChunk *chunk,
                                   uint8_t *payload, int *request_ret,
                                   Error **errp)
{
    uint32_t error;
    uint16_t message_size;

    assert(chunk->type & (1 << 15));

    if (chunk->length < sizeof(error) + sizeof(message_size)) {
        error_setg(errp,
                   "Protocol error: invalid payload for structured error");
        return -EINVAL;
    }

    error = nbd_errno_to_system_errno(payload_advance32(&payload));
    if (error == 0) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with error = 0");
        return -EINVAL;
    }

    *request_ret = -error;
    message_size = payload_advance16(&payload);

    if (message_size > chunk->length - sizeof(error) - sizeof(message_size)) {
        error_setg(errp, "Protocol error: server sent structured error chunk "
                         "with incorrect message size");
        return -EINVAL;
    }

    /* TODO: Add a trace point to mention the server complaint */

    /* TODO handle ERROR_OFFSET */

    return 0;
}
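
/* nbd_co_receive_offset_data_payload
 * Read the payload of an NBD_REPLY_TYPE_OFFSET_DATA chunk directly into
 * the part of @qiov that corresponds to the offset sent by the server.
 */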
static int nbd_co_receive_offset_data_payload(NBDClientSession *s,
                                              uint64_t orig_offset,
                                              QEMUIOVector *qiov, Error **errp)
{
    QEMUIOVector sub_qiov;
    uint64_t offset;
    size_t data_size;
    int ret;
    NBDStructuredReplyChunk *chunk = &s->reply.structured;

    assert(nbd_reply_is_structured(&s->reply));

    if (chunk->length < sizeof(offset)) {
        error_setg(errp, "Protocol error: invalid payload for "
                         "NBD_REPLY_TYPE_OFFSET_DATA");
        return -EINVAL;
    }

    if (nbd_read(s->ioc, &offset, sizeof(offset), errp) < 0) {
        return -EIO;
    }
    be64_to_cpus(&offset);

    data_size = chunk->length - sizeof(offset);
    if (offset < orig_offset || data_size > qiov->size ||
        offset > orig_offset + qiov->size - data_size) {
        error_setg(errp, "Protocol error: server sent chunk exceeding requested"
                         " region");
        return -EINVAL;
    }

    qemu_iovec_init(&sub_qiov, qiov->niov);
    qemu_iovec_concat(&sub_qiov, qiov, offset - orig_offset, data_size);
    ret = qio_channel_readv_all(s->ioc, sub_qiov.iov, sub_qiov.niov, errp);
    qemu_iovec_destroy(&sub_qiov);

    return ret < 0 ? -EIO : 0;
}
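
/* Upper bound on a single allocation for a structured reply payload; the
 * chunk length is supplied by the (untrusted) server. */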
#define NBD_MAX_MALLOC_PAYLOAD 1000

/* nbd_co_receive_structured_payload
 * Read the current chunk's payload into a freshly allocated buffer stored
 * in *payload; the caller must g_free() it.  A zero-length payload is not
 * an error and leaves *payload untouched.
 */
static coroutine_fn int nbd_co_receive_structured_payload(
        NBDClientSession *s, void **payload, Error **errp)
{
    int ret;
    uint32_t len;

    assert(nbd_reply_is_structured(&s->reply));

    len = s->reply.structured.length;

    if (len == 0) {
        return 0;
    }

    if (payload == NULL) {
        error_setg(errp, "Unexpected structured payload");
        return -EINVAL;
    }

    if (len > NBD_MAX_MALLOC_PAYLOAD) {
        error_setg(errp, "Payload too large");
        return -EINVAL;
    }

    *payload = g_new(char, len);
    ret = nbd_read(s->ioc, *payload, len, errp);
    if (ret < 0) {
        g_free(*payload);
        *payload = NULL;
        return ret;
    }

    return 0;
}

/* nbd_co_do_receive_one_chunk
 * for simple reply:
 *   set request_ret to received reply error
 *   if qiov is not NULL: read payload to @qiov
 * for structured reply chunk:
 *   if error chunk: read payload, set @request_ret, do not set @payload
 *   else if offset_data chunk: read payload data to @qiov, do not set @payload
 *   else: read payload to @payload
 *
 * If the function fails, @errp contains the corresponding error message, and
 * the connection with the server is suspect.  If it returns 0, then the
 * transaction succeeded (although @request_ret may be a negative errno
 * corresponding to the server's error reply), and errp is unchanged.
 */
static coroutine_fn int nbd_co_do_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        int *request_ret, QEMUIOVector *qiov, void **payload, Error **errp)
{
    int ret;
    int i = HANDLE_TO_INDEX(s, handle);
    void *local_payload = NULL;
    NBDStructuredReplyChunk *chunk;

    if (payload) {
        *payload = NULL;
    }
    *request_ret = 0;

    /* Wait until we're woken up by nbd_read_reply_entry.  */
    s->requests[i].receiving = true;
    qemu_coroutine_yield();
    s->requests[i].receiving = false;
    if (!s->ioc || s->quit) {
        error_setg(errp, "Connection closed");
        return -EIO;
    }

    assert(s->reply.handle == handle);

    if (nbd_reply_is_simple(&s->reply)) {
        if (only_structured) {
            error_setg(errp, "Protocol error: simple reply when structured "
                             "reply chunk was expected");
            return -EINVAL;
        }

        *request_ret = -nbd_errno_to_system_errno(s->reply.simple.error);
        if (*request_ret < 0 || !qiov) {
            return 0;
        }

        return qio_channel_readv_all(s->ioc, qiov->iov, qiov->niov,
                                     errp) < 0 ? -EIO : 0;
    }

    /* handle structured reply chunk */
    assert(s->info.structured_reply);
    chunk = &s->reply.structured;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        if (!(chunk->flags & NBD_REPLY_FLAG_DONE)) {
            error_setg(errp, "Protocol error: NBD_REPLY_TYPE_NONE chunk without"
                             " NBD_REPLY_FLAG_DONE flag set");
            return -EINVAL;
        }
        return 0;
    }

    if (chunk->type == NBD_REPLY_TYPE_OFFSET_DATA) {
        if (!qiov) {
            error_setg(errp, "Unexpected NBD_REPLY_TYPE_OFFSET_DATA chunk");
            return -EINVAL;
        }

        return nbd_co_receive_offset_data_payload(s, s->requests[i].offset,
                                                  qiov, errp);
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        payload = &local_payload;
    }

    ret = nbd_co_receive_structured_payload(s, payload, errp);
    if (ret < 0) {
        return ret;
    }

    if (nbd_reply_type_is_error(chunk->type)) {
        ret = nbd_parse_error_payload(chunk, local_payload, request_ret, errp);
        g_free(local_payload);
        return ret;
    }

    return 0;
}

/* nbd_co_receive_one_chunk
 * Read one reply chunk, wake up read_reply_co and set s->quit if needed.
 * The return value is a fatal error code, or the normal per-request NBD
 * reply error code.
 */
static coroutine_fn int nbd_co_receive_one_chunk(
        NBDClientSession *s, uint64_t handle, bool only_structured,
        QEMUIOVector *qiov, NBDReply *reply, void **payload, Error **errp)
{
    int request_ret;
    int ret = nbd_co_do_receive_one_chunk(s, handle, only_structured,
                                          &request_ret, qiov, payload, errp);

    if (ret < 0) {
        s->quit = true;
    } else {
        /* For assert at loop start in nbd_read_reply_entry */
        if (reply) {
            *reply = s->reply;
        }
        s->reply.handle = 0;
        ret = request_ret;
    }

    if (s->read_reply_co) {
        aio_co_wake(s->read_reply_co);
    }

    return ret;
}
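
/* Iteration state for NBD_FOREACH_REPLY_CHUNK.  A fatal error overrides
 * any earlier recorded error; otherwise the first error wins.  @done is
 * set once a chunk with NBD_REPLY_FLAG_DONE has been seen. */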
typedef struct NBDReplyChunkIter {
    int ret;
    Error *err;
    bool done, only_structured;
} NBDReplyChunkIter;

static void nbd_iter_error(NBDReplyChunkIter *iter, bool fatal,
                           int ret, Error **local_err)
{
    assert(ret < 0);

    if (fatal || iter->ret == 0) {
        if (iter->ret != 0) {
            error_free(iter->err);
            iter->err = NULL;
        }
        iter->ret = ret;
        error_propagate(&iter->err, *local_err);
    } else {
        error_free(*local_err);
    }

    *local_err = NULL;
}

/* NBD_FOREACH_REPLY_CHUNK
 * Receive every chunk of one reply; the loop body runs once per
 * structured reply chunk (never for a simple reply).
 */
#define NBD_FOREACH_REPLY_CHUNK(s, iter, handle, structured, \
                                qiov, reply, payload) \
    for (iter = (NBDReplyChunkIter) { .only_structured = structured }; \
         nbd_reply_chunk_iter_receive(s, &iter, handle, qiov, reply, payload);)

/* nbd_reply_chunk_iter_receive
 * Receive the next chunk for @handle.  Returns true if the loop body of
 * NBD_FOREACH_REPLY_CHUNK should run for this chunk, false when the reply
 * is complete (or the connection is broken) and the request slot has been
 * released.
 */
static bool nbd_reply_chunk_iter_receive(NBDClientSession *s,
                                         NBDReplyChunkIter *iter,
                                         uint64_t handle,
                                         QEMUIOVector *qiov, NBDReply *reply,
                                         void **payload)
{
    int ret;
    NBDReply local_reply;
    NBDStructuredReplyChunk *chunk;
    Error *local_err = NULL;
    if (s->quit) {
        error_setg(&local_err, "Connection closed");
        nbd_iter_error(iter, true, -EIO, &local_err);
        goto break_loop;
    }

    if (iter->done) {
        /* Previous iteration was last. */
        goto break_loop;
    }

    if (reply == NULL) {
        reply = &local_reply;
    }

    ret = nbd_co_receive_one_chunk(s, handle, iter->only_structured,
                                   qiov, reply, payload, &local_err);
    if (ret < 0) {
        /* If it is a fatal error s->quit is set by nbd_co_receive_one_chunk */
        nbd_iter_error(iter, s->quit, ret, &local_err);
    }

    /* Do not execute the body of NBD_FOREACH_REPLY_CHUNK for simple reply. */
    if (nbd_reply_is_simple(&s->reply) || s->quit) {
        goto break_loop;
    }

    chunk = &reply->structured;
    iter->only_structured = true;

    if (chunk->type == NBD_REPLY_TYPE_NONE) {
        /* NBD_REPLY_FLAG_DONE is already checked in nbd_co_receive_one_chunk */
        assert(chunk->flags & NBD_REPLY_FLAG_DONE);
        goto break_loop;
    }

    if (chunk->flags & NBD_REPLY_FLAG_DONE) {
        /* This iteration is last. */
        iter->done = true;
    }

    /* Execute the loop body */
    return true;

break_loop:
    s->requests[HANDLE_TO_INDEX(s, handle)].coroutine = NULL;

    qemu_co_mutex_lock(&s->send_mutex);
    s->in_flight--;
    qemu_co_queue_next(&s->free_sema);
    qemu_co_mutex_unlock(&s->send_mutex);

    return false;
}

static int nbd_co_receive_return_code(NBDClientSession *s, uint64_t handle,
                                      Error **errp)
{
    NBDReplyChunkIter iter;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, false, NULL, NULL, NULL) {
        /* nbd_reply_chunk_iter_receive does all the work */
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}

static int nbd_co_receive_cmdread_reply(NBDClientSession *s, uint64_t handle,
                                        uint64_t offset, QEMUIOVector *qiov,
                                        Error **errp)
{
    NBDReplyChunkIter iter;
    NBDReply reply;
    void *payload = NULL;
    Error *local_err = NULL;

    NBD_FOREACH_REPLY_CHUNK(s, iter, handle, s->info.structured_reply,
                            qiov, &reply, &payload)
    {
        int ret;
        NBDStructuredReplyChunk *chunk = &reply.structured;

        assert(nbd_reply_is_structured(&reply));

        switch (chunk->type) {
        case NBD_REPLY_TYPE_OFFSET_DATA:
            /* special cased in nbd_co_receive_one_chunk, data is already
             * in qiov */
            break;
        case NBD_REPLY_TYPE_OFFSET_HOLE:
            ret = nbd_parse_offset_hole_payload(&reply.structured, payload,
                                                offset, qiov, &local_err);
            if (ret < 0) {
                s->quit = true;
                nbd_iter_error(&iter, true, ret, &local_err);
            }
            break;
        default:
            if (!nbd_reply_type_is_error(chunk->type)) {
                /* not an allowed reply type */
                s->quit = true;
                error_setg(&local_err,
                           "Unexpected reply type: %d (%s) for CMD_READ",
                           chunk->type, nbd_reply_type_lookup(chunk->type));
                nbd_iter_error(&iter, true, -EINVAL, &local_err);
            }
        }

        g_free(payload);
        payload = NULL;
    }

    error_propagate(errp, iter.err);
    return iter.ret;
}
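
/*
 * For illustration, one possible structured reply to a 64 KiB NBD_CMD_READ
 * over a sparse region (servers are free to chunk the reply differently):
 *
 *     NBD_REPLY_TYPE_OFFSET_DATA   offset 0,     32 KiB of data payload
 *     NBD_REPLY_TYPE_OFFSET_HOLE   offset 32768, hole length 32768
 *     NBD_REPLY_TYPE_NONE          flags = NBD_REPLY_FLAG_DONE
 *
 * The loop above lets the data chunk land in @qiov during receive and has
 * nbd_parse_offset_hole_payload() zero out the hole's bytes in @qiov.
 */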

static int nbd_co_request(BlockDriverState *bs, NBDRequest *request,
                          QEMUIOVector *write_qiov)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);

    assert(request->type != NBD_CMD_READ);
    if (write_qiov) {
        assert(request->type == NBD_CMD_WRITE);
        assert(request->len == iov_size(write_qiov->iov, write_qiov->niov));
    } else {
        assert(request->type != NBD_CMD_WRITE);
    }
    ret = nbd_co_send_request(bs, request, write_qiov);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_return_code(client, request->handle, &local_err);
    if (local_err) {
        error_report_err(local_err);
    }
    return ret;
}
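
/*
 * A minimal caller sketch; this is exactly the pattern the exported
 * functions below follow. Build an NBDRequest, then let nbd_co_request()
 * send it and wait for the per-request status:
 *
 *     NBDRequest request = { .type = NBD_CMD_FLUSH };
 *
 *     return nbd_co_request(bs, &request, NULL);
 */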

int nbd_client_co_preadv(BlockDriverState *bs, uint64_t offset,
                         uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    int ret;
    Error *local_err = NULL;
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_READ,
        .from = offset,
        .len = bytes,
    };

    assert(bytes <= NBD_MAX_BUFFER_SIZE);
    assert(!flags);

    /* READ does not go through nbd_co_request(): the qiov must be available
     * while the reply is received, so the data chunks can be placed into it
     * directly. */
    ret = nbd_co_send_request(bs, &request, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = nbd_co_receive_cmdread_reply(client, request.handle, offset, qiov,
                                       &local_err);
    if (ret < 0) {
        error_report_err(local_err);
    }
    return ret;
}

int nbd_client_co_pwritev(BlockDriverState *bs, uint64_t offset,
                          uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE,
        .from = offset,
        .len = bytes,
    };

    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }

    assert(bytes <= NBD_MAX_BUFFER_SIZE);

    return nbd_co_request(bs, &request, qiov);
}

int nbd_client_co_pwrite_zeroes(BlockDriverState *bs, int64_t offset,
                                int bytes, BdrvRequestFlags flags)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_WRITE_ZEROES,
        .from = offset,
        .len = bytes,
    };

    if (!(client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES)) {
        return -ENOTSUP;
    }

    if (flags & BDRV_REQ_FUA) {
        assert(client->info.flags & NBD_FLAG_SEND_FUA);
        request.flags |= NBD_CMD_FLAG_FUA;
    }
    if (!(flags & BDRV_REQ_MAY_UNMAP)) {
        /* Without MAY_UNMAP the server must keep the range allocated. */
        request.flags |= NBD_CMD_FLAG_NO_HOLE;
    }

    return nbd_co_request(bs, &request, NULL);
}
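
/*
 * Flag mapping used above, as a reading aid (block-layer request flags are
 * translated into NBD command flags):
 *
 *     BDRV_REQ_FUA         -> NBD_CMD_FLAG_FUA      (write through to disk)
 *     !BDRV_REQ_MAY_UNMAP  -> NBD_CMD_FLAG_NO_HOLE  (do not punch holes)
 */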

int nbd_client_co_flush(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_FLUSH };

    if (!(client->info.flags & NBD_FLAG_SEND_FLUSH)) {
        return 0;
    }

    request.from = 0;
    request.len = 0;

    return nbd_co_request(bs, &request, NULL);
}

int nbd_client_co_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = {
        .type = NBD_CMD_TRIM,
        .from = offset,
        .len = bytes,
    };

    if (!(client->info.flags & NBD_FLAG_SEND_TRIM)) {
        return 0;
    }

    return nbd_co_request(bs, &request, NULL);
}

void nbd_client_detach_aio_context(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_detach_aio_context(QIO_CHANNEL(client->ioc));
}

void nbd_client_attach_aio_context(BlockDriverState *bs,
                                   AioContext *new_context)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    qio_channel_attach_aio_context(QIO_CHANNEL(client->ioc), new_context);
    aio_co_schedule(new_context, client->read_reply_co);
}

void nbd_client_close(BlockDriverState *bs)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    NBDRequest request = { .type = NBD_CMD_DISC };

    if (client->ioc == NULL) {
        return;
    }

    nbd_send_request(client->ioc, &request);

    nbd_teardown_connection(bs);
}

int nbd_client_init(BlockDriverState *bs,
                    QIOChannelSocket *sioc,
                    const char *export,
                    QCryptoTLSCreds *tlscreds,
                    const char *hostname,
                    Error **errp)
{
    NBDClientSession *client = nbd_get_client_session(bs);
    int ret;

    /* NBD handshake */
    logout("session init %s\n", export);
    qio_channel_set_blocking(QIO_CHANNEL(sioc), true, NULL);

    client->info.request_sizes = true;
    client->info.structured_reply = true;
    ret = nbd_receive_negotiate(QIO_CHANNEL(sioc), export,
                                tlscreds, hostname,
                                &client->ioc, &client->info, errp);
    if (ret < 0) {
        logout("Failed to negotiate with the NBD server\n");
        return ret;
    }
    if (client->info.flags & NBD_FLAG_SEND_FUA) {
        bs->supported_write_flags = BDRV_REQ_FUA;
        bs->supported_zero_flags |= BDRV_REQ_FUA;
    }
    if (client->info.flags & NBD_FLAG_SEND_WRITE_ZEROES) {
        bs->supported_zero_flags |= BDRV_REQ_MAY_UNMAP;
    }
    if (client->info.min_block > bs->bl.request_alignment) {
        bs->bl.request_alignment = client->info.min_block;
    }

    qemu_co_mutex_init(&client->send_mutex);
    qemu_co_queue_init(&client->free_sema);
    client->sioc = sioc;
    object_ref(OBJECT(client->sioc));

    if (!client->ioc) {
        client->ioc = QIO_CHANNEL(sioc);
        object_ref(OBJECT(client->ioc));
    }

    /* Now that we're connected, set the socket to be non-blocking and
     * kick the reply mechanism. */
    qio_channel_set_blocking(QIO_CHANNEL(sioc), false, NULL);
    client->read_reply_co = qemu_coroutine_create(nbd_read_reply_entry, client);
    nbd_client_attach_aio_context(bs, bdrv_get_aio_context(bs));

    logout("Established connection with NBD server\n");
    return 0;
}
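
/*
 * A hypothetical caller sketch (the real caller lives in block/nbd.c and is
 * not part of this file); @saddr and @export_name stand in for whatever the
 * caller resolved from its options. Connect a socket, then hand it over:
 *
 *     QIOChannelSocket *sioc = qio_channel_socket_new();
 *
 *     if (qio_channel_socket_connect_sync(sioc, saddr, errp) < 0) {
 *         object_unref(OBJECT(sioc));
 *         return -ECONNREFUSED;
 *     }
 *     ret = nbd_client_init(bs, sioc, export_name, tlscreds, hostname, errp);
 *     object_unref(OBJECT(sioc));
 */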