Merge remote-tracking branch 'remotes/stefanha/tags/block-pull-request' into staging

Pull request

This pull request contains a virtio-blk/scsi performance optimization, event
loop scalability improvements, and a qtest-based device fuzzing framework.  I
am including the fuzzing patches because I have reviewed them and Thomas Huth
is currently away on leave.

# gpg: Signature made Sat 22 Feb 2020 08:50:05 GMT
# gpg:                using RSA key 8695A8BFD3F97CDAAC35775A9CA4ABB381AB73C8
# gpg: Good signature from "Stefan Hajnoczi <stefanha@redhat.com>" [full]
# gpg:                 aka "Stefan Hajnoczi <stefanha@gmail.com>" [full]
# Primary key fingerprint: 8695 A8BF D3F9 7CDA AC35  775A 9CA4 ABB3 81AB 73C8

* remotes/stefanha/tags/block-pull-request: (31 commits)
  fuzz: add documentation to docs/devel/
  fuzz: add virtio-scsi fuzz target
  fuzz: add virtio-net fuzz target
  fuzz: add i440fx fuzz targets
  fuzz: add configure flag --enable-fuzzing
  fuzz: add target/fuzz makefile rules
  fuzz: add support for qos-assisted fuzz targets
  fuzz: support for fork-based fuzzing.
  main: keep rcu_atfork callback enabled for qtest
  exec: keep ram block across fork when using qtest
  fuzz: add fuzzer skeleton
  libqos: move useful qos-test funcs to qos_external
  libqos: split qos-test and libqos makefile vars
  libqos: rename i2c_send and i2c_recv
  qtest: add in-process incoming command handler
  libqtest: make bufwrite rely on the TransportOps
  libqtest: add a layer of abstraction to send/recv
  qtest: add qtest_server_send abstraction
  fuzz: add FUZZ_TARGET module type
  module: check module wasn't already initialized
  ...

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
commit c1e667d259
Peter Maydell, 2020-02-24 11:38:54 +00:00
51 changed files with 2366 additions and 401 deletions


@ -2031,7 +2031,8 @@ F: include/qemu/main-loop.h
F: include/sysemu/runstate.h
F: util/main-loop.c
F: util/qemu-timer.c
F: vl.c
F: softmmu/vl.c
F: softmmu/main.c
F: qapi/run-state.json
Human Monitor (HMP)
@ -2183,6 +2184,14 @@ F: qtest.c
F: accel/qtest.c
F: tests/qtest/
Device Fuzzing
M: Alexander Bulekov <alxndr@bu.edu>
R: Paolo Bonzini <pbonzini@redhat.com>
R: Bandan Das <bsd@redhat.com>
R: Stefan Hajnoczi <stefanha@redhat.com>
S: Maintained
F: tests/qtest/fuzz/
Register API
M: Alistair Francis <alistair@alistair23.me>
S: Maintained


@ -477,7 +477,7 @@ config-host.h-timestamp: config-host.mak
qemu-options.def: $(SRC_PATH)/qemu-options.hx $(SRC_PATH)/scripts/hxtool
$(call quiet-command,sh $(SRC_PATH)/scripts/hxtool -h < $< > $@,"GEN","$@")
TARGET_DIRS_RULES := $(foreach t, all clean install, $(addsuffix /$(t), $(TARGET_DIRS)))
TARGET_DIRS_RULES := $(foreach t, all fuzz clean install, $(addsuffix /$(t), $(TARGET_DIRS)))
SOFTMMU_ALL_RULES=$(filter %-softmmu/all, $(TARGET_DIRS_RULES))
$(SOFTMMU_ALL_RULES): $(authz-obj-y)
@ -490,6 +490,15 @@ ifdef DECOMPRESS_EDK2_BLOBS
$(SOFTMMU_ALL_RULES): $(edk2-decompressed)
endif
SOFTMMU_FUZZ_RULES=$(filter %-softmmu/fuzz, $(TARGET_DIRS_RULES))
$(SOFTMMU_FUZZ_RULES): $(authz-obj-y)
$(SOFTMMU_FUZZ_RULES): $(block-obj-y)
$(SOFTMMU_FUZZ_RULES): $(chardev-obj-y)
$(SOFTMMU_FUZZ_RULES): $(crypto-obj-y)
$(SOFTMMU_FUZZ_RULES): $(io-obj-y)
$(SOFTMMU_FUZZ_RULES): config-all-devices.mak
$(SOFTMMU_FUZZ_RULES): $(edk2-decompressed)
.PHONY: $(TARGET_DIRS_RULES)
# The $(TARGET_DIRS_RULES) are of the form SUBDIR/GOAL, so that
# $(dir $@) yields the sub-directory, and $(notdir $@) yields the sub-goal
@ -540,6 +549,9 @@ subdir-slirp: slirp/all
$(filter %/all, $(TARGET_DIRS_RULES)): libqemuutil.a $(common-obj-y) \
$(qom-obj-y)
$(filter %/fuzz, $(TARGET_DIRS_RULES)): libqemuutil.a $(common-obj-y) \
$(qom-obj-y) $(crypto-user-obj-$(CONFIG_USER_ONLY))
ROM_DIRS = $(addprefix pc-bios/, $(ROMS))
ROM_DIRS_RULES=$(foreach t, all clean, $(addsuffix /$(t), $(ROM_DIRS)))
# Only keep -O and -g cflags
@ -549,6 +561,7 @@ $(ROM_DIRS_RULES):
.PHONY: recurse-all recurse-clean recurse-install
recurse-all: $(addsuffix /all, $(TARGET_DIRS) $(ROM_DIRS))
recurse-fuzz: $(addsuffix /fuzz, $(TARGET_DIRS) $(ROM_DIRS))
recurse-clean: $(addsuffix /clean, $(TARGET_DIRS) $(ROM_DIRS))
recurse-install: $(addsuffix /install, $(TARGET_DIRS))
$(addsuffix /install, $(TARGET_DIRS)): all


@ -58,8 +58,6 @@ common-obj-y += ui/
common-obj-m += ui/
common-obj-y += dma-helpers.o
common-obj-y += vl.o
vl.o-cflags := $(GPROF_CFLAGS) $(SDL_CFLAGS)
common-obj-$(CONFIG_TPM) += tpm.o
common-obj-y += backends/


@ -160,6 +160,7 @@ obj-y += qapi/
obj-y += memory.o
obj-y += memory_mapping.o
obj-y += migration/ram.o
obj-y += softmmu/
LIBS := $(libs_softmmu) $(LIBS)
# Hardware support
@ -202,7 +203,7 @@ endif
COMMON_LDADDS = ../libqemuutil.a
# build either PROG or PROGW
$(QEMU_PROG_BUILD): $(all-obj-y) $(COMMON_LDADDS)
$(QEMU_PROG_BUILD): $(all-obj-y) $(COMMON_LDADDS) $(softmmu-main-y)
$(call LINK, $(filter-out %.mak, $^))
ifdef CONFIG_DARWIN
$(call quiet-command,Rez -append $(SRC_PATH)/pc-bios/qemu.rsrc -o $@,"REZ","$(TARGET_DIR)$@")
@ -227,6 +228,22 @@ ifdef CONFIG_TRACE_SYSTEMTAP
rm -f *.stp
endif
ifdef CONFIG_FUZZ
include $(SRC_PATH)/tests/qtest/fuzz/Makefile.include
include $(SRC_PATH)/tests/qtest/Makefile.include
fuzz: fuzz-vars
fuzz-vars: QEMU_CFLAGS := $(FUZZ_CFLAGS) $(QEMU_CFLAGS)
fuzz-vars: QEMU_LDFLAGS := $(FUZZ_LDFLAGS) $(QEMU_LDFLAGS)
fuzz-vars: $(QEMU_PROG_FUZZ)
dummy := $(call unnest-vars,, fuzz-obj-y)
$(QEMU_PROG_FUZZ): config-devices.mak $(all-obj-y) $(COMMON_LDADDS) $(fuzz-obj-y)
$(call LINK, $(filter-out %.mak, $^))
endif
install: all
ifneq ($(PROGS),)
$(call install-prog,$(PROGS),$(DESTDIR)$(bindir))


@ -2636,10 +2636,7 @@ BdrvChild *bdrv_attach_child(BlockDriverState *parent_bs,
static void bdrv_detach_child(BdrvChild *child)
{
if (child->next.le_prev) {
QLIST_REMOVE(child, next);
child->next.le_prev = NULL;
}
QLIST_SAFE_REMOVE(child, next);
bdrv_replace_child(child, NULL);


@ -216,9 +216,7 @@ static void char_spice_finalize(Object *obj)
vmc_unregister_interface(s);
if (s->next.le_prev) {
QLIST_REMOVE(s, next);
}
QLIST_SAFE_REMOVE(s, next);
g_free((char *)s->sin.subtype);
g_free((char *)s->sin.portname);

configure

@ -505,6 +505,7 @@ debug_mutex="no"
libpmem=""
default_devices="yes"
plugins="no"
fuzzing="no"
supported_cpu="no"
supported_os="no"
@ -635,6 +636,15 @@ int main(void) { return 0; }
EOF
}
write_c_fuzzer_skeleton() {
cat > $TMPC <<EOF
#include <stdint.h>
#include <sys/types.h>
int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) { return 0; }
EOF
}
if check_define __linux__ ; then
targetos="Linux"
elif check_define _WIN32 ; then
@ -1558,6 +1568,10 @@ for opt do
;;
--disable-containers) use_containers="no"
;;
--enable-fuzzing) fuzzing=yes
;;
--disable-fuzzing) fuzzing=no
;;
*)
echo "ERROR: unknown option $opt"
echo "Try '$0 --help' for more information"
@ -6072,6 +6086,15 @@ EOF
fi
fi
##########################################
# checks for fuzzer
if test "$fuzzing" = "yes" ; then
write_c_fuzzer_skeleton
if compile_prog "$CPU_CFLAGS -Werror -fsanitize=address,fuzzer" ""; then
have_fuzzer=yes
fi
fi
##########################################
# check for libpmem
@ -6666,6 +6689,7 @@ echo "libpmem support $libpmem"
echo "libudev $libudev"
echo "default devices $default_devices"
echo "plugin support $plugins"
echo "fuzzing support $fuzzing"
if test "$supported_cpu" = "no"; then
echo
@ -7504,6 +7528,16 @@ fi
if test "$sheepdog" = "yes" ; then
echo "CONFIG_SHEEPDOG=y" >> $config_host_mak
fi
if test "$fuzzing" = "yes" ; then
if test "$have_fuzzer" = "yes"; then
FUZZ_LDFLAGS=" -fsanitize=address,fuzzer"
FUZZ_CFLAGS=" -fsanitize=address,fuzzer"
CFLAGS=" -fsanitize=address,fuzzer-no-link"
else
error_exit "Your compiler doesn't support -fsanitize=address,fuzzer"
exit 1
fi
fi
if test "$plugins" = "yes" ; then
echo "CONFIG_PLUGIN=y" >> $config_host_mak
@ -7605,6 +7639,11 @@ if test "$libudev" != "no"; then
echo "CONFIG_LIBUDEV=y" >> $config_host_mak
echo "LIBUDEV_LIBS=$libudev_libs" >> $config_host_mak
fi
if test "$fuzzing" != "no"; then
echo "CONFIG_FUZZ=y" >> $config_host_mak
echo "FUZZ_CFLAGS=$FUZZ_CFLAGS" >> $config_host_mak
echo "FUZZ_LDFLAGS=$FUZZ_LDFLAGS" >> $config_host_mak
fi
if test "$edk2_blobs" = "yes" ; then
echo "DECOMPRESS_EDK2_BLOBS=y" >> $config_host_mak

docs/devel/fuzzing.txt (new file)

@ -0,0 +1,116 @@
= Fuzzing =
== Introduction ==
This document describes the virtual-device fuzzing infrastructure in QEMU and
how to use it to implement additional fuzzers.
== Basics ==
Fuzzing operates by passing inputs to an entry point/target function. The
fuzzer tracks the code coverage triggered by the input. Based on these
findings, the fuzzer mutates the input and repeats the fuzzing.
To fuzz QEMU, we rely on libfuzzer. Unlike other fuzzers such as AFL, libfuzzer
is an _in-process_ fuzzer. For the developer, this means that it is their
responsibility to ensure that state is reset between fuzzing-runs.
== Building the fuzzers ==
NOTE: If possible, build a 32-bit binary. When forking, the 32-bit fuzzer is
much faster, since the page-map has a smaller size. This is due to the fact that
AddressSanitizer mmaps ~20TB of memory, as part of its detection. This results
in a large page-map, and a much slower fork().
To build the fuzzers, install a recent version of clang:
Configure with (substitute the clang binaries with the version you installed):
CC=clang-8 CXX=clang++-8 /path/to/configure --enable-fuzzing
Fuzz targets are built similarly to system/softmmu:
make i386-softmmu/fuzz
This builds ./i386-softmmu/qemu-fuzz-i386
The first option to this command is: --fuzz-target=FUZZ_NAME
To list all of the available fuzzers run qemu-fuzz-i386 with no arguments.
eg:
./i386-softmmu/qemu-fuzz-i386 --fuzz-target=virtio-net-fork-fuzz
Internally, libfuzzer parses all arguments that do not begin with "--".
Information about these is available by passing -help=1
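For example, to keep a persistent corpus directory and cap the input size
(the directory name and the -max_len value below are arbitrary choices):
./i386-softmmu/qemu-fuzz-i386 --fuzz-target=virtio-net-fork-fuzz -max_len=4096 ./fuzz-corpus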
Now the only thing left to do is wait for the fuzzer to trigger potential
crashes.
== Adding a new fuzzer ==
Coverage over virtual devices can be improved by adding additional fuzzers.
Fuzzers are kept in tests/qtest/fuzz/ and should be added to
tests/qtest/fuzz/Makefile.include
Fuzzers can rely on both qtest and libqos to communicate with virtual devices.
1. Create a new source file. For example ``tests/qtest/fuzz/foo-device-fuzz.c``.
2. Write the fuzzing code using the libqtest/libqos API. See existing fuzzers
for reference.
3. Register the fuzzer in ``tests/qtest/fuzz/Makefile.include`` by appending the
corresponding object to fuzz-obj-y
Fuzzers can be more-or-less thought of as special qtest programs which can
modify the qtest commands and/or qtest command arguments based on inputs
provided by libfuzzer. Libfuzzer passes a byte array and length. Commonly the
fuzzer loops over the byte-array interpreting it as a list of qtest commands,
addresses, or values.
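As a rough illustration only (the i440fx and virtio targets added later in this
series are the reference; foo-device and its register layout are made up here),
the ``foo-device-fuzz.c`` from step 1 might contain something like:

#include "qemu/osdep.h"
#include "tests/qtest/libqtest.h"
#include "fuzz.h"

static void foo_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
{
    /* Interpret the input as a list of (offset, value) register writes */
    struct {
        uint8_t offset;
        uint32_t value;
    } a;

    while (Size >= sizeof(a)) {
        memcpy(&a, Data, sizeof(a));
        /* 0xe0000000 is a made-up MMIO base for the made-up foo-device */
        qtest_writel(s, 0xe0000000 + a.offset, a.value);
        Data += sizeof(a);
        Size -= sizeof(a);
    }
    flush_events(s);
}

static const char *foo_argv(FuzzTarget *t)
{
    return TARGET_NAME " -machine accel=qtest -display none";
}

static void register_foo_fuzz_target(void)
{
    fuzz_add_target(&(FuzzTarget){
            .name = "foo-device-fuzz",
            .description = "Fuzz the made-up foo-device registers",
            .get_init_cmdline = foo_argv,
            .fuzz = foo_fuzz});
}

fuzz_target_init(register_foo_fuzz_target);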
= Implementation Details =
== The Fuzzer's Lifecycle ==
The fuzzer has two entrypoints that libfuzzer calls. libfuzzer provides its
own main(), which performs some setup, and calls the entrypoints:
LLVMFuzzerInitialize: called prior to fuzzing. Used to initialize all of the
necessary state
LLVMFuzzerTestOneInput: called for each fuzzing run. Processes the input and
resets the state at the end of each run.
In more detail:
LLVMFuzzerInitialize parses the arguments to the fuzzer (must start with two
dashes, so they are ignored by libfuzzer main()). Currently, the arguments
select the fuzz target. Then, the qtest client is initialized. If the target
requires qos, qgraph is set up and the QOM/LIBQOS modules are initialized.
Then the QGraph is walked and the QEMU cmd_line is determined and saved.
After this, the vl.c:qemu_main is called to set up the guest. There are
target-specific hooks that can be called before and after qemu_main, for
additional setup (e.g. PCI setup, or VM snapshotting).
LLVMFuzzerTestOneInput: Uses qtest/qos functions to act based on the fuzz
input. It is also responsible for manually calling the main loop (main_loop_wait)
to ensure that bottom halves are executed, and for any cleanup required before
the next input.
Since the same process is reused for many fuzzing runs, QEMU state needs to
be reset at the end of each run. There are currently two implemented
options for resetting state:
1. Reboot the guest between runs.
Pros: Straightforward and fast for simple fuzz targets.
Cons: Depending on the device, does not reset all device state. If the
device requires some initialization prior to being ready for fuzzing
(common for QOS-based targets), this initialization needs to be done after
each reboot.
Example target: i440fx-qtest-reboot-fuzz
2. Run each test case in a separate forked process and copy the coverage
information back to the parent. This is fairly similar to AFL's "deferred"
fork-server mode [3]
Pros: Relatively fast. Devices only need to be initialized once. No need
to do slow reboots or vmloads.
Cons: Not officially supported by libfuzzer. Does not work well for devices
that rely on dedicated threads.
Example target: virtio-net-fork-fuzz
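Condensed, the fork-based pattern used by those targets (see
i440fx_fuzz_qos_fork and virtio_net_fork_fuzz later in this series) looks like
the sketch below; foo_fuzz is the hypothetical target body from the sketch
above, and counter_shm_init() from fork_fuzz.c is what makes the child's
coverage counters visible to the parent:

static void foo_fork_fuzz(QTestState *s, const unsigned char *Data, size_t Size)
{
    if (fork() == 0) {
        /* Child: run one input against the already-initialized VM... */
        foo_fuzz(s, Data, Size);
        flush_events(s);
        /* ...then exit without cleanup; state changes die with the child */
        _Exit(0);
    } else {
        /* Parent: wait so runs stay sequential and coverage stays synced */
        wait(NULL);
    }
}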

exec.c

@ -35,6 +35,7 @@
#include "sysemu/kvm.h"
#include "sysemu/sysemu.h"
#include "sysemu/tcg.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
@ -2305,8 +2306,15 @@ static void ram_block_add(RAMBlock *new_block, Error **errp, bool shared)
if (new_block->host) {
qemu_ram_setup_dump(new_block->host, new_block->max_length);
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_HUGEPAGE);
/* MADV_DONTFORK is also needed by KVM in absence of synchronous MMU */
qemu_madvise(new_block->host, new_block->max_length, QEMU_MADV_DONTFORK);
/*
* MADV_DONTFORK is also needed by KVM in the absence of a synchronous MMU.
* Configure it unless the machine is a qtest server, in which case
* KVM is not used and it may be forked (e.g. for fuzzing purposes).
*/
if (!qtest_enabled()) {
qemu_madvise(new_block->host, new_block->max_length,
QEMU_MADV_DONTFORK);
}
ram_block_notify_add(new_block->host, new_block->max_length);
}
}


@ -1272,7 +1272,7 @@ static Property virtio_blk_properties[] = {
DEFINE_PROP_BIT("request-merging", VirtIOBlock, conf.request_merging, 0,
true),
DEFINE_PROP_UINT16("num-queues", VirtIOBlock, conf.num_queues, 1),
DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 128),
DEFINE_PROP_UINT16("queue-size", VirtIOBlock, conf.queue_size, 256),
DEFINE_PROP_BOOL("seg-max-adjust", VirtIOBlock, conf.seg_max_adjust, true),
DEFINE_PROP_LINK("iothread", VirtIOBlock, conf.iothread, TYPE_IOTHREAD,
IOThread *),


@ -28,6 +28,8 @@
#include "hw/mem/nvdimm.h"
GlobalProperty hw_compat_4_2[] = {
{ "virtio-blk-device", "queue-size", "128"},
{ "virtio-scsi-device", "virtqueue_size", "128"},
{ "virtio-blk-device", "x-enable-wce-if-config-wce", "off" },
{ "virtio-blk-device", "seg-max-adjust", "off"},
{ "virtio-scsi-device", "seg_max_adjust", "off"},


@ -965,7 +965,7 @@ static void virtio_scsi_device_unrealize(DeviceState *dev, Error **errp)
static Property virtio_scsi_properties[] = {
DEFINE_PROP_UINT32("num_queues", VirtIOSCSI, parent_obj.conf.num_queues, 1),
DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSI,
parent_obj.conf.virtqueue_size, 128),
parent_obj.conf.virtqueue_size, 256),
DEFINE_PROP_BOOL("seg_max_adjust", VirtIOSCSI,
parent_obj.conf.seg_max_adjust, true),
DEFINE_PROP_UINT32("max_sectors", VirtIOSCSI, parent_obj.conf.max_sectors,


@ -42,6 +42,7 @@ void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);
typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);
@ -51,6 +52,19 @@ struct ThreadPool;
struct LinuxAioState;
struct LuringState;
/*
* Each aio_bh_poll() call carves off a slice of the BH list, so that newly
* scheduled BHs are not processed until the next aio_bh_poll() call. All
* active aio_bh_poll() calls chain their slices together in a list, so that
* nested aio_bh_poll() calls process all scheduled bottom halves.
*/
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
BHList bh_list;
QSIMPLEQ_ENTRY(BHListSlice) next;
};
struct AioContext {
GSource source;
@ -58,7 +72,10 @@ struct AioContext {
QemuRecMutex lock;
/* The list of registered AIO handlers. Protected by ctx->list_lock. */
QLIST_HEAD(, AioHandler) aio_handlers;
AioHandlerList aio_handlers;
/* The list of AIO handlers to be deleted. Protected by ctx->list_lock. */
AioHandlerList deleted_aio_handlers;
/* Used to avoid unnecessary event_notifier_set calls in aio_notify;
* accessed with atomic primitives. If this field is 0, everything
@ -91,8 +108,11 @@ struct AioContext {
*/
QemuLockCnt list_lock;
/* Anchor of the list of Bottom Halves belonging to the context */
struct QEMUBH *first_bh;
/* Bottom Halves pending aio_bh_poll() processing */
BHList bh_list;
/* Chained BH list slices for each nested aio_bh_poll() call */
QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;
/* Used by aio_notify.
*

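The slice mechanism described in the comment above can be pictured with this
simplified sketch of one aio_bh_poll() round (not the actual util/async.c
code: it omits the BH flag handling, and the name of the QEMUBH list entry
("next") is assumed):

static void aio_bh_poll_sketch(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    QEMUBH *bh;

    /* Carve off everything scheduled so far into a private slice... */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    /* ...and chain it, so that a nested call also processes it. */
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        bh = QSLIST_FIRST(&s->bh_list);
        if (!bh) {
            /* Slice drained (possibly by a nested call): drop it */
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }
        QSLIST_REMOVE_HEAD(&s->bh_list, next); /* entry name assumed */
        aio_bh_call(bh); /* may recurse into aio_bh_poll() */
    }
}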

@ -46,6 +46,7 @@ typedef enum {
MODULE_INIT_TRACE,
MODULE_INIT_XEN_BACKEND,
MODULE_INIT_LIBQOS,
MODULE_INIT_FUZZ_TARGET,
MODULE_INIT_MAX
} module_init_type;
@ -56,7 +57,8 @@ typedef enum {
#define xen_backend_init(function) module_init(function, \
MODULE_INIT_XEN_BACKEND)
#define libqos_init(function) module_init(function, MODULE_INIT_LIBQOS)
#define fuzz_target_init(function) module_init(function, \
MODULE_INIT_FUZZ_TARGET)
#define block_module_load_one(lib) module_load_one("block-", lib)
#define ui_module_load_one(lib) module_load_one("ui-", lib)
#define audio_module_load_one(lib) module_load_one("audio-", lib)


@ -144,6 +144,23 @@ struct { \
*(elm)->field.le_prev = (elm)->field.le_next; \
} while (/*CONSTCOND*/0)
/*
* Like QLIST_REMOVE() but safe to call when elm is not in a list
*/
#define QLIST_SAFE_REMOVE(elm, field) do { \
if ((elm)->field.le_prev != NULL) { \
if ((elm)->field.le_next != NULL) \
(elm)->field.le_next->field.le_prev = \
(elm)->field.le_prev; \
*(elm)->field.le_prev = (elm)->field.le_next; \
(elm)->field.le_next = NULL; \
(elm)->field.le_prev = NULL; \
} \
} while (/*CONSTCOND*/0)
/* Is elm in a list? */
#define QLIST_IS_INSERTED(elm, field) ((elm)->field.le_prev != NULL)
#define QLIST_FOREACH(var, head, field) \
for ((var) = ((head)->lh_first); \
(var); \
@ -211,9 +228,20 @@ struct { \
(head)->slh_first = (head)->slh_first->field.sle_next; \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
#define QSLIST_REMOVE_AFTER(slistelm, field) do { \
(slistelm)->field.sle_next = \
QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \
QSLIST_NEXT(QSLIST_NEXT((slistelm), field), field); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE(head, elm, type, field) do { \
if ((head)->slh_first == (elm)) { \
QSLIST_REMOVE_HEAD((head), field); \
} else { \
struct type *curelm = (head)->slh_first; \
while (curelm->field.sle_next != (elm)) \
curelm = curelm->field.sle_next; \
curelm->field.sle_next = curelm->field.sle_next->field.sle_next; \
} \
} while (/*CONSTCOND*/0)
#define QSLIST_FOREACH(var, head, field) \


@ -262,6 +262,53 @@ extern "C" {
(var) && ((next) = atomic_rcu_read(&(var)->field.tqe_next), 1); \
(var) = (next))
/*
* RCU singly-linked list
*/
/* Singly-linked list access methods */
#define QSLIST_EMPTY_RCU(head) (atomic_read(&(head)->slh_first) == NULL)
#define QSLIST_FIRST_RCU(head) atomic_rcu_read(&(head)->slh_first)
#define QSLIST_NEXT_RCU(elm, field) atomic_rcu_read(&(elm)->field.sle_next)
/* Singly-linked list functions */
#define QSLIST_INSERT_HEAD_RCU(head, elm, field) do { \
(elm)->field.sle_next = (head)->slh_first; \
atomic_rcu_set(&(head)->slh_first, (elm)); \
} while (/*CONSTCOND*/0)
#define QSLIST_INSERT_AFTER_RCU(head, listelm, elm, field) do { \
(elm)->field.sle_next = (listelm)->field.sle_next; \
atomic_rcu_set(&(listelm)->field.sle_next, (elm)); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_HEAD_RCU(head, field) do { \
atomic_set(&(head)->slh_first, (head)->slh_first->field.sle_next); \
} while (/*CONSTCOND*/0)
#define QSLIST_REMOVE_RCU(head, elm, type, field) do { \
if ((head)->slh_first == (elm)) { \
QSLIST_REMOVE_HEAD_RCU((head), field); \
} else { \
struct type *curr = (head)->slh_first; \
while (curr->field.sle_next != (elm)) { \
curr = curr->field.sle_next; \
} \
atomic_set(&curr->field.sle_next, \
curr->field.sle_next->field.sle_next); \
} \
} while (/*CONSTCOND*/0)
#define QSLIST_FOREACH_RCU(var, head, field) \
for ((var) = atomic_rcu_read(&(head)->slh_first); \
(var); \
(var) = atomic_rcu_read(&(var)->field.sle_next))
#define QSLIST_FOREACH_SAFE_RCU(var, head, field, next) \
for ((var) = atomic_rcu_read(&(head)->slh_first); \
(var) && ((next) = atomic_rcu_read(&(var)->field.sle_next), 1); \
(var) = (next))
#ifdef __cplusplus
}
#endif


@ -26,4 +26,8 @@ bool qtest_driver(void);
void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **errp);
void qtest_server_set_send_handler(void (*send)(void *, const char *),
void *opaque);
void qtest_server_inproc_recv(void *opaque, const char *buf);
#endif


@ -115,6 +115,10 @@ QemuOpts *qemu_get_machine_opts(void);
bool defaults_enabled(void);
void qemu_init(int argc, char **argv, char **envp);
void qemu_main_loop(void);
void qemu_cleanup(void);
extern QemuOptsList qemu_legacy_drive_opts;
extern QemuOptsList qemu_common_drive_opts;
extern QemuOptsList qemu_drive_opts;

qtest.c

@ -43,6 +43,8 @@ static GString *inbuf;
static int irq_levels[MAX_IRQ];
static qemu_timeval start_time;
static bool qtest_opened;
static void (*qtest_server_send)(void*, const char*);
static void *qtest_server_send_opaque;
#define FMT_timeval "%ld.%06ld"
@ -229,8 +231,10 @@ static void GCC_FMT_ATTR(1, 2) qtest_log_send(const char *fmt, ...)
va_end(ap);
}
static void do_qtest_send(CharBackend *chr, const char *str, size_t len)
static void qtest_server_char_be_send(void *opaque, const char *str)
{
size_t len = strlen(str);
CharBackend* chr = (CharBackend *)opaque;
qemu_chr_fe_write_all(chr, (uint8_t *)str, len);
if (qtest_log_fp && qtest_opened) {
fprintf(qtest_log_fp, "%s", str);
@ -239,7 +243,7 @@ static void do_qtest_send(CharBackend *chr, const char *str, size_t len)
static void qtest_send(CharBackend *chr, const char *str)
{
do_qtest_send(chr, str, strlen(str));
qtest_server_send(qtest_server_send_opaque, str);
}
static void GCC_FMT_ATTR(2, 3) qtest_sendf(CharBackend *chr,
@ -784,9 +788,32 @@ void qtest_server_init(const char *qtest_chrdev, const char *qtest_log, Error **
qemu_chr_fe_set_echo(&qtest_chr, true);
inbuf = g_string_new("");
if (!qtest_server_send) {
qtest_server_set_send_handler(qtest_server_char_be_send, &qtest_chr);
}
}
void qtest_server_set_send_handler(void (*send)(void*, const char*), void *opaque)
{
qtest_server_send = send;
qtest_server_send_opaque = opaque;
}
bool qtest_driver(void)
{
return qtest_chr.chr != NULL;
}
void qtest_server_inproc_recv(void *dummy, const char *buf)
{
static GString *gstr;
if (!gstr) {
gstr = g_string_new(NULL);
}
g_string_append(gstr, buf);
if (gstr->str[gstr->len - 1] == '\n') {
qtest_process_inbuf(NULL, gstr);
g_string_truncate(gstr, 0);
}
}


@ -462,7 +462,7 @@ sub top_of_kernel_tree {
my @tree_check = (
"COPYING", "MAINTAINERS", "Makefile",
"README.rst", "docs", "VERSION",
"vl.c"
"linux-user", "softmmu"
);
foreach my $check (@tree_check) {


@ -795,7 +795,8 @@ sub top_of_tree {
&& (-f "${lk_path}Makefile")
&& (-d "${lk_path}docs")
&& (-f "${lk_path}VERSION")
&& (-f "${lk_path}vl.c")) {
&& (-d "${lk_path}linux-user/")
&& (-d "${lk_path}softmmu/")) {
return 1;
}
return 0;

softmmu/Makefile.objs (new file)

@ -0,0 +1,3 @@
softmmu-main-y = softmmu/main.o
obj-y += vl.o
vl.o-cflags := $(GPROF_CFLAGS) $(SDL_CFLAGS)

softmmu/main.c (new file)

@ -0,0 +1,53 @@
/*
* QEMU System Emulator
*
* Copyright (c) 2003-2020 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "sysemu/sysemu.h"
#ifdef CONFIG_SDL
#if defined(__APPLE__) || defined(main)
#include <SDL.h>
int main(int argc, char **argv)
{
return qemu_main(argc, argv, NULL);
}
#undef main
#define main qemu_main
#endif
#endif /* CONFIG_SDL */
#ifdef CONFIG_COCOA
#undef main
#define main qemu_main
#endif /* CONFIG_COCOA */
int main(int argc, char **argv, char **envp)
{
qemu_init(argc, argv, envp);
qemu_main_loop();
qemu_cleanup();
return 0;
}


@ -36,25 +36,6 @@
#include "sysemu/seccomp.h"
#include "sysemu/tcg.h"
#ifdef CONFIG_SDL
#if defined(__APPLE__) || defined(main)
#include <SDL.h>
int qemu_main(int argc, char **argv, char **envp);
int main(int argc, char **argv)
{
return qemu_main(argc, argv, NULL);
}
#undef main
#define main qemu_main
#endif
#endif /* CONFIG_SDL */
#ifdef CONFIG_COCOA
#undef main
#define main qemu_main
#endif /* CONFIG_COCOA */
#include "qemu/error-report.h"
#include "qemu/sockets.h"
#include "sysemu/accel.h"
@ -1670,7 +1651,7 @@ static bool main_loop_should_exit(void)
return false;
}
static void main_loop(void)
void qemu_main_loop(void)
{
#ifdef CONFIG_PROFILER
int64_t ti;
@ -2805,7 +2786,7 @@ static void configure_accelerators(const char *progname)
}
}
int main(int argc, char **argv, char **envp)
void qemu_init(int argc, char **argv, char **envp)
{
int i;
int snapshot, linux_boot;
@ -3357,7 +3338,7 @@ int main(int argc, char **argv, char **envp)
case QEMU_OPTION_watchdog:
if (watchdog) {
error_report("only one watchdog option may be given");
return 1;
exit(1);
}
watchdog = optarg;
break;
@ -3801,7 +3782,17 @@ int main(int argc, char **argv, char **envp)
set_memory_options(&ram_slots, &maxram_size, machine_class);
os_daemonize();
rcu_disable_atfork();
/*
* If QTest is enabled, keep the rcu_atfork enabled, since system processes
* may be forked for testing purposes (e.g. fork-server based fuzzing). The
* fork should happen before a single cpu instruction is executed, to prevent
* deadlocks. See commit 73c6e40, rcu: "completely disable pthread_atfork
* callbacks as soon as possible"
*/
if (!qtest_enabled()) {
rcu_disable_atfork();
}
if (pid_file && !qemu_write_pidfile(pid_file, &err)) {
error_reportf_err(err, "cannot create PID file: ");
@ -4269,7 +4260,7 @@ int main(int argc, char **argv, char **envp)
parse_numa_opts(current_machine);
/* do monitor/qmp handling at preconfig state if requested */
main_loop();
qemu_main_loop();
audio_init_audiodevs();
@ -4387,7 +4378,7 @@ int main(int argc, char **argv, char **envp)
if (vmstate_dump_file) {
/* dump and exit */
dump_vmstate_json_to_file(vmstate_dump_file);
return 0;
exit(0);
}
if (incoming) {
@ -4404,8 +4395,11 @@ int main(int argc, char **argv, char **envp)
accel_setup_post(current_machine);
os_setup_post();
main_loop();
return;
}
void qemu_cleanup(void)
{
gdbserver_cleanup();
/*
@ -4442,6 +4436,4 @@ int main(int argc, char **argv, char **envp)
qemu_chr_cleanup();
user_creatable_cleanup();
/* TODO: unref root container, check all devices are ok */
return 0;
}


@ -98,6 +98,7 @@ check-unit-y += tests/rcutorture$(EXESUF)
check-unit-y += tests/test-rcu-list$(EXESUF)
check-unit-y += tests/test-rcu-simpleq$(EXESUF)
check-unit-y += tests/test-rcu-tailq$(EXESUF)
check-unit-y += tests/test-rcu-slist$(EXESUF)
check-unit-y += tests/test-qdist$(EXESUF)
check-unit-y += tests/test-qht$(EXESUF)
check-unit-y += tests/test-qht-par$(EXESUF)
@ -415,6 +416,7 @@ tests/rcutorture$(EXESUF): tests/rcutorture.o $(test-util-obj-y)
tests/test-rcu-list$(EXESUF): tests/test-rcu-list.o $(test-util-obj-y)
tests/test-rcu-simpleq$(EXESUF): tests/test-rcu-simpleq.o $(test-util-obj-y)
tests/test-rcu-tailq$(EXESUF): tests/test-rcu-tailq.o $(test-util-obj-y)
tests/test-rcu-slist$(EXESUF): tests/test-rcu-slist.o $(test-util-obj-y)
tests/test-qdist$(EXESUF): tests/test-qdist.o $(test-util-obj-y)
tests/test-qht$(EXESUF): tests/test-qht.o $(test-util-obj-y)
tests/test-qht-par$(EXESUF): tests/test-qht-par.o tests/qht-bench$(EXESUF) $(test-util-obj-y)


@ -157,52 +157,54 @@ check-qtest-s390x-y += migration-test
# libqos / qgraph :
libqgraph-obj-y = tests/qtest/libqos/qgraph.o
libqos-obj-y = $(libqgraph-obj-y) tests/qtest/libqos/pci.o tests/qtest/libqos/fw_cfg.o
libqos-obj-y += tests/qtest/libqos/malloc.o
libqos-obj-y += tests/qtest/libqos/libqos.o
libqos-spapr-obj-y = $(libqos-obj-y) tests/qtest/libqos/malloc-spapr.o
libqos-core-obj-y = $(libqgraph-obj-y) tests/qtest/libqos/pci.o tests/qtest/libqos/fw_cfg.o
libqos-core-obj-y += tests/qtest/libqos/malloc.o
libqos-core-obj-y += tests/qtest/libqos/libqos.o
libqos-spapr-obj-y = $(libqos-core-obj-y) tests/qtest/libqos/malloc-spapr.o
libqos-spapr-obj-y += tests/qtest/libqos/libqos-spapr.o
libqos-spapr-obj-y += tests/qtest/libqos/rtas.o
libqos-spapr-obj-y += tests/qtest/libqos/pci-spapr.o
libqos-pc-obj-y = $(libqos-obj-y) tests/qtest/libqos/pci-pc.o
libqos-pc-obj-y = $(libqos-core-obj-y) tests/qtest/libqos/pci-pc.o
libqos-pc-obj-y += tests/qtest/libqos/malloc-pc.o tests/qtest/libqos/libqos-pc.o
libqos-pc-obj-y += tests/qtest/libqos/ahci.o
libqos-usb-obj-y = $(libqos-spapr-obj-y) $(libqos-pc-obj-y) tests/qtest/libqos/usb.o
# qos devices:
qos-test-obj-y = tests/qtest/qos-test.o $(libqgraph-obj-y)
qos-test-obj-y += $(libqos-pc-obj-y) $(libqos-spapr-obj-y)
qos-test-obj-y += tests/qtest/libqos/e1000e.o
qos-test-obj-y += tests/qtest/libqos/i2c.o
qos-test-obj-y += tests/qtest/libqos/i2c-imx.o
qos-test-obj-y += tests/qtest/libqos/i2c-omap.o
qos-test-obj-y += tests/qtest/libqos/sdhci.o
qos-test-obj-y += tests/qtest/libqos/tpci200.o
qos-test-obj-y += tests/qtest/libqos/virtio.o
qos-test-obj-$(CONFIG_VIRTFS) += tests/qtest/libqos/virtio-9p.o
qos-test-obj-y += tests/qtest/libqos/virtio-balloon.o
qos-test-obj-y += tests/qtest/libqos/virtio-blk.o
qos-test-obj-y += tests/qtest/libqos/virtio-mmio.o
qos-test-obj-y += tests/qtest/libqos/virtio-net.o
qos-test-obj-y += tests/qtest/libqos/virtio-pci.o
qos-test-obj-y += tests/qtest/libqos/virtio-pci-modern.o
qos-test-obj-y += tests/qtest/libqos/virtio-rng.o
qos-test-obj-y += tests/qtest/libqos/virtio-scsi.o
qos-test-obj-y += tests/qtest/libqos/virtio-serial.o
libqos-obj-y = $(libqgraph-obj-y)
libqos-obj-y += $(libqos-pc-obj-y) $(libqos-spapr-obj-y)
libqos-obj-y += tests/qtest/libqos/qos_external.o
libqos-obj-y += tests/qtest/libqos/e1000e.o
libqos-obj-y += tests/qtest/libqos/i2c.o
libqos-obj-y += tests/qtest/libqos/i2c-imx.o
libqos-obj-y += tests/qtest/libqos/i2c-omap.o
libqos-obj-y += tests/qtest/libqos/sdhci.o
libqos-obj-y += tests/qtest/libqos/tpci200.o
libqos-obj-y += tests/qtest/libqos/virtio.o
libqos-obj-$(CONFIG_VIRTFS) += tests/qtest/libqos/virtio-9p.o
libqos-obj-y += tests/qtest/libqos/virtio-balloon.o
libqos-obj-y += tests/qtest/libqos/virtio-blk.o
libqos-obj-y += tests/qtest/libqos/virtio-mmio.o
libqos-obj-y += tests/qtest/libqos/virtio-net.o
libqos-obj-y += tests/qtest/libqos/virtio-pci.o
libqos-obj-y += tests/qtest/libqos/virtio-pci-modern.o
libqos-obj-y += tests/qtest/libqos/virtio-rng.o
libqos-obj-y += tests/qtest/libqos/virtio-scsi.o
libqos-obj-y += tests/qtest/libqos/virtio-serial.o
# qos machines:
qos-test-obj-y += tests/qtest/libqos/aarch64-xlnx-zcu102-machine.o
qos-test-obj-y += tests/qtest/libqos/arm-imx25-pdk-machine.o
qos-test-obj-y += tests/qtest/libqos/arm-n800-machine.o
qos-test-obj-y += tests/qtest/libqos/arm-raspi2-machine.o
qos-test-obj-y += tests/qtest/libqos/arm-sabrelite-machine.o
qos-test-obj-y += tests/qtest/libqos/arm-smdkc210-machine.o
qos-test-obj-y += tests/qtest/libqos/arm-virt-machine.o
qos-test-obj-y += tests/qtest/libqos/arm-xilinx-zynq-a9-machine.o
qos-test-obj-y += tests/qtest/libqos/ppc64_pseries-machine.o
qos-test-obj-y += tests/qtest/libqos/x86_64_pc-machine.o
libqos-obj-y += tests/qtest/libqos/aarch64-xlnx-zcu102-machine.o
libqos-obj-y += tests/qtest/libqos/arm-imx25-pdk-machine.o
libqos-obj-y += tests/qtest/libqos/arm-n800-machine.o
libqos-obj-y += tests/qtest/libqos/arm-raspi2-machine.o
libqos-obj-y += tests/qtest/libqos/arm-sabrelite-machine.o
libqos-obj-y += tests/qtest/libqos/arm-smdkc210-machine.o
libqos-obj-y += tests/qtest/libqos/arm-virt-machine.o
libqos-obj-y += tests/qtest/libqos/arm-xilinx-zynq-a9-machine.o
libqos-obj-y += tests/qtest/libqos/ppc64_pseries-machine.o
libqos-obj-y += tests/qtest/libqos/x86_64_pc-machine.o
# qos tests:
qos-test-obj-y += tests/qtest/qos-test.o
qos-test-obj-y += tests/qtest/ac97-test.o
qos-test-obj-y += tests/qtest/ds1338-test.o
qos-test-obj-y += tests/qtest/e1000-test.o
@ -234,7 +236,7 @@ check-unit-y += tests/test-qgraph$(EXESUF)
tests/test-qgraph$(EXESUF): tests/test-qgraph.o $(libqgraph-obj-y)
check-qtest-generic-y += qos-test
tests/qtest/qos-test$(EXESUF): $(qos-test-obj-y)
tests/qtest/qos-test$(EXESUF): $(qos-test-obj-y) $(libqos-obj-y)
# QTest dependencies:
tests/qtest/qmp-test$(EXESUF): tests/qtest/qmp-test.o


@ -0,0 +1,18 @@
QEMU_PROG_FUZZ=qemu-fuzz-$(TARGET_NAME)$(EXESUF)
fuzz-obj-y += tests/qtest/libqtest.o
fuzz-obj-y += $(libqos-obj-y)
fuzz-obj-y += tests/qtest/fuzz/fuzz.o # Fuzzer skeleton
fuzz-obj-y += tests/qtest/fuzz/fork_fuzz.o
fuzz-obj-y += tests/qtest/fuzz/qos_fuzz.o
# Targets
fuzz-obj-y += tests/qtest/fuzz/i440fx_fuzz.o
fuzz-obj-y += tests/qtest/fuzz/virtio_net_fuzz.o
fuzz-obj-y += tests/qtest/fuzz/virtio_scsi_fuzz.o
FUZZ_CFLAGS += -I$(SRC_PATH)/tests -I$(SRC_PATH)/tests/qtest
# Linker Script to force coverage-counters into known regions which we can mark
# shared
FUZZ_LDFLAGS += -Xlinker -T$(SRC_PATH)/tests/qtest/fuzz/fork_fuzz.ld


@ -0,0 +1,55 @@
/*
* Fork-based fuzzing helpers
*
* Copyright Red Hat Inc., 2019
*
* Authors:
* Alexander Bulekov <alxndr@bu.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include "fork_fuzz.h"
void counter_shm_init(void)
{
char *shm_path = g_strdup_printf("/qemu-fuzz-cntrs.%d", getpid());
int fd = shm_open(shm_path, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
g_free(shm_path);
if (fd == -1) {
perror("Error: ");
exit(1);
}
if (ftruncate(fd, &__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START) == -1) {
perror("Error: ");
exit(1);
}
/* Copy what's in the counter region to the shm.. */
void *rptr = mmap(NULL ,
&__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START,
PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
memcpy(rptr,
&__FUZZ_COUNTERS_START,
&__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START);
munmap(rptr, &__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START);
/* And map the shm over the counter region */
rptr = mmap(&__FUZZ_COUNTERS_START,
&__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START,
PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
close(fd);
if (!rptr) {
perror("Error: ");
exit(1);
}
}


@ -0,0 +1,23 @@
/*
* Fork-based fuzzing helpers
*
* Copyright Red Hat Inc., 2019
*
* Authors:
* Alexander Bulekov <alxndr@bu.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef FORK_FUZZ_H
#define FORK_FUZZ_H
extern uint8_t __FUZZ_COUNTERS_START;
extern uint8_t __FUZZ_COUNTERS_END;
void counter_shm_init(void);
#endif


@ -0,0 +1,37 @@
/* We use linker script modification to place all of the stuff that needs to
* persist across fuzzing runs into a contiguous section of memory. Then, it is
* easy to re-map the counter-related memory as shared.
*/
SECTIONS
{
.data.fuzz_start : ALIGN(4K)
{
__FUZZ_COUNTERS_START = .;
__start___sancov_cntrs = .;
*(_*sancov_cntrs);
__stop___sancov_cntrs = .;
/* Lowest stack counter */
*(__sancov_lowest_stack);
}
.data.fuzz_ordered :
{
/* Coverage counters. They're not necessary for fuzzing, but are useful
* for analyzing the fuzzing performance
*/
__start___llvm_prf_cnts = .;
*(*llvm_prf_cnts);
__stop___llvm_prf_cnts = .;
/* Internal Libfuzzer TracePC object which contains the ValueProfileMap */
FuzzerTracePC*(.bss*);
}
.data.fuzz_end : ALIGN(4K)
{
__FUZZ_COUNTERS_END = .;
}
}
/* Don't overwrite the SECTIONS in the default linker script. Instead insert the
* above into the default script */
INSERT AFTER .data;

tests/qtest/fuzz/fuzz.c (new file)

@ -0,0 +1,179 @@
/*
* fuzzing driver
*
* Copyright Red Hat Inc., 2019
*
* Authors:
* Alexander Bulekov <alxndr@bu.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#include "qemu/osdep.h"
#include <wordexp.h>
#include "sysemu/qtest.h"
#include "sysemu/runstate.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "tests/qtest/libqtest.h"
#include "tests/qtest/libqos/qgraph.h"
#include "fuzz.h"
#define MAX_EVENT_LOOPS 10
typedef struct FuzzTargetState {
FuzzTarget *target;
QSLIST_ENTRY(FuzzTargetState) target_list;
} FuzzTargetState;
typedef QSLIST_HEAD(, FuzzTargetState) FuzzTargetList;
static const char *fuzz_arch = TARGET_NAME;
static FuzzTargetList *fuzz_target_list;
static FuzzTarget *fuzz_target;
static QTestState *fuzz_qts;
void flush_events(QTestState *s)
{
int i = MAX_EVENT_LOOPS;
while (g_main_context_pending(NULL) && i-- > 0) {
main_loop_wait(false);
}
}
static QTestState *qtest_setup(void)
{
qtest_server_set_send_handler(&qtest_client_inproc_recv, &fuzz_qts);
return qtest_inproc_init(&fuzz_qts, false, fuzz_arch,
&qtest_server_inproc_recv);
}
void fuzz_add_target(const FuzzTarget *target)
{
FuzzTargetState *tmp;
FuzzTargetState *target_state;
if (!fuzz_target_list) {
fuzz_target_list = g_new0(FuzzTargetList, 1);
}
QSLIST_FOREACH(tmp, fuzz_target_list, target_list) {
if (g_strcmp0(tmp->target->name, target->name) == 0) {
fprintf(stderr, "Error: Fuzz target name %s already in use\n",
target->name);
abort();
}
}
target_state = g_new0(FuzzTargetState, 1);
target_state->target = g_new0(FuzzTarget, 1);
*(target_state->target) = *target;
QSLIST_INSERT_HEAD(fuzz_target_list, target_state, target_list);
}
static void usage(char *path)
{
printf("Usage: %s --fuzz-target=FUZZ_TARGET [LIBFUZZER ARGUMENTS]\n", path);
printf("where FUZZ_TARGET is one of:\n");
FuzzTargetState *tmp;
if (!fuzz_target_list) {
fprintf(stderr, "Fuzz target list not initialized\n");
abort();
}
QSLIST_FOREACH(tmp, fuzz_target_list, target_list) {
printf(" * %s : %s\n", tmp->target->name,
tmp->target->description);
}
exit(0);
}
static FuzzTarget *fuzz_get_target(char* name)
{
FuzzTargetState *tmp;
if (!fuzz_target_list) {
fprintf(stderr, "Fuzz target list not initialized\n");
abort();
}
QSLIST_FOREACH(tmp, fuzz_target_list, target_list) {
if (strcmp(tmp->target->name, name) == 0) {
return tmp->target;
}
}
return NULL;
}
/* Executed for each fuzzing-input */
int LLVMFuzzerTestOneInput(const unsigned char *Data, size_t Size)
{
/*
* Do the pre-fuzz-initialization before the first fuzzing iteration,
* instead of before the actual fuzz loop. This is needed since libfuzzer
* may fork off additional workers, prior to the fuzzing loop, and if
* pre_fuzz() sets up e.g. shared memory, this should be done for the
* individual worker processes
*/
static int pre_fuzz_done;
if (!pre_fuzz_done && fuzz_target->pre_fuzz) {
fuzz_target->pre_fuzz(fuzz_qts);
pre_fuzz_done = true;
}
fuzz_target->fuzz(fuzz_qts, Data, Size);
return 0;
}
/* Executed once, prior to fuzzing */
int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp)
{
char *target_name;
/* Initialize qgraph and modules */
qos_graph_init();
module_call_init(MODULE_INIT_FUZZ_TARGET);
module_call_init(MODULE_INIT_QOM);
module_call_init(MODULE_INIT_LIBQOS);
if (*argc <= 1) {
usage(**argv);
}
/* Identify the fuzz target */
target_name = (*argv)[1];
if (!strstr(target_name, "--fuzz-target=")) {
usage(**argv);
}
target_name += strlen("--fuzz-target=");
fuzz_target = fuzz_get_target(target_name);
if (!fuzz_target) {
usage(**argv);
}
fuzz_qts = qtest_setup();
if (fuzz_target->pre_vm_init) {
fuzz_target->pre_vm_init();
}
/* Run QEMU's softmmu main with the fuzz-target dependent arguments */
const char *init_cmdline = fuzz_target->get_init_cmdline(fuzz_target);
/* Split the runcmd into an argv and argc */
wordexp_t result;
wordexp(init_cmdline, &result, 0);
qemu_init(result.we_wordc, result.we_wordv, NULL);
return 0;
}

tests/qtest/fuzz/fuzz.h (new file)

@ -0,0 +1,95 @@
/*
* fuzzing driver
*
* Copyright Red Hat Inc., 2019
*
* Authors:
* Alexander Bulekov <alxndr@bu.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef FUZZER_H_
#define FUZZER_H_
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "tests/qtest/libqtest.h"
/**
* A libfuzzer fuzzing target
*
* The QEMU fuzzing binary is built with all available targets, each
* with a unique @name that can be specified on the command-line to
* select which target should run.
*
* A target must implement ->fuzz() to process a random input. If QEMU
* crashes in ->fuzz() then libfuzzer will record a failure.
*
* Fuzzing targets are registered with fuzz_add_target():
*
* static const FuzzTarget fuzz_target = {
* .name = "my-device-fifo",
* .description = "Fuzz the FIFO buffer registers of my-device",
* ...
* };
*
* static void register_fuzz_target(void)
* {
* fuzz_add_target(&fuzz_target);
* }
* fuzz_target_init(register_fuzz_target);
*/
typedef struct FuzzTarget {
const char *name; /* target identifier (passed to --fuzz-target=)*/
const char *description; /* help text */
/*
* returns the arg-list that is passed to qemu/softmmu init()
* Cannot be NULL
*/
const char* (*get_init_cmdline)(struct FuzzTarget *);
/*
* will run once, prior to running qemu/softmmu init.
* eg: set up shared-memory for communication with the child-process
* Can be NULL
*/
void(*pre_vm_init)(void);
/*
* will run once, after QEMU has been initialized, prior to the fuzz-loop.
* eg: detect the memory map
* Can be NULL
*/
void(*pre_fuzz)(QTestState *);
/*
* accepts and executes an input from libfuzzer. This is repeatedly
* executed during the fuzzing loop. It should handle setup, input
* execution and cleanup.
* Cannot be NULL
*/
void(*fuzz)(QTestState *, const unsigned char *, size_t);
} FuzzTarget;
void flush_events(QTestState *);
void reboot(QTestState *);
/*
* makes a copy of *target and adds it to the target-list.
* i.e. fine to set up target on the caller's stack
*/
void fuzz_add_target(const FuzzTarget *target);
int LLVMFuzzerTestOneInput(const unsigned char *Data, size_t Size);
int LLVMFuzzerInitialize(int *argc, char ***argv, char ***envp);
#endif


@ -0,0 +1,193 @@
/*
* I440FX Fuzzing Target
*
* Copyright Red Hat Inc., 2019
*
* Authors:
* Alexander Bulekov <alxndr@bu.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "tests/qtest/libqtest.h"
#include "tests/qtest/libqos/pci.h"
#include "tests/qtest/libqos/pci-pc.h"
#include "fuzz.h"
#include "fuzz/qos_fuzz.h"
#include "fuzz/fork_fuzz.h"
#define I440FX_PCI_HOST_BRIDGE_CFG 0xcf8
#define I440FX_PCI_HOST_BRIDGE_DATA 0xcfc
/*
* the input to the fuzzing functions below is a buffer of random bytes. we
* want to convert these bytes into a sequence of qtest or qos calls. to do
* this we define some opcodes:
*/
enum action_id {
WRITEB,
WRITEW,
WRITEL,
READB,
READW,
READL,
ACTION_MAX
};
static void i440fx_fuzz_qtest(QTestState *s,
const unsigned char *Data, size_t Size) {
/*
* loop over the Data, breaking it up into actions. each action has an
* opcode, address offset and value
*/
typedef struct QTestFuzzAction {
uint8_t opcode;
uint8_t addr;
uint32_t value;
} QTestFuzzAction;
QTestFuzzAction a;
while (Size >= sizeof(a)) {
/* make a copy of the action so we can normalize the values in-place */
memcpy(&a, Data, sizeof(a));
/* select between two i440fx Port IO addresses */
uint16_t addr = a.addr % 2 ? I440FX_PCI_HOST_BRIDGE_CFG :
I440FX_PCI_HOST_BRIDGE_DATA;
switch (a.opcode % ACTION_MAX) {
case WRITEB:
qtest_outb(s, addr, (uint8_t)a.value);
break;
case WRITEW:
qtest_outw(s, addr, (uint16_t)a.value);
break;
case WRITEL:
qtest_outl(s, addr, (uint32_t)a.value);
break;
case READB:
qtest_inb(s, addr);
break;
case READW:
qtest_inw(s, addr);
break;
case READL:
qtest_inl(s, addr);
break;
}
/* Move to the next operation */
Size -= sizeof(a);
Data += sizeof(a);
}
flush_events(s);
}
static void i440fx_fuzz_qos(QTestState *s,
const unsigned char *Data, size_t Size) {
/*
* Same as i440fx_fuzz_qtest, but using QOS. devfn is incorporated into the
* value written over Port IO
*/
typedef struct QOSFuzzAction {
uint8_t opcode;
uint8_t offset;
int devfn;
uint32_t value;
} QOSFuzzAction;
static QPCIBus *bus;
if (!bus) {
bus = qpci_new_pc(s, fuzz_qos_alloc);
}
QOSFuzzAction a;
while (Size >= sizeof(a)) {
memcpy(&a, Data, sizeof(a));
switch (a.opcode % ACTION_MAX) {
case WRITEB:
bus->config_writeb(bus, a.devfn, a.offset, (uint8_t)a.value);
break;
case WRITEW:
bus->config_writew(bus, a.devfn, a.offset, (uint16_t)a.value);
break;
case WRITEL:
bus->config_writel(bus, a.devfn, a.offset, (uint32_t)a.value);
break;
case READB:
bus->config_readb(bus, a.devfn, a.offset);
break;
case READW:
bus->config_readw(bus, a.devfn, a.offset);
break;
case READL:
bus->config_readl(bus, a.devfn, a.offset);
break;
}
Size -= sizeof(a);
Data += sizeof(a);
}
flush_events(s);
}
static void i440fx_fuzz_qos_fork(QTestState *s,
const unsigned char *Data, size_t Size) {
if (fork() == 0) {
i440fx_fuzz_qos(s, Data, Size);
_Exit(0);
} else {
wait(NULL);
}
}
static const char *i440fx_qtest_argv = TARGET_NAME " -machine accel=qtest"
" -m 0 -display none";
static const char *i440fx_argv(FuzzTarget *t)
{
return i440fx_qtest_argv;
}
static void fork_init(void)
{
counter_shm_init();
}
static void register_pci_fuzz_targets(void)
{
/* Uses simple qtest commands and reboots to reset state */
fuzz_add_target(&(FuzzTarget){
.name = "i440fx-qtest-reboot-fuzz",
.description = "Fuzz the i440fx using raw qtest commands and "
"rebooting after each run",
.get_init_cmdline = i440fx_argv,
.fuzz = i440fx_fuzz_qtest});
/* Uses libqos and forks to prevent state leakage */
fuzz_add_qos_target(&(FuzzTarget){
.name = "i440fx-qos-fork-fuzz",
.description = "Fuzz the i440fx using libqos, forking to "
"reset state after each run",
.pre_vm_init = &fork_init,
.fuzz = i440fx_fuzz_qos_fork,},
"i440FX-pcihost",
&(QOSGraphTestOptions){}
);
/*
* Uses libqos. Doesn't do anything to reset state. Note that if we were to
* reboot after each run, we would also have to redo the qos-related
* initialization (qos_init_path)
*/
fuzz_add_qos_target(&(FuzzTarget){
.name = "i440fx-qos-noreset-fuzz",
.description = "Fuzz the i440fx using libqos, without "
"resetting state between runs",
.fuzz = i440fx_fuzz_qos,},
"i440FX-pcihost",
&(QOSGraphTestOptions){}
);
}
fuzz_target_init(register_pci_fuzz_targets);

tests/qtest/fuzz/qos_fuzz.c (new file)

@ -0,0 +1,234 @@
/*
* QOS-assisted fuzzing helpers
*
* Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
#include "sysemu/sysemu.h"
#include "qemu/main-loop.h"
#include "tests/qtest/libqtest.h"
#include "tests/qtest/libqos/malloc.h"
#include "tests/qtest/libqos/qgraph.h"
#include "tests/qtest/libqos/qgraph_internal.h"
#include "tests/qtest/libqos/qos_external.h"
#include "fuzz.h"
#include "qos_fuzz.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-commands-qom.h"
#include "qapi/qmp/qlist.h"
void *fuzz_qos_obj;
QGuestAllocator *fuzz_qos_alloc;
static const char *fuzz_target_name;
static char **fuzz_path_vec;
/*
* Replaced the qmp commands with direct qmp_marshal calls.
* Probably there is a better way to do this
*/
static void qos_set_machines_devices_available(void)
{
QDict *req = qdict_new();
QObject *response;
QDict *args = qdict_new();
QList *lst;
Error *err = NULL;
qmp_marshal_query_machines(NULL, &response, &err);
assert(!err);
lst = qobject_to(QList, response);
apply_to_qlist(lst, true);
qobject_unref(response);
qdict_put_str(req, "execute", "qom-list-types");
qdict_put_str(args, "implements", "device");
qdict_put_bool(args, "abstract", true);
qdict_put_obj(req, "arguments", (QObject *) args);
qmp_marshal_qom_list_types(args, &response, &err);
assert(!err);
lst = qobject_to(QList, response);
apply_to_qlist(lst, false);
qobject_unref(response);
qobject_unref(req);
}
static char **current_path;
void *qos_allocate_objects(QTestState *qts, QGuestAllocator **p_alloc)
{
return allocate_objects(qts, current_path + 1, p_alloc);
}
static const char *qos_build_main_args(void)
{
char **path = fuzz_path_vec;
QOSGraphNode *test_node;
GString *cmd_line = g_string_new(path[0]);
void *test_arg;
if (!path) {
fprintf(stderr, "QOS Path not found\n");
abort();
}
/* Before test */
current_path = path;
test_node = qos_graph_get_node(path[(g_strv_length(path) - 1)]);
test_arg = test_node->u.test.arg;
if (test_node->u.test.before) {
test_arg = test_node->u.test.before(cmd_line, test_arg);
}
/* Prepend the arguments that we need */
g_string_prepend(cmd_line,
TARGET_NAME " -display none -machine accel=qtest -m 64 ");
return cmd_line->str;
}
/*
* This function is largely a copy of qos-test.c:walk_path. Since walk_path
* is itself a callback, it's a little annoying to add another argument/layer of
* indirection
*/
static void walk_path(QOSGraphNode *orig_path, int len)
{
QOSGraphNode *path;
QOSGraphEdge *edge;
/* etype set to QEDGE_CONSUMED_BY so that machine can add to the command line */
QOSEdgeType etype = QEDGE_CONSUMED_BY;
/* twice QOS_PATH_MAX_ELEMENT_SIZE since each edge can have its arg */
char **path_vec = g_new0(char *, (QOS_PATH_MAX_ELEMENT_SIZE * 2));
int path_vec_size = 0;
char *after_cmd, *before_cmd, *after_device;
GString *after_device_str = g_string_new("");
char *node_name = orig_path->name, *path_str;
GString *cmd_line = g_string_new("");
GString *cmd_line2 = g_string_new("");
path = qos_graph_get_node(node_name); /* root */
node_name = qos_graph_edge_get_dest(path->path_edge); /* machine name */
path_vec[path_vec_size++] = node_name;
path_vec[path_vec_size++] = qos_get_machine_type(node_name);
for (;;) {
path = qos_graph_get_node(node_name);
if (!path->path_edge) {
break;
}
node_name = qos_graph_edge_get_dest(path->path_edge);
/* append node command line + previous edge command line */
if (path->command_line && etype == QEDGE_CONSUMED_BY) {
g_string_append(cmd_line, path->command_line);
g_string_append(cmd_line, after_device_str->str);
g_string_truncate(after_device_str, 0);
}
path_vec[path_vec_size++] = qos_graph_edge_get_name(path->path_edge);
/* detect if edge has command line args */
after_cmd = qos_graph_edge_get_after_cmd_line(path->path_edge);
after_device = qos_graph_edge_get_extra_device_opts(path->path_edge);
before_cmd = qos_graph_edge_get_before_cmd_line(path->path_edge);
edge = qos_graph_get_edge(path->name, node_name);
etype = qos_graph_edge_get_type(edge);
if (before_cmd) {
g_string_append(cmd_line, before_cmd);
}
if (after_cmd) {
g_string_append(cmd_line2, after_cmd);
}
if (after_device) {
g_string_append(after_device_str, after_device);
}
}
path_vec[path_vec_size++] = NULL;
g_string_append(cmd_line, after_device_str->str);
g_string_free(after_device_str, true);
g_string_append(cmd_line, cmd_line2->str);
g_string_free(cmd_line2, true);
/*
* here position 0 has <arch>/<machine>, position 1 has <machine>.
* The path must not have the <arch>, qtest_add_data_func adds it.
*/
path_str = g_strjoinv("/", path_vec + 1);
/* Check that this is the test we care about: */
char *test_name = strrchr(path_str, '/') + 1;
if (strcmp(test_name, fuzz_target_name) == 0) {
/*
* put arch/machine in position 1 so run_one_test can do its work
* and add the command line at position 0.
*/
path_vec[1] = path_vec[0];
path_vec[0] = g_string_free(cmd_line, false);
fuzz_path_vec = path_vec;
} else {
g_free(path_vec);
}
g_free(path_str);
}
static const char *qos_get_cmdline(FuzzTarget *t)
{
/*
* Set a global variable that we use to identify the qos_path for our
* fuzz_target
*/
fuzz_target_name = t->name;
qos_set_machines_devices_available();
qos_graph_foreach_test_path(walk_path);
return qos_build_main_args();
}
void fuzz_add_qos_target(
FuzzTarget *fuzz_opts,
const char *interface,
QOSGraphTestOptions *opts
)
{
qos_add_test(fuzz_opts->name, interface, NULL, opts);
fuzz_opts->get_init_cmdline = qos_get_cmdline;
fuzz_add_target(fuzz_opts);
}
void qos_init_path(QTestState *s)
{
fuzz_qos_obj = qos_allocate_objects(s , &fuzz_qos_alloc);
}


@ -0,0 +1,33 @@
/*
* QOS-assisted fuzzing helpers
*
* Copyright Red Hat Inc., 2019
*
* Authors:
* Alexander Bulekov <alxndr@bu.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#ifndef _QOS_FUZZ_H_
#define _QOS_FUZZ_H_
#include "tests/qtest/fuzz/fuzz.h"
#include "tests/qtest/libqos/qgraph.h"
int qos_fuzz(const unsigned char *Data, size_t Size);
void qos_setup(void);
extern void *fuzz_qos_obj;
extern QGuestAllocator *fuzz_qos_alloc;
void fuzz_add_qos_target(
FuzzTarget *fuzz_opts,
const char *interface,
QOSGraphTestOptions *opts
);
void qos_init_path(QTestState *);
#endif


@ -0,0 +1,198 @@
/*
* virtio-net Fuzzing Target
*
* Copyright Red Hat Inc., 2019
*
* Authors:
* Alexander Bulekov <alxndr@bu.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "standard-headers/linux/virtio_config.h"
#include "tests/qtest/libqtest.h"
#include "tests/qtest/libqos/virtio-net.h"
#include "fuzz.h"
#include "fork_fuzz.h"
#include "qos_fuzz.h"
#define QVIRTIO_NET_TIMEOUT_US (30 * 1000 * 1000)
#define QVIRTIO_RX_VQ 0
#define QVIRTIO_TX_VQ 1
#define QVIRTIO_CTRL_VQ 2
static int sockfds[2];
static bool sockfds_initialized;
static void virtio_net_fuzz_multi(QTestState *s,
const unsigned char *Data, size_t Size, bool check_used)
{
typedef struct vq_action {
uint8_t queue;
uint8_t length;
uint8_t write;
uint8_t next;
uint8_t rx;
} vq_action;
uint32_t free_head = 0;
QGuestAllocator *t_alloc = fuzz_qos_alloc;
QVirtioNet *net_if = fuzz_qos_obj;
QVirtioDevice *dev = net_if->vdev;
QVirtQueue *q;
vq_action vqa;
while (Size >= sizeof(vqa)) {
memcpy(&vqa, Data, sizeof(vqa));
Data += sizeof(vqa);
Size -= sizeof(vqa);
q = net_if->queues[vqa.queue % 3];
vqa.length = vqa.length >= Size ? Size : vqa.length;
/*
* Only attempt to write incoming packets when using the socket
* backend. Otherwise, always place the input on a virtqueue.
*/
if (vqa.rx && sockfds_initialized) {
write(sockfds[0], Data, vqa.length);
} else {
vqa.rx = 0;
uint64_t req_addr = guest_alloc(t_alloc, vqa.length);
/*
* If checking used ring, ensure that the fuzzer doesn't trigger
* trivial assertion failure on a zero-sized buffer
*/
qtest_memwrite(s, req_addr, Data, vqa.length);
free_head = qvirtqueue_add(s, q, req_addr, vqa.length,
vqa.write, vqa.next);
qvirtqueue_add(s, q, req_addr, vqa.length, vqa.write , vqa.next);
qvirtqueue_kick(s, dev, q, free_head);
}
/* Run the main loop */
qtest_clock_step(s, 100);
flush_events(s);
/* Wait on used descriptors */
if (check_used && !vqa.rx) {
gint64 start_time = g_get_monotonic_time();
/*
* normally, we could just use qvirtio_wait_used_elem, but since we
* must manually run the main-loop for all the bhs to run, we use
* this hack with flush_events(), to run the main_loop
*/
while (!vqa.rx && q != net_if->queues[QVIRTIO_RX_VQ]) {
uint32_t got_desc_idx;
/* Input led to a virtio_error */
if (dev->bus->get_status(dev) & VIRTIO_CONFIG_S_NEEDS_RESET) {
break;
}
if (dev->bus->get_queue_isr_status(dev, q) &&
qvirtqueue_get_buf(s, q, &got_desc_idx, NULL)) {
g_assert_cmpint(got_desc_idx, ==, free_head);
break;
}
g_assert(g_get_monotonic_time() - start_time
<= QVIRTIO_NET_TIMEOUT_US);
/* Run the main loop */
qtest_clock_step(s, 100);
flush_events(s);
}
}
Data += vqa.length;
Size -= vqa.length;
}
}
static void virtio_net_fork_fuzz(QTestState *s,
const unsigned char *Data, size_t Size)
{
if (fork() == 0) {
virtio_net_fuzz_multi(s, Data, Size, false);
flush_events(s);
_Exit(0);
} else {
wait(NULL);
}
}
static void virtio_net_fork_fuzz_check_used(QTestState *s,
const unsigned char *Data, size_t Size)
{
if (fork() == 0) {
virtio_net_fuzz_multi(s, Data, Size, true);
flush_events(s);
_Exit(0);
} else {
wait(NULL);
}
}
static void virtio_net_pre_fuzz(QTestState *s)
{
qos_init_path(s);
counter_shm_init();
}
static void *virtio_net_test_setup_socket(GString *cmd_line, void *arg)
{
int ret = socketpair(PF_UNIX, SOCK_STREAM, 0, sockfds);
g_assert_cmpint(ret, !=, -1);
fcntl(sockfds[0], F_SETFL, O_NONBLOCK);
sockfds_initialized = true;
g_string_append_printf(cmd_line, " -netdev socket,fd=%d,id=hs0 ",
sockfds[1]);
return arg;
}
static void *virtio_net_test_setup_user(GString *cmd_line, void *arg)
{
g_string_append_printf(cmd_line, " -netdev user,id=hs0 ");
return arg;
}
static void register_virtio_net_fuzz_targets(void)
{
fuzz_add_qos_target(&(FuzzTarget){
.name = "virtio-net-socket",
.description = "Fuzz the virtio-net virtual queues. Fuzz incoming "
"traffic using the socket backend",
.pre_fuzz = &virtio_net_pre_fuzz,
.fuzz = virtio_net_fork_fuzz,},
"virtio-net",
&(QOSGraphTestOptions){.before = virtio_net_test_setup_socket}
);
fuzz_add_qos_target(&(FuzzTarget){
.name = "virtio-net-socket-check-used",
.description = "Fuzz the virtio-net virtual queues. Wait for the "
"descriptors to be used. Timeout may indicate improperly handled "
"input",
.pre_fuzz = &virtio_net_pre_fuzz,
.fuzz = virtio_net_fork_fuzz_check_used,},
"virtio-net",
&(QOSGraphTestOptions){.before = virtio_net_test_setup_socket}
);
fuzz_add_qos_target(&(FuzzTarget){
.name = "virtio-net-slirp",
.description = "Fuzz the virtio-net virtual queues with the slirp "
" backend. Warning: May result in network traffic emitted from the "
" process. Run in an isolated network environment.",
.pre_fuzz = &virtio_net_pre_fuzz,
.fuzz = virtio_net_fork_fuzz,},
"virtio-net",
&(QOSGraphTestOptions){.before = virtio_net_test_setup_user}
);
}
fuzz_target_init(register_virtio_net_fuzz_targets);

View File

@ -0,0 +1,213 @@
/*
* virtio-scsi Fuzzing Target
*
* Copyright Red Hat Inc., 2019
*
* Authors:
* Alexander Bulekov <alxndr@bu.edu>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*/
#include "qemu/osdep.h"
#include "tests/qtest/libqtest.h"
#include "libqos/virtio-scsi.h"
#include "libqos/virtio.h"
#include "libqos/virtio-pci.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_pci.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "fuzz.h"
#include "fork_fuzz.h"
#include "qos_fuzz.h"
#define PCI_SLOT 0x02
#define PCI_FN 0x00
#define QVIRTIO_SCSI_TIMEOUT_US (1 * 1000 * 1000)
#define MAX_NUM_QUEUES 64
/* Based on tests/virtio-scsi-test.c */
typedef struct {
int num_queues;
QVirtQueue *vq[MAX_NUM_QUEUES + 2];
} QVirtioSCSIQueues;
static QVirtioSCSIQueues *qvirtio_scsi_init(QVirtioDevice *dev, uint64_t mask)
{
QVirtioSCSIQueues *vs;
uint64_t feat;
int i;
vs = g_new0(QVirtioSCSIQueues, 1);
feat = qvirtio_get_features(dev);
if (mask) {
feat &= ~QVIRTIO_F_BAD_FEATURE | mask;
} else {
feat &= ~(QVIRTIO_F_BAD_FEATURE | (1ull << VIRTIO_RING_F_EVENT_IDX));
}
qvirtio_set_features(dev, feat);
vs->num_queues = qvirtio_config_readl(dev, 0);
for (i = 0; i < vs->num_queues + 2; i++) {
vs->vq[i] = qvirtqueue_setup(dev, fuzz_qos_alloc, i);
}
qvirtio_set_driver_ok(dev);
return vs;
}
static void virtio_scsi_fuzz(QTestState *s, QVirtioSCSIQueues* queues,
const unsigned char *Data, size_t Size)
{
/*
* Data is a sequence of random bytes. We split them up into "actions",
* followed by data:
* [vqa][dddddddd][vqa][dddd][vqa][dddddddddddd] ...
* The length of the data is specified by the preceding vqa.length
*/
typedef struct vq_action {
uint8_t queue;
uint8_t length;
uint8_t write;
uint8_t next;
uint8_t kick;
} vq_action;
/* Keep track of the free head for each queue we interact with */
bool vq_touched[MAX_NUM_QUEUES + 2] = {0};
uint32_t free_head[MAX_NUM_QUEUES + 2];
QGuestAllocator *t_alloc = fuzz_qos_alloc;
QVirtioSCSI *scsi = fuzz_qos_obj;
QVirtioDevice *dev = scsi->vdev;
QVirtQueue *q;
vq_action vqa;
while (Size >= sizeof(vqa)) {
/* Copy the action, so we can normalize length, queue and flags */
memcpy(&vqa, Data, sizeof(vqa));
Data += sizeof(vqa);
Size -= sizeof(vqa);
vqa.queue = vqa.queue % queues->num_queues;
/* Cap length at the number of remaining bytes in data */
vqa.length = vqa.length >= Size ? Size : vqa.length;
vqa.write = vqa.write & 1;
vqa.next = vqa.next & 1;
vqa.kick = vqa.kick & 1;
q = queues->vq[vqa.queue];
/* Copy the data into ram, and place it on the virtqueue */
uint64_t req_addr = guest_alloc(t_alloc, vqa.length);
qtest_memwrite(s, req_addr, Data, vqa.length);
if (vq_touched[vqa.queue] == 0) {
vq_touched[vqa.queue] = 1;
free_head[vqa.queue] = qvirtqueue_add(s, q, req_addr, vqa.length,
vqa.write, vqa.next);
} else {
qvirtqueue_add(s, q, req_addr, vqa.length, vqa.write , vqa.next);
}
if (vqa.kick) {
qvirtqueue_kick(s, dev, q, free_head[vqa.queue]);
free_head[vqa.queue] = 0;
}
Data += vqa.length;
Size -= vqa.length;
}
/* In the end, kick each queue we interacted with */
for (int i = 0; i < MAX_NUM_QUEUES + 2; i++) {
if (vq_touched[i]) {
qvirtqueue_kick(s, dev, queues->vq[i], free_head[i]);
}
}
}
static void virtio_scsi_fork_fuzz(QTestState *s,
const unsigned char *Data, size_t Size)
{
QVirtioSCSI *scsi = fuzz_qos_obj;
static QVirtioSCSIQueues *queues;
if (!queues) {
queues = qvirtio_scsi_init(scsi->vdev, 0);
}
if (fork() == 0) {
virtio_scsi_fuzz(s, queues, Data, Size);
flush_events(s);
_Exit(0);
} else {
wait(NULL);
}
}
static void virtio_scsi_with_flag_fuzz(QTestState *s,
const unsigned char *Data, size_t Size)
{
QVirtioSCSI *scsi = fuzz_qos_obj;
static QVirtioSCSIQueues *queues;
if (fork() == 0) {
if (Size >= sizeof(uint64_t)) {
queues = qvirtio_scsi_init(scsi->vdev, *(uint64_t *)Data);
virtio_scsi_fuzz(s, queues,
Data + sizeof(uint64_t), Size - sizeof(uint64_t));
flush_events(s);
}
_Exit(0);
} else {
wait(NULL);
}
}
static void virtio_scsi_pre_fuzz(QTestState *s)
{
qos_init_path(s);
counter_shm_init();
}
static void *virtio_scsi_test_setup(GString *cmd_line, void *arg)
{
g_string_append(cmd_line,
" -drive file=blkdebug::null-co://,"
"file.image.read-zeroes=on,"
"if=none,id=dr1,format=raw,file.align=4k "
"-device scsi-hd,drive=dr1,lun=0,scsi-id=1");
return arg;
}
static void register_virtio_scsi_fuzz_targets(void)
{
fuzz_add_qos_target(&(FuzzTarget){
.name = "virtio-scsi-fuzz",
.description = "Fuzz the virtio-scsi virtual queues, forking"
"for each fuzz run",
.pre_vm_init = &counter_shm_init,
.pre_fuzz = &virtio_scsi_pre_fuzz,
.fuzz = virtio_scsi_fork_fuzz,},
"virtio-scsi",
&(QOSGraphTestOptions){.before = virtio_scsi_test_setup}
);
fuzz_add_qos_target(&(FuzzTarget){
.name = "virtio-scsi-flags-fuzz",
.description = "Fuzz the virtio-scsi virtual queues, forking"
"for each fuzz run (also fuzzes the virtio flags)",
.pre_vm_init = &counter_shm_init,
.pre_fuzz = &virtio_scsi_pre_fuzz,
.fuzz = virtio_scsi_with_flag_fuzz,},
"virtio-scsi",
&(QOSGraphTestOptions){.before = virtio_scsi_test_setup}
);
}
fuzz_target_init(register_virtio_scsi_fuzz_targets);
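
Both virtqueue fuzzers above consume their input the same way: a fixed-size vq_action header is peeled off, its fields are clamped to valid ranges, and the following vqa.length bytes become the buffer placed on the virtqueue (or, for virtio-net rx, written into the socket backend). The loop below is a standalone sketch of that decoding, mirroring the virtio-scsi variant of the header (virtio-net swaps the kick field for an rx flag); the printf stands in for the virtqueue operations.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the vq_action header used by the virtqueue fuzz targets above. */
typedef struct vq_action {
    uint8_t queue;
    uint8_t length;
    uint8_t write;
    uint8_t next;
    uint8_t kick;
} vq_action;

/* Decode a fuzz input into (action, payload) pairs; assumes 3 usable queues. */
static void decode(const unsigned char *Data, size_t Size)
{
    vq_action vqa;

    while (Size >= sizeof(vqa)) {
        memcpy(&vqa, Data, sizeof(vqa));
        Data += sizeof(vqa);
        Size -= sizeof(vqa);

        vqa.queue %= 3;                                      /* clamp to valid queues */
        vqa.length = vqa.length >= Size ? Size : vqa.length; /* cap at remaining bytes */

        printf("queue=%d len=%d write=%d next=%d kick=%d\n",
               vqa.queue, vqa.length, vqa.write & 1, vqa.next & 1, vqa.kick & 1);

        Data += vqa.length;   /* skip the payload that would go on the virtqueue */
        Size -= vqa.length;
    }
}

int main(void)
{
    /* 5-byte header (queue=1, length=4, write=1, next=0, kick=1) + 4 payload bytes */
    const unsigned char input[] = { 1, 4, 1, 0, 1, 0xde, 0xad, 0xbe, 0xef };
    decode(input, sizeof(input));
    return 0;
}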

View File

@ -10,12 +10,12 @@
#include "libqos/i2c.h"
#include "libqtest.h"
void i2c_send(QI2CDevice *i2cdev, const uint8_t *buf, uint16_t len)
void qi2c_send(QI2CDevice *i2cdev, const uint8_t *buf, uint16_t len)
{
i2cdev->bus->send(i2cdev->bus, i2cdev->addr, buf, len);
}
void i2c_recv(QI2CDevice *i2cdev, uint8_t *buf, uint16_t len)
void qi2c_recv(QI2CDevice *i2cdev, uint8_t *buf, uint16_t len)
{
i2cdev->bus->recv(i2cdev->bus, i2cdev->addr, buf, len);
}
@ -23,8 +23,8 @@ void i2c_recv(QI2CDevice *i2cdev, uint8_t *buf, uint16_t len)
void i2c_read_block(QI2CDevice *i2cdev, uint8_t reg,
uint8_t *buf, uint16_t len)
{
i2c_send(i2cdev, &reg, 1);
i2c_recv(i2cdev, buf, len);
qi2c_send(i2cdev, &reg, 1);
qi2c_recv(i2cdev, buf, len);
}
void i2c_write_block(QI2CDevice *i2cdev, uint8_t reg,
@ -33,7 +33,7 @@ void i2c_write_block(QI2CDevice *i2cdev, uint8_t reg,
uint8_t *cmd = g_malloc(len + 1);
cmd[0] = reg;
memcpy(&cmd[1], buf, len);
i2c_send(i2cdev, cmd, len + 1);
qi2c_send(i2cdev, cmd, len + 1);
g_free(cmd);
}

View File

@ -47,8 +47,8 @@ struct QI2CDevice {
void *i2c_device_create(void *i2c_bus, QGuestAllocator *alloc, void *addr);
void add_qi2c_address(QOSGraphEdgeOptions *opts, QI2CAddress *addr);
void i2c_send(QI2CDevice *dev, const uint8_t *buf, uint16_t len);
void i2c_recv(QI2CDevice *dev, uint8_t *buf, uint16_t len);
void qi2c_send(QI2CDevice *dev, const uint8_t *buf, uint16_t len);
void qi2c_recv(QI2CDevice *dev, uint8_t *buf, uint16_t len);
void i2c_read_block(QI2CDevice *dev, uint8_t reg,
uint8_t *buf, uint16_t len);
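
The rename above is mechanical; the helpers keep their behaviour and callers only pick up the qi2c_ prefix (presumably chosen so the libqos symbols cannot collide with QEMU's own i2c functions once libqos is linked into the fuzzer binary). A small usage sketch, with a hypothetical register address, of selecting a register and reading one byte back:

/* Select a (hypothetical) register, then read its current value. */
static uint8_t example_read_reg(QI2CDevice *i2cdev, uint8_t reg)
{
    uint8_t val;

    qi2c_send(i2cdev, &reg, 1);   /* write the register address */
    qi2c_recv(i2cdev, &val, 1);   /* read one byte back */
    return val;
}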

View File

@ -0,0 +1,168 @@
/*
* libqos driver framework
*
* Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#include "qemu/osdep.h"
#include <getopt.h>
#include "libqtest.h"
#include "qapi/qmp/qdict.h"
#include "qapi/qmp/qbool.h"
#include "qapi/qmp/qstring.h"
#include "qemu/module.h"
#include "qapi/qmp/qlist.h"
#include "libqos/malloc.h"
#include "libqos/qgraph.h"
#include "libqos/qgraph_internal.h"
#include "libqos/qos_external.h"
void apply_to_node(const char *name, bool is_machine, bool is_abstract)
{
char *machine_name = NULL;
if (is_machine) {
const char *arch = qtest_get_arch();
machine_name = g_strconcat(arch, "/", name, NULL);
name = machine_name;
}
qos_graph_node_set_availability(name, true);
if (is_abstract) {
qos_delete_cmd_line(name);
}
g_free(machine_name);
}
/**
* apply_to_qlist(): queries QEMU via QMP for the list of available
* machines or devices, and marks the corresponding graph nodes as
* available. When a node is found, all the nodes it produces and
* contains are marked available as well.
*
* See qos_graph_node_set_availability() for more info
*/
void apply_to_qlist(QList *list, bool is_machine)
{
const QListEntry *p;
const char *name;
bool abstract;
QDict *minfo;
QObject *qobj;
QString *qstr;
QBool *qbool;
for (p = qlist_first(list); p; p = qlist_next(p)) {
minfo = qobject_to(QDict, qlist_entry_obj(p));
qobj = qdict_get(minfo, "name");
qstr = qobject_to(QString, qobj);
name = qstring_get_str(qstr);
qobj = qdict_get(minfo, "abstract");
if (qobj) {
qbool = qobject_to(QBool, qobj);
abstract = qbool_get_bool(qbool);
} else {
abstract = false;
}
apply_to_node(name, is_machine, abstract);
qobj = qdict_get(minfo, "alias");
if (qobj) {
qstr = qobject_to(QString, qobj);
name = qstring_get_str(qstr);
apply_to_node(name, is_machine, abstract);
}
}
}
QGuestAllocator *get_machine_allocator(QOSGraphObject *obj)
{
return obj->get_driver(obj, "memory");
}
/**
* allocate_objects(): given an array of nodes @arg,
* walks the path invoking all constructors and
* passing the corresponding parameter in order to
* continue the objects allocation.
* Once the test is reached, return the object it consumes.
*
* Since the machine and QEDGE_CONSUMED_BY nodes allocate
* memory in the constructor, g_test_queue_destroy is used so
* that after execution they can be safely free'd. (The test's
* ->before callback is also welcome to use g_test_queue_destroy).
*
* Note: as specified in walk_path() too, @arg is an array of
* char *, where arg[0] is a pointer to the command line
* string that will be used to properly start QEMU when executing
* the test, and the remaining elements represent the actual objects
* that will be allocated.
*/
void *allocate_objects(QTestState *qts, char **path, QGuestAllocator **p_alloc)
{
int current = 0;
QGuestAllocator *alloc;
QOSGraphObject *parent = NULL;
QOSGraphEdge *edge;
QOSGraphNode *node;
void *edge_arg;
void *obj;
node = qos_graph_get_node(path[current]);
g_assert(node->type == QNODE_MACHINE);
obj = qos_machine_new(node, qts);
qos_object_queue_destroy(obj);
alloc = get_machine_allocator(obj);
if (p_alloc) {
*p_alloc = alloc;
}
for (;;) {
if (node->type != QNODE_INTERFACE) {
qos_object_start_hw(obj);
parent = obj;
}
/* follow edge and get object for next node constructor */
current++;
edge = qos_graph_get_edge(path[current - 1], path[current]);
node = qos_graph_get_node(path[current]);
if (node->type == QNODE_TEST) {
g_assert(qos_graph_edge_get_type(edge) == QEDGE_CONSUMED_BY);
return obj;
}
switch (qos_graph_edge_get_type(edge)) {
case QEDGE_PRODUCES:
obj = parent->get_driver(parent, path[current]);
break;
case QEDGE_CONSUMED_BY:
edge_arg = qos_graph_edge_get_arg(edge);
obj = qos_driver_new(node, obj, alloc, edge_arg);
qos_object_queue_destroy(obj);
break;
case QEDGE_CONTAINS:
obj = parent->get_device(parent, path[current]);
break;
}
}
}

View File

@ -0,0 +1,28 @@
/*
* libqos driver framework
*
* Copyright (c) 2018 Emanuele Giuseppe Esposito <e.emanuelegiuseppe@gmail.com>
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License version 2 as published by the Free Software Foundation.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, see <http://www.gnu.org/licenses/>
*/
#ifndef QOS_EXTERNAL_H
#define QOS_EXTERNAL_H
#include "libqos/qgraph.h"
void apply_to_node(const char *name, bool is_machine, bool is_abstract);
void apply_to_qlist(QList *list, bool is_machine);
QGuestAllocator *get_machine_allocator(QOSGraphObject *obj);
void *allocate_objects(QTestState *qts, char **path, QGuestAllocator **p_alloc);
#endif

View File

@ -35,6 +35,23 @@
#define SOCKET_TIMEOUT 50
#define SOCKET_MAX_FDS 16
typedef void (*QTestSendFn)(QTestState *s, const char *buf);
typedef void (*ExternalSendFn)(void *s, const char *buf);
typedef GString* (*QTestRecvFn)(QTestState *);
typedef struct QTestClientTransportOps {
QTestSendFn send; /* for sending qtest commands */
/*
* use external_send to send qtest command strings through functions which
* do not accept a QTestState as the first parameter.
*/
ExternalSendFn external_send;
QTestRecvFn recv_line; /* for receiving qtest command responses */
} QTestTransportOps;
struct QTestState
{
int fd;
@ -45,6 +62,7 @@ struct QTestState
bool big_endian;
bool irq_level[MAX_IRQ];
GString *rx;
QTestTransportOps ops;
};
static GHookList abrt_hooks;
@ -52,6 +70,14 @@ static struct sigaction sigact_old;
static int qtest_query_target_endianness(QTestState *s);
static void qtest_client_socket_send(QTestState*, const char *buf);
static void socket_send(int fd, const char *buf, size_t size);
static GString *qtest_client_socket_recv_line(QTestState *);
static void qtest_client_set_tx_handler(QTestState *s, QTestSendFn send);
static void qtest_client_set_rx_handler(QTestState *s, QTestRecvFn recv);
static int init_socket(const char *socket_path)
{
struct sockaddr_un addr;
@ -234,6 +260,9 @@ QTestState *qtest_init_without_qmp_handshake(const char *extra_args)
sock = init_socket(socket_path);
qmpsock = init_socket(qmp_socket_path);
qtest_client_set_rx_handler(s, qtest_client_socket_recv_line);
qtest_client_set_tx_handler(s, qtest_client_socket_send);
qtest_add_abrt_handler(kill_qemu_hook_func, s);
command = g_strdup_printf("exec %s "
@ -379,13 +408,9 @@ static void socket_send(int fd, const char *buf, size_t size)
}
}
static void socket_sendf(int fd, const char *fmt, va_list ap)
static void qtest_client_socket_send(QTestState *s, const char *buf)
{
gchar *str = g_strdup_vprintf(fmt, ap);
size_t size = strlen(str);
socket_send(fd, str, size);
g_free(str);
socket_send(s->fd, buf, strlen(buf));
}
static void GCC_FMT_ATTR(2, 3) qtest_sendf(QTestState *s, const char *fmt, ...)
@ -393,8 +418,11 @@ static void GCC_FMT_ATTR(2, 3) qtest_sendf(QTestState *s, const char *fmt, ...)
va_list ap;
va_start(ap, fmt);
socket_sendf(s->fd, fmt, ap);
gchar *str = g_strdup_vprintf(fmt, ap);
va_end(ap);
s->ops.send(s, str);
g_free(str);
}
/* Sends a message and file descriptors to the socket.
@ -431,7 +459,7 @@ static void socket_send_fds(int socket_fd, int *fds, size_t fds_num,
g_assert_cmpint(ret, >, 0);
}
static GString *qtest_recv_line(QTestState *s)
static GString *qtest_client_socket_recv_line(QTestState *s)
{
GString *line;
size_t offset;
@ -468,7 +496,7 @@ static gchar **qtest_rsp(QTestState *s, int expected_args)
int i;
redo:
line = qtest_recv_line(s);
line = s->ops.recv_line(s);
words = g_strsplit(line->str, " ", 0);
g_string_free(line, TRUE);
@ -1058,8 +1086,8 @@ void qtest_bufwrite(QTestState *s, uint64_t addr, const void *data, size_t size)
bdata = g_base64_encode(data, size);
qtest_sendf(s, "b64write 0x%" PRIx64 " 0x%zx ", addr, size);
socket_send(s->fd, bdata, strlen(bdata));
socket_send(s->fd, "\n", 1);
s->ops.send(s, bdata);
s->ops.send(s, "\n");
qtest_rsp(s, 0);
g_free(bdata);
}
@ -1337,3 +1365,72 @@ void qmp_assert_error_class(QDict *rsp, const char *class)
qobject_unref(rsp);
}
static void qtest_client_set_tx_handler(QTestState *s,
QTestSendFn send)
{
s->ops.send = send;
}
static void qtest_client_set_rx_handler(QTestState *s, QTestRecvFn recv)
{
s->ops.recv_line = recv;
}
/* A type-safe wrapper for s->send() */
static void send_wrapper(QTestState *s, const char *buf)
{
s->ops.external_send(s, buf);
}
static GString *qtest_client_inproc_recv_line(QTestState *s)
{
GString *line;
size_t offset;
char *eol;
eol = strchr(s->rx->str, '\n');
offset = eol - s->rx->str;
line = g_string_new_len(s->rx->str, offset);
g_string_erase(s->rx, 0, offset + 1);
return line;
}
QTestState *qtest_inproc_init(QTestState **s, bool log, const char* arch,
void (*send)(void*, const char*))
{
QTestState *qts;
qts = g_new0(QTestState, 1);
*s = qts; /* Expose qts early on, since the query endianness relies on it */
qts->wstatus = 0;
for (int i = 0; i < MAX_IRQ; i++) {
qts->irq_level[i] = false;
}
qtest_client_set_rx_handler(qts, qtest_client_inproc_recv_line);
/* send() may not have a matching prototype, so use a type-safe wrapper */
qts->ops.external_send = send;
qtest_client_set_tx_handler(qts, send_wrapper);
qts->big_endian = qtest_query_target_endianness(qts);
/*
* Set a dummy path for QTEST_QEMU_BINARY. Doesn't need to exist, but this
* way, qtest_get_arch works for inproc qtest.
*/
gchar *bin_path = g_strconcat("/qemu-system-", arch, NULL);
setenv("QTEST_QEMU_BINARY", bin_path, 0);
g_free(bin_path);
return qts;
}
void qtest_client_inproc_recv(void *opaque, const char *str)
{
QTestState *qts = *(QTestState **)opaque;
if (!qts->rx) {
qts->rx = g_string_new(NULL);
}
g_string_append(qts->rx, str);
return;
}
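
The transport hooks above are what make the in-process mode work: qtest_inproc_init() installs a caller-supplied send function and the in-process line receiver, and the server side delivers responses by calling qtest_client_inproc_recv() on the same QTestState handle. The sketch below wires a toy loopback "server" into the client to show the flow; server_handle_cmd and its canned reply are illustrative stand-ins for a real in-process qtest server such as the one the fuzzer uses.

#include "qemu/osdep.h"
#include "libqtest.h"

/* Responses are delivered through qtest_client_inproc_recv(&qts, line). */
static QTestState *qts;

/* Toy stand-in for an in-process qtest server (not part of this patch). */
static void server_handle_cmd(void *opaque, const char *cmd)
{
    /*
     * A real server would parse and execute cmd; here we only answer the
     * endianness query that qtest_inproc_init() issues during setup.
     */
    if (g_str_has_prefix(cmd, "endianness")) {
        qtest_client_inproc_recv(&qts, "OK little\n");
    }
}

static void client_setup(const char *arch)
{
    /* server_handle_cmd becomes the transport's send hook. */
    qts = qtest_inproc_init(&qts, false, arch, server_handle_cmd);
}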

View File

@ -729,4 +729,8 @@ bool qtest_probe_child(QTestState *s);
*/
void qtest_set_expected_status(QTestState *s, int status);
QTestState *qtest_inproc_init(QTestState **s, bool log, const char* arch,
void (*send)(void*, const char*));
void qtest_client_inproc_recv(void *opaque, const char *str);
#endif

View File

@ -32,22 +32,22 @@ static void receive_autoinc(void *obj, void *data, QGuestAllocator *alloc)
pca9552_init(i2cdev);
i2c_send(i2cdev, &reg, 1);
qi2c_send(i2cdev, &reg, 1);
/* PCA9552_LS0 */
i2c_recv(i2cdev, &resp, 1);
qi2c_recv(i2cdev, &resp, 1);
g_assert_cmphex(resp, ==, 0x54);
/* PCA9552_LS1 */
i2c_recv(i2cdev, &resp, 1);
qi2c_recv(i2cdev, &resp, 1);
g_assert_cmphex(resp, ==, 0x55);
/* PCA9552_LS2 */
i2c_recv(i2cdev, &resp, 1);
qi2c_recv(i2cdev, &resp, 1);
g_assert_cmphex(resp, ==, 0x55);
/* PCA9552_LS3 */
i2c_recv(i2cdev, &resp, 1);
qi2c_recv(i2cdev, &resp, 1);
g_assert_cmphex(resp, ==, 0x54);
}

View File

@ -27,65 +27,11 @@
#include "libqos/malloc.h"
#include "libqos/qgraph.h"
#include "libqos/qgraph_internal.h"
#include "libqos/qos_external.h"
static char *old_path;
static void apply_to_node(const char *name, bool is_machine, bool is_abstract)
{
char *machine_name = NULL;
if (is_machine) {
const char *arch = qtest_get_arch();
machine_name = g_strconcat(arch, "/", name, NULL);
name = machine_name;
}
qos_graph_node_set_availability(name, true);
if (is_abstract) {
qos_delete_cmd_line(name);
}
g_free(machine_name);
}
/**
* apply_to_qlist(): queries QEMU via QMP for the list of available
* machines or devices, and marks the corresponding graph nodes as
* available. When a node is found, all the nodes it produces and
* contains are marked available as well.
*
* See qos_graph_node_set_availability() for more info
*/
static void apply_to_qlist(QList *list, bool is_machine)
{
const QListEntry *p;
const char *name;
bool abstract;
QDict *minfo;
QObject *qobj;
QString *qstr;
QBool *qbool;
for (p = qlist_first(list); p; p = qlist_next(p)) {
minfo = qobject_to(QDict, qlist_entry_obj(p));
qobj = qdict_get(minfo, "name");
qstr = qobject_to(QString, qobj);
name = qstring_get_str(qstr);
qobj = qdict_get(minfo, "abstract");
if (qobj) {
qbool = qobject_to(QBool, qobj);
abstract = qbool_get_bool(qbool);
} else {
abstract = false;
}
apply_to_node(name, is_machine, abstract);
qobj = qdict_get(minfo, "alias");
if (qobj) {
qstr = qobject_to(QString, qobj);
name = qstring_get_str(qstr);
apply_to_node(name, is_machine, abstract);
}
}
}
/**
* qos_set_machines_devices_available(): sets availability of qgraph
@ -129,10 +75,6 @@ static void qos_set_machines_devices_available(void)
qobject_unref(response);
}
static QGuestAllocator *get_machine_allocator(QOSGraphObject *obj)
{
return obj->get_driver(obj, "memory");
}
static void restart_qemu_or_continue(char *path)
{
@ -159,78 +101,6 @@ void qos_invalidate_command_line(void)
old_path = NULL;
}
/**
* allocate_objects(): given an array of nodes @arg,
* walks the path invoking all constructors and
* passing the corresponding parameter in order to
* continue the objects allocation.
* Once the test is reached, return the object it consumes.
*
* Since the machine and QEDGE_CONSUMED_BY nodes allocate
* memory in the constructor, g_test_queue_destroy is used so
* that after execution they can be safely free'd. (The test's
* ->before callback is also welcome to use g_test_queue_destroy).
*
* Note: as specified in walk_path() too, @arg is an array of
* char *, where arg[0] is a pointer to the command line
* string that will be used to properly start QEMU when executing
* the test, and the remaining elements represent the actual objects
* that will be allocated.
*/
static void *allocate_objects(QTestState *qts, char **path, QGuestAllocator **p_alloc)
{
int current = 0;
QGuestAllocator *alloc;
QOSGraphObject *parent = NULL;
QOSGraphEdge *edge;
QOSGraphNode *node;
void *edge_arg;
void *obj;
node = qos_graph_get_node(path[current]);
g_assert(node->type == QNODE_MACHINE);
obj = qos_machine_new(node, qts);
qos_object_queue_destroy(obj);
alloc = get_machine_allocator(obj);
if (p_alloc) {
*p_alloc = alloc;
}
for (;;) {
if (node->type != QNODE_INTERFACE) {
qos_object_start_hw(obj);
parent = obj;
}
/* follow edge and get object for next node constructor */
current++;
edge = qos_graph_get_edge(path[current - 1], path[current]);
node = qos_graph_get_node(path[current]);
if (node->type == QNODE_TEST) {
g_assert(qos_graph_edge_get_type(edge) == QEDGE_CONSUMED_BY);
return obj;
}
switch (qos_graph_edge_get_type(edge)) {
case QEDGE_PRODUCES:
obj = parent->get_driver(parent, path[current]);
break;
case QEDGE_CONSUMED_BY:
edge_arg = qos_graph_edge_get_arg(edge);
obj = qos_driver_new(node, obj, alloc, edge_arg);
qos_object_queue_destroy(obj);
break;
case QEDGE_CONTAINS:
obj = parent->get_device(parent, path[current]);
break;
}
}
}
/* The argument to run_one_test, which is the test function that is registered
* with GTest, is a vector of strings. The first item is the initial command

View File

@ -615,7 +615,8 @@ static void test_source_bh_delete_from_cb(void)
g_assert_cmpint(data1.n, ==, data1.max);
g_assert(data1.bh == NULL);
g_assert(!g_main_context_iteration(NULL, false));
assert(g_main_context_iteration(NULL, false));
assert(!g_main_context_iteration(NULL, false));
}
static void test_source_bh_delete_from_cb_many(void)

View File

@ -93,6 +93,8 @@ struct list_element {
QSIMPLEQ_ENTRY(list_element) entry;
#elif TEST_LIST_TYPE == 3
QTAILQ_ENTRY(list_element) entry;
#elif TEST_LIST_TYPE == 4
QSLIST_ENTRY(list_element) entry;
#else
#error Invalid TEST_LIST_TYPE
#endif
@ -144,6 +146,20 @@ static QTAILQ_HEAD(, list_element) Q_list_head;
#define TEST_LIST_INSERT_HEAD_RCU QTAILQ_INSERT_HEAD_RCU
#define TEST_LIST_FOREACH_RCU QTAILQ_FOREACH_RCU
#define TEST_LIST_FOREACH_SAFE_RCU QTAILQ_FOREACH_SAFE_RCU
#elif TEST_LIST_TYPE == 4
static QSLIST_HEAD(, list_element) Q_list_head;
#define TEST_NAME "qslist"
#define TEST_LIST_REMOVE_RCU(el, f) \
QSLIST_REMOVE_RCU(&Q_list_head, el, list_element, f)
#define TEST_LIST_INSERT_AFTER_RCU(list_el, el, f) \
QSLIST_INSERT_AFTER_RCU(&Q_list_head, list_el, el, f)
#define TEST_LIST_INSERT_HEAD_RCU QSLIST_INSERT_HEAD_RCU
#define TEST_LIST_FOREACH_RCU QSLIST_FOREACH_RCU
#define TEST_LIST_FOREACH_SAFE_RCU QSLIST_FOREACH_SAFE_RCU
#else
#error Invalid TEST_LIST_TYPE
#endif

tests/test-rcu-slist.c Normal file
View File

@ -0,0 +1,2 @@
#define TEST_LIST_TYPE 4
#include "test-rcu-list.c"

View File

@ -15,6 +15,7 @@
#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
@ -31,12 +32,23 @@ struct AioHandler
AioPollFn *io_poll;
IOHandler *io_poll_begin;
IOHandler *io_poll_end;
int deleted;
void *opaque;
bool is_external;
QLIST_ENTRY(AioHandler) node;
QLIST_ENTRY(AioHandler) node_ready; /* only used during aio_poll() */
QLIST_ENTRY(AioHandler) node_deleted;
};
/* Add a handler to a ready list */
static void add_ready_handler(AioHandlerList *ready_list,
AioHandler *node,
int revents)
{
QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
node->pfd.revents = revents;
QLIST_INSERT_HEAD(ready_list, node, node_ready);
}
#ifdef CONFIG_EPOLL_CREATE1
/* The fd number threshold to switch to epoll */
@ -67,7 +79,7 @@ static bool aio_epoll_try_enable(AioContext *ctx)
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
int r;
if (node->deleted || !node->pfd.events) {
if (QLIST_IS_INSERTED(node, node_deleted) || !node->pfd.events) {
continue;
}
event.events = epoll_events_from_pfd(node->pfd.events);
@ -104,17 +116,22 @@ static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
}
}
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
unsigned npfd, int64_t timeout)
static int aio_epoll(AioContext *ctx, AioHandlerList *ready_list,
int64_t timeout)
{
GPollFD pfd = {
.fd = ctx->epollfd,
.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR,
};
AioHandler *node;
int i, ret = 0;
struct epoll_event events[128];
assert(npfd == 1);
assert(pfds[0].fd == ctx->epollfd);
if (timeout > 0) {
ret = qemu_poll_ns(pfds, npfd, timeout);
ret = qemu_poll_ns(&pfd, 1, timeout);
if (ret > 0) {
timeout = 0;
}
}
if (timeout <= 0 || ret > 0) {
ret = epoll_wait(ctx->epollfd, events,
@ -125,11 +142,13 @@ static int aio_epoll(AioContext *ctx, GPollFD *pfds,
}
for (i = 0; i < ret; i++) {
int ev = events[i].events;
int revents = (ev & EPOLLIN ? G_IO_IN : 0) |
(ev & EPOLLOUT ? G_IO_OUT : 0) |
(ev & EPOLLHUP ? G_IO_HUP : 0) |
(ev & EPOLLERR ? G_IO_ERR : 0);
node = events[i].data.ptr;
node->pfd.revents = (ev & EPOLLIN ? G_IO_IN : 0) |
(ev & EPOLLOUT ? G_IO_OUT : 0) |
(ev & EPOLLHUP ? G_IO_HUP : 0) |
(ev & EPOLLERR ? G_IO_ERR : 0);
add_ready_handler(ready_list, node, revents);
}
}
out:
@ -167,8 +186,8 @@ static void aio_epoll_update(AioContext *ctx, AioHandler *node, bool is_new)
{
}
static int aio_epoll(AioContext *ctx, GPollFD *pfds,
unsigned npfd, int64_t timeout)
static int aio_epoll(AioContext *ctx, AioHandlerList *ready_list,
int64_t timeout)
{
assert(false);
}
@ -191,9 +210,11 @@ static AioHandler *find_aio_handler(AioContext *ctx, int fd)
AioHandler *node;
QLIST_FOREACH(node, &ctx->aio_handlers, node) {
if (node->pfd.fd == fd)
if (!node->deleted)
if (node->pfd.fd == fd) {
if (!QLIST_IS_INSERTED(node, node_deleted)) {
return node;
}
}
}
return NULL;
@ -212,7 +233,7 @@ static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
/* If a read is in progress, just mark the node as deleted */
if (qemu_lockcnt_count(&ctx->list_lock)) {
node->deleted = 1;
QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
node->pfd.revents = 0;
return false;
}
@ -354,7 +375,7 @@ static void poll_set_started(AioContext *ctx, bool started)
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
IOHandler *fn;
if (node->deleted) {
if (QLIST_IS_INSERTED(node, node_deleted)) {
continue;
}
@ -411,43 +432,82 @@ bool aio_pending(AioContext *ctx)
return result;
}
static void aio_free_deleted_handlers(AioContext *ctx)
{
AioHandler *node;
if (QLIST_EMPTY_RCU(&ctx->deleted_aio_handlers)) {
return;
}
if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
return; /* we are nested, let the parent do the freeing */
}
while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
QLIST_REMOVE(node, node);
QLIST_REMOVE(node, node_deleted);
g_free(node);
}
qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}
static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
{
bool progress = false;
int revents;
revents = node->pfd.revents & node->pfd.events;
node->pfd.revents = 0;
if (!QLIST_IS_INSERTED(node, node_deleted) &&
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
aio_node_check(ctx, node->is_external) &&
node->io_read) {
node->io_read(node->opaque);
/* aio_notify() does not count as progress */
if (node->opaque != &ctx->notifier) {
progress = true;
}
}
if (!QLIST_IS_INSERTED(node, node_deleted) &&
(revents & (G_IO_OUT | G_IO_ERR)) &&
aio_node_check(ctx, node->is_external) &&
node->io_write) {
node->io_write(node->opaque);
progress = true;
}
return progress;
}
/*
* If we have a list of ready handlers then this is more efficient than
* scanning all handlers with aio_dispatch_handlers().
*/
static bool aio_dispatch_ready_handlers(AioContext *ctx,
AioHandlerList *ready_list)
{
bool progress = false;
AioHandler *node;
while ((node = QLIST_FIRST(ready_list))) {
QLIST_SAFE_REMOVE(node, node_ready);
progress = aio_dispatch_handler(ctx, node) || progress;
}
return progress;
}
/* Slower than aio_dispatch_ready_handlers() but only used via glib */
static bool aio_dispatch_handlers(AioContext *ctx)
{
AioHandler *node, *tmp;
bool progress = false;
QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
int revents;
revents = node->pfd.revents & node->pfd.events;
node->pfd.revents = 0;
if (!node->deleted &&
(revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
aio_node_check(ctx, node->is_external) &&
node->io_read) {
node->io_read(node->opaque);
/* aio_notify() does not count as progress */
if (node->opaque != &ctx->notifier) {
progress = true;
}
}
if (!node->deleted &&
(revents & (G_IO_OUT | G_IO_ERR)) &&
aio_node_check(ctx, node->is_external) &&
node->io_write) {
node->io_write(node->opaque);
progress = true;
}
if (node->deleted) {
if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
QLIST_REMOVE(node, node);
g_free(node);
qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}
}
progress = aio_dispatch_handler(ctx, node) || progress;
}
return progress;
@ -458,6 +518,7 @@ void aio_dispatch(AioContext *ctx)
qemu_lockcnt_inc(&ctx->list_lock);
aio_bh_poll(ctx);
aio_dispatch_handlers(ctx);
aio_free_deleted_handlers(ctx);
qemu_lockcnt_dec(&ctx->list_lock);
timerlistgroup_run_timers(&ctx->tlg);
@ -514,8 +575,18 @@ static bool run_poll_handlers_once(AioContext *ctx, int64_t *timeout)
bool progress = false;
AioHandler *node;
/*
* Optimization: ->io_poll() handlers often contain RCU read critical
* sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
* -> rcu_read_lock() -> ... sequences with expensive memory
* synchronization primitives. Make the entire polling loop an RCU
* critical section because nested rcu_read_lock()/rcu_read_unlock() calls
* are cheap.
*/
RCU_READ_LOCK_GUARD();
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->io_poll &&
if (!QLIST_IS_INSERTED(node, node_deleted) && node->io_poll &&
aio_node_check(ctx, node->is_external) &&
node->io_poll(node->opaque)) {
/*
@ -609,6 +680,7 @@ static bool try_poll_mode(AioContext *ctx, int64_t *timeout)
bool aio_poll(AioContext *ctx, bool blocking)
{
AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
AioHandler *node;
int i;
int ret = 0;
@ -649,7 +721,7 @@ bool aio_poll(AioContext *ctx, bool blocking)
if (!aio_epoll_enabled(ctx)) {
QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
if (!node->deleted && node->pfd.events
if (!QLIST_IS_INSERTED(node, node_deleted) && node->pfd.events
&& aio_node_check(ctx, node->is_external)) {
add_pollfd(node);
}
@ -658,13 +730,8 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* wait until next event */
if (aio_epoll_check_poll(ctx, pollfds, npfd, timeout)) {
AioHandler epoll_handler;
epoll_handler.pfd.fd = ctx->epollfd;
epoll_handler.pfd.events = G_IO_IN | G_IO_OUT | G_IO_HUP | G_IO_ERR;
npfd = 0;
add_pollfd(&epoll_handler);
ret = aio_epoll(ctx, pollfds, npfd, timeout);
npfd = 0; /* pollfds[] is not being used */
ret = aio_epoll(ctx, &ready_list, timeout);
} else {
ret = qemu_poll_ns(pollfds, npfd, timeout);
}
@ -719,7 +786,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
/* if we have any readable fds, dispatch event */
if (ret > 0) {
for (i = 0; i < npfd; i++) {
nodes[i]->pfd.revents = pollfds[i].revents;
int revents = pollfds[i].revents;
if (revents) {
add_ready_handler(&ready_list, nodes[i], revents);
}
}
}
@ -728,9 +799,11 @@ bool aio_poll(AioContext *ctx, bool blocking)
progress |= aio_bh_poll(ctx);
if (ret > 0) {
progress |= aio_dispatch_handlers(ctx);
progress |= aio_dispatch_ready_handlers(ctx, &ready_list);
}
aio_free_deleted_handlers(ctx);
qemu_lockcnt_dec(&ctx->list_lock);
progress |= timerlistgroup_run_timers(&ctx->tlg);

View File

@ -29,6 +29,7 @@
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "trace.h"
@ -36,16 +37,76 @@
/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */
/* QEMUBH::flags values */
enum {
/* Already enqueued and waiting for aio_bh_poll() */
BH_PENDING = (1 << 0),
/* Invoke the callback */
BH_SCHEDULED = (1 << 1),
/* Delete without invoking callback */
BH_DELETED = (1 << 2),
/* Delete after invoking callback */
BH_ONESHOT = (1 << 3),
/* Schedule periodically when the event loop is idle */
BH_IDLE = (1 << 4),
};
struct QEMUBH {
AioContext *ctx;
QEMUBHFunc *cb;
void *opaque;
QEMUBH *next;
bool scheduled;
bool idle;
bool deleted;
QSLIST_ENTRY(QEMUBH) next;
unsigned flags;
};
/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
AioContext *ctx = bh->ctx;
unsigned old_flags;
/*
* The memory barrier implicit in atomic_fetch_or makes sure that:
* 1. idle & any writes needed by the callback are done before the
* locations are read in the aio_bh_poll.
* 2. ctx is loaded before the callback has a chance to execute and bh
* could be freed.
*/
old_flags = atomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
if (!(old_flags & BH_PENDING)) {
QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
}
aio_notify(ctx);
}
/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
QEMUBH *bh = QSLIST_FIRST_RCU(head);
if (!bh) {
return NULL;
}
QSLIST_REMOVE_HEAD(head, next);
/*
* The atomic_and is paired with aio_bh_enqueue(). The implicit memory
* barrier ensures that the callback sees all writes done by the scheduling
* thread. It also ensures that the scheduling thread sees the cleared
* flag before bh->cb has run, and thus will call aio_notify again if
* necessary.
*/
*flags = atomic_fetch_and(&bh->flags,
~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
return bh;
}
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
QEMUBH *bh;
@ -55,15 +116,7 @@ void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
.cb = cb,
.opaque = opaque,
};
qemu_lockcnt_lock(&ctx->list_lock);
bh->next = ctx->first_bh;
bh->scheduled = 1;
bh->deleted = 1;
/* Make sure that the members are ready before putting bh into list */
smp_wmb();
ctx->first_bh = bh;
qemu_lockcnt_unlock(&ctx->list_lock);
aio_notify(ctx);
aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
@ -75,12 +128,6 @@ QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
.cb = cb,
.opaque = opaque,
};
qemu_lockcnt_lock(&ctx->list_lock);
bh->next = ctx->first_bh;
/* Make sure that the members are ready before putting bh into list */
smp_wmb();
ctx->first_bh = bh;
qemu_lockcnt_unlock(&ctx->list_lock);
return bh;
}
@ -89,91 +136,56 @@ void aio_bh_call(QEMUBH *bh)
bh->cb(bh->opaque);
}
/* Multiple occurrences of aio_bh_poll cannot be called concurrently.
* The count in ctx->list_lock is incremented before the call, and is
* not affected by the call.
*/
/* Multiple occurrences of aio_bh_poll cannot be called concurrently. */
int aio_bh_poll(AioContext *ctx)
{
QEMUBH *bh, **bhp, *next;
int ret;
bool deleted = false;
BHListSlice slice;
BHListSlice *s;
int ret = 0;
ret = 0;
for (bh = atomic_rcu_read(&ctx->first_bh); bh; bh = next) {
next = atomic_rcu_read(&bh->next);
/* The atomic_xchg is paired with the one in qemu_bh_schedule. The
* implicit memory barrier ensures that the callback sees all writes
* done by the scheduling thread. It also ensures that the scheduling
* thread sees the zero before bh->cb has run, and thus will call
* aio_notify again if necessary.
*/
if (atomic_xchg(&bh->scheduled, 0)) {
QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
QEMUBH *bh;
unsigned flags;
bh = aio_bh_dequeue(&s->bh_list, &flags);
if (!bh) {
QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
continue;
}
if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
/* Idle BHs don't count as progress */
if (!bh->idle) {
if (!(flags & BH_IDLE)) {
ret = 1;
}
bh->idle = 0;
aio_bh_call(bh);
}
if (bh->deleted) {
deleted = true;
if (flags & (BH_DELETED | BH_ONESHOT)) {
g_free(bh);
}
}
/* remove deleted bhs */
if (!deleted) {
return ret;
}
if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
bhp = &ctx->first_bh;
while (*bhp) {
bh = *bhp;
if (bh->deleted && !bh->scheduled) {
*bhp = bh->next;
g_free(bh);
} else {
bhp = &bh->next;
}
}
qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}
return ret;
}
void qemu_bh_schedule_idle(QEMUBH *bh)
{
bh->idle = 1;
/* Make sure that idle & any writes needed by the callback are done
* before the locations are read in the aio_bh_poll.
*/
atomic_mb_set(&bh->scheduled, 1);
aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}
void qemu_bh_schedule(QEMUBH *bh)
{
AioContext *ctx;
ctx = bh->ctx;
bh->idle = 0;
/* The memory barrier implicit in atomic_xchg makes sure that:
* 1. idle & any writes needed by the callback are done before the
* locations are read in the aio_bh_poll.
* 2. ctx is loaded before scheduled is set and the callback has a chance
* to execute.
*/
if (atomic_xchg(&bh->scheduled, 1) == 0) {
aio_notify(ctx);
}
aio_bh_enqueue(bh, BH_SCHEDULED);
}
/* This func is async.
*/
void qemu_bh_cancel(QEMUBH *bh)
{
atomic_mb_set(&bh->scheduled, 0);
atomic_and(&bh->flags, ~BH_SCHEDULED);
}
/* This func is async. The bottom half will do the delete action at the final
@ -181,21 +193,16 @@ void qemu_bh_cancel(QEMUBH *bh)
*/
void qemu_bh_delete(QEMUBH *bh)
{
bh->scheduled = 0;
bh->deleted = 1;
aio_bh_enqueue(bh, BH_DELETED);
}
int64_t
aio_compute_timeout(AioContext *ctx)
static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
int64_t deadline;
int timeout = -1;
QEMUBH *bh;
for (bh = atomic_rcu_read(&ctx->first_bh); bh;
bh = atomic_rcu_read(&bh->next)) {
if (bh->scheduled) {
if (bh->idle) {
QSLIST_FOREACH_RCU(bh, head, next) {
if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
if (bh->flags & BH_IDLE) {
/* idle bottom halves will be polled at least
* every 10ms */
timeout = 10000000;
@ -207,6 +214,28 @@ aio_compute_timeout(AioContext *ctx)
}
}
return timeout;
}
int64_t
aio_compute_timeout(AioContext *ctx)
{
BHListSlice *s;
int64_t deadline;
int timeout = -1;
timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
if (timeout == 0) {
return 0;
}
QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
if (timeout == 0) {
return 0;
}
}
deadline = timerlistgroup_deadline_ns(&ctx->tlg);
if (deadline == 0) {
return 0;
@ -237,15 +266,24 @@ aio_ctx_check(GSource *source)
{
AioContext *ctx = (AioContext *) source;
QEMUBH *bh;
BHListSlice *s;
atomic_and(&ctx->notify_me, ~1);
aio_notify_accept(ctx);
for (bh = ctx->first_bh; bh; bh = bh->next) {
if (bh->scheduled) {
QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
return true;
}
}
QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
return true;
}
}
}
return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}
@ -265,6 +303,8 @@ static void
aio_ctx_finalize(GSource *source)
{
AioContext *ctx = (AioContext *) source;
QEMUBH *bh;
unsigned flags;
thread_pool_free(ctx->thread_pool);
@ -287,18 +327,15 @@ aio_ctx_finalize(GSource *source)
assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
qemu_bh_delete(ctx->co_schedule_bh);
qemu_lockcnt_lock(&ctx->list_lock);
assert(!qemu_lockcnt_count(&ctx->list_lock));
while (ctx->first_bh) {
QEMUBH *next = ctx->first_bh->next;
/* There must be no aio_bh_poll() calls going on */
assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));
while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
/* qemu_bh_delete() must have been called on BHs in this AioContext */
assert(ctx->first_bh->deleted);
assert(flags & BH_DELETED);
g_free(ctx->first_bh);
ctx->first_bh = next;
g_free(bh);
}
qemu_lockcnt_unlock(&ctx->list_lock);
aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
event_notifier_cleanup(&ctx->notifier);
@ -445,6 +482,8 @@ AioContext *aio_context_new(Error **errp)
AioContext *ctx;
ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
QSLIST_INIT(&ctx->bh_list);
QSIMPLEQ_INIT(&ctx->bh_slice_list);
aio_context_setup(ctx);
ret = event_notifier_init(&ctx->notifier, false);

View File

@ -30,6 +30,7 @@ typedef struct ModuleEntry
typedef QTAILQ_HEAD(, ModuleEntry) ModuleTypeList;
static ModuleTypeList init_type_list[MODULE_INIT_MAX];
static bool modules_init_done[MODULE_INIT_MAX];
static ModuleTypeList dso_init_list;
@ -91,11 +92,17 @@ void module_call_init(module_init_type type)
ModuleTypeList *l;
ModuleEntry *e;
if (modules_init_done[type]) {
return;
}
l = find_type(type);
QTAILQ_FOREACH(e, l, node) {
e->init();
}
modules_init_done[type] = true;
}
#ifdef CONFIG_MODULES