mirror of https://github.com/xemu-project/xemu.git
commit fb42d5e15e
.dir-locals.el (new file, 2 lines)
@@ -0,0 +1,2 @@
((c-mode . ((c-file-style . "stroustrup")
            (indent-tabs-mode . nil))))
.editorconfig (new file, 49 lines)
@@ -0,0 +1,49 @@
# EditorConfig is a file format and collection of text editor plugins
# for maintaining consistent coding styles between different editors
# and IDEs. Most popular editors support this either natively or via
# plugin.
#
# Check https://editorconfig.org for details.
#
# Emacs: you need https://github.com/10sr/editorconfig-custom-majormode-el
# to automatically enable the appropriate major-mode for your files
# that aren't already caught by your existing config.
#

root = true

[*]
end_of_line = lf
insert_final_newline = true
charset = utf-8

[*.mak]
indent_style = tab
indent_size = 8
emacs_mode = makefile

[Makefile*]
indent_style = tab
indent_size = 8
emacs_mode = makefile

[*.{c,h,c.inc,h.inc}]
indent_style = space
indent_size = 4
emacs_mode = c

[*.sh]
indent_style = space
indent_size = 4

[*.{s,S}]
indent_style = tab
indent_size = 8
emacs_mode = asm

[*.{vert,frag}]
emacs_mode = glsl

[*.json]
indent_style = space
emacs_mode = python
.exrc (new file, 7 lines)
@@ -0,0 +1,7 @@
"VIM settings to match QEMU coding style. They are activated by adding the
"following settings (without the " symbol) as last two lines in $HOME/.vimrc:
"set secure
"set exrc
set expandtab
set shiftwidth=4
set smarttab
.gdbinit (new file, 8 lines)
@@ -0,0 +1,8 @@
# GDB may have ./.gdbinit loading disabled by default. In that case you can
# follow the instructions it prints. They boil down to adding the following to
# your home directory's ~/.gdbinit file:
#
# add-auto-load-safe-path /path/to/qemu/.gdbinit

# Load QEMU-specific sub-commands and settings
source scripts/qemu-gdb.py
.git-blame-ignore-revs (new file, 21 lines)
@@ -0,0 +1,21 @@
#
# List of code-formatting clean ups the git blame can ignore
#
# git blame --ignore-revs-file .git-blame-ignore-revs
#
# or
#
# git config blame.ignoreRevsFile .git-blame-ignore-revs
#

# gdbstub: clean-up indents
ad9e4585b3c7425759d3eea697afbca71d2c2082

# e1000e: fix code style
0eadd56bf53ab196a16d492d7dd31c62e1c24c32

# target/riscv: coding style fixes
8c7feddddd9218b407792120bcfda0347ed16205

# replace TABs with spaces
48805df9c22a0700fba4b3b548fafaa21726ca68
.gitattributes (new file, vendored, 9 lines)
@@ -0,0 +1,9 @@
*.c.inc diff=c
*.h.inc diff=c
*.m diff=objc
*.py diff=python
*.rs diff=rust
*.rs.inc diff=rust
Cargo.lock diff=toml merge=binary

*.patch -text -whitespace
.gitignore (new file, vendored, 29 lines)
@@ -0,0 +1,29 @@
/GNUmakefile
/build/
/.cache/
/.vscode/
*.pyc
.sdk
.stgit-*
.git-submodule-status
.clang-format
.gdb_history
cscope.*
tags
TAGS
GPATH
GRTAGS
GTAGS
*~
*.ast_raw
*.depend_raw
*.swp
*.patch
*.gcov

build.log
/dist
/xemu.ini
/macos-libs/
/macos-pkgs/
/xemu.iconset/
.gitlab-ci.d/base.yml (new file, 136 lines)
@@ -0,0 +1,136 @@
variables:
|
||||
# On stable branches this is changed by later rules. Should also
|
||||
# be overridden per pipeline if running pipelines concurrently
|
||||
# for different branches in contributor forks.
|
||||
QEMU_CI_CONTAINER_TAG: latest
|
||||
|
||||
# For purposes of CI rules, upstream is the gitlab.com/qemu-project
|
||||
# namespace. When testing CI, it might be usefult to override this
|
||||
# to point to a fork repo
|
||||
QEMU_CI_UPSTREAM: qemu-project
|
||||
|
||||
# The order of rules defined here is critically important.
|
||||
# They are evaluated in order and first match wins.
|
||||
#
|
||||
# Thus we group them into a number of stages, ordered from
|
||||
# most restrictive to least restrictive
|
||||
#
|
||||
# For pipelines running for stable "staging-X.Y" branches
|
||||
# we must override QEMU_CI_CONTAINER_TAG
|
||||
#
|
||||
.base_job_template:
|
||||
variables:
|
||||
# Each script line from will be in a collapsible section in the job output
|
||||
# and show the duration of each line.
|
||||
FF_SCRIPT_SECTIONS: 1
|
||||
# The project has a fairly fat GIT repo so we try and avoid bringing in things
|
||||
# we don't need. The --filter options avoid blobs and tree references we aren't going to use
|
||||
# and we also avoid fetching tags.
|
||||
GIT_FETCH_EXTRA_FLAGS: --filter=blob:none --filter=tree:0 --no-tags --prune --quiet
|
||||
|
||||
interruptible: true
|
||||
|
||||
rules:
|
||||
#############################################################
|
||||
# Stage 1: exclude scenarios where we definitely don't
|
||||
# want jobs to run
|
||||
#############################################################
|
||||
|
||||
# Never run jobs upstream on stable branch, staging branch jobs already ran
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /^stable-/'
|
||||
when: never
|
||||
|
||||
# Never run jobs upstream on tags, staging branch jobs already ran
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG'
|
||||
when: never
|
||||
|
||||
# Scheduled runs on mainline don't get pipelines except for the special Coverity job
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
|
||||
when: never
|
||||
|
||||
# Cirrus jobs can't run unless the creds / target repo are set
|
||||
- if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
|
||||
when: never
|
||||
|
||||
# Publishing jobs should only run on the default branch in upstream
|
||||
- if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
|
||||
when: never
|
||||
|
||||
# Non-publishing jobs should only run on staging branches in upstream
|
||||
- if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH !~ /staging/'
|
||||
when: never
|
||||
|
||||
# Jobs only intended for forks should always be skipped on upstream
|
||||
- if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
|
||||
when: never
|
||||
|
||||
# Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
|
||||
- if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
|
||||
when: never
|
||||
|
||||
# Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
|
||||
- if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
|
||||
when: never
|
||||
|
||||
|
||||
#############################################################
|
||||
# Stage 2: fine tune execution of jobs in specific scenarios
|
||||
# where the catch all logic is inappropriate
|
||||
#############################################################
|
||||
|
||||
# Optional jobs should not be run unless manually triggered
|
||||
- if: '$QEMU_JOB_OPTIONAL && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
variables:
|
||||
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
|
||||
|
||||
- if: '$QEMU_JOB_OPTIONAL'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
|
||||
# Skipped jobs should not be run unless manually triggered
|
||||
- if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
variables:
|
||||
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
|
||||
|
||||
- if: '$QEMU_JOB_SKIPPED'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
|
||||
# Avocado jobs can be manually start in forks if $QEMU_CI_AVOCADO_TESTING is unset
|
||||
- if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
|
||||
when: manual
|
||||
allow_failure: true
|
||||
|
||||
|
||||
#############################################################
|
||||
# Stage 3: catch all logic applying to any job not matching
|
||||
# an earlier criteria
|
||||
#############################################################
|
||||
|
||||
# Forks pipeline jobs don't start automatically unless
|
||||
# QEMU_CI=2 is set
|
||||
- if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
|
||||
when: manual
|
||||
|
||||
# Upstream pipeline jobs start automatically unless told not to
|
||||
# by setting QEMU_CI=1
|
||||
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
|
||||
when: manual
|
||||
variables:
|
||||
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
|
||||
|
||||
- if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
|
||||
when: manual
|
||||
|
||||
# Jobs can run if any jobs they depend on were successful
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
|
||||
when: on_success
|
||||
variables:
|
||||
QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
|
||||
|
||||
- when: on_success
.gitlab-ci.d/buildtest-template.yml (new file, 128 lines)
@@ -0,0 +1,128 @@
.native_build_job_template:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
cache:
|
||||
paths:
|
||||
- ccache
|
||||
key: "$CI_JOB_NAME"
|
||||
when: always
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- section_start setup "Pre-script setup"
|
||||
- JOBS=$(expr $(nproc) + 1)
|
||||
- cat /packages.txt
|
||||
- section_end setup
|
||||
script:
|
||||
- export CCACHE_BASEDIR="$(pwd)"
|
||||
- export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
|
||||
- export CCACHE_MAXSIZE="500M"
|
||||
- export PATH="$CCACHE_WRAPPERSDIR:$PATH"
|
||||
- du -sh .git
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ccache --zero-stats
|
||||
- section_start configure "Running configure"
|
||||
- ../configure --enable-werror --disable-docs --enable-fdt=system
|
||||
${TARGETS:+--target-list="$TARGETS"}
|
||||
$CONFIGURE_ARGS ||
|
||||
{ cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- if test -n "$LD_JOBS";
|
||||
then
|
||||
pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
|
||||
fi || exit 1;
|
||||
- section_end configure
|
||||
- section_start build "Building QEMU"
|
||||
- $MAKE -j"$JOBS"
|
||||
- section_end build
|
||||
- section_start test "Running tests"
|
||||
- if test -n "$MAKE_CHECK_ARGS";
|
||||
then
|
||||
$MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
|
||||
fi
|
||||
- section_end test
|
||||
- ccache --show-stats
|
||||
|
||||
# We jump some hoops in common_test_job_template to avoid
|
||||
# rebuilding all the object files we skip in the artifacts
|
||||
.native_build_artifact_template:
|
||||
artifacts:
|
||||
when: on_success
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
- build
|
||||
- .git-submodule-status
|
||||
exclude:
|
||||
- build/**/*.p
|
||||
- build/**/*.a.p
|
||||
- build/**/*.c.o
|
||||
- build/**/*.c.o.d
|
||||
|
||||
.common_test_job_template:
|
||||
extends: .base_job_template
|
||||
stage: test
|
||||
image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
|
||||
script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- section_start buildenv "Setting up to run tests"
|
||||
- scripts/git-submodule.sh update roms/SLOF
|
||||
- build/pyvenv/bin/meson subprojects download $(cd build/subprojects && echo *)
|
||||
- cd build
|
||||
- find . -type f -exec touch {} +
|
||||
# Avoid recompiling by hiding ninja with NINJA=":"
|
||||
# We also have to pre-cache the functional tests manually in this case
|
||||
- if [ "x${QEMU_TEST_CACHE_DIR}" != "x" ]; then
|
||||
$MAKE precache-functional ;
|
||||
fi
|
||||
- section_end buildenv
|
||||
- section_start test "Running tests"
|
||||
- $MAKE NINJA=":" $MAKE_CHECK_ARGS
|
||||
- section_end test
|
||||
|
||||
.native_test_job_template:
|
||||
extends: .common_test_job_template
|
||||
artifacts:
|
||||
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
|
||||
when: always
|
||||
expire_in: 7 days
|
||||
paths:
|
||||
- build/meson-logs/testlog.txt
|
||||
reports:
|
||||
junit: build/meson-logs/testlog.junit.xml
|
||||
|
||||
.functional_test_job_template:
|
||||
extends: .common_test_job_template
|
||||
cache:
|
||||
key: "${CI_JOB_NAME}-cache"
|
||||
paths:
|
||||
- ${CI_PROJECT_DIR}/avocado-cache
|
||||
- ${CI_PROJECT_DIR}/functional-cache
|
||||
policy: pull-push
|
||||
artifacts:
|
||||
name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
|
||||
when: always
|
||||
expire_in: 7 days
|
||||
paths:
|
||||
- build/tests/results/latest/results.xml
|
||||
- build/tests/results/latest/test-results
|
||||
- build/tests/functional/*/*/*.log
|
||||
reports:
|
||||
junit: build/tests/results/latest/results.xml
|
||||
before_script:
|
||||
- mkdir -p ~/.config/avocado
|
||||
- echo "[datadir.paths]" > ~/.config/avocado/avocado.conf
|
||||
- echo "cache_dirs = ['${CI_PROJECT_DIR}/avocado-cache']"
|
||||
>> ~/.config/avocado/avocado.conf
|
||||
- echo -e '[job.output.testlogs]\nstatuses = ["FAIL", "INTERRUPT"]'
|
||||
>> ~/.config/avocado/avocado.conf
|
||||
- if [ -d ${CI_PROJECT_DIR}/avocado-cache ]; then
|
||||
du -chs ${CI_PROJECT_DIR}/*-cache ;
|
||||
fi
|
||||
- export AVOCADO_ALLOW_UNTRUSTED_CODE=1
|
||||
- export QEMU_TEST_ALLOW_UNTRUSTED_CODE=1
|
||||
- export QEMU_TEST_CACHE_DIR=${CI_PROJECT_DIR}/functional-cache
|
||||
after_script:
|
||||
- cd build
|
||||
- du -chs ${CI_PROJECT_DIR}/*-cache
|
||||
variables:
|
||||
QEMU_JOB_AVOCADO: 1
.gitlab-ci.d/buildtest.yml (new file, 797 lines)
@@ -0,0 +1,797 @@
include:
|
||||
- local: '/.gitlab-ci.d/buildtest-template.yml'
|
||||
|
||||
build-system-alpine:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
- job: amd64-alpine-container
|
||||
variables:
|
||||
IMAGE: alpine
|
||||
TARGETS: avr-softmmu loongarch64-softmmu mips64-softmmu mipsel-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
CONFIGURE_ARGS: --enable-docs --enable-trace-backends=log,simple,syslog
|
||||
|
||||
check-system-alpine:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-alpine
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: alpine
|
||||
MAKE_CHECK_ARGS: check-unit check-qtest
|
||||
|
||||
functional-system-alpine:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-alpine
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: alpine
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel
|
||||
|
||||
build-system-ubuntu:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-ubuntu2204-container
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
CONFIGURE_ARGS: --enable-docs
|
||||
TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-ubuntu:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-ubuntu
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-ubuntu:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-ubuntu
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:alpha arch:microblazeel arch:mips64el
|
||||
|
||||
build-system-debian:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-debian-container
|
||||
variables:
|
||||
IMAGE: debian
|
||||
CONFIGURE_ARGS: --with-coroutine=sigaltstack
|
||||
TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
|
||||
sparc-softmmu xtensa-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-debian:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-debian:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa
|
||||
|
||||
crash-test-debian:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-debian
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
script:
|
||||
- cd build
|
||||
- make NINJA=":" check-venv
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
|
||||
|
||||
build-system-fedora:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-fedora-container
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs --enable-crypto-afalg --enable-rust
|
||||
TARGETS: microblaze-softmmu mips-softmmu
|
||||
xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
build-system-fedora-rust-nightly:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-fedora-rust-nightly-container
|
||||
variables:
|
||||
IMAGE: fedora-rust-nightly
|
||||
CONFIGURE_ARGS: --disable-docs --enable-rust --enable-strict-rust-lints
|
||||
TARGETS: aarch64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
allow_failure: true
|
||||
|
||||
check-system-fedora:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-fedora
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-fedora:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-fedora
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k
|
||||
arch:riscv32 arch:ppc arch:sparc64
|
||||
|
||||
crash-test-fedora:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-fedora
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
script:
|
||||
- cd build
|
||||
- make NINJA=":" check-venv
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
|
||||
- pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
|
||||
|
||||
build-system-centos:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server
|
||||
--enable-modules --enable-trace-backends=dtrace --enable-docs
|
||||
TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
|
||||
x86_64-softmmu rx-softmmu sh4-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
# Previous QEMU release. Used for cross-version migration tests.
|
||||
build-previous-qemu:
|
||||
extends: .native_build_job_template
|
||||
artifacts:
|
||||
when: on_success
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
- build-previous
|
||||
exclude:
|
||||
- build-previous/**/*.p
|
||||
- build-previous/**/*.a.p
|
||||
- build-previous/**/*.c.o
|
||||
- build-previous/**/*.c.o.d
|
||||
needs:
|
||||
job: amd64-opensuse-leap-container
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
TARGETS: x86_64-softmmu aarch64-softmmu
|
||||
# Override the default flags as we need more to grab the old version
|
||||
GIT_FETCH_EXTRA_FLAGS: --prune --quiet
|
||||
before_script:
|
||||
- source scripts/ci/gitlab-ci-section
|
||||
- export QEMU_PREV_VERSION="$(sed 's/\([0-9.]*\)\.[0-9]*/v\1.0/' VERSION)"
|
||||
- git remote add upstream https://gitlab.com/qemu-project/qemu
|
||||
- git fetch upstream refs/tags/$QEMU_PREV_VERSION:refs/tags/$QEMU_PREV_VERSION
|
||||
- git checkout $QEMU_PREV_VERSION
|
||||
after_script:
|
||||
- mv build build-previous
|
||||
|
||||
.migration-compat-common:
|
||||
extends: .common_test_job_template
|
||||
needs:
|
||||
- job: build-previous-qemu
|
||||
- job: build-system-opensuse
|
||||
# The old QEMU could have bugs unrelated to migration that are
|
||||
# already fixed in the current development branch, so this test
|
||||
# might fail.
|
||||
allow_failure: true
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
script:
|
||||
# Use the migration-tests from the older QEMU tree. This avoids
|
||||
# testing an old QEMU against new features/tests that it is not
|
||||
# compatible with.
|
||||
- cd build-previous
|
||||
# Don't allow python-based tests to run. The
|
||||
# vmstate-checker-script test has a race that causes it to fail
|
||||
# sometimes. It cannot be fixed it because this job runs the test
|
||||
# from the old QEMU version. The test will be removed on master,
|
||||
# but this job will only see the change in the next release.
|
||||
#
|
||||
# TODO: remove this line after 9.2 release
|
||||
- unset PYTHON
|
||||
# old to new
|
||||
- QTEST_QEMU_BINARY_SRC=./qemu-system-${TARGET}
|
||||
QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
|
||||
# new to old
|
||||
- QTEST_QEMU_BINARY_DST=./qemu-system-${TARGET}
|
||||
QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
|
||||
|
||||
# This job needs to be disabled until we can have an aarch64 CPU model that
|
||||
# will both (1) support both KVM and TCG, and (2) provide a stable ABI.
|
||||
# Currently only "-cpu max" can provide (1), however it doesn't guarantee
|
||||
# (2). Mark this test skipped until later.
|
||||
migration-compat-aarch64:
|
||||
extends: .migration-compat-common
|
||||
variables:
|
||||
TARGET: aarch64
|
||||
QEMU_JOB_SKIPPED: 1
|
||||
|
||||
migration-compat-x86_64:
|
||||
extends: .migration-compat-common
|
||||
variables:
|
||||
TARGET: x86_64
|
||||
|
||||
check-system-centos:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-centos
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-centos:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-centos
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:ppc64 arch:or1k arch:s390x arch:x86_64 arch:rx
|
||||
arch:sh4
|
||||
|
||||
build-system-opensuse:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-opensuse-leap-container
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
TARGETS: s390x-softmmu x86_64-softmmu aarch64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
check-system-opensuse:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-system-opensuse
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-system-opensuse:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-opensuse
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: opensuse-leap
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64
|
||||
|
||||
#
|
||||
# Flaky tests. We don't run these by default and they are allow fail
|
||||
# but often the CI system is the only way to trigger the failures.
|
||||
#
|
||||
|
||||
build-system-flaky:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-debian-container
|
||||
variables:
|
||||
IMAGE: debian
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
TARGETS: aarch64-softmmu arm-softmmu mips64el-softmmu
|
||||
ppc64-softmmu rx-softmmu s390x-softmmu sh4-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
|
||||
functional-system-flaky:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-system-flaky
|
||||
artifacts: true
|
||||
allow_failure: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
QEMU_JOB_OPTIONAL: 1
|
||||
QEMU_TEST_FLAKY_TESTS: 1
|
||||
AVOCADO_TAGS: flaky
|
||||
|
||||
# This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
|
||||
# the configure script. The container doesn't contain Xen headers so
|
||||
# Xen accelerator is not detected / selected. As result it build the
|
||||
# i386-softmmu and x86_64-softmmu with KVM being the single accelerator
|
||||
# available.
|
||||
# Also use a different coroutine implementation (which is only really of
|
||||
# interest to KVM users, i.e. with TCG disabled)
|
||||
build-tcg-disabled:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
script:
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --disable-tcg --audio-drv-list="" --with-coroutine=ucontext
|
||||
--disable-docs --disable-sdl --disable-gtk --disable-vnc
|
||||
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
- make check-unit
|
||||
- make check-qapi-schema
|
||||
- cd tests/qemu-iotests/
|
||||
- ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
|
||||
052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
|
||||
170 171 184 192 194 208 221 226 227 236 253 277 image-fleecing
|
||||
- ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
|
||||
124 132 139 142 144 145 151 152 155 157 165 194 196 200 202
|
||||
208 209 216 218 227 234 246 247 248 250 254 255 257 258
|
||||
260 261 262 263 264 270 272 273 277 279 image-fleecing
|
||||
- cd ../..
|
||||
- make distclean
|
||||
|
||||
build-user:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-user-cross-container
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system
|
||||
--target-list-exclude=alpha-linux-user,sh4-linux-user
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-static:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-user-cross-container
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-system --static
|
||||
--target-list-exclude=alpha-linux-user,sh4-linux-user
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# targets stuck on older compilers
|
||||
build-legacy:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-legacy-cross-container
|
||||
variables:
|
||||
IMAGE: debian-legacy-test-cross
|
||||
TARGETS: alpha-linux-user alpha-softmmu sh4-linux-user
|
||||
CONFIGURE_ARGS: --disable-tools
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-user-hexagon:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: hexagon-cross-container
|
||||
variables:
|
||||
IMAGE: debian-hexagon-cross
|
||||
TARGETS: hexagon-linux-user
|
||||
CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# Build the softmmu targets we have check-tcg tests and compilers in
|
||||
# our omnibus all-test-cross container. Those targets that haven't got
|
||||
# Debian cross compiler support need to use special containers.
|
||||
build-some-softmmu:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-user-cross-container
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug
|
||||
TARGETS: arm-softmmu aarch64-softmmu i386-softmmu riscv64-softmmu
|
||||
s390x-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
build-loongarch64:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: loongarch-debian-cross-container
|
||||
variables:
|
||||
IMAGE: debian-loongarch-cross
|
||||
CONFIGURE_ARGS: --disable-tools --enable-debug
|
||||
TARGETS: loongarch64-linux-user loongarch64-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
# We build tricore in a very minimal tricore only container
|
||||
build-tricore-softmmu:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: tricore-debian-cross-container
|
||||
variables:
|
||||
IMAGE: debian-tricore-cross
|
||||
CONFIGURE_ARGS: --disable-tools --disable-fdt --enable-debug
|
||||
TARGETS: tricore-softmmu
|
||||
MAKE_CHECK_ARGS: check-tcg
|
||||
|
||||
clang-system:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-fedora-container
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-ubsan
|
||||
--extra-cflags=-fno-sanitize-recover=undefined
|
||||
TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu s390x-softmmu
|
||||
MAKE_CHECK_ARGS: check-qtest check-tcg
|
||||
|
||||
clang-user:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-user-cross-container
|
||||
timeout: 70m
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system --enable-ubsan
|
||||
--target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
|
||||
--extra-cflags=-fno-sanitize-recover=undefined
|
||||
MAKE_CHECK_ARGS: check-unit check-tcg
|
||||
|
||||
# Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
|
||||
# On gitlab runners, default value sometimes end up calling 2 lds concurrently and
|
||||
# triggers an Out-Of-Memory error
|
||||
#
|
||||
# Since slirp callbacks are used in QEMU Timers, we cannot use libslirp with
|
||||
# CFI builds, and thus have to disable it here.
|
||||
#
|
||||
# Split in three sets of build/check/avocado to limit the execution time of each
|
||||
# job
|
||||
build-cfi-aarch64:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
- job: amd64-fedora-container
|
||||
variables:
|
||||
LD_JOBS: 1
|
||||
AR: llvm-ar
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
|
||||
--enable-safe-stack --disable-slirp
|
||||
TARGETS: aarch64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
# FIXME: This job is often failing, likely due to out-of-memory problems in
|
||||
# the constrained containers of the shared runners. Thus this is marked as
|
||||
# skipped until the situation has been solved.
|
||||
QEMU_JOB_SKIPPED: 1
|
||||
timeout: 90m
|
||||
|
||||
check-cfi-aarch64:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-aarch64
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-aarch64:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-aarch64
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
|
||||
build-cfi-ppc64-s390x:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
- job: amd64-fedora-container
|
||||
variables:
|
||||
LD_JOBS: 1
|
||||
AR: llvm-ar
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
|
||||
--enable-safe-stack --disable-slirp
|
||||
TARGETS: ppc64-softmmu s390x-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
# FIXME: This job is often failing, likely due to out-of-memory problems in
|
||||
# the constrained containers of the shared runners. Thus this is marked as
|
||||
# skipped until the situation has been solved.
|
||||
QEMU_JOB_SKIPPED: 1
|
||||
timeout: 80m
|
||||
|
||||
check-cfi-ppc64-s390x:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-ppc64-s390x
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-ppc64-s390x:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-ppc64-s390x
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
|
||||
build-cfi-x86_64:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
- job: amd64-fedora-container
|
||||
variables:
|
||||
LD_JOBS: 1
|
||||
AR: llvm-ar
|
||||
IMAGE: fedora
|
||||
CONFIGURE_ARGS: --cc=clang --cxx=clang++ --enable-cfi --enable-cfi-debug
|
||||
--enable-safe-stack --disable-slirp
|
||||
TARGETS: x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-build
|
||||
timeout: 70m
|
||||
|
||||
check-cfi-x86_64:
|
||||
extends: .native_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-x86_64
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
functional-cfi-x86_64:
|
||||
extends: .functional_test_job_template
|
||||
needs:
|
||||
- job: build-cfi-x86_64
|
||||
artifacts: true
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
MAKE_CHECK_ARGS: check-avocado check-functional
|
||||
|
||||
tsan-build:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-ubuntu2204-container
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++
|
||||
--enable-trace-backends=ust --disable-slirp
|
||||
TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
|
||||
# Remove when we switch to a distro with clang >= 18
|
||||
# https://github.com/google/sanitizers/issues/1716
|
||||
MAKE: setarch -R make
|
||||
|
||||
# gcov is a GCC features
|
||||
gcov:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-ubuntu2204-container
|
||||
timeout: 80m
|
||||
variables:
|
||||
IMAGE: ubuntu2204
|
||||
CONFIGURE_ARGS: --enable-gcov
|
||||
TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check-unit check-softfloat
|
||||
after_script:
|
||||
- cd build
|
||||
- gcovr --xml-pretty --exclude-unreachable-branches --print-summary
|
||||
-o coverage.xml --root ${CI_PROJECT_DIR} . *.p
|
||||
coverage: /^\s*lines:\s*\d+.\d+\%/
|
||||
artifacts:
|
||||
name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
|
||||
when: always
|
||||
expire_in: 2 days
|
||||
paths:
|
||||
- build/meson-logs/testlog.txt
|
||||
reports:
|
||||
junit: build/meson-logs/testlog.junit.xml
|
||||
coverage_report:
|
||||
coverage_format: cobertura
|
||||
path: build/coverage.xml
|
||||
|
||||
build-oss-fuzz:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-fedora-container
|
||||
variables:
|
||||
IMAGE: fedora
|
||||
script:
|
||||
- mkdir build-oss-fuzz
|
||||
- export LSAN_OPTIONS=suppressions=scripts/oss-fuzz/lsan_suppressions.txt
|
||||
- CC="clang" CXX="clang++" CFLAGS="-fsanitize=address"
|
||||
./scripts/oss-fuzz/build.sh
|
||||
- export ASAN_OPTIONS="fast_unwind_on_malloc=0"
|
||||
- failures=0
|
||||
- for fuzzer in $(find ./build-oss-fuzz/DEST_DIR/ -executable -type f
|
||||
| grep -v slirp); do
|
||||
grep "LLVMFuzzerTestOneInput" ${fuzzer} > /dev/null 2>&1 || continue ;
|
||||
echo Testing ${fuzzer} ... ;
|
||||
"${fuzzer}" -runs=1 -seed=1 || { echo "FAILED:"" ${fuzzer} exit code is $?"; failures=$(($failures+1)); };
|
||||
done
|
||||
- echo "Number of failures:"" $failures"
|
||||
- test $failures = 0
|
||||
|
||||
build-tci:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-debian-user-cross-container
|
||||
variables:
|
||||
IMAGE: debian-all-test-cross
|
||||
script:
|
||||
- TARGETS="aarch64 arm hppa m68k microblaze ppc64 s390x x86_64"
|
||||
- mkdir build
|
||||
- cd build
|
||||
- ../configure --enable-tcg-interpreter --disable-kvm --disable-docs --disable-gtk --disable-vnc
|
||||
--target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
|
||||
|| { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
- make -j"$JOBS"
|
||||
- make tests/qtest/boot-serial-test tests/qtest/cdrom-test tests/qtest/pxe-test
|
||||
- for tg in $TARGETS ; do
|
||||
export QTEST_QEMU_BINARY="./qemu-system-${tg}" ;
|
||||
./tests/qtest/boot-serial-test || exit 1 ;
|
||||
./tests/qtest/cdrom-test || exit 1 ;
|
||||
done
|
||||
- QTEST_QEMU_BINARY="./qemu-system-x86_64" ./tests/qtest/pxe-test
|
||||
- QTEST_QEMU_BINARY="./qemu-system-s390x" ./tests/qtest/pxe-test -m slow
|
||||
- make check-tcg
|
||||
|
||||
# Check our reduced build configurations
|
||||
build-without-defaults:
|
||||
extends: .native_build_job_template
|
||||
needs:
|
||||
job: amd64-centos9-container
|
||||
variables:
|
||||
IMAGE: centos9
|
||||
CONFIGURE_ARGS:
|
||||
--without-default-devices
|
||||
--without-default-features
|
||||
--disable-fdt
|
||||
--disable-pie
|
||||
--disable-qom-cast-debug
|
||||
--disable-strip
|
||||
--target-list-exclude=aarch64-softmmu,microblaze-softmmu,mips64-softmmu,mipsel-softmmu,ppc64-softmmu,sh4el-softmmu,xtensa-softmmu,x86_64-softmmu
|
||||
MAKE_CHECK_ARGS: check
|
||||
|
||||
build-libvhost-user:
|
||||
extends: .base_job_template
|
||||
stage: build
|
||||
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
|
||||
needs:
|
||||
job: amd64-fedora-container
|
||||
script:
|
||||
- mkdir subprojects/libvhost-user/build
|
||||
- cd subprojects/libvhost-user/build
|
||||
- meson
|
||||
- ninja
|
||||
|
||||
# No targets are built here, just tools, docs, and unit tests. This
|
||||
# also feeds into the eventual documentation deployment steps later
|
||||
build-tools-and-docs-debian:
|
||||
extends:
|
||||
- .native_build_job_template
|
||||
- .native_build_artifact_template
|
||||
needs:
|
||||
job: amd64-debian-container
|
||||
# when running on 'master' we use pre-existing container
|
||||
optional: true
|
||||
variables:
|
||||
IMAGE: debian
|
||||
MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
|
||||
CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
|
||||
QEMU_JOB_PUBLISH: 1
|
||||
|
||||
# Prepare for GitLab pages deployment. Anything copied into the
|
||||
# "public" directory will be deployed to $USER.gitlab.io/$PROJECT
|
||||
#
|
||||
# GitLab publishes from any branch that triggers a CI pipeline
|
||||
#
|
||||
# For the main repo we don't want to publish from 'staging'
|
||||
# since that content may not be pushed, nor do we wish to
|
||||
# publish from 'stable-NNN' branches as that content is outdated.
|
||||
# Thus we restrict to just the default branch
|
||||
#
|
||||
# For contributor forks we want to publish from any repo so
|
||||
# that users can see the results of their commits, regardless
|
||||
# of what topic branch they're currently using
|
||||
pages:
|
||||
extends: .base_job_template
|
||||
image: $CI_REGISTRY_IMAGE/qemu/debian:$QEMU_CI_CONTAINER_TAG
|
||||
stage: test
|
||||
needs:
|
||||
- job: build-tools-and-docs-debian
|
||||
script:
|
||||
- mkdir -p public
|
||||
# HTML-ised source tree
|
||||
- make gtags
|
||||
# We unset variables to work around a bug in some htags versions
|
||||
# which causes it to fail when the environment is large
|
||||
- CI_COMMIT_MESSAGE= CI_COMMIT_TAG_MESSAGE= htags
|
||||
-anT --tree-view=filetree -m qemu_init
|
||||
-t "Welcome to the QEMU sourcecode"
|
||||
- mv HTML public/src
|
||||
# Project documentation
|
||||
- make -C build install DESTDIR=$(pwd)/temp-install
|
||||
- mv temp-install/usr/local/share/doc/qemu/* public/
|
||||
artifacts:
|
||||
when: on_success
|
||||
paths:
|
||||
- public
|
||||
variables:
|
||||
QEMU_JOB_PUBLISH: 1
|
||||
|
||||
coverity:
|
||||
image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
|
||||
stage: build
|
||||
allow_failure: true
|
||||
timeout: 3h
|
||||
needs:
|
||||
- job: amd64-fedora-container
|
||||
optional: true
|
||||
before_script:
|
||||
- dnf install -y curl wget
|
||||
script:
|
||||
# would be nice to cancel the job if over quota (https://gitlab.com/gitlab-org/gitlab/-/issues/256089)
|
||||
# for example:
|
||||
# curl --request POST --header "PRIVATE-TOKEN: $CI_JOB_TOKEN" "${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/jobs/${CI_JOB_ID}/cancel
|
||||
- 'scripts/coverity-scan/run-coverity-scan --check-upload-only || { exitcode=$?; if test $exitcode = 1; then
|
||||
exit 0;
|
||||
else
|
||||
exit $exitcode;
|
||||
fi; };
|
||||
scripts/coverity-scan/run-coverity-scan --update-tools-only > update-tools.log 2>&1 || { cat update-tools.log; exit 1; };
|
||||
scripts/coverity-scan/run-coverity-scan --no-update-tools'
|
||||
rules:
|
||||
- if: '$COVERITY_TOKEN == null'
|
||||
when: never
|
||||
- if: '$COVERITY_EMAIL == null'
|
||||
when: never
|
||||
# Never included on upstream pipelines, except for schedules
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
|
||||
when: on_success
|
||||
- if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
|
||||
when: never
|
||||
# Forks don't get any pipeline unless QEMU_CI=1 or QEMU_CI=2 is set
|
||||
- if: '$QEMU_CI != "1" && $QEMU_CI != "2"'
|
||||
when: never
|
||||
# Always manual on forks even if $QEMU_CI == "2"
|
||||
- when: manual
.gitlab-ci.d/check-dco.py (new executable file, 96 lines)
@@ -0,0 +1,96 @@
#!/usr/bin/env python3
|
||||
#
|
||||
# check-dco.py: validate all commits are signed off
|
||||
#
|
||||
# Copyright (C) 2020 Red Hat, Inc.
|
||||
#
|
||||
# SPDX-License-Identifier: GPL-2.0-or-later
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import sys
|
||||
import subprocess
|
||||
|
||||
namespace = "qemu-project"
|
||||
if len(sys.argv) >= 2:
|
||||
namespace = sys.argv[1]
|
||||
|
||||
cwd = os.getcwd()
|
||||
reponame = os.path.basename(cwd)
|
||||
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)
|
||||
|
||||
print(f"adding upstream git repo @ {repourl}")
|
||||
subprocess.check_call(["git", "remote", "add", "check-dco", repourl])
|
||||
subprocess.check_call(["git", "fetch", "check-dco", "master"])
|
||||
|
||||
ancestor = subprocess.check_output(["git", "merge-base",
|
||||
"check-dco/master", "HEAD"],
|
||||
universal_newlines=True)
|
||||
|
||||
ancestor = ancestor.strip()
|
||||
|
||||
subprocess.check_call(["git", "remote", "rm", "check-dco"])
|
||||
|
||||
errors = False
|
||||
|
||||
print("\nChecking for 'Signed-off-by: NAME <EMAIL>' " +
|
||||
"on all commits since %s...\n" % ancestor)
|
||||
|
||||
log = subprocess.check_output(["git", "log", "--format=%H %s",
|
||||
ancestor + "..."],
|
||||
universal_newlines=True)
|
||||
|
||||
if log == "":
|
||||
commits = []
|
||||
else:
|
||||
commits = [[c[0:40], c[41:]] for c in log.strip().split("\n")]
|
||||
|
||||
for sha, subject in commits:
|
||||
|
||||
msg = subprocess.check_output(["git", "show", "-s", sha],
|
||||
universal_newlines=True)
|
||||
lines = msg.strip().split("\n")
|
||||
|
||||
print("🔍 %s %s" % (sha, subject))
|
||||
sob = False
|
||||
for line in lines:
|
||||
if "Signed-off-by:" in line:
|
||||
sob = True
|
||||
if "localhost" in line:
|
||||
print(" ❌ FAIL: bad email in %s" % line)
|
||||
errors = True
|
||||
|
||||
if not sob:
|
||||
print(" ❌ FAIL missing Signed-off-by tag")
|
||||
errors = True
|
||||
|
||||
if errors:
|
||||
print("""
|
||||
|
||||
❌ ERROR: One or more commits are missing a valid Signed-off-By tag.
|
||||
|
||||
|
||||
This project requires all contributors to assert that their contributions
|
||||
are provided in compliance with the terms of the Developer's Certificate
|
||||
of Origin 1.1 (DCO):
|
||||
|
||||
https://developercertificate.org/
|
||||
|
||||
To indicate acceptance of the DCO every commit must have a tag
|
||||
|
||||
Signed-off-by: YOUR NAME <EMAIL>
|
||||
|
||||
where "YOUR NAME" is your commonly known identity in the context
|
||||
of the community.
|
||||
|
||||
This can be achieved by passing the "-s" flag to the "git commit" command.
|
||||
|
||||
To bulk update all commits on current branch "git rebase" can be used:
|
||||
|
||||
git rebase -i master -x 'git commit --amend --no-edit -s'
|
||||
|
||||
""")
|
||||
|
||||
sys.exit(1)
|
||||
|
||||
sys.exit(0)
.gitlab-ci.d/check-patch.py (new executable file, 55 lines)
@@ -0,0 +1,55 @@
#!/usr/bin/env python3
#
# check-patch.py: run checkpatch.pl across all commits in a branch
#
# Copyright (C) 2020 Red Hat, Inc.
#
# SPDX-License-Identifier: GPL-2.0-or-later

import os
import os.path
import sys
import subprocess

namespace = "qemu-project"
if len(sys.argv) >= 2:
    namespace = sys.argv[1]

cwd = os.getcwd()
reponame = os.path.basename(cwd)
repourl = "https://gitlab.com/%s/%s.git" % (namespace, reponame)

print(f"adding upstream git repo @ {repourl}")
# GitLab CI environment does not give us any direct info about the
# base for the user's branch. We thus need to figure out a common
# ancestor between the user's branch and current git master.
subprocess.check_call(["git", "remote", "add", "check-patch", repourl])
subprocess.check_call(["git", "fetch", "check-patch", "master"])

ancestor = subprocess.check_output(["git", "merge-base",
                                    "check-patch/master", "HEAD"],
                                   universal_newlines=True)

ancestor = ancestor.strip()

log = subprocess.check_output(["git", "log", "--format=%H %s",
                               ancestor + "..."],
                              universal_newlines=True)

subprocess.check_call(["git", "remote", "rm", "check-patch"])

if log == "":
    print("\nNo commits since %s, skipping checks\n" % ancestor)
    sys.exit(0)

errors = False

print("\nChecking all commits since %s...\n" % ancestor, flush=True)

ret = subprocess.run(["scripts/checkpatch.pl", "--terse", ancestor + "..."])

if ret.returncode != 0:
    print(" ❌ FAIL one or more commits failed scripts/checkpatch.pl")
    sys.exit(1)

sys.exit(0)
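
Aside (not part of the commit above): the check this script performs in CI can
be approximated locally, since it ultimately just runs checkpatch.pl over the
merge-base range against master:

    # same invocation the CI script uses, with the ancestor computed locally
    scripts/checkpatch.pl --terse "$(git merge-base master HEAD)..."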
.gitlab-ci.d/cirrus.yml (new file, 75 lines)
@@ -0,0 +1,75 @@
# Jobs that we delegate to Cirrus CI because they require an operating
# system other than Linux. These jobs will only run if the required
# setup has been performed on the GitLab account.
#
# The Cirrus CI configuration is generated by replacing target-specific
# variables in a generic template: some of these variables are provided
# when the GitLab CI job is defined, others are taken from a shell
# snippet generated using lcitool.
#
# Note that the $PATH environment variable has to be treated with
# special care, because we can't just override it at the GitLab CI job
# definition level or we risk breaking it completely.
.cirrus_build_job:
  extends: .base_job_template
  stage: build
  image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:latest
  needs: []
  # 20 mins larger than "timeout_in" in cirrus/build.yml
  # as there's often a 5-10 minute delay before Cirrus CI
  # actually starts the task
  timeout: 80m
  script:
    - source .gitlab-ci.d/cirrus/$NAME.vars
    - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
          -e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
          -e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
          -e "s|[@]CIRRUS_VM_INSTANCE_TYPE@|$CIRRUS_VM_INSTANCE_TYPE|g"
          -e "s|[@]CIRRUS_VM_IMAGE_SELECTOR@|$CIRRUS_VM_IMAGE_SELECTOR|g"
          -e "s|[@]CIRRUS_VM_IMAGE_NAME@|$CIRRUS_VM_IMAGE_NAME|g"
          -e "s|[@]CIRRUS_VM_CPUS@|$CIRRUS_VM_CPUS|g"
          -e "s|[@]CIRRUS_VM_RAM@|$CIRRUS_VM_RAM|g"
          -e "s|[@]UPDATE_COMMAND@|$UPDATE_COMMAND|g"
          -e "s|[@]INSTALL_COMMAND@|$INSTALL_COMMAND|g"
          -e "s|[@]PATH@|$PATH_EXTRA${PATH_EXTRA:+:}\$PATH|g"
          -e "s|[@]PKG_CONFIG_PATH@|$PKG_CONFIG_PATH|g"
          -e "s|[@]PKGS@|$PKGS|g"
          -e "s|[@]MAKE@|$MAKE|g"
          -e "s|[@]PYTHON@|$PYTHON|g"
          -e "s|[@]PIP3@|$PIP3|g"
          -e "s|[@]PYPI_PKGS@|$PYPI_PKGS|g"
          -e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
          -e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
          <.gitlab-ci.d/cirrus/build.yml >.gitlab-ci.d/cirrus/$NAME.yml
    - cat .gitlab-ci.d/cirrus/$NAME.yml
    - cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
  variables:
    QEMU_JOB_CIRRUS: 1

x64-freebsd-14-build:
  extends: .cirrus_build_job
  variables:
    NAME: freebsd-14
    CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
    CIRRUS_VM_IMAGE_SELECTOR: image_family
    CIRRUS_VM_IMAGE_NAME: freebsd-14-1
    CIRRUS_VM_CPUS: 8
    CIRRUS_VM_RAM: 8G
    UPDATE_COMMAND: pkg update; pkg upgrade -y
    INSTALL_COMMAND: pkg install -y
    CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblaze-softmmu,mips64el-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4eb-softmmu,xtensa-softmmu
    TEST_TARGETS: check

aarch64-macos-build:
  extends: .cirrus_build_job
  variables:
    NAME: macos-14
    CIRRUS_VM_INSTANCE_TYPE: macos_instance
    CIRRUS_VM_IMAGE_SELECTOR: image
    CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-runner:sonoma
    UPDATE_COMMAND: brew update
    INSTALL_COMMAND: brew install
    PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin
    PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig
    CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblazeel-softmmu,mips64-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4-softmmu,xtensaeb-softmmu
    TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
.gitlab-ci.d/cirrus/README.rst (new file, 54 lines)
@@ -0,0 +1,54 @@
Cirrus CI integration
=====================

GitLab CI shared runners only provide a docker environment running on Linux.
While it is possible to provide private runners for non-Linux platforms this
is not something most contributors/maintainers will wish to do.

To work around this limitation, we take advantage of `Cirrus CI`_'s free
offering: more specifically, we use the `cirrus-run`_ script to trigger Cirrus
CI jobs from GitLab CI jobs so that Cirrus CI job output is integrated into
the main GitLab CI pipeline dashboard.

There is, however, some one-time setup required. If you want FreeBSD and macOS
builds to happen when you push to your GitLab repository, you need to

* set up a GitHub repository for the project, eg. ``yourusername/qemu``.
  This repository needs to exist for cirrus-run to work, but it doesn't need to
  be kept up to date, so you can create it and then forget about it;

* enable the `Cirrus CI GitHub app`_ for your GitHub account;

* sign up for Cirrus CI. It's enough to log into the website using your GitHub
  account;

* grab an API token from the `Cirrus CI settings`_ page;

* it may be necessary to push an empty ``.cirrus.yml`` file to your github fork
  for Cirrus CI to properly recognize the project. You can check whether
  Cirrus CI knows about your project by navigating to:

  ``https://cirrus-ci.com/yourusername/qemu``

* in the *CI/CD / Variables* section of the settings page for your GitLab
  repository, create two new variables:

  * ``CIRRUS_GITHUB_REPO``, containing the name of the GitHub repository
    created earlier, eg. ``yourusername/qemu``;

  * ``CIRRUS_API_TOKEN``, containing the Cirrus CI API token generated earlier.
    This variable **must** be marked as *Masked*, because anyone with knowledge
    of it can impersonate you as far as Cirrus CI is concerned.

  Neither of these variables should be marked as *Protected*, because in
  general you'll want to be able to trigger Cirrus CI builds from non-protected
  branches.

Once this one-time setup is complete, you can just keep pushing to your GitLab
repository as usual and you'll automatically get the additional CI coverage.


.. _Cirrus CI GitHub app: https://github.com/marketplace/cirrus-ci
.. _Cirrus CI settings: https://cirrus-ci.com/settings/profile/
.. _Cirrus CI: https://cirrus-ci.com/
.. _cirrus-run: https://github.com/sio/cirrus-run/
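
As an aside (not part of the files added by this commit): the two CI/CD
variables described in the README above can also be created through GitLab's
documented project-variables REST API instead of the web UI; the project ID,
token and repository name below are placeholders:

    # non-protected variable naming the GitHub repository used by cirrus-run
    curl --request POST --header "PRIVATE-TOKEN: <your-gitlab-token>" \
         "https://gitlab.com/api/v4/projects/<project-id>/variables" \
         --form "key=CIRRUS_GITHUB_REPO" --form "value=yourusername/qemu"
    # the Cirrus CI API token, created as a masked variable
    curl --request POST --header "PRIVATE-TOKEN: <your-gitlab-token>" \
         "https://gitlab.com/api/v4/projects/<project-id>/variables" \
         --form "key=CIRRUS_API_TOKEN" --form "value=<cirrus-api-token>" \
         --form "masked=true"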
.gitlab-ci.d/cirrus/build.yml (new file, 42 lines)
@@ -0,0 +1,42 @@
@CIRRUS_VM_INSTANCE_TYPE@:
  @CIRRUS_VM_IMAGE_SELECTOR@: @CIRRUS_VM_IMAGE_NAME@
  cpu: @CIRRUS_VM_CPUS@
  memory: @CIRRUS_VM_RAM@

env:
  CIRRUS_CLONE_DEPTH: 1
  CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
  CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
  CI_COMMIT_SHA: "@CI_COMMIT_SHA@"
  PATH: "@PATH@"
  PKG_CONFIG_PATH: "@PKG_CONFIG_PATH@"
  PYTHON: "@PYTHON@"
  MAKE: "@MAKE@"
  CONFIGURE_ARGS: "@CONFIGURE_ARGS@"
  TEST_TARGETS: "@TEST_TARGETS@"

build_task:
  # A little shorter than GitLab timeout in ../cirrus.yml
  timeout_in: 60m
  install_script:
    - @UPDATE_COMMAND@
    - @INSTALL_COMMAND@ @PKGS@
    - if test -n "@PYPI_PKGS@" ; then PYLIB=$(@PYTHON@ -c 'import sysconfig; print(sysconfig.get_path("stdlib"))'); rm -f $PYLIB/EXTERNALLY-MANAGED; @PIP3@ install @PYPI_PKGS@ ; fi
  clone_script:
    - git clone --depth 100 "$CI_REPOSITORY_URL" .
    - git fetch origin "$CI_COMMIT_REF_NAME"
    - git reset --hard "$CI_COMMIT_SHA"
  step_script:
    - mkdir build
    - cd build
    - ../configure --enable-werror $CONFIGURE_ARGS
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - $MAKE -j$(sysctl -n hw.ncpu)
    - for TARGET in $TEST_TARGETS ;
      do
        $MAKE -j$(sysctl -n hw.ncpu) $TARGET V=1 ;
      done
  always:
    build_result_artifacts:
      path: build/meson-logs/*log.txt
      type: text/plain
.gitlab-ci.d/cirrus/freebsd-14.vars (new file, 16 lines)
@@ -0,0 +1,16 @@
# THIS FILE WAS AUTO-GENERATED
#
# $ lcitool variables freebsd-14 qemu
#
# https://gitlab.com/libvirt/libvirt-ci

CCACHE='/usr/local/bin/ccache'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/usr/local/bin/gmake'
NINJA='/usr/local/bin/ninja'
PACKAGING_COMMAND='pkg'
PIP3='/usr/local/bin/pip'
PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk-vnc gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py311-numpy py311-pillow py311-pip py311-pyyaml py311-sphinx py311-sphinx_rtd_theme py311-tomli python3 rpm2cpio rust rust-bindgen-cli sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
PYPI_PKGS=''
PYTHON='/usr/local/bin/python3'
16
.gitlab-ci.d/cirrus/macos-14.vars
Normal file
@ -0,0 +1,16 @@
# THIS FILE WAS AUTO-GENERATED
#
#  $ lcitool variables macos-14 qemu
#
# https://gitlab.com/libvirt/libvirt-ci

CCACHE='/opt/homebrew/bin/ccache'
CPAN_PKGS=''
CROSS_PKGS=''
MAKE='/opt/homebrew/bin/gmake'
NINJA='/opt/homebrew/bin/ninja'
PACKAGING_COMMAND='brew'
PIP3='/opt/homebrew/bin/pip3'
PKGS='bash bc bindgen bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 gtk-vnc jemalloc jpeg-turbo json-c libcbor libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio rust sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd'
PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
PYTHON='/opt/homebrew/bin/python3'
12
.gitlab-ci.d/container-core.yml
Normal file
@ -0,0 +1,12 @@
include:
  - local: '/.gitlab-ci.d/container-template.yml'

amd64-centos9-container:
  extends: .container_job_template
  variables:
    NAME: centos9

amd64-fedora-container:
  extends: .container_job_template
  variables:
    NAME: fedora
96
.gitlab-ci.d/container-cross.yml
Normal file
@ -0,0 +1,96 @@
amd64-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-amd64-cross

amd64-debian-user-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-all-test-cross

amd64-debian-legacy-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-legacy-test-cross

arm64-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-arm64-cross

armhf-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-armhf-cross

hexagon-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-hexagon-cross

loongarch-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-loongarch-cross

i686-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-i686-cross

mips64el-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-mips64el-cross

mipsel-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-mipsel-cross

ppc64el-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-ppc64el-cross

riscv64-debian-cross-container:
  extends: .container_job_template
  stage: containers
  # as we are currently based on 'sid/unstable' we may break so...
  allow_failure: true
  variables:
    NAME: debian-riscv64-cross
    QEMU_JOB_OPTIONAL: 1

s390x-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-s390x-cross

tricore-debian-cross-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian-tricore-cross

xtensa-debian-cross-container:
  extends: .container_job_template
  variables:
    NAME: debian-xtensa-cross

win64-fedora-cross-container:
  extends: .container_job_template
  variables:
    NAME: fedora-win64-cross
21
.gitlab-ci.d/container-template.yml
Normal file
@ -0,0 +1,21 @@
.container_job_template:
  extends: .base_job_template
  image: docker:latest
  stage: containers
  services:
    - docker:dind
  before_script:
    - export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:$QEMU_CI_CONTAINER_TAG"
    # Always ':latest' because we always use upstream as a common cache source
    - export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
    - docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
    - until docker info; do sleep 1; done
  script:
    - echo "TAG:$TAG"
    - echo "COMMON_TAG:$COMMON_TAG"
    - docker build --tag "$TAG" --cache-from "$TAG" --cache-from "$COMMON_TAG"
      --build-arg BUILDKIT_INLINE_CACHE=1
      -f "tests/docker/dockerfiles/$NAME.docker" "."
    - docker push "$TAG"
  after_script:
    - docker logout
35
.gitlab-ci.d/containers.yml
Normal file
@ -0,0 +1,35 @@
include:
  - local: '/.gitlab-ci.d/container-core.yml'
  - local: '/.gitlab-ci.d/container-cross.yml'

amd64-alpine-container:
  extends: .container_job_template
  variables:
    NAME: alpine

amd64-debian-container:
  extends: .container_job_template
  stage: containers
  variables:
    NAME: debian

amd64-ubuntu2204-container:
  extends: .container_job_template
  variables:
    NAME: ubuntu2204

amd64-opensuse-leap-container:
  extends: .container_job_template
  variables:
    NAME: opensuse-leap

python-container:
  extends: .container_job_template
  variables:
    NAME: python

amd64-fedora-rust-nightly-container:
  extends: .container_job_template
  variables:
    NAME: fedora-rust-nightly
  allow_failure: true
133
.gitlab-ci.d/crossbuild-template.yml
Normal file
@ -0,0 +1,133 @@
.cross_system_build_job:
  extends: .base_job_template
  stage: build
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
  cache:
    paths:
      - ccache
    key: "$CI_JOB_NAME"
    when: always
  timeout: 80m
  before_script:
    - source scripts/ci/gitlab-ci-section
    - section_start setup "Pre-script setup"
    - JOBS=$(expr $(nproc) + 1)
    - cat /packages.txt
    - section_end setup
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - export PATH="$CCACHE_WRAPPERSDIR:$PATH"
    - mkdir build
    - cd build
    - ccache --zero-stats
    - section_start configure "Running configure"
    - ../configure --enable-werror --disable-docs --enable-fdt=system
      --disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
      --target-list-exclude="arm-softmmu
        i386-softmmu microblaze-softmmu mips-softmmu mipsel-softmmu
        mips64-softmmu ppc-softmmu riscv32-softmmu sh4-softmmu
        sparc-softmmu xtensa-softmmu $CROSS_SKIP_TARGETS"
    - section_end configure
    - section_start build "Building QEMU"
    - make -j"$JOBS" all check-build
    - section_end build
    - section_start test "Running tests"
    - if test -n "$MAKE_CHECK_ARGS";
      then
        $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
      fi
    - section_end test
    - section_start installer "Building the installer"
    - if grep -q "EXESUF=.exe" config-host.mak;
      then make installer;
      version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
      mv -v qemu-setup*.exe qemu-setup-${version}.exe;
      fi
    - section_end installer
    - ccache --show-stats

# Job to cross-build specific accelerators.
#
# Set the $ACCEL variable to select the specific accelerator (default to
# KVM), and set extra options (such as disabling other accelerators) via the
# $EXTRA_CONFIGURE_OPTS variable.
.cross_accel_build_job:
  extends: .base_job_template
  stage: build
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
  timeout: 60m
  cache:
    paths:
      - ccache/
    key: "$CI_JOB_NAME"
  before_script:
    - source scripts/ci/gitlab-ci-section
    - JOBS=$(expr $(nproc) + 1)
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - export PATH="$CCACHE_WRAPPERSDIR:$PATH"
    - mkdir build
    - cd build
    - section_start configure "Running configure"
    - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
      --disable-tools --enable-${ACCEL:-kvm} $EXTRA_CONFIGURE_OPTS
    - section_end configure
    - section_start build "Building QEMU"
    - make -j"$JOBS" all check-build
    - section_end build
    - section_start test "Running tests"
    - if test -n "$MAKE_CHECK_ARGS";
      then
        $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
      fi
    - section_end test

.cross_user_build_job:
  extends: .base_job_template
  stage: build
  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
  cache:
    paths:
      - ccache/
    key: "$CI_JOB_NAME"
  before_script:
    - source scripts/ci/gitlab-ci-section
    - JOBS=$(expr $(nproc) + 1)
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - mkdir build
    - cd build
    - section_start configure "Running configure"
    - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
      --disable-system --target-list-exclude="aarch64_be-linux-user
        alpha-linux-user m68k-linux-user microblazeel-linux-user
        or1k-linux-user ppc-linux-user sparc-linux-user
        xtensa-linux-user $CROSS_SKIP_TARGETS"
    - section_end configure
    - section_start build "Building QEMU"
    - make -j"$JOBS" all check-build
    - section_end build
    - section_start test "Running tests"
    - if test -n "$MAKE_CHECK_ARGS";
      then
        $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
      fi
    - section_end test

# We can still run some tests on some of our cross build jobs. They can add this
# template to their extends to save the build logs and test results
.cross_test_artifacts:
  artifacts:
    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
    when: always
    expire_in: 7 days
    paths:
      - build/meson-logs/testlog.txt
    reports:
      junit: build/meson-logs/testlog.junit.xml
202
.gitlab-ci.d/crossbuilds.yml
Normal file
@ -0,0 +1,202 @@
include:
  - local: '/.gitlab-ci.d/crossbuild-template.yml'

cross-armhf-user:
  extends: .cross_user_build_job
  needs:
    job: armhf-debian-cross-container
  variables:
    IMAGE: debian-armhf-cross

cross-arm64-system:
  extends: .cross_system_build_job
  needs:
    job: arm64-debian-cross-container
  variables:
    IMAGE: debian-arm64-cross

cross-arm64-user:
  extends: .cross_user_build_job
  needs:
    job: arm64-debian-cross-container
  variables:
    IMAGE: debian-arm64-cross

cross-arm64-kvm-only:
  extends: .cross_accel_build_job
  needs:
    job: arm64-debian-cross-container
  variables:
    IMAGE: debian-arm64-cross
    EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-features

cross-i686-system:
  extends:
    - .cross_system_build_job
    - .cross_test_artifacts
  needs:
    job: i686-debian-cross-container
  variables:
    IMAGE: debian-i686-cross
    EXTRA_CONFIGURE_OPTS: --disable-kvm
    MAKE_CHECK_ARGS: check-qtest

cross-i686-user:
  extends:
    - .cross_user_build_job
    - .cross_test_artifacts
  needs:
    job: i686-debian-cross-container
  variables:
    IMAGE: debian-i686-cross
    MAKE_CHECK_ARGS: check

cross-i686-tci:
  extends:
    - .cross_accel_build_job
    - .cross_test_artifacts
  timeout: 60m
  needs:
    job: i686-debian-cross-container
  variables:
    IMAGE: debian-i686-cross
    ACCEL: tcg-interpreter
    EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins --disable-kvm
    # Force tests to run with reduced parallelism, to see whether this
    # reduces the flakiness of this CI job. The CI
    # environment by default shows us 8 CPUs and so we
    # would otherwise be using a parallelism of 9.
    MAKE_CHECK_ARGS: check check-tcg -j2

cross-mipsel-system:
  extends: .cross_system_build_job
  needs:
    job: mipsel-debian-cross-container
  variables:
    IMAGE: debian-mipsel-cross

cross-mipsel-user:
  extends: .cross_user_build_job
  needs:
    job: mipsel-debian-cross-container
  variables:
    IMAGE: debian-mipsel-cross

cross-mips64el-system:
  extends: .cross_system_build_job
  needs:
    job: mips64el-debian-cross-container
  variables:
    IMAGE: debian-mips64el-cross

cross-mips64el-user:
  extends: .cross_user_build_job
  needs:
    job: mips64el-debian-cross-container
  variables:
    IMAGE: debian-mips64el-cross

cross-ppc64el-system:
  extends: .cross_system_build_job
  needs:
    job: ppc64el-debian-cross-container
  variables:
    IMAGE: debian-ppc64el-cross

cross-ppc64el-user:
  extends: .cross_user_build_job
  needs:
    job: ppc64el-debian-cross-container
  variables:
    IMAGE: debian-ppc64el-cross

cross-ppc64el-kvm-only:
  extends: .cross_accel_build_job
  needs:
    job: ppc64el-debian-cross-container
  variables:
    IMAGE: debian-ppc64el-cross
    EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-devices

# The riscv64 cross-builds currently use a 'sid' container to get
# compilers and libraries. Until something more stable is found we
# allow_failure so as not to block CI.
cross-riscv64-system:
  extends: .cross_system_build_job
  allow_failure: true
  needs:
    job: riscv64-debian-cross-container
  variables:
    IMAGE: debian-riscv64-cross

cross-riscv64-user:
  extends: .cross_user_build_job
  allow_failure: true
  needs:
    job: riscv64-debian-cross-container
  variables:
    IMAGE: debian-riscv64-cross

cross-s390x-system:
  extends: .cross_system_build_job
  needs:
    job: s390x-debian-cross-container
  variables:
    IMAGE: debian-s390x-cross

cross-s390x-user:
  extends: .cross_user_build_job
  needs:
    job: s390x-debian-cross-container
  variables:
    IMAGE: debian-s390x-cross

cross-s390x-kvm-only:
  extends: .cross_accel_build_job
  needs:
    job: s390x-debian-cross-container
  variables:
    IMAGE: debian-s390x-cross
    EXTRA_CONFIGURE_OPTS: --disable-tcg --enable-trace-backends=ftrace

cross-mips64el-kvm-only:
  extends: .cross_accel_build_job
  needs:
    job: mips64el-debian-cross-container
  variables:
    IMAGE: debian-mips64el-cross
    EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu

cross-win64-system:
  extends: .cross_system_build_job
  needs:
    job: win64-fedora-cross-container
  variables:
    IMAGE: fedora-win64-cross
    EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins
    CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu
                        m68k-softmmu microblazeel-softmmu
                        or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
                        tricore-softmmu xtensaeb-softmmu
  artifacts:
    when: on_success
    paths:
      - build/qemu-setup*.exe

cross-amd64-xen-only:
  extends: .cross_accel_build_job
  needs:
    job: amd64-debian-cross-container
  variables:
    IMAGE: debian-amd64-cross
    ACCEL: xen
    EXTRA_CONFIGURE_OPTS: --disable-tcg --disable-kvm

cross-arm64-xen-only:
  extends: .cross_accel_build_job
  needs:
    job: arm64-debian-cross-container
  variables:
    IMAGE: debian-arm64-cross
    ACCEL: xen
    EXTRA_CONFIGURE_OPTS: --disable-tcg --disable-kvm
34
.gitlab-ci.d/custom-runners.yml
Normal file
@ -0,0 +1,34 @@
# The CI jobs defined here require GitLab runners installed and
# registered on machines that match their operating system names,
# versions and architectures. This is in contrast to the other CI
# jobs that are intended to run on GitLab's "shared" runners.

# Different than the default approach on "shared" runners, based on
# containers, the custom runners have no such *requirement*, as those
# jobs should be capable of running on operating systems with no
# compatible container implementation, or no support from
# gitlab-runner. To avoid problems that gitlab-runner can cause while
# reusing the GIT repository, let's enable the clone strategy, which
# guarantees a fresh repository on each job run.

# All custom runners can extend this template to upload the testlog
# data as an artifact and also feed the junit report
.custom_runner_template:
  extends: .base_job_template
  variables:
    GIT_STRATEGY: clone
    GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
  artifacts:
    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
    expire_in: 7 days
    when: always
    paths:
      - build/build.ninja
      - build/meson-logs
    reports:
      junit: build/meson-logs/testlog.junit.xml

include:
  - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml'
  - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml'
  - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml'
25
.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml
Normal file
@ -0,0 +1,25 @@
# All ubuntu-22.04 jobs should run successfully in an environment
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 22.04"

ubuntu-22.04-aarch32-all:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch32
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$AARCH32_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --cross-prefix=arm-linux-gnueabihf-
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc --ignore=40`
    - make --output-sync -j`nproc --ignore=40` check
151
.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
Normal file
@ -0,0 +1,151 @@
# All ubuntu-22.04 jobs should run successfully in an environment
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 22.04"

ubuntu-22.04-aarch64-all-linux-static:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
    - if: "$AARCH64_RUNNER_AVAILABLE"
  script:
    - mkdir build
    - cd build
    # Disable -static-pie due to build error with system libc:
    # https://bugs.launchpad.net/ubuntu/+source/glibc/+bug/1987438
    - ../configure --enable-debug --static --disable-system --disable-pie
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc --ignore=40`
    - make check-tcg
    - make --output-sync -j`nproc --ignore=40` check

ubuntu-22.04-aarch64-all:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$AARCH64_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc --ignore=40`
    - make --output-sync -j`nproc --ignore=40` check

ubuntu-22.04-aarch64-without-defaults:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$AARCH64_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --disable-user --without-default-devices --without-default-features
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc --ignore=40`
    - make --output-sync -j`nproc --ignore=40` check

ubuntu-22.04-aarch64-alldbg:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
    - if: "$AARCH64_RUNNER_AVAILABLE"
  script:
    - mkdir build
    - cd build
    - ../configure --enable-debug
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make clean
    - make --output-sync -j`nproc --ignore=40`
    - make --output-sync -j`nproc --ignore=40` check

ubuntu-22.04-aarch64-clang:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$AARCH64_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-ubsan
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc --ignore=40`
    - make --output-sync -j`nproc --ignore=40` check

ubuntu-22.04-aarch64-tci:
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$AARCH64_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --enable-tcg-interpreter
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc --ignore=40`

ubuntu-22.04-aarch64-notcg:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - aarch64
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$AARCH64_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --disable-tcg --with-devices-aarch64=minimal
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc --ignore=40`
    - make --output-sync -j`nproc --ignore=40` check
128
.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml
Normal file
@ -0,0 +1,128 @@
# All ubuntu-22.04 jobs should run successfully in an environment
# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
# "Install basic packages to build QEMU on Ubuntu 22.04"

ubuntu-22.04-s390x-all-linux:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - s390x
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
    - if: "$S390X_RUNNER_AVAILABLE"
  script:
    - mkdir build
    - cd build
    - ../configure --enable-debug --disable-system --disable-tools --disable-docs
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc`
    - make --output-sync check-tcg
    - make --output-sync -j`nproc` check

ubuntu-22.04-s390x-all-system:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - s390x
  timeout: 75m
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
    - if: "$S390X_RUNNER_AVAILABLE"
  script:
    - mkdir build
    - cd build
    - ../configure --disable-user
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc`
    - make --output-sync -j`nproc` check

ubuntu-22.04-s390x-alldbg:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - s390x
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$S390X_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --enable-debug
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make clean
    - make --output-sync -j`nproc`
    - make --output-sync -j`nproc` check

ubuntu-22.04-s390x-clang:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - s390x
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$S390X_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --cc=clang --cxx=clang++ --enable-ubsan
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc`
    - make --output-sync -j`nproc` check

ubuntu-22.04-s390x-tci:
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - s390x
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$S390X_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --enable-tcg-interpreter
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc`

ubuntu-22.04-s390x-notcg:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - ubuntu_22.04
    - s390x
  rules:
    - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
      when: manual
      allow_failure: true
    - if: "$S390X_RUNNER_AVAILABLE"
      when: manual
      allow_failure: true
  script:
    - mkdir build
    - cd build
    - ../configure --disable-tcg
      || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc`
    - make --output-sync -j`nproc` check
88
.gitlab-ci.d/opensbi.yml
Normal file
@ -0,0 +1,88 @@
# All jobs needing docker-opensbi must use the same rules it uses.
.opensbi_job_rules:
  rules:
    # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
    - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
      when: never

    # In forks, if QEMU_CI=1 is set, then create manual job
    # if any files affecting the build output are touched
    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
      changes:
        - .gitlab-ci.d/opensbi.yml
        - .gitlab-ci.d/opensbi/Dockerfile
        - roms/opensbi/*
      when: manual

    # In forks, if QEMU_CI=1 is set, then create manual job
    # if the branch/tag starts with 'opensbi'
    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_REF_NAME =~ /^opensbi/'
      when: manual

    # In forks, if QEMU_CI=1 is set, then create manual job
    # if the last commit msg contains 'OpenSBI' (case insensitive)
    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i'
      when: manual

    # Scheduled runs on mainline don't get pipelines except for the special Coverity job
    - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
      when: never

    # Run if any files affecting the build output are touched
    - changes:
        - .gitlab-ci.d/opensbi.yml
        - .gitlab-ci.d/opensbi/Dockerfile
        - roms/opensbi/*
      when: on_success

    # Run if the branch/tag starts with 'opensbi'
    - if: '$CI_COMMIT_REF_NAME =~ /^opensbi/'
      when: on_success

    # Run if the last commit msg contains 'OpenSBI' (case insensitive)
    - if: '$CI_COMMIT_MESSAGE =~ /opensbi/i'
      when: on_success

docker-opensbi:
  extends: .opensbi_job_rules
  stage: containers
  image: docker:latest
  services:
    - docker:dind
  variables:
    GIT_DEPTH: 3
    IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
  before_script:
    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
    - until docker info; do sleep 1; done
  script:
    - docker pull $IMAGE_TAG || true
    - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
      --tag $IMAGE_TAG .gitlab-ci.d/opensbi
    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
    - docker push $IMAGE_TAG

build-opensbi:
  extends: .opensbi_job_rules
  stage: build
  needs: ['docker-opensbi']
  artifacts:
    when: on_success
    paths: # 'artifacts.zip' will contain the following files:
      - pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
      - pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
      - opensbi32-generic-stdout.log
      - opensbi32-generic-stderr.log
      - opensbi64-generic-stdout.log
      - opensbi64-generic-stderr.log
  image: $CI_REGISTRY_IMAGE:opensbi-cross-build
  variables:
    GIT_DEPTH: 3
  script: # Clone the required submodules and build OpenSBI
    - git submodule update --init roms/opensbi
    - export JOBS=$(($(getconf _NPROCESSORS_ONLN) + 1))
    - echo "=== Using ${JOBS} simultaneous jobs ==="
    - make -j${JOBS} -C roms/opensbi clean
    - make -j${JOBS} -C roms opensbi32-generic 2>&1 1>opensbi32-generic-stdout.log | tee -a opensbi32-generic-stderr.log >&2
    - make -j${JOBS} -C roms/opensbi clean
    - make -j${JOBS} -C roms opensbi64-generic 2>&1 1>opensbi64-generic-stdout.log | tee -a opensbi64-generic-stderr.log >&2
34
.gitlab-ci.d/opensbi/Dockerfile
Normal file
@ -0,0 +1,34 @@
#
# Docker image to cross-compile OpenSBI firmware binaries
#
FROM ubuntu:18.04

MAINTAINER Bin Meng <bmeng.cn@gmail.com>

# Install packages required to build OpenSBI
RUN apt update \
    && \
    \
    DEBIAN_FRONTEND=noninteractive \
    apt install --assume-yes --no-install-recommends \
        build-essential \
        ca-certificates \
        git \
        make \
        python3 \
        wget \
    && \
    \
    rm -rf /var/lib/apt/lists/*

# Manually install the kernel.org "Crosstool" based toolchains for gcc-8.3
RUN wget -O - \
    https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/8.3.0/x86_64-gcc-8.3.0-nolibc-riscv32-linux.tar.xz \
    | tar -C /opt -xJ
RUN wget -O - \
    https://mirrors.edge.kernel.org/pub/tools/crosstool/files/bin/x86_64/8.3.0/x86_64-gcc-8.3.0-nolibc-riscv64-linux.tar.xz \
    | tar -C /opt -xJ

# Export the toolchains to the system path
ENV PATH="/opt/gcc-8.3.0-nolibc/riscv32-linux/bin:${PATH}"
ENV PATH="/opt/gcc-8.3.0-nolibc/riscv64-linux/bin:${PATH}"
21
.gitlab-ci.d/qemu-project.yml
Normal file
@ -0,0 +1,21 @@
# This file contains the set of jobs run by the QEMU project:
# https://gitlab.com/qemu-project/qemu/-/pipelines

variables:
  RUNNER_TAG: ""

default:
  tags:
    - $RUNNER_TAG

include:
  - local: '/.gitlab-ci.d/base.yml'
  - local: '/.gitlab-ci.d/stages.yml'
  - local: '/.gitlab-ci.d/opensbi.yml'
  - local: '/.gitlab-ci.d/containers.yml'
  - local: '/.gitlab-ci.d/crossbuilds.yml'
  - local: '/.gitlab-ci.d/buildtest.yml'
  - local: '/.gitlab-ci.d/static_checks.yml'
  - local: '/.gitlab-ci.d/custom-runners.yml'
  - local: '/.gitlab-ci.d/cirrus.yml'
  - local: '/.gitlab-ci.d/windows.yml'
7
.gitlab-ci.d/stages.yml
Normal file
@ -0,0 +1,7 @@
# Currently we have two build stages after our containers are built:
#  - build (for traditional build and test or first stage build)
#  - test (for test stages, using build artefacts from a build stage)
stages:
  - containers
  - build
  - test
48
.gitlab-ci.d/static_checks.yml
Normal file
@ -0,0 +1,48 @@
check-patch:
  extends: .base_job_template
  stage: build
  image: python:3.10-alpine
  needs: []
  script:
    - .gitlab-ci.d/check-patch.py
  variables:
    GIT_DEPTH: 1000
    QEMU_JOB_ONLY_FORKS: 1
  before_script:
    - apk -U add git perl
  allow_failure: true

check-dco:
  extends: .base_job_template
  stage: build
  image: python:3.10-alpine
  needs: []
  script: .gitlab-ci.d/check-dco.py
  variables:
    GIT_DEPTH: 1000
  before_script:
    - apk -U add git

check-python-minreqs:
  extends: .base_job_template
  stage: test
  image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
  script:
    - make -C python check-minreqs
  variables:
    GIT_DEPTH: 1
  needs:
    job: python-container

check-python-tox:
  extends: .base_job_template
  stage: test
  image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
  script:
    - make -C python check-tox
  variables:
    GIT_DEPTH: 1
    QEMU_TOX_EXTRA_ARGS: --skip-missing-interpreters=false
    QEMU_JOB_OPTIONAL: 1
  needs:
    job: python-container
106
.gitlab-ci.d/windows.yml
Normal file
@ -0,0 +1,106 @@
msys2-64bit:
  extends: .base_job_template
  tags:
    - saas-windows-medium-amd64
  cache:
    key: "$CI_JOB_NAME"
    paths:
      - msys64/var/cache
      - ccache
    when: always
  needs: []
  stage: build
  timeout: 100m
  variables:
    # Select the "64 bit, gcc and MSVCRT" MSYS2 environment
    MSYSTEM: MINGW64
    # This feature doesn't (currently) work with PowerShell, it stops
    # the echo'ing of commands being run and doesn't show any timing
    FF_SCRIPT_SECTIONS: 0
    CONFIGURE_ARGS: --disable-system --enable-tools -Ddebug=false -Doptimization=0
    # The Windows git is a bit older so override the default
    GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
  artifacts:
    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
    expire_in: 7 days
    paths:
      - build/meson-logs/testlog.txt
    reports:
      junit: "build/meson-logs/testlog.junit.xml"
  before_script:
    - Write-Output "Acquiring msys2.exe installer at $(Get-Date -Format u)"
    - If ( !(Test-Path -Path msys64\var\cache ) ) {
        mkdir msys64\var\cache
      }
    - Invoke-WebRequest
      "https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe.sig"
      -outfile "msys2.exe.sig"
    - if ( Test-Path -Path msys64\var\cache\msys2.exe.sig ) {
        Write-Output "Cached installer sig" ;
        if ( ((Get-FileHash msys2.exe.sig).Hash -ne (Get-FileHash msys64\var\cache\msys2.exe.sig).Hash) ) {
          Write-Output "Mis-matched installer sig, new installer download required" ;
          Remove-Item -Path msys64\var\cache\msys2.exe.sig ;
          if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
            Remove-Item -Path msys64\var\cache\msys2.exe
          }
        } else {
          Write-Output "Matched installer sig, cached installer still valid"
        }
      } else {
        Write-Output "No cached installer sig, new installer download required" ;
        if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
          Remove-Item -Path msys64\var\cache\msys2.exe
        }
      }
    - if ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
        Write-Output "Fetching latest installer" ;
        Invoke-WebRequest
        "https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe"
        -outfile "msys64\var\cache\msys2.exe" ;
        Copy-Item -Path msys2.exe.sig -Destination msys64\var\cache\msys2.exe.sig
      } else {
        Write-Output "Using cached installer"
      }
    - Write-Output "Invoking msys2.exe installer at $(Get-Date -Format u)"
    - msys64\var\cache\msys2.exe -y
    - ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw)
      -replace '--refresh-keys', '--version') |
      Set-Content -Path ${CI_PROJECT_DIR}\msys64\etc\\post-install\\07-pacman-key.post
    - .\msys64\usr\bin\bash -lc "sed -i 's/^CheckSpace/#CheckSpace/g' /etc/pacman.conf"
    - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu'  # Core update
    - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu'  # Normal update
    - taskkill /F /FI "MODULES eq msys-2.0.dll"
  script:
    - Write-Output "Installing mingw packages at $(Get-Date -Format u)"
    - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
        bison diffutils flex
        git grep make sed
        mingw-w64-x86_64-binutils
        mingw-w64-x86_64-ccache
        mingw-w64-x86_64-curl
        mingw-w64-x86_64-gcc
        mingw-w64-x86_64-glib2
        mingw-w64-x86_64-libnfs
        mingw-w64-x86_64-libssh
        mingw-w64-x86_64-ninja
        mingw-w64-x86_64-pixman
        mingw-w64-x86_64-pkgconf
        mingw-w64-x86_64-python
        mingw-w64-x86_64-zstd"
    - Write-Output "Running build at $(Get-Date -Format u)"
    - $env:JOBS = $(.\msys64\usr\bin\bash -lc nproc)
    - $env:CHERE_INVOKING = 'yes'  # Preserve the current working directory
    - $env:MSYS = 'winsymlinks:native'  # Enable native Windows symlink
    - $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
    - $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache"
    - $env:CCACHE_MAXSIZE = "500M"
    - $env:CCACHE_DEPEND = 1  # cache misses are too expensive with preprocessor mode
    - $env:CC = "ccache gcc"
    - mkdir build
    - cd build
    - ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
    - ..\msys64\usr\bin\bash -lc "../configure $CONFIGURE_ARGS"
    - ..\msys64\usr\bin\bash -lc "make -j$env:JOBS"
    - ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
    - ..\msys64\usr\bin\bash -lc "ccache --show-stats"
    - Write-Output "Finished build at $(Get-Date -Format u)"
24
.gitlab-ci.yml
Normal file
@ -0,0 +1,24 @@
#
# This is the GitLab CI configuration file for the mainstream QEMU
# project: https://gitlab.com/qemu-project/qemu/-/pipelines
#
# !!! DO NOT ADD ANY NEW CONFIGURATION TO THIS FILE !!!
#
# Only documentation or comments is accepted.
#
# To use a different set of jobs than the mainstream QEMU project,
# you need to set the location of your custom yml file at "custom CI/CD
# configuration path", on your GitLab CI namespace:
# https://docs.gitlab.com/ee/ci/pipelines/settings.html#custom-cicd-configuration-path
#
# ----------------------------------------------------------------------
#
# QEMU CI jobs are based on templates. Some templates provide
# user-configurable options, modifiable via configuration variables.
#
# See https://qemu-project.gitlab.io/qemu/devel/ci.html#custom-ci-cd-variables
# for more information.
#

include:
  - local: '/.gitlab-ci.d/qemu-project.yml'
64
.gitlab/issue_templates/bug.md
Normal file
@ -0,0 +1,64 @@
<!--
This is the upstream QEMU issue tracker.

If you are able to, it will greatly facilitate bug triage if you attempt
to reproduce the problem with the latest qemu.git master built from
source. See https://www.qemu.org/download/#source for instructions on
how to do this.

QEMU generally supports the last two releases advertised on
https://www.qemu.org/. Problems with distro-packaged versions of QEMU
older than this should be reported to the distribution instead.

See https://www.qemu.org/contribute/report-a-bug/ for additional
guidance.

If this is a security issue, please consult
https://www.qemu.org/contribute/security-process/
-->

## Host environment
 - Operating system: <!-- Windows 10 21H1, Fedora 37, etc. -->
 - OS/kernel version: <!-- For POSIX hosts, use `uname -a` -->
 - Architecture: <!-- x86, ARM, s390x, etc. -->
 - QEMU flavor: <!-- qemu-system-x86_64, qemu-aarch64, qemu-img, etc. -->
 - QEMU version: <!-- e.g. `qemu-system-x86_64 --version` -->
 - QEMU command line:
   <!--
   Give the smallest, complete command line that exhibits the problem.

   If you are using libvirt, virsh, or vmm, you can likely find the QEMU
   command line arguments in /var/log/libvirt/qemu/$GUEST.log.
   -->
   ```
   ./qemu-system-x86_64 -M q35 -m 4096 -enable-kvm -hda fedora32.qcow2
   ```

## Emulated/Virtualized environment
 - Operating system: <!-- Windows 10 21H1, Fedora 37, etc. -->
 - OS/kernel version: <!-- For POSIX guests, use `uname -a`. -->
 - Architecture: <!-- x86, ARM, s390x, etc. -->


## Description of problem
<!-- Describe the problem, including any error/crash messages seen. -->


## Steps to reproduce
1.
2.
3.


## Additional information

<!--
Attach logs, stack traces, screenshots, etc. Compress the files if necessary.
If using libvirt, libvirt logs and XML domain information may be relevant.
-->

<!--
The line below ensures that proper tags are added to the issue.
Please do not remove it.
-->
/label ~"kind::Bug"
32
.gitlab/issue_templates/feature_request.md
Normal file
@ -0,0 +1,32 @@
<!--
This is the upstream QEMU issue tracker.

Please note that QEMU, like most open source projects, relies on
contributors who have motivation, skills and available time to work on
implementing particular features.

Feature requests can be helpful for determining demand and interest, but
they are not a guarantee that a contributor will volunteer to implement
it. We welcome and encourage even draft patches to implement a feature
to be sent to the mailing list where it can be discussed and developed
further by the community.

Thank you for your interest in helping us to make QEMU better!
-->

## Goal
<!-- Describe the final result you want to achieve. Avoid design specifics. -->


## Technical details
<!-- Describe technical details, design specifics, suggestions, versions, etc. -->


## Additional information
<!-- Patch or branch references, any other useful information -->

<!--
The line below ensures that proper tags are added to the issue.
Please do not remove it.
-->
/label ~"kind::Feature Request"
45
.gitmodules
vendored
Normal file
@ -0,0 +1,45 @@
[submodule "roms/seabios"]
	path = roms/seabios
	url = https://gitlab.com/qemu-project/seabios.git/
[submodule "roms/SLOF"]
	path = roms/SLOF
	url = https://gitlab.com/qemu-project/SLOF.git
[submodule "roms/ipxe"]
	path = roms/ipxe
	url = https://gitlab.com/qemu-project/ipxe.git
[submodule "roms/openbios"]
	path = roms/openbios
	url = https://gitlab.com/qemu-project/openbios.git
[submodule "roms/qemu-palcode"]
	path = roms/qemu-palcode
	url = https://gitlab.com/qemu-project/qemu-palcode.git
[submodule "roms/u-boot"]
	path = roms/u-boot
	url = https://gitlab.com/qemu-project/u-boot.git
[submodule "roms/skiboot"]
	path = roms/skiboot
	url = https://gitlab.com/qemu-project/skiboot.git
[submodule "roms/QemuMacDrivers"]
	path = roms/QemuMacDrivers
	url = https://gitlab.com/qemu-project/QemuMacDrivers.git
[submodule "roms/seabios-hppa"]
	path = roms/seabios-hppa
	url = https://gitlab.com/qemu-project/seabios-hppa.git
[submodule "roms/u-boot-sam460ex"]
	path = roms/u-boot-sam460ex
	url = https://gitlab.com/qemu-project/u-boot-sam460ex.git
[submodule "roms/edk2"]
	path = roms/edk2
	url = https://gitlab.com/qemu-project/edk2.git
[submodule "roms/opensbi"]
	path = roms/opensbi
	url = https://gitlab.com/qemu-project/opensbi.git
[submodule "roms/qboot"]
	path = roms/qboot
	url = https://gitlab.com/qemu-project/qboot.git
[submodule "roms/vbootrom"]
	path = roms/vbootrom
	url = https://gitlab.com/qemu-project/vbootrom.git
[submodule "tests/lcitool/libvirt-ci"]
	path = tests/lcitool/libvirt-ci
	url = https://gitlab.com/libvirt/libvirt-ci.git
51
.gitpublish
Normal file
@ -0,0 +1,51 @@
#
# Common git-publish profiles that can be used to send patches to QEMU upstream.
#
# See https://github.com/stefanha/git-publish for more information
#
[gitpublishprofile "default"]
base = master
to = qemu-devel@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null

[gitpublishprofile "rfc"]
base = master
prefix = RFC PATCH
to = qemu-devel@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null

[gitpublishprofile "stable"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-stable@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null

[gitpublishprofile "trivial"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-trivial@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null

[gitpublishprofile "block"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-block@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null

[gitpublishprofile "arm"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-arm@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null

[gitpublishprofile "s390"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-s390@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null

[gitpublishprofile "ppc"]
base = master
to = qemu-devel@nongnu.org
cc = qemu-ppc@nongnu.org
cccmd = scripts/get_maintainer.pl --noroles --norolestats --nogit --nogit-fallback 2>/dev/null
229
.mailmap
Normal file
229
.mailmap
Normal file
@ -0,0 +1,229 @@
|
||||
# This mailmap fixes up author names/addresses.
|
||||
#
|
||||
# If you are adding to this file consider if a similar change needs to
|
||||
# be made to contrib/gitdm/aliases. They are not however completely
|
||||
# analogous. .mailmap is concerned with fixing up damaged author
|
||||
# fields where as the gitdm equivalent is more concerned with making
|
||||
# sure multiple email addresses get mapped onto the same author.
|
||||
#
|
||||
# From man git-shortlog the forms are:
|
||||
#
|
||||
# Proper Name <commit@email.xx>
|
||||
# <proper@email.xx> <commit@email.xx>
|
||||
# Proper Name <proper@email.xx> <commit@email.xx>
|
||||
# Proper Name <proper@email.xx> Commit Name <commit@email.xx>
|
||||
#
|
||||
|
||||
# The first section translates weird addresses from the original git import
|
||||
# into proper addresses so that they are counted properly by git shortlog.
|
||||
Andrzej Zaborowski <balrogg@gmail.com> balrog <balrog@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
Anthony Liguori <anthony@codemonkey.ws> aliguori <aliguori@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
Aurelien Jarno <aurelien@aurel32.net> aurel32 <aurel32@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
Blue Swirl <blauwirbel@gmail.com> blueswir1 <blueswir1@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
Edgar E. Iglesias <edgar.iglesias@gmail.com> edgar_igl <edgar_igl@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
Fabrice Bellard <fabrice@bellard.org> bellard <bellard@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
Jocelyn Mayer <l_indien@magic.fr> j_mayer <j_mayer@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
Paul Brook <paul@codesourcery.com> pbrook <pbrook@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
Thiemo Seufer <ths@networkno.de> ths <ths@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
|
||||
# Corrupted Author fields
|
||||
Aaron Larson <alarson@ddci.com> alarson@ddci.com
|
||||
Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber>
|
||||
fanwenjie <fanwj@mail.ustc.edu.cn> fanwj@mail.ustc.edu.cn <fanwj@mail.ustc.edu.cn>
|
||||
Jason Wang <jasowang@redhat.com> Jason Wang <jasowang>
|
||||
Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
|
||||
Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
|
||||
Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
|
||||
Timothée Cocault <timothee.cocault@gmail.com> timothee.cocault@gmail.com <timothee.cocault@gmail.com>
|
||||
Stefan Weil <sw@weilnetz.de> <weil@mail.berlios.de>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@kiwi.(none)>
|
||||
|
||||
# There is also a:
|
||||
# (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
|
||||
# for the cvs2svn initialization commit e63c3dc74bf.
|
||||
|
||||
# Next, translate a few commits where mailman rewrote the From: line due
|
||||
# to strict SPF and DMARC. Usually, our build process should be flagging
|
||||
# commits like these before maintainer merges; if you find the need to add
|
||||
# a line here, please also report a bug against the part of the build
|
||||
# process that let the mis-attribution slip through in the first place.
|
||||
#
|
||||
# If the mailing list munges your emails, use:
|
||||
# git config sendemail.from '"Your Name" <your.email@example.com>'
|
||||
# the use of "" in that line will differ from the typically unquoted
|
||||
# 'git config user.name', which in turn is sufficient for 'git send-email'
|
||||
# to add an extra From: line in the body of your email that takes
|
||||
# precedence over any munged From: in the mail's headers.
|
||||
# See https://lists.openembedded.org/g/openembedded-core/message/166515
|
||||
# and https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg06784.html
|
||||
Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org>
|
||||
Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-trivial@nongnu.org>
|
||||
Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-block@nongnu.org>
|
||||
BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org>

# Next, replace old addresses by a more recent one.
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@imgtec.com>
Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com>
Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca>
Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
Brian Cain <brian.cain@oss.qualcomm.com> <bcain@quicinc.com>
Brian Cain <brian.cain@oss.qualcomm.com> <quic_bcain@quicinc.com>
Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
Filip Bozuta <filip.bozuta@syrmia.com> <filip.bozuta@rt-rk.com.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <fred.konrad@greensocs.com>
Frederic Konrad <konrad.frederic@yahoo.fr> <konrad@adacore.com>
Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
Juan Quintela <quintela@trasno.org> <quintela@redhat.com>
Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
Paul Brook <paul@nowt.org> <paul@codesourcery.com>
Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
Paul Burton <paulburton@kernel.org> <paul.burton@imgtec.com>
Paul Burton <paulburton@kernel.org> <paul@archlinuxmips.org>
Paul Burton <paulburton@kernel.org> <pburton@wavecomp.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com>
Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@weilnetz.de>
Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>

# Also list preferred name forms where people have changed their
# git author config, or had utf8/latin1 encoding issues.
Aaron Lindsay <aaron@os.amperecomputing.com>
Aaron Larson <alarson@ddci.com>
Alexey Gerasimenko <x1917x@gmail.com>
Alex Chen <alex.chen@huawei.com>
Alex Ivanov <void@aleksoft.net>
Andreas Färber <afaerber@suse.de>
Bandan Das <bsd@redhat.com>
Benjamin MARSILI <mlspirat42@gmail.com>
Benoît Canet <benoit.canet@gmail.com>
Benoît Canet <benoit.canet@irqsave.net>
Benoît Canet <benoit.canet@nodalink.com>
Boqun Feng <boqun.feng@gmail.com>
Boqun Feng <boqun.feng@intel.com>
Brad Smith <brad@comstyle.com>
Brijesh Singh <brijesh.singh@amd.com>
Brilly Wu <brillywu@viatech.com.cn>
Cédric Vincent <cedric.vincent@st.com>
CheneyLin <linzc@zju.edu.cn>
Chen Gang <chengang@emindsoft.com.cn>
Chen Gang <gang.chen.5i5j@gmail.com>
Chen Gang <gang.chen@sunrus.com.cn>
Chen Wei-Ren <chenwj@iis.sinica.edu.tw>
Christophe Lyon <christophe.lyon@st.com>
Collin L. Walling <walling@linux.ibm.com>
Daniel P. Berrangé <berrange@redhat.com>
Eduardo Otubo <otubo@redhat.com>
Erik Smit <erik.lucas.smit@gmail.com>
Fabrice Desclaux <fabrice.desclaux@cea.fr>
Fernando Luis Vázquez Cao <fernando_b1@lab.ntt.co.jp>
Fernando Luis Vázquez Cao <fernando@oss.ntt.co.jp>
Gautham R. Shenoy <ego@in.ibm.com>
Gautham R. Shenoy <ego@linux.vnet.ibm.com>
Gonglei (Arei) <arei.gonglei@huawei.com>
Guang Wang <wang.guang55@zte.com.cn>
Haibin Zhang <haibinzhang@tencent.com>
Hailiang Zhang <zhang.zhanghailiang@huawei.com>
Hanna Reitz <hreitz@redhat.com> <mreitz@redhat.com>
Hervé Poussineau <hpoussin@reactos.org>
Hyman Huang <huangy81@chinatelecom.cn>
Jakub Jermář <jakub@jermar.eu>
Jakub Jermář <jakub.jermar@kernkonzept.com>
Jean-Christophe Dubois <jcd@tribudubois.net>
Jindřich Makovička <makovick@gmail.com>
John Arbuckle <programmingkidx@gmail.com>
Juha Riihimäki <juha.riihimaki@nokia.com>
Juha Riihimäki <Juha.Riihimaki@nokia.com>
Jun Li <junmuzi@gmail.com>
Laurent Vivier <Laurent@lvivier.info>
Leandro Lupori <leandro.lupori@gmail.com>
Li Guang <lig.fnst@cn.fujitsu.com>
Liming Wang <walimisdev@gmail.com>
linzhecheng <linzc@zju.edu.cn>
Liran Schour <lirans@il.ibm.com>
Liu Yu <yu.liu@freescale.com>
Liu Yu <Yu.Liu@freescale.com>
Li Zhang <zhlcindy@gmail.com>
Li Zhang <zhlcindy@linux.vnet.ibm.com>
Lluís Vilanova <vilanova@ac.upc.edu>
Lluís Vilanova <xscript@gmx.net>
Longpeng (Mike) <longpeng2@huawei.com>
Luc Michel <luc.michel@git.antfield.fr>
Luc Michel <luc.michel@greensocs.com>
Marc Marí <marc.mari.barcelo@gmail.com>
Marc Marí <markmb@redhat.com>
Michael Avdienko <whitearchey@gmail.com>
Michael S. Tsirkin <mst@redhat.com>
Munkyu Im <munkyu.im@samsung.com>
Nicholas Bellinger <nab@linux-iscsi.org>
Nicholas Thomas <nick@bytemark.co.uk>
Nikunj A Dadhania <nikunj@linux.vnet.ibm.com>
Orit Wasserman <owasserm@redhat.com>
Paolo Bonzini <pbonzini@redhat.com>
Pan Nengyuan <pannengyuan@huawei.com>
Pavel Dovgaluk <dovgaluk@ispras.ru>
Pavel Dovgaluk <pavel.dovgaluk@gmail.com>
Pavel Dovgaluk <Pavel.Dovgaluk@ispras.ru>
Peter Chubb <peter.chubb@nicta.com.au>
Peter Crosthwaite <crosthwaite.peter@gmail.com>
Peter Crosthwaite <peter.crosthwaite@petalogix.com>
Peter Crosthwaite <peter.crosthwaite@xilinx.com>
Prasad J Pandit <pjp@fedoraproject.org>
Prasad J Pandit <ppandit@redhat.com>
Qiao Nuohan <qiaonuohan@cn.fujitsu.com>
Reimar Döffinger <Reimar.Doeffinger@gmx.de>
Remy Noel <remy.noel@blade-group.com>
Roger Pau Monné <roger.pau@citrix.com>
Shin'ichiro Kawasaki <kawasaki@juno.dti.ne.jp>
Shin'ichiro Kawasaki <shinichiro.kawasaki@wdc.com>
Sochin Jiang <sochin.jiang@huawei.com>
Stefan Berger <stefanb@linux.vnet.ibm.com> <stefanb@linux.ibm.com>
Takashi Yoshii <takasi-y@ops.dti.ne.jp>
Thomas Huth <thuth@redhat.com>
Thomas Knych <thomaswk@google.com>
Timothy Baldwin <T.E.Baldwin99@members.leeds.ac.uk>
Tony Nguyen <tony.nguyen@bt.com>
Venkateswararao Jujjuri <jvrao@linux.vnet.ibm.com>
Vibi Sreenivasan <vibi_sreenivasan@cms.com>
Vijaya Kumar K <vijayak@cavium.com>
Vijaya Kumar K <Vijaya.Kumar@cavium.com>
Vijay Kumar <vijaykumar@bravegnu.org>
Vijay Kumar <vijaykumar@zilogic.com>
Wang Guang <wang.guang55@zte.com.cn>
Wenchao Xia <xiawenc@linux.vnet.ibm.com>
Wenshuang Ma <kevinnma@tencent.com>
Xiaoqiang Zhao <zxq_yx_007@163.com>
Xinhua Cao <caoxinhua@huawei.com>
Xiong Zhang <xiong.y.zhang@intel.com>
Yin Yin <yin.yin@cs2c.com.cn>
Yu-Chen Lin <npes87184@gmail.com>
Yu-Chen Lin <npes87184@gmail.com> <yuchenlin@synology.com>
YunQiang Su <syq@debian.org>
YunQiang Su <ysu@wavecomp.com>
Yuri Pudgorodskiy <yur@virtuozzo.com>
Zhengui Li <lizhengui@huawei.com>
Zhenwei Pi <pizhenwei@bytedance.com>
Zhenwei Pi <zhenwei.pi@youruncloud.com>
Zhuang Yanying <ann.zhuangyanying@huawei.com>
299
.patchew.yml
Normal file
@ -0,0 +1,299 @@
---
# Note: this file is still unused. It serves as documentation for the
# Patchew configuration in case patchew.org disappears or has to be
# reinstalled.
#
# Patchew configuration is available to project administrators at
# https://patchew.org/api/v1/projects/1/config/ and can be converted
# to YAML using the following Python script:
#
# import json
# import sys
# import ruamel.yaml
#
# json_str = sys.stdin.read()
# yaml = ruamel.yaml.YAML()
# yaml.explicit_start = True
# data = json.loads(json_str, object_pairs_hook=ruamel.yaml.comments.CommentedMap)
# ruamel.yaml.scalarstring.walk_tree(data)
# yaml.dump(data, sys.stdout)
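#
# Illustrative invocation only (the endpoint is restricted to project
# administrators, so authentication may be required, and the script file
# name below is just a placeholder for the snippet above):
#
# curl -s https://patchew.org/api/v1/projects/1/config/ | \
#     python3 patchew-config-to-yaml.py > patchew-config.yml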
email:
|
||||
notifications:
|
||||
timeouts:
|
||||
event: TestingReport
|
||||
enabled: true
|
||||
to_user: false
|
||||
reply_subject: true
|
||||
set_reply_to: true
|
||||
in_reply_to: true
|
||||
reply_to_all: false
|
||||
subject_template: none
|
||||
to: fam@euphon.net
|
||||
cc: ''
|
||||
body_template: |
|
||||
{% if not is_timeout %} {{ cancel }} {% endif %}
|
||||
|
||||
Test '{{ test }}' timeout, log:
|
||||
|
||||
{{ log }}
|
||||
ENOSPC:
|
||||
event: TestingReport
|
||||
enabled: true
|
||||
to_user: false
|
||||
reply_subject: false
|
||||
set_reply_to: false
|
||||
in_reply_to: true
|
||||
reply_to_all: false
|
||||
subject_template: Out of space error
|
||||
to: fam@euphon.net
|
||||
cc: ''
|
||||
body_template: |
|
||||
{% if passed %}
|
||||
{{ cancel }}
|
||||
{% endif %}
|
||||
|
||||
{% if 'No space left on device' in log %}
|
||||
Tester {{ tester }} out of space when running {{ test }}
|
||||
|
||||
{{ log }}
|
||||
{% else %}
|
||||
{{ cancel }}
|
||||
{% endif %}
|
||||
FailureShort:
|
||||
event: TestingReport
|
||||
enabled: true
|
||||
to_user: false
|
||||
reply_subject: true
|
||||
set_reply_to: true
|
||||
in_reply_to: true
|
||||
reply_to_all: true
|
||||
subject_template: Testing failed
|
||||
to: ''
|
||||
cc: ''
|
||||
body_template: |
|
||||
{% if passed or not obj.message_id or is_timeout %}
|
||||
{{ cancel }}
|
||||
{% endif %}
|
||||
{% if 'No space left on device' in log %}
|
||||
{{ cancel }}
|
||||
{% endif %}
|
||||
Patchew URL: https://patchew.org/QEMU/{{ obj.message_id }}/
|
||||
|
||||
{% ansi2text log as logtext %}
|
||||
{% if test == "checkpatch" %}
|
||||
Hi,
|
||||
|
||||
This series seems to have some coding style problems. See output below for
|
||||
more information:
|
||||
|
||||
{{ logtext }}
|
||||
{% elif test == "docker-mingw@fedora" or test == "docker-quick@centos8" or test == "asan" %}
|
||||
Hi,
|
||||
|
||||
This series failed the {{ test }} build test. Please find the testing commands and
|
||||
their output below. If you have Docker installed, you can probably reproduce it
|
||||
locally.
|
||||
|
||||
{% lines_between logtext start="^=== TEST SCRIPT BEGIN ===$" stop="^=== TEST SCRIPT END ===$" %}
|
||||
{% lines_between logtext start="^=== OUTPUT BEGIN ===$" stop="=== OUTPUT END ===$" as output %}
|
||||
{% grep_C output regex="\b(FAIL|XPASS|ERROR|WARN|error:|warning:)" n=3 %}
|
||||
{% elif test == "s390x" or test == "FreeBSD" or test == "ppcle" or test == "ppcbe" %}
|
||||
Hi,
|
||||
|
||||
This series failed build test on {{test}} host. Please find the details below.
|
||||
|
||||
{% lines_between logtext start="^=== TEST SCRIPT BEGIN ===$" stop="^=== TEST SCRIPT END ===$" %}
|
||||
{% lines_between logtext start="^=== OUTPUT BEGIN ===$" stop="=== OUTPUT END ===$" as output %}
|
||||
{% grep_C output regex="\b(FAIL|XPASS|ERROR|WARN|error:|warning:)" n=3 %}
|
||||
{% else %}
|
||||
{{ cancel }}
|
||||
{% endif %}
|
||||
|
||||
The full log is available at
|
||||
{{ log_url }}.
|
||||
---
|
||||
Email generated automatically by Patchew [https://patchew.org/].
|
||||
Please send your feedback to patchew-devel@redhat.com
|
||||
testing:
|
||||
tests:
|
||||
asan:
|
||||
enabled: true
|
||||
requirements: docker
|
||||
timeout: 3600
|
||||
script: |
|
||||
#!/bin/bash
|
||||
time make docker-test-debug@fedora TARGET_LIST=x86_64-softmmu J=14 NETWORK=1
|
||||
docker-quick@centos8:
|
||||
enabled: false
|
||||
requirements: docker,x86_64
|
||||
timeout: 3600
|
||||
script: |
|
||||
#!/bin/bash
|
||||
time make docker-test-quick@centos8 SHOW_ENV=1 J=14 NETWORK=1
|
||||
checkpatch:
|
||||
enabled: true
|
||||
requirements: ''
|
||||
timeout: 600
|
||||
script: |
|
||||
#!/bin/bash
|
||||
git rev-parse base > /dev/null || exit 0
|
||||
./scripts/checkpatch.pl --mailback base..
|
||||
docker-mingw@fedora:
|
||||
enabled: true
|
||||
requirements: docker,x86_64
|
||||
timeout: 3600
|
||||
script: |
|
||||
#! /bin/bash
|
||||
test "$(uname -m)" = "x86_64"
|
||||
ppcle:
|
||||
enabled: false
|
||||
requirements: ppcle
|
||||
timeout: 3600
|
||||
script: |
|
||||
#!/bin/bash
|
||||
# Testing script will be invoked under the git checkout with
|
||||
# HEAD pointing to a commit that has the patches applied on top of "base"
|
||||
# branch
|
||||
set -e
|
||||
CC=$HOME/bin/cc
|
||||
INSTALL=$PWD/install
|
||||
BUILD=$PWD/build
|
||||
mkdir -p $BUILD $INSTALL
|
||||
SRC=$PWD
|
||||
cd $BUILD
|
||||
$SRC/configure --cc=$CC --prefix=$INSTALL
|
||||
make -j4
|
||||
# XXX: we need reliable clean up
|
||||
# make check -j4 V=1
|
||||
make install
|
||||
|
||||
echo
|
||||
echo "=== ENV ==="
|
||||
env
|
||||
|
||||
echo
|
||||
echo "=== PACKAGES ==="
|
||||
rpm -qa
|
||||
ppcbe:
|
||||
enabled: false
|
||||
requirements: ppcbe
|
||||
timeout: 3600
|
||||
script: |
|
||||
#!/bin/bash
|
||||
# Testing script will be invoked under the git checkout with
|
||||
# HEAD pointing to a commit that has the patches applied on top of "base"
|
||||
# branch
|
||||
set -e
|
||||
CC=$HOME/bin/cc
|
||||
INSTALL=$PWD/install
|
||||
BUILD=$PWD/build
|
||||
mkdir -p $BUILD $INSTALL
|
||||
SRC=$PWD
|
||||
cd $BUILD
|
||||
$SRC/configure --cc=$CC --prefix=$INSTALL
|
||||
make -j4
|
||||
# XXX: we need reliable clean up
|
||||
# make check -j4 V=1
|
||||
make install
|
||||
|
||||
echo
|
||||
echo "=== ENV ==="
|
||||
env
|
||||
|
||||
echo
|
||||
echo "=== PACKAGES ==="
|
||||
rpm -qa
|
||||
FreeBSD:
|
||||
enabled: true
|
||||
requirements: qemu-x86,x86_64,git
|
||||
timeout: 3600
|
||||
script: |
|
||||
#!/bin/bash
|
||||
# Testing script will be invoked under the git checkout with
|
||||
# HEAD pointing to a commit that has the patches applied on top of "base"
|
||||
# branch
|
||||
if qemu-system-x86_64 --help >/dev/null 2>&1; then
|
||||
QEMU=qemu-system-x86_64
|
||||
elif /usr/libexec/qemu-kvm --help >/dev/null 2>&1; then
|
||||
QEMU=/usr/libexec/qemu-kvm
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
make vm-build-freebsd J=21 QEMU=$QEMU
|
||||
exit 0
|
||||
docker-clang@ubuntu:
|
||||
enabled: true
|
||||
requirements: docker,x86_64
|
||||
timeout: 3600
|
||||
script: |
|
||||
#!/bin/bash
|
||||
time make docker-test-clang@ubuntu SHOW_ENV=1 J=14 NETWORK=1
|
||||
s390x:
|
||||
enabled: true
|
||||
requirements: s390x
|
||||
timeout: 3600
|
||||
script: |
|
||||
#!/bin/bash
|
||||
# Testing script will be invoked under the git checkout with
|
||||
# HEAD pointing to a commit that has the patches applied on top of "base"
|
||||
# branch
|
||||
set -e
|
||||
CC=$HOME/bin/cc
|
||||
INSTALL=$PWD/install
|
||||
BUILD=$PWD/build
|
||||
mkdir -p $BUILD $INSTALL
|
||||
SRC=$PWD
|
||||
cd $BUILD
|
||||
$SRC/configure --cc=$CC --prefix=$INSTALL
|
||||
make -j4
|
||||
# XXX: we need reliable clean up
|
||||
# make check -j4 V=1
|
||||
make install
|
||||
|
||||
echo
|
||||
echo "=== ENV ==="
|
||||
env
|
||||
|
||||
echo
|
||||
echo "=== PACKAGES ==="
|
||||
rpm -qa
|
||||
requirements:
|
||||
x86_64:
|
||||
script: |
|
||||
#! /bin/bash
|
||||
test "$(uname -m)" = "x86_64"
|
||||
qemu-x86:
|
||||
script: |
|
||||
#!/bin/bash
|
||||
if qemu-system-x86_64 --help >/dev/null 2>&1; then
|
||||
:
|
||||
elif /usr/libexec/qemu-kvm --help >/dev/null 2>&1; then
|
||||
:
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
ppcle:
|
||||
script: |
|
||||
#!/bin/bash
|
||||
test "$(uname -m)" = "ppc64le"
|
||||
ppcbe:
|
||||
script: |
|
||||
#!/bin/bash
|
||||
test "$(uname -m)" = "ppc64"
|
||||
git:
|
||||
script: |
|
||||
#! /bin/bash
|
||||
git config user.name > /dev/null 2>&1
|
||||
docker:
|
||||
script: |
|
||||
#!/bin/bash
|
||||
docker ps || sudo -n docker ps
|
||||
s390x:
|
||||
script: |
|
||||
#!/bin/bash
|
||||
test "$(uname -m)" = "s390x"
|
||||
git:
|
||||
push_to: git@github.com:patchew-project/qemu
|
||||
public_repo: https://github.com/patchew-project/qemu
|
||||
url_template: https://github.com/patchew-project/qemu/tree/%t
|
25
.readthedocs.yml
Normal file
@ -0,0 +1,25 @@
# .readthedocs.yml
# Read the Docs configuration file
# See https://docs.readthedocs.io/en/stable/config-file/v2.html for details

# Required
version: 2

# Set the version of Python and other tools you might need
build:
  os: ubuntu-22.04
  tools:
    python: "3.11"

# Build documentation in the docs/ directory with Sphinx
sphinx:
  configuration: docs/conf.py

# We recommend specifying your dependencies to enable reproducible builds:
# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
python:
  install:
    - requirements: docs/requirements.txt

# We want all the document formats
formats: all
275
.travis.yml
Normal file
@ -0,0 +1,275 @@
|
||||
os: linux
|
||||
dist: jammy
|
||||
language: c
|
||||
compiler:
|
||||
- gcc
|
||||
cache:
|
||||
# There is one cache per branch and compiler version.
|
||||
# characteristics of each job are used to identify the cache:
|
||||
# - OS name (currently only linux)
|
||||
# - OS distribution (e.g. "jammy" for Linux)
|
||||
# - Names and values of visible environment variables set in .travis.yml or Settings panel
|
||||
timeout: 1200
|
||||
ccache: true
|
||||
pip: true
|
||||
|
||||
|
||||
# The channel name "irc.oftc.net#qemu" is encrypted against qemu/qemu
|
||||
# to prevent IRC notifications from forks. This was created using:
|
||||
# $ travis encrypt -r "qemu/qemu" "irc.oftc.net#qemu"
|
||||
notifications:
|
||||
irc:
|
||||
channels:
|
||||
- secure: "F7GDRgjuOo5IUyRLqSkmDL7kvdU4UcH3Lm/W2db2JnDHTGCqgEdaYEYKciyCLZ57vOTsTsOgesN8iUT7hNHBd1KWKjZe9KDTZWppWRYVwAwQMzVeSOsbbU4tRoJ6Pp+3qhH1Z0eGYR9ZgKYAoTumDFgSAYRp4IscKS8jkoedOqM="
|
||||
on_success: change
|
||||
on_failure: always
|
||||
|
||||
|
||||
env:
|
||||
global:
|
||||
- SRC_DIR=".."
|
||||
- BUILD_DIR="build"
|
||||
- BASE_CONFIG="--disable-docs --disable-tools"
|
||||
- TEST_BUILD_CMD=""
|
||||
- TEST_CMD="make check V=1"
|
||||
# This is broadly a list of "mainline" system targets which have support across the major distros
|
||||
- MAIN_SYSTEM_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
|
||||
- CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime"
|
||||
- CCACHE_MAXSIZE=1G
|
||||
- G_MESSAGES_DEBUG=error
|
||||
|
||||
|
||||
git:
|
||||
# we want to do this ourselves
|
||||
submodules: false
|
||||
|
||||
# Common first phase for all steps
|
||||
# We no longer use nproc to calculate jobs:
|
||||
# https://travis-ci.community/t/nproc-reports-32-cores-on-arm64/5851
|
||||
before_install:
|
||||
- if command -v ccache ; then ccache --zero-stats ; fi
|
||||
- export JOBS=3
|
||||
- echo "=== Using ${JOBS} simultaneous jobs ==="
|
||||
|
||||
# Configure step - may be overridden
|
||||
before_script:
|
||||
- mkdir -p ${BUILD_DIR} && cd ${BUILD_DIR}
|
||||
- ${SRC_DIR}/configure ${BASE_CONFIG} ${CONFIG} || { cat config.log meson-logs/meson-log.txt && exit 1; }
|
||||
|
||||
# Main build & test - rarely overridden - controlled by TEST_CMD
|
||||
script:
|
||||
- BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
|
||||
- |
|
||||
if [ "$BUILD_RC" -eq 0 ] && [ -n "$TEST_BUILD_CMD" ]; then
|
||||
${TEST_BUILD_CMD} || BUILD_RC=$?
|
||||
else
|
||||
$(exit $BUILD_RC);
|
||||
fi
|
||||
- |
|
||||
if [ "$BUILD_RC" -eq 0 ] ; then
|
||||
${TEST_CMD} ;
|
||||
else
|
||||
$(exit $BUILD_RC);
|
||||
fi
|
||||
after_script:
|
||||
- df -h
|
||||
- if command -v ccache ; then ccache --show-stats ; fi
|
||||
|
||||
|
||||
jobs:
|
||||
include:
|
||||
|
||||
- name: "[aarch64] GCC check-tcg"
|
||||
arch: arm64
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
- libattr1-dev
|
||||
- libbrlapi-dev
|
||||
- libcacard-dev
|
||||
- libcap-ng-dev
|
||||
- libfdt-dev
|
||||
- libgcrypt20-dev
|
||||
- libgnutls28-dev
|
||||
- libgtk-3-dev
|
||||
- libiscsi-dev
|
||||
- liblttng-ust-dev
|
||||
- libncurses5-dev
|
||||
- libnfs-dev
|
||||
- libpixman-1-dev
|
||||
- libpng-dev
|
||||
- librados-dev
|
||||
- libsdl2-dev
|
||||
- libseccomp-dev
|
||||
- liburcu-dev
|
||||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers --enable-fdt=system
|
||||
--target-list=${MAIN_SYSTEM_TARGETS} --cxx=/bin/false"
|
||||
|
||||
- name: "[ppc64] Clang check-tcg"
|
||||
arch: ppc64le
|
||||
compiler: clang
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
- libattr1-dev
|
||||
- libbrlapi-dev
|
||||
- libcacard-dev
|
||||
- libcap-ng-dev
|
||||
- libfdt-dev
|
||||
- libgcrypt20-dev
|
||||
- libgnutls28-dev
|
||||
- libgtk-3-dev
|
||||
- libiscsi-dev
|
||||
- liblttng-ust-dev
|
||||
- libncurses5-dev
|
||||
- libnfs-dev
|
||||
- libpixman-1-dev
|
||||
- libpng-dev
|
||||
- librados-dev
|
||||
- libsdl2-dev
|
||||
- libseccomp-dev
|
||||
- liburcu-dev
|
||||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers --enable-fdt=system
|
||||
--target-list=ppc64-softmmu,ppc64le-linux-user"
|
||||
|
||||
- name: "[s390x] GCC check-tcg"
|
||||
arch: s390x
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
- libattr1-dev
|
||||
- libbrlapi-dev
|
||||
- libcacard-dev
|
||||
- libcap-ng-dev
|
||||
- libfdt-dev
|
||||
- libgcrypt20-dev
|
||||
- libgnutls28-dev
|
||||
- libgtk-3-dev
|
||||
- libiscsi-dev
|
||||
- liblttng-ust-dev
|
||||
- libncurses5-dev
|
||||
- libnfs-dev
|
||||
- libpixman-1-dev
|
||||
- libpng-dev
|
||||
- librados-dev
|
||||
- libsdl2-dev
|
||||
- libseccomp-dev
|
||||
- liburcu-dev
|
||||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers
|
||||
--target-list=hppa-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
|
||||
script:
|
||||
- BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
|
||||
- |
|
||||
if [ "$BUILD_RC" -eq 0 ] ; then
|
||||
mv pc-bios/s390-ccw/*.img qemu-bundle/usr/local/share/qemu ;
|
||||
${TEST_CMD} ;
|
||||
else
|
||||
$(exit $BUILD_RC);
|
||||
fi
|
||||
|
||||
- name: "[s390x] Clang (other-system)"
|
||||
arch: s390x
|
||||
compiler: clang
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
- libattr1-dev
|
||||
- libcacard-dev
|
||||
- libcap-ng-dev
|
||||
- libfdt-dev
|
||||
- libgnutls28-dev
|
||||
- libiscsi-dev
|
||||
- liblttng-ust-dev
|
||||
- liblzo2-dev
|
||||
- libncurses-dev
|
||||
- libnfs-dev
|
||||
- libpixman-1-dev
|
||||
- libsdl2-dev
|
||||
- libsdl2-image-dev
|
||||
- libseccomp-dev
|
||||
- libsnappy-dev
|
||||
- libzstd-dev
|
||||
- nettle-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
# Tests dependencies
|
||||
- genisoimage
|
||||
env:
|
||||
- CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
|
||||
--target-list=arm-softmmu,avr-softmmu,microblaze-softmmu,sh4eb-softmmu,sparc64-softmmu,xtensaeb-softmmu"
|
||||
|
||||
- name: "[s390x] GCC (user)"
|
||||
arch: s390x
|
||||
addons:
|
||||
apt_packages:
|
||||
- libgcrypt20-dev
|
||||
- libglib2.0-dev
|
||||
- libgnutls28-dev
|
||||
- ninja-build
|
||||
- flex
|
||||
- bison
|
||||
- python3-tomli
|
||||
env:
|
||||
- TEST_CMD="make check check-tcg V=1"
|
||||
- CONFIG="--disable-containers --disable-system"
|
||||
|
||||
- name: "[s390x] Clang (disable-tcg)"
|
||||
arch: s390x
|
||||
compiler: clang
|
||||
addons:
|
||||
apt_packages:
|
||||
- libaio-dev
|
||||
- libattr1-dev
|
||||
- libbrlapi-dev
|
||||
- libcacard-dev
|
||||
- libcap-ng-dev
|
||||
- libfdt-dev
|
||||
- libgcrypt20-dev
|
||||
- libgnutls28-dev
|
||||
- libgtk-3-dev
|
||||
- libiscsi-dev
|
||||
- liblttng-ust-dev
|
||||
- libncurses5-dev
|
||||
- libnfs-dev
|
||||
- libpixman-1-dev
|
||||
- libpng-dev
|
||||
- librados-dev
|
||||
- libsdl2-dev
|
||||
- libseccomp-dev
|
||||
- liburcu-dev
|
||||
- libusb-1.0-0-dev
|
||||
- libvdeplug-dev
|
||||
- libvte-2.91-dev
|
||||
- ninja-build
|
||||
- python3-tomli
|
||||
env:
|
||||
- TEST_CMD="make check-unit"
|
||||
- CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools
|
||||
--enable-fdt=system --host-cc=clang --cxx=clang++"
|
339
COPYING
Normal file
@ -0,0 +1,339 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 2, June 1991
|
||||
|
||||
Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The licenses for most software are designed to take away your
|
||||
freedom to share and change it. By contrast, the GNU General Public
|
||||
License is intended to guarantee your freedom to share and change free
|
||||
software--to make sure the software is free for all its users. This
|
||||
General Public License applies to most of the Free Software
|
||||
Foundation's software and to any other program whose authors commit to
|
||||
using it. (Some other Free Software Foundation software is covered by
|
||||
the GNU Lesser General Public License instead.) You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
this service if you wish), that you receive source code or can get it
|
||||
if you want it, that you can change the software or use pieces of it
|
||||
in new free programs; and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to make restrictions that forbid
|
||||
anyone to deny you these rights or to ask you to surrender the rights.
|
||||
These restrictions translate to certain responsibilities for you if you
|
||||
distribute copies of the software, or if you modify it.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must give the recipients all the rights that
|
||||
you have. You must make sure that they, too, receive or can get the
|
||||
source code. And you must show them these terms so they know their
|
||||
rights.
|
||||
|
||||
We protect your rights with two steps: (1) copyright the software, and
|
||||
(2) offer you this license which gives you legal permission to copy,
|
||||
distribute and/or modify the software.
|
||||
|
||||
Also, for each author's protection and ours, we want to make certain
|
||||
that everyone understands that there is no warranty for this free
|
||||
software. If the software is modified by someone else and passed on, we
|
||||
want its recipients to know that what they have is not the original, so
|
||||
that any problems introduced by others will not reflect on the original
|
||||
authors' reputations.
|
||||
|
||||
Finally, any free program is threatened constantly by software
|
||||
patents. We wish to avoid the danger that redistributors of a free
|
||||
program will individually obtain patent licenses, in effect making the
|
||||
program proprietary. To prevent this, we have made it clear that any
|
||||
patent must be licensed for everyone's free use or not licensed at all.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. This License applies to any program or other work which contains
|
||||
a notice placed by the copyright holder saying it may be distributed
|
||||
under the terms of this General Public License. The "Program", below,
|
||||
refers to any such program or work, and a "work based on the Program"
|
||||
means either the Program or any derivative work under copyright law:
|
||||
that is to say, a work containing the Program or a portion of it,
|
||||
either verbatim or with modifications and/or translated into another
|
||||
language. (Hereinafter, translation is included without limitation in
|
||||
the term "modification".) Each licensee is addressed as "you".
|
||||
|
||||
Activities other than copying, distribution and modification are not
|
||||
covered by this License; they are outside its scope. The act of
|
||||
running the Program is not restricted, and the output from the Program
|
||||
is covered only if its contents constitute a work based on the
|
||||
Program (independent of having been made by running the Program).
|
||||
Whether that is true depends on what the Program does.
|
||||
|
||||
1. You may copy and distribute verbatim copies of the Program's
|
||||
source code as you receive it, in any medium, provided that you
|
||||
conspicuously and appropriately publish on each copy an appropriate
|
||||
copyright notice and disclaimer of warranty; keep intact all the
|
||||
notices that refer to this License and to the absence of any warranty;
|
||||
and give any other recipients of the Program a copy of this License
|
||||
along with the Program.
|
||||
|
||||
You may charge a fee for the physical act of transferring a copy, and
|
||||
you may at your option offer warranty protection in exchange for a fee.
|
||||
|
||||
2. You may modify your copy or copies of the Program or any portion
|
||||
of it, thus forming a work based on the Program, and copy and
|
||||
distribute such modifications or work under the terms of Section 1
|
||||
above, provided that you also meet all of these conditions:
|
||||
|
||||
a) You must cause the modified files to carry prominent notices
|
||||
stating that you changed the files and the date of any change.
|
||||
|
||||
b) You must cause any work that you distribute or publish, that in
|
||||
whole or in part contains or is derived from the Program or any
|
||||
part thereof, to be licensed as a whole at no charge to all third
|
||||
parties under the terms of this License.
|
||||
|
||||
c) If the modified program normally reads commands interactively
|
||||
when run, you must cause it, when started running for such
|
||||
interactive use in the most ordinary way, to print or display an
|
||||
announcement including an appropriate copyright notice and a
|
||||
notice that there is no warranty (or else, saying that you provide
|
||||
a warranty) and that users may redistribute the program under
|
||||
these conditions, and telling the user how to view a copy of this
|
||||
License. (Exception: if the Program itself is interactive but
|
||||
does not normally print such an announcement, your work based on
|
||||
the Program is not required to print an announcement.)
|
||||
|
||||
These requirements apply to the modified work as a whole. If
|
||||
identifiable sections of that work are not derived from the Program,
|
||||
and can be reasonably considered independent and separate works in
|
||||
themselves, then this License, and its terms, do not apply to those
|
||||
sections when you distribute them as separate works. But when you
|
||||
distribute the same sections as part of a whole which is a work based
|
||||
on the Program, the distribution of the whole must be on the terms of
|
||||
this License, whose permissions for other licensees extend to the
|
||||
entire whole, and thus to each and every part regardless of who wrote it.
|
||||
|
||||
Thus, it is not the intent of this section to claim rights or contest
|
||||
your rights to work written entirely by you; rather, the intent is to
|
||||
exercise the right to control the distribution of derivative or
|
||||
collective works based on the Program.
|
||||
|
||||
In addition, mere aggregation of another work not based on the Program
|
||||
with the Program (or with a work based on the Program) on a volume of
|
||||
a storage or distribution medium does not bring the other work under
|
||||
the scope of this License.
|
||||
|
||||
3. You may copy and distribute the Program (or a work based on it,
|
||||
under Section 2) in object code or executable form under the terms of
|
||||
Sections 1 and 2 above provided that you also do one of the following:
|
||||
|
||||
a) Accompany it with the complete corresponding machine-readable
|
||||
source code, which must be distributed under the terms of Sections
|
||||
1 and 2 above on a medium customarily used for software interchange; or,
|
||||
|
||||
b) Accompany it with a written offer, valid for at least three
|
||||
years, to give any third party, for a charge no more than your
|
||||
cost of physically performing source distribution, a complete
|
||||
machine-readable copy of the corresponding source code, to be
|
||||
distributed under the terms of Sections 1 and 2 above on a medium
|
||||
customarily used for software interchange; or,
|
||||
|
||||
c) Accompany it with the information you received as to the offer
|
||||
to distribute corresponding source code. (This alternative is
|
||||
allowed only for noncommercial distribution and only if you
|
||||
received the program in object code or executable form with such
|
||||
an offer, in accord with Subsection b above.)
|
||||
|
||||
The source code for a work means the preferred form of the work for
|
||||
making modifications to it. For an executable work, complete source
|
||||
code means all the source code for all modules it contains, plus any
|
||||
associated interface definition files, plus the scripts used to
|
||||
control compilation and installation of the executable. However, as a
|
||||
special exception, the source code distributed need not include
|
||||
anything that is normally distributed (in either source or binary
|
||||
form) with the major components (compiler, kernel, and so on) of the
|
||||
operating system on which the executable runs, unless that component
|
||||
itself accompanies the executable.
|
||||
|
||||
If distribution of executable or object code is made by offering
|
||||
access to copy from a designated place, then offering equivalent
|
||||
access to copy the source code from the same place counts as
|
||||
distribution of the source code, even though third parties are not
|
||||
compelled to copy the source along with the object code.
|
||||
|
||||
4. You may not copy, modify, sublicense, or distribute the Program
|
||||
except as expressly provided under this License. Any attempt
|
||||
otherwise to copy, modify, sublicense or distribute the Program is
|
||||
void, and will automatically terminate your rights under this License.
|
||||
However, parties who have received copies, or rights, from you under
|
||||
this License will not have their licenses terminated so long as such
|
||||
parties remain in full compliance.
|
||||
|
||||
5. You are not required to accept this License, since you have not
|
||||
signed it. However, nothing else grants you permission to modify or
|
||||
distribute the Program or its derivative works. These actions are
|
||||
prohibited by law if you do not accept this License. Therefore, by
|
||||
modifying or distributing the Program (or any work based on the
|
||||
Program), you indicate your acceptance of this License to do so, and
|
||||
all its terms and conditions for copying, distributing or modifying
|
||||
the Program or works based on it.
|
||||
|
||||
6. Each time you redistribute the Program (or any work based on the
|
||||
Program), the recipient automatically receives a license from the
|
||||
original licensor to copy, distribute or modify the Program subject to
|
||||
these terms and conditions. You may not impose any further
|
||||
restrictions on the recipients' exercise of the rights granted herein.
|
||||
You are not responsible for enforcing compliance by third parties to
|
||||
this License.
|
||||
|
||||
7. If, as a consequence of a court judgment or allegation of patent
|
||||
infringement or for any other reason (not limited to patent issues),
|
||||
conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot
|
||||
distribute so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you
|
||||
may not distribute the Program at all. For example, if a patent
|
||||
license would not permit royalty-free redistribution of the Program by
|
||||
all those who receive copies directly or indirectly through you, then
|
||||
the only way you could satisfy both it and this License would be to
|
||||
refrain entirely from distribution of the Program.
|
||||
|
||||
If any portion of this section is held invalid or unenforceable under
|
||||
any particular circumstance, the balance of the section is intended to
|
||||
apply and the section as a whole is intended to apply in other
|
||||
circumstances.
|
||||
|
||||
It is not the purpose of this section to induce you to infringe any
|
||||
patents or other property right claims or to contest validity of any
|
||||
such claims; this section has the sole purpose of protecting the
|
||||
integrity of the free software distribution system, which is
|
||||
implemented by public license practices. Many people have made
|
||||
generous contributions to the wide range of software distributed
|
||||
through that system in reliance on consistent application of that
|
||||
system; it is up to the author/donor to decide if he or she is willing
|
||||
to distribute software through any other system and a licensee cannot
|
||||
impose that choice.
|
||||
|
||||
This section is intended to make thoroughly clear what is believed to
|
||||
be a consequence of the rest of this License.
|
||||
|
||||
8. If the distribution and/or use of the Program is restricted in
|
||||
certain countries either by patents or by copyrighted interfaces, the
|
||||
original copyright holder who places the Program under this License
|
||||
may add an explicit geographical distribution limitation excluding
|
||||
those countries, so that distribution is permitted only in or among
|
||||
countries not thus excluded. In such case, this License incorporates
|
||||
the limitation as if written in the body of this License.
|
||||
|
||||
9. The Free Software Foundation may publish revised and/or new versions
|
||||
of the General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the Program
|
||||
specifies a version number of this License which applies to it and "any
|
||||
later version", you have the option of following the terms and conditions
|
||||
either of that version or of any later version published by the Free
|
||||
Software Foundation. If the Program does not specify a version number of
|
||||
this License, you may choose any version ever published by the Free Software
|
||||
Foundation.
|
||||
|
||||
10. If you wish to incorporate parts of the Program into other free
|
||||
programs whose distribution conditions are different, write to the author
|
||||
to ask for permission. For software which is copyrighted by the Free
|
||||
Software Foundation, write to the Free Software Foundation; we sometimes
|
||||
make exceptions for this. Our decision will be guided by the two goals
|
||||
of preserving the free status of all derivatives of our free software and
|
||||
of promoting the sharing and reuse of software generally.
|
||||
|
||||
NO WARRANTY
|
||||
|
||||
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
|
||||
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
|
||||
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
|
||||
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
|
||||
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
||||
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
|
||||
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
|
||||
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
|
||||
REPAIR OR CORRECTION.
|
||||
|
||||
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
|
||||
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
|
||||
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
|
||||
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
|
||||
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
|
||||
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
|
||||
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
|
||||
POSSIBILITY OF SUCH DAMAGES.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
convey the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software; you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation; either version 2 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License along
|
||||
with this program; if not, write to the Free Software Foundation, Inc.,
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program is interactive, make it output a short notice like this
|
||||
when it starts in an interactive mode:
|
||||
|
||||
Gnomovision version 69, Copyright (C) year name of author
|
||||
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, the commands you use may
|
||||
be called something other than `show w' and `show c'; they could even be
|
||||
mouse-clicks or menu items--whatever suits your program.
|
||||
|
||||
You should also get your employer (if you work as a programmer) or your
|
||||
school, if any, to sign a "copyright disclaimer" for the program, if
|
||||
necessary. Here is a sample; alter the names:
|
||||
|
||||
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
|
||||
`Gnomovision' (which makes passes at compilers) written by James Hacker.
|
||||
|
||||
<signature of Ty Coon>, 1 April 1989
|
||||
Ty Coon, President of Vice
|
||||
|
||||
This General Public License does not permit incorporating your program into
|
||||
proprietary programs. If your program is a subroutine library, you may
|
||||
consider it more useful to permit linking proprietary applications with the
|
||||
library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License.
|
502
COPYING.LIB
Normal file
@ -0,0 +1,502 @@
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
Version 2.1, February 1999
|
||||
|
||||
Copyright (C) 1991, 1999 Free Software Foundation, Inc.
|
||||
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
[This is the first released version of the Lesser GPL. It also counts
|
||||
as the successor of the GNU Library Public License, version 2, hence
|
||||
the version number 2.1.]
|
||||
|
||||
Preamble
|
||||
|
||||
The licenses for most software are designed to take away your
|
||||
freedom to share and change it. By contrast, the GNU General Public
|
||||
Licenses are intended to guarantee your freedom to share and change
|
||||
free software--to make sure the software is free for all its users.
|
||||
|
||||
This license, the Lesser General Public License, applies to some
|
||||
specially designated software packages--typically libraries--of the
|
||||
Free Software Foundation and other authors who decide to use it. You
|
||||
can use it too, but we suggest you first think carefully about whether
|
||||
this license or the ordinary General Public License is the better
|
||||
strategy to use in any particular case, based on the explanations below.
|
||||
|
||||
When we speak of free software, we are referring to freedom of use,
|
||||
not price. Our General Public Licenses are designed to make sure that
|
||||
you have the freedom to distribute copies of free software (and charge
|
||||
for this service if you wish); that you receive source code or can get
|
||||
it if you want it; that you can change the software and use pieces of
|
||||
it in new free programs; and that you are informed that you can do
|
||||
these things.
|
||||
|
||||
To protect your rights, we need to make restrictions that forbid
|
||||
distributors to deny you these rights or to ask you to surrender these
|
||||
rights. These restrictions translate to certain responsibilities for
|
||||
you if you distribute copies of the library or if you modify it.
|
||||
|
||||
For example, if you distribute copies of the library, whether gratis
|
||||
or for a fee, you must give the recipients all the rights that we gave
|
||||
you. You must make sure that they, too, receive or can get the source
|
||||
code. If you link other code with the library, you must provide
|
||||
complete object files to the recipients, so that they can relink them
|
||||
with the library after making changes to the library and recompiling
|
||||
it. And you must show them these terms so they know their rights.
|
||||
|
||||
We protect your rights with a two-step method: (1) we copyright the
|
||||
library, and (2) we offer you this license, which gives you legal
|
||||
permission to copy, distribute and/or modify the library.
|
||||
|
||||
To protect each distributor, we want to make it very clear that
|
||||
there is no warranty for the free library. Also, if the library is
|
||||
modified by someone else and passed on, the recipients should know
|
||||
that what they have is not the original version, so that the original
|
||||
author's reputation will not be affected by problems that might be
|
||||
introduced by others.
|
||||
|
||||
Finally, software patents pose a constant threat to the existence of
|
||||
any free program. We wish to make sure that a company cannot
|
||||
effectively restrict the users of a free program by obtaining a
|
||||
restrictive license from a patent holder. Therefore, we insist that
|
||||
any patent license obtained for a version of the library must be
|
||||
consistent with the full freedom of use specified in this license.
|
||||
|
||||
Most GNU software, including some libraries, is covered by the
|
||||
ordinary GNU General Public License. This license, the GNU Lesser
|
||||
General Public License, applies to certain designated libraries, and
|
||||
is quite different from the ordinary General Public License. We use
|
||||
this license for certain libraries in order to permit linking those
|
||||
libraries into non-free programs.
|
||||
|
||||
When a program is linked with a library, whether statically or using
|
||||
a shared library, the combination of the two is legally speaking a
|
||||
combined work, a derivative of the original library. The ordinary
|
||||
General Public License therefore permits such linking only if the
|
||||
entire combination fits its criteria of freedom. The Lesser General
|
||||
Public License permits more lax criteria for linking other code with
|
||||
the library.
|
||||
|
||||
We call this license the "Lesser" General Public License because it
|
||||
does Less to protect the user's freedom than the ordinary General
|
||||
Public License. It also provides other free software developers Less
|
||||
of an advantage over competing non-free programs. These disadvantages
|
||||
are the reason we use the ordinary General Public License for many
|
||||
libraries. However, the Lesser license provides advantages in certain
|
||||
special circumstances.
|
||||
|
||||
For example, on rare occasions, there may be a special need to
|
||||
encourage the widest possible use of a certain library, so that it becomes
|
||||
a de-facto standard. To achieve this, non-free programs must be
|
||||
allowed to use the library. A more frequent case is that a free
|
||||
library does the same job as widely used non-free libraries. In this
|
||||
case, there is little to gain by limiting the free library to free
|
||||
software only, so we use the Lesser General Public License.
|
||||
|
||||
In other cases, permission to use a particular library in non-free
|
||||
programs enables a greater number of people to use a large body of
|
||||
free software. For example, permission to use the GNU C Library in
|
||||
non-free programs enables many more people to use the whole GNU
|
||||
operating system, as well as its variant, the GNU/Linux operating
|
||||
system.
|
||||
|
||||
Although the Lesser General Public License is Less protective of the
|
||||
users' freedom, it does ensure that the user of a program that is
|
||||
linked with the Library has the freedom and the wherewithal to run
|
||||
that program using a modified version of the Library.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow. Pay close attention to the difference between a
|
||||
"work based on the library" and a "work that uses the library". The
|
||||
former contains code derived from the library, whereas the latter must
|
||||
be combined with the library in order to run.
|
||||
|
||||
GNU LESSER GENERAL PUBLIC LICENSE
|
||||
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
|
||||
|
||||
0. This License Agreement applies to any software library or other
|
||||
program which contains a notice placed by the copyright holder or
|
||||
other authorized party saying it may be distributed under the terms of
|
||||
this Lesser General Public License (also called "this License").
|
||||
Each licensee is addressed as "you".
|
||||
|
||||
A "library" means a collection of software functions and/or data
|
||||
prepared so as to be conveniently linked with application programs
|
||||
(which use some of those functions and data) to form executables.
|
||||
|
||||
The "Library", below, refers to any such software library or work
|
||||
which has been distributed under these terms. A "work based on the
|
||||
Library" means either the Library or any derivative work under
|
||||
copyright law: that is to say, a work containing the Library or a
|
||||
portion of it, either verbatim or with modifications and/or translated
|
||||
straightforwardly into another language. (Hereinafter, translation is
|
||||
included without limitation in the term "modification".)
|
||||
|
||||
"Source code" for a work means the preferred form of the work for
|
||||
making modifications to it. For a library, complete source code means
|
||||
all the source code for all modules it contains, plus any associated
|
||||
interface definition files, plus the scripts used to control compilation
|
||||
and installation of the library.
|
||||
|
||||
Activities other than copying, distribution and modification are not
|
||||
covered by this License; they are outside its scope. The act of
|
||||
running a program using the Library is not restricted, and output from
|
||||
such a program is covered only if its contents constitute a work based
|
||||
on the Library (independent of the use of the Library in a tool for
|
||||
writing it). Whether that is true depends on what the Library does
|
||||
and what the program that uses the Library does.
|
||||
|
||||
1. You may copy and distribute verbatim copies of the Library's
|
||||
complete source code as you receive it, in any medium, provided that
|
||||
you conspicuously and appropriately publish on each copy an
appropriate copyright notice and disclaimer of warranty; keep intact
all the notices that refer to this License and to the absence of any
warranty; and distribute a copy of this License along with the
Library.

  You may charge a fee for the physical act of transferring a copy,
and you may at your option offer warranty protection in exchange for a
fee.

  2. You may modify your copy or copies of the Library or any portion
of it, thus forming a work based on the Library, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:

    a) The modified work must itself be a software library.

    b) You must cause the files modified to carry prominent notices
    stating that you changed the files and the date of any change.

    c) You must cause the whole of the work to be licensed at no
    charge to all third parties under the terms of this License.

    d) If a facility in the modified Library refers to a function or a
    table of data to be supplied by an application program that uses
    the facility, other than as an argument passed when the facility
    is invoked, then you must make a good faith effort to ensure that,
    in the event an application does not supply such function or
    table, the facility still operates, and performs whatever part of
    its purpose remains meaningful.

    (For example, a function in a library to compute square roots has
    a purpose that is entirely well-defined independent of the
    application.  Therefore, Subsection 2d requires that any
    application-supplied function or table used by this function must
    be optional: if the application does not supply it, the square
    root function must still compute square roots.)

These requirements apply to the modified work as a whole.  If
identifiable sections of that work are not derived from the Library,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works.  But when you
distribute the same sections as part of a whole which is a work based
on the Library, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote
it.

Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Library.

In addition, mere aggregation of another work not based on the Library
with the Library (or with a work based on the Library) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.

  3. You may opt to apply the terms of the ordinary GNU General Public
License instead of this License to a given copy of the Library.  To do
this, you must alter all the notices that refer to this License, so
that they refer to the ordinary GNU General Public License, version 2,
instead of to this License.  (If a newer version than version 2 of the
ordinary GNU General Public License has appeared, then you can specify
that version instead if you wish.)  Do not make any other change in
these notices.

  Once this change is made in a given copy, it is irreversible for
that copy, so the ordinary GNU General Public License applies to all
subsequent copies and derivative works made from that copy.

  This option is useful when you wish to copy part of the code of
the Library into a program that is not a library.

  4. You may copy and distribute the Library (or a portion or
derivative of it, under Section 2) in object code or executable form
under the terms of Sections 1 and 2 above provided that you accompany
it with the complete corresponding machine-readable source code, which
must be distributed under the terms of Sections 1 and 2 above on a
medium customarily used for software interchange.

  If distribution of object code is made by offering access to copy
from a designated place, then offering equivalent access to copy the
source code from the same place satisfies the requirement to
distribute the source code, even though third parties are not
compelled to copy the source along with the object code.

  5. A program that contains no derivative of any portion of the
Library, but is designed to work with the Library by being compiled or
linked with it, is called a "work that uses the Library".  Such a
work, in isolation, is not a derivative work of the Library, and
therefore falls outside the scope of this License.

  However, linking a "work that uses the Library" with the Library
creates an executable that is a derivative of the Library (because it
contains portions of the Library), rather than a "work that uses the
library".  The executable is therefore covered by this License.
Section 6 states terms for distribution of such executables.

  When a "work that uses the Library" uses material from a header file
that is part of the Library, the object code for the work may be a
derivative work of the Library even though the source code is not.
Whether this is true is especially significant if the work can be
linked without the Library, or if the work is itself a library.  The
threshold for this to be true is not precisely defined by law.

  If such an object file uses only numerical parameters, data
structure layouts and accessors, and small macros and small inline
functions (ten lines or less in length), then the use of the object
file is unrestricted, regardless of whether it is legally a derivative
work.  (Executables containing this object code plus portions of the
Library will still fall under Section 6.)

  Otherwise, if the work is a derivative of the Library, you may
distribute the object code for the work under the terms of Section 6.
Any executables containing that work also fall under Section 6,
whether or not they are linked directly with the Library itself.

  6. As an exception to the Sections above, you may also combine or
link a "work that uses the Library" with the Library to produce a
work containing portions of the Library, and distribute that work
under terms of your choice, provided that the terms permit
modification of the work for the customer's own use and reverse
engineering for debugging such modifications.

  You must give prominent notice with each copy of the work that the
Library is used in it and that the Library and its use are covered by
this License.  You must supply a copy of this License.  If the work
during execution displays copyright notices, you must include the
copyright notice for the Library among them, as well as a reference
directing the user to the copy of this License.  Also, you must do one
of these things:

    a) Accompany the work with the complete corresponding
    machine-readable source code for the Library including whatever
    changes were used in the work (which must be distributed under
    Sections 1 and 2 above); and, if the work is an executable linked
    with the Library, with the complete machine-readable "work that
    uses the Library", as object code and/or source code, so that the
    user can modify the Library and then relink to produce a modified
    executable containing the modified Library.  (It is understood
    that the user who changes the contents of definitions files in the
    Library will not necessarily be able to recompile the application
    to use the modified definitions.)

    b) Use a suitable shared library mechanism for linking with the
    Library.  A suitable mechanism is one that (1) uses at run time a
    copy of the library already present on the user's computer system,
    rather than copying library functions into the executable, and (2)
    will operate properly with a modified version of the library, if
    the user installs one, as long as the modified version is
    interface-compatible with the version that the work was made with.

    c) Accompany the work with a written offer, valid for at
    least three years, to give the same user the materials
    specified in Subsection 6a, above, for a charge no more
    than the cost of performing this distribution.

    d) If distribution of the work is made by offering access to copy
    from a designated place, offer equivalent access to copy the above
    specified materials from the same place.

    e) Verify that the user has already received a copy of these
    materials or that you have already sent this user a copy.

  For an executable, the required form of the "work that uses the
Library" must include any data and utility programs needed for
reproducing the executable from it.  However, as a special exception,
the materials to be distributed need not include anything that is
normally distributed (in either source or binary form) with the major
components (compiler, kernel, and so on) of the operating system on
which the executable runs, unless that component itself accompanies
the executable.

  It may happen that this requirement contradicts the license
restrictions of other proprietary libraries that do not normally
accompany the operating system.  Such a contradiction means you cannot
use both them and the Library together in an executable that you
distribute.

  7. You may place library facilities that are a work based on the
Library side-by-side in a single library together with other library
facilities not covered by this License, and distribute such a combined
library, provided that the separate distribution of the work based on
the Library and of the other library facilities is otherwise
permitted, and provided that you do these two things:

    a) Accompany the combined library with a copy of the same work
    based on the Library, uncombined with any other library
    facilities.  This must be distributed under the terms of the
    Sections above.

    b) Give prominent notice with the combined library of the fact
    that part of it is a work based on the Library, and explaining
    where to find the accompanying uncombined form of the same work.

  8. You may not copy, modify, sublicense, link with, or distribute
the Library except as expressly provided under this License.  Any
attempt otherwise to copy, modify, sublicense, link with, or
distribute the Library is void, and will automatically terminate your
rights under this License.  However, parties who have received copies,
or rights, from you under this License will not have their licenses
terminated so long as such parties remain in full compliance.

  9. You are not required to accept this License, since you have not
signed it.  However, nothing else grants you permission to modify or
distribute the Library or its derivative works.  These actions are
prohibited by law if you do not accept this License.  Therefore, by
modifying or distributing the Library (or any work based on the
Library), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Library or works based on it.

  10. Each time you redistribute the Library (or any work based on the
Library), the recipient automatically receives a license from the
original licensor to copy, distribute, link with or modify the Library
subject to these terms and conditions.  You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties with
this License.

  11. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License.  If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Library at all.  For example, if a patent
license would not permit royalty-free redistribution of the Library by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Library.

If any portion of this section is held invalid or unenforceable under any
particular circumstance, the balance of the section is intended to apply,
and the section as a whole is intended to apply in other circumstances.

It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system which is
implemented by public license practices.  Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.

This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.

  12. If the distribution and/or use of the Library is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Library under this License may add
an explicit geographical distribution limitation excluding those countries,
so that distribution is permitted only in or among countries not thus
excluded.  In such case, this License incorporates the limitation as if
written in the body of this License.

  13. The Free Software Foundation may publish revised and/or new
versions of the Lesser General Public License from time to time.
Such new versions will be similar in spirit to the present version,
but may differ in detail to address new problems or concerns.

Each version is given a distinguishing version number.  If the Library
specifies a version number of this License which applies to it and
"any later version", you have the option of following the terms and
conditions either of that version or of any later version published by
the Free Software Foundation.  If the Library does not specify a
license version number, you may choose any version ever published by
the Free Software Foundation.

  14. If you wish to incorporate parts of the Library into other free
programs whose distribution conditions are incompatible with these,
write to the author to ask for permission.  For software which is
copyrighted by the Free Software Foundation, write to the Free
Software Foundation; we sometimes make exceptions for this.  Our
decision will be guided by the two goals of preserving the free status
of all derivatives of our free software and of promoting the sharing
and reuse of software generally.

                            NO WARRANTY

  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.

  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.

                     END OF TERMS AND CONDITIONS

           How to Apply These Terms to Your New Libraries

  If you develop a new library, and you want it to be of the greatest
possible use to the public, we recommend making it free software that
everyone can redistribute and change.  You can do so by permitting
redistribution under these terms (or, alternatively, under the terms of the
ordinary General Public License).

  To apply these terms, attach the following notices to the library.  It is
safest to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least the
"copyright" line and a pointer to where the full notice is found.

    <one line to give the library's name and a brief idea of what it does.>
    Copyright (C) <year> <name of author>

    This library is free software; you can redistribute it and/or
    modify it under the terms of the GNU Lesser General Public
    License as published by the Free Software Foundation; either
    version 2.1 of the License, or (at your option) any later version.

    This library is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
    Lesser General Public License for more details.

    You should have received a copy of the GNU Lesser General Public
    License along with this library; if not, write to the Free Software
    Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

Also add information on how to contact you by electronic and paper mail.

You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the library, if
necessary.  Here is a sample; alter the names:

  Yoyodyne, Inc., hereby disclaims all copyright interest in the
  library `Frob' (a library for tweaking knobs) written by James Random Hacker.

  <signature of Ty Coon>, 1 April 1990
  Ty Coon, President of Vice

That's all there is to it!
Info.plist (new file, 38 lines)
@@ -0,0 +1,38 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>CFBundleDevelopmentRegion</key>
    <string>en</string>
    <key>CFBundleExecutable</key>
    <string>xemu</string>
    <key>CFBundleIconFile</key>
    <string>xemu.icns</string>
    <key>CFBundleIdentifier</key>
    <string>xemu.app.0</string>
    <key>CFBundleInfoDictionaryVersion</key>
    <string>6.0</string>
    <key>CFBundleName</key>
    <string>xemu</string>
    <key>CFBundlePackageType</key>
    <string>APPL</string>
    <key>CFBundleShortVersionString</key>
    <string>1</string>
    <key>CFBundleSignature</key>
    <string>xemu</string>
    <key>CFBundleVersion</key>
    <string>1</string>
    <key>LSApplicationCategoryType</key>
    <string>public.app-category.games</string>
    <key>LSMinimumSystemVersion</key>
    <string>10.6</string>
    <key>NSPrincipalClass</key>
    <string>NSApplication</string>
    <key>NSHighResolutionCapable</key>
    <true/>
    <key>com.apple.security.cs.allow-jit</key>
    <true/>
    <key>NSMicrophoneUsageDescription</key>
    <string>Microphone used for peripheral emulation, e.g. Xbox Live Communicator.</string>
</dict>
</plist>
Kconfig (new file, 7 lines)
@@ -0,0 +1,7 @@
source Kconfig.host
source backends/Kconfig
source accel/Kconfig
source target/Kconfig
source hw/Kconfig
source semihosting/Kconfig
source rust/Kconfig
Kconfig.host (new file, 63 lines)
@@ -0,0 +1,63 @@
# These are "proxy" symbols used to pass config-host.mak values
# down to Kconfig.  See also kconfig_external_symbols in
# meson.build: these two need to be kept in sync.

config LINUX
    bool

config LIBCBOR
    bool

config GNUTLS
    bool

config OPENGL
    bool

config X11
    bool

config PIXMAN
    bool

config SPICE
    bool

config IVSHMEM
    bool

config TPM
    bool

config FDT
    bool

config VHOST_USER
    bool

config VHOST_VDPA
    bool

config VHOST_KERNEL
    bool

config VIRTFS
    bool

config MULTIPROCESS_ALLOWED
    bool
    imply MULTIPROCESS

config FUZZ
    bool
    select SPARSE_MEM

config VFIO_USER_SERVER_ALLOWED
    bool
    imply VFIO_USER_SERVER

config HV_BALLOON_POSSIBLE
    bool

config HAVE_RUST
    bool
LICENSE (new file, 27 lines)
@@ -0,0 +1,27 @@
The QEMU distribution includes both the QEMU emulator and
various firmware files.  These are separate programs that are
distributed together for our users' convenience, and they have
separate licenses.

The following points clarify the license of the QEMU emulator:

1) The QEMU emulator as a whole is released under the GNU General
Public License, version 2.

2) Parts of the QEMU emulator have specific licenses which are compatible
with the GNU General Public License, version 2. Hence each source file
contains its own licensing information.  Source files with no licensing
information are released under the GNU General Public License, version
2 or (at your option) any later version.

As of July 2013, contributions under version 2 of the GNU General Public
License (and no later version) are only accepted for the following files
or directories: bsd-user/, linux-user/, hw/vfio/, hw/xen/xen_pt*.

3) The Tiny Code Generator (TCG) is mostly under the BSD or MIT licenses;
but some parts may be GPLv2 or other licenses. Again, see the
specific licensing information in each source file.

4) QEMU is a trademark of Fabrice Bellard.

Fabrice Bellard and the QEMU team
MAINTAINERS (new file, 4275 lines)
File diff suppressed because it is too large
Makefile (new file, 336 lines)
@@ -0,0 +1,336 @@
|
||||
# Makefile for QEMU.
|
||||
|
||||
ifneq ($(words $(subst :, ,$(CURDIR))), 1)
|
||||
$(error main directory cannot contain spaces nor colons)
|
||||
endif
|
||||
|
||||
# Always point to the root of the build tree (needs GNU make).
|
||||
BUILD_DIR=$(CURDIR)
|
||||
|
||||
# Before including a proper config-host.mak, assume we are in the source tree
|
||||
SRC_PATH=.
|
||||
|
||||
# Don't use implicit rules or variables
|
||||
# we have explicit rules for everything
|
||||
MAKEFLAGS += -rR
|
||||
|
||||
SHELL = bash -o pipefail
|
||||
|
||||
# Usage: $(call quiet-command,command and args,"NAME","args to print")
|
||||
# This will run "command and args", and either:
|
||||
# if V=1 just print the whole command and args
|
||||
# otherwise print the 'quiet' output in the format " NAME args to print"
|
||||
# NAME should be a short name of the command, 7 letters or fewer.
|
||||
# If called with only a single argument, will print nothing in quiet mode.
|
||||
quiet-command-run = $(if $(V),,$(if $2,printf " %-7s %s\n" $2 $3 && ))$1
|
||||
quiet-@ = $(if $(V),,@)
|
||||
quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)
|
||||
|
||||
UNCHECKED_GOALS := TAGS gtags cscope ctags dist \
|
||||
help check-help print-% \
|
||||
docker docker-% lcitool-refresh vm-help vm-test vm-build-%
|
||||
|
||||
all:
|
||||
.PHONY: all clean distclean recurse-all dist msi FORCE
|
||||
|
||||
# Don't try to regenerate Makefile or configure
|
||||
# We don't generate any of them
|
||||
Makefile: ;
|
||||
configure: ;
|
||||
|
||||
# All following code might depend on configuration variables
|
||||
ifneq ($(wildcard config-host.mak),)
|
||||
include config-host.mak
|
||||
|
||||
include Makefile.prereqs
|
||||
Makefile.prereqs: config-host.mak
|
||||
|
||||
# 0. ensure the build tree is okay
|
||||
|
||||
# Check that we're not trying to do an out-of-tree build from
|
||||
# a tree that's been used for an in-tree build.
|
||||
ifneq ($(realpath $(SRC_PATH)),$(realpath .))
|
||||
ifneq ($(wildcard $(SRC_PATH)/config-host.mak),)
|
||||
$(error This is an out of tree build but your source tree ($(SRC_PATH)) \
|
||||
seems to have been used for an in-tree build. You can fix this by running \
|
||||
"$(MAKE) distclean && rm -rf *-linux-user *-softmmu" in your source tree)
|
||||
endif
|
||||
endif
|
||||
|
||||
# force a rerun of configure if config-host.mak is too old or corrupted
|
||||
ifeq ($(MESON),)
|
||||
.PHONY: config-host.mak
|
||||
x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
endif
|
||||
ifeq ($(NINJA),)
|
||||
.PHONY: config-host.mak
|
||||
x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
else
|
||||
export NINJA
|
||||
endif
|
||||
ifeq ($(wildcard build.ninja),)
|
||||
.PHONY: config-host.mak
|
||||
x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
endif
|
||||
ifeq ($(origin prefix),file)
|
||||
.PHONY: config-host.mak
|
||||
x := $(shell rm -rf meson-private meson-info meson-logs)
|
||||
endif
|
||||
|
||||
# 1. ensure config-host.mak is up-to-date
|
||||
config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh \
|
||||
$(SRC_PATH)/pythondeps.toml $(SRC_PATH)/QEMU_VERSION
|
||||
@echo config-host.mak is out-of-date, running configure
|
||||
@if test -f meson-private/coredata.dat; then \
|
||||
./config.status --skip-meson; \
|
||||
else \
|
||||
./config.status; \
|
||||
fi
|
||||
|
||||
# 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
|
||||
# works), but otherwise never needs to be updated
|
||||
|
||||
meson-private/coredata.dat: meson.stamp
|
||||
meson.stamp: config-host.mak
|
||||
@touch meson.stamp
|
||||
|
||||
# 3. ensure meson-generated build files are up-to-date
|
||||
|
||||
ifneq ($(NINJA),)
|
||||
Makefile.ninja: build.ninja
|
||||
$(quiet-@){ \
|
||||
echo 'ninja-targets = \'; \
|
||||
$(NINJA) -t targets all | sed 's/:.*//; $$!s/$$/ \\/'; \
|
||||
echo 'build-files = \'; \
|
||||
$(NINJA) -t query build.ninja | sed -n '1,/^ input:/d; /^ outputs:/q; s/$$/ \\/p'; \
|
||||
} > $@.tmp && mv $@.tmp $@
|
||||
-include Makefile.ninja
|
||||
endif
|
||||
|
||||
ifneq ($(MESON),)
|
||||
# The path to meson always points to pyvenv/bin/meson, but the absolute
|
||||
# paths could change. In that case, force a regeneration of build.ninja.
|
||||
# Note that this invocation of $(NINJA), just like when Make rebuilds
|
||||
# Makefiles, does not include -n.
|
||||
build.ninja: build.ninja.stamp
|
||||
$(build-files):
|
||||
build.ninja.stamp: meson.stamp $(build-files)
|
||||
@if test "$$(cat build.ninja.stamp)" = "$(MESON)" && test -n "$(NINJA)"; then \
|
||||
$(NINJA) build.ninja; \
|
||||
else \
|
||||
echo "$(MESON) setup --reconfigure $(SRC_PATH)"; \
|
||||
$(MESON) setup --reconfigure $(SRC_PATH); \
|
||||
fi && echo "$(MESON)" > $@
|
||||
|
||||
Makefile.mtest: build.ninja scripts/mtest2make.py
|
||||
$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
|
||||
-include Makefile.mtest
|
||||
|
||||
.PHONY: update-buildoptions
|
||||
all update-buildoptions: $(SRC_PATH)/scripts/meson-buildoptions.sh
|
||||
$(SRC_PATH)/scripts/meson-buildoptions.sh: $(SRC_PATH)/meson_options.txt
|
||||
$(MESON) introspect --buildoptions $(SRC_PATH)/meson.build | $(PYTHON) \
|
||||
scripts/meson-buildoptions.py > $@.tmp && mv $@.tmp $@
|
||||
endif
|
||||
|
||||
# 4. Rules to bridge to other makefiles
|
||||
|
||||
ifneq ($(NINJA),)
|
||||
# Filter out long options to avoid flags like --no-print-directory which
|
||||
# may result in false positive match for MAKE.n
|
||||
MAKE.n = $(findstring n,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS))))
|
||||
MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
|
||||
NINJAFLAGS = \
|
||||
$(if $V,-v) \
|
||||
$(if $(MAKE.n), -n) \
|
||||
$(if $(MAKE.k), -k0) \
|
||||
$(filter-out -j, \
|
||||
$(or $(filter -l% -j%, $(MAKEFLAGS)), \
|
||||
$(if $(filter --jobserver-auth=%, $(MAKEFLAGS)),, -j1))) \
|
||||
-d keepdepfile
|
||||
ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
|
||||
ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
|
||||
|
||||
makefile-targets := build.ninja ctags TAGS cscope dist clean
|
||||
# "ninja -t targets" also lists all prerequisites. If build system
|
||||
# files are marked as PHONY, however, Make will always try to execute
|
||||
# "ninja build.ninja".
|
||||
ninja-targets := $(filter-out $(build-files) $(makefile-targets), $(ninja-targets))
|
||||
.PHONY: $(ninja-targets) run-ninja
|
||||
$(ninja-targets): run-ninja
|
||||
|
||||
# Use "| cat" to give Ninja a more "make-y" output. Use "+" to bypass the
|
||||
# --output-sync line.
|
||||
run-ninja: config-host.mak
|
||||
ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
|
||||
+$(if $(MAKE.nq),@:,$(quiet-@)$(NINJA) $(NINJAFLAGS) \
|
||||
$(sort $(filter $(ninja-targets), $(ninja-cmd-goals))) | cat)
|
||||
endif
|
||||
endif
|
||||
|
||||
else # config-host.mak does not exist
|
||||
ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
|
||||
# $(error Please call configure before running make)
|
||||
endif
|
||||
endif # config-host.mak does not exist
|
||||
|
||||
SUBDIR_MAKEFLAGS=$(if $(V),,--no-print-directory --quiet)
|
||||
|
||||
include $(SRC_PATH)/tests/Makefile.include
|
||||
|
||||
all: recurse-all
|
||||
|
||||
SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS)))
|
||||
.PHONY: $(SUBDIR_RULES)
|
||||
$(SUBDIR_RULES):
|
||||
$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
|
||||
|
||||
.PHONY: recurse-all recurse-clean
|
||||
recurse-all: $(addsuffix /all, $(SUBDIRS))
|
||||
recurse-clean: $(addsuffix /clean, $(SUBDIRS))
|
||||
recurse-distclean: $(addsuffix /distclean, $(SUBDIRS))
|
||||
|
||||
######################################################################
|
||||
|
||||
clean: recurse-clean
|
||||
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean || :
|
||||
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) clean-ctlist || :
|
||||
find . \( -name '*.so' -o -name '*.dll' -o \
|
||||
-name '*.[oda]' -o -name '*.gcno' \) -type f \
|
||||
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-aarch64.a \
|
||||
! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \
|
||||
-exec rm {} +
|
||||
rm -f TAGS cscope.* *~ */*~
|
||||
@$(MAKE) -Ctests/qemu-iotests clean
|
||||
|
||||
VERSION = $(shell cat $(SRC_PATH)/QEMU_VERSION)
|
||||
|
||||
dist: qemu-$(VERSION).tar.bz2
|
||||
|
||||
qemu-%.tar.bz2:
|
||||
$(SRC_PATH)/scripts/make-release "$(SRC_PATH)" "$(patsubst qemu-%.tar.bz2,%,$@)"
|
||||
|
||||
distclean: clean recurse-distclean
|
||||
-$(quiet-@)test -f build.ninja && $(NINJA) $(NINJAFLAGS) -t clean -g || :
|
||||
rm -f config-host.mak Makefile.prereqs
|
||||
rm -f tests/tcg/*/config-target.mak tests/tcg/config-host.mak
|
||||
rm -f config.status
|
||||
rm -f roms/seabios/config.mak
|
||||
rm -f qemu-plugins-ld.symbols qemu-plugins-ld64.symbols
|
||||
rm -f *-config-target.h *-config-devices.mak *-config-devices.h
|
||||
rm -rf meson-private meson-logs meson-info compile_commands.json
|
||||
rm -f Makefile.ninja Makefile.mtest build.ninja.stamp meson.stamp
|
||||
rm -f config.log
|
||||
rm -f linux-headers/asm
|
||||
rm -Rf .sdk qemu-bundle
|
||||
|
||||
find-src-path = find "$(SRC_PATH)" -path "$(SRC_PATH)/meson" -prune -o \
|
||||
-type l -prune -o \( -name "*.[chsS]" -o -name "*.[ch].inc" \)
|
||||
|
||||
.PHONY: ctags
|
||||
ctags:
|
||||
$(call quiet-command, \
|
||||
rm -f "$(SRC_PATH)/"tags, \
|
||||
"CTAGS", "Remove old tags")
|
||||
$(call quiet-command, \
|
||||
$(find-src-path) -exec ctags \
|
||||
-f "$(SRC_PATH)/"tags --append {} +, \
|
||||
"CTAGS", "Re-index $(SRC_PATH)")
|
||||
|
||||
.PHONY: gtags
|
||||
gtags:
|
||||
$(call quiet-command, \
|
||||
rm -f "$(SRC_PATH)/"GTAGS; \
|
||||
rm -f "$(SRC_PATH)/"GRTAGS; \
|
||||
rm -f "$(SRC_PATH)/"GPATH, \
|
||||
"GTAGS", "Remove old $@ files")
|
||||
$(call quiet-command, \
|
||||
(cd $(SRC_PATH) && \
|
||||
$(find-src-path) -print | gtags -f -), \
|
||||
"GTAGS", "Re-index $(SRC_PATH)")
|
||||
|
||||
.PHONY: TAGS
|
||||
TAGS:
|
||||
$(call quiet-command, \
|
||||
rm -f "$(SRC_PATH)/"TAGS, \
|
||||
"TAGS", "Remove old $@")
|
||||
$(call quiet-command, \
|
||||
$(find-src-path) -exec etags \
|
||||
-f "$(SRC_PATH)/"TAGS --append {} +, \
|
||||
"TAGS", "Re-index $(SRC_PATH)")
|
||||
|
||||
.PHONY: cscope
|
||||
cscope:
|
||||
$(call quiet-command, \
|
||||
rm -f "$(SRC_PATH)/"cscope.* , \
|
||||
"cscope", "Remove old $@ files")
|
||||
$(call quiet-command, \
|
||||
($(find-src-path) -print | sed -e 's,^\./,,' \
|
||||
> "$(SRC_PATH)/cscope.files"), \
|
||||
"cscope", "Create file list")
|
||||
$(call quiet-command, \
|
||||
cscope -b -i"$(SRC_PATH)/cscope.files" \
|
||||
-f"$(SRC_PATH)"/cscope.out, \
|
||||
"cscope", "Re-index $(SRC_PATH)")
|
||||
|
||||
# Needed by "meson install"
|
||||
export DESTDIR
|
||||
|
||||
include $(SRC_PATH)/tests/lcitool/Makefile.include
|
||||
include $(SRC_PATH)/tests/docker/Makefile.include
|
||||
include $(SRC_PATH)/tests/vm/Makefile.include
|
||||
|
||||
print-help-run = printf " %-30s - %s\\n" "$1" "$2"
|
||||
print-help = @$(call print-help-run,$1,$2)
|
||||
|
||||
.PHONY: update-linux-vdso
|
||||
update-linux-vdso:
|
||||
@for m in $(SRC_PATH)/linux-user/*/Makefile.vdso; do \
|
||||
$(MAKE) $(SUBDIR_MAKEFLAGS) -C $$(dirname $$m) -f Makefile.vdso \
|
||||
SRC_PATH=$(SRC_PATH) BUILD_DIR=$(BUILD_DIR); \
|
||||
done
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo 'Generic targets:'
|
||||
$(call print-help,all,Build all)
|
||||
$(call print-help,dir/file.o,Build specified target only)
|
||||
$(call print-help,install,Install QEMU, documentation and tools)
|
||||
$(call print-help,ctags/gtags/TAGS,Generate tags file for editors)
|
||||
$(call print-help,cscope,Generate cscope index)
|
||||
$(call print-help,sparse,Run sparse on the QEMU source)
|
||||
@echo ''
|
||||
@echo 'Cleaning targets:'
|
||||
$(call print-help,clean,Remove most generated files but keep the config)
|
||||
$(call print-help,distclean,Remove all generated files)
|
||||
$(call print-help,dist,Build a distributable tarball)
|
||||
@echo ''
|
||||
@echo 'Linux-user targets:'
|
||||
$(call print-help,update-linux-vdso,Build linux-user vdso images)
|
||||
@echo ''
|
||||
@echo 'Test targets:'
|
||||
$(call print-help,check,Run all tests (check-help for details))
|
||||
$(call print-help,bench,Run all benchmarks)
|
||||
$(call print-help,lcitool-help,Help about targets for managing build environment manifests)
|
||||
$(call print-help,docker-help,Help about targets running tests inside containers)
|
||||
$(call print-help,vm-help,Help about targets running tests inside VM)
|
||||
@echo ''
|
||||
@echo 'Documentation targets:'
|
||||
$(call print-help,html man,Build documentation in specified format)
|
||||
@echo ''
|
||||
ifneq ($(filter msi, $(ninja-targets)),)
|
||||
@echo 'Windows targets:'
|
||||
$(call print-help,installer,Build NSIS-based installer for QEMU)
|
||||
$(call print-help,msi,Build MSI-based installer for qemu-ga)
|
||||
@echo ''
|
||||
endif
|
||||
$(call print-help,$(MAKE) [targets],(quiet build, default))
|
||||
$(call print-help,$(MAKE) V=1 [targets],(verbose build))
|
||||
|
||||
# will delete the target of a rule if commands exit with a nonzero exit status
|
||||
.DELETE_ON_ERROR:
|
||||
|
||||
print-%:
|
||||
@echo '$*=$($*)'
|
QEMU_VERSION (new file, 1 line)
@@ -0,0 +1 @@
9.2.0
README.md (new file, 1 line)
@@ -0,0 +1 @@
Please visit [https://xemu.app](https://xemu.app) for more information.
README.rst (new file, 171 lines)
@@ -0,0 +1,171 @@
===========
QEMU README
===========

QEMU is a generic and open source machine & userspace emulator and
virtualizer.

QEMU is capable of emulating a complete machine in software without any
need for hardware virtualization support. By using dynamic translation,
it achieves very good performance. QEMU can also integrate with the Xen
and KVM hypervisors to provide emulated hardware while allowing the
hypervisor to manage the CPU. With hypervisor support, QEMU can achieve
near native performance for CPUs. When QEMU emulates CPUs directly it is
capable of running operating systems made for one machine (e.g. an ARMv7
board) on a different machine (e.g. an x86_64 PC board).

QEMU is also capable of providing userspace API virtualization for Linux
and BSD kernel interfaces. This allows binaries compiled against one
architecture ABI (e.g. the Linux PPC64 ABI) to be run on a host using a
different architecture ABI (e.g. the Linux x86_64 ABI). This does not
involve any hardware emulation, simply CPU and syscall emulation.

QEMU aims to fit into a variety of use cases. It can be invoked directly
by users wishing to have full control over its behaviour and settings.
It also aims to facilitate integration into higher level management
layers, by providing a stable command line interface and monitor API.
It is commonly invoked indirectly via the libvirt library when using
open source applications such as oVirt, OpenStack and virt-manager.

QEMU as a whole is released under the GNU General Public License,
version 2. For full licensing details, consult the LICENSE file.


Documentation
=============

Documentation can be found hosted online at
`<https://www.qemu.org/documentation/>`_. The documentation for the
current development version that is available at
`<https://www.qemu.org/docs/master/>`_ is generated from the ``docs/``
folder in the source tree, and is built by `Sphinx
<https://www.sphinx-doc.org/en/master/>`_.
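
If you already have a configured build tree (see the Building section
below), the same documentation can be rebuilt locally; the top-level
Makefile exposes ``html`` and ``man`` helper targets for this. A minimal
sketch, assuming the ``build`` directory described in the next section:

.. code-block:: shell

   cd build      # the build tree created in the Building section below
   make html     # rebuild the HTML manual
   make man      # rebuild the man pages
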

Building
========

QEMU is multi-platform software intended to be buildable on all modern
Linux platforms, OS-X, Win32 (via the Mingw64 toolchain) and a variety
of other UNIX targets. The simple steps to build QEMU are:

.. code-block:: shell

   mkdir build
   cd build
   ../configure
   make

Additional information can also be found online via the QEMU website:

* `<https://wiki.qemu.org/Hosts/Linux>`_
* `<https://wiki.qemu.org/Hosts/Mac>`_
* `<https://wiki.qemu.org/Hosts/W32>`_
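
By default configure enables every supported target; if you only need a
few, the build can be narrowed at configure time. An illustrative
invocation (the target list below is only an example, adjust it to your
needs):

.. code-block:: shell

   mkdir build && cd build
   ../configure --target-list=x86_64-softmmu,aarch64-softmmu
   make -j"$(nproc)"
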

Submitting patches
==================

The QEMU source code is maintained under the GIT version control system.

.. code-block:: shell

   git clone https://gitlab.com/qemu-project/qemu.git

When submitting patches, one common approach is to use 'git
format-patch' and/or 'git send-email' to format & send the mail to the
qemu-devel@nongnu.org mailing list. All patches submitted must contain
a 'Signed-off-by' line from the author. Patches should follow the
guidelines set out in the `style section
<https://www.qemu.org/docs/master/devel/style.html>`_ of
the Developers Guide.
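
As a purely illustrative sketch of that flow (the output directory,
revision number and base branch below are examples, not project
requirements):

.. code-block:: shell

   git format-patch -v1 --cover-letter -o outgoing/ origin/master
   git send-email --to=qemu-devel@nongnu.org outgoing/*.patch
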

Additional information on submitting patches can be found online via
the QEMU website:

* `<https://wiki.qemu.org/Contribute/SubmitAPatch>`_
* `<https://wiki.qemu.org/Contribute/TrivialPatches>`_

The QEMU website is also maintained under source control.

.. code-block:: shell

   git clone https://gitlab.com/qemu-project/qemu-web.git

* `<https://www.qemu.org/2017/02/04/the-new-qemu-website-is-up/>`_

A 'git-publish' utility was created to make the above process less
cumbersome, and is highly recommended for making regular contributions,
or even just for sending consecutive patch series revisions. It also
requires a working 'git send-email' setup, and by default doesn't
automate everything, so you may want to go through the above steps
manually at least once.

For installation instructions, please go to:

* `<https://github.com/stefanha/git-publish>`_

The workflow with 'git-publish' is:

.. code-block:: shell

   $ git checkout master -b my-feature
   $ # work on new commits, add your 'Signed-off-by' lines to each
   $ git publish

Your patch series will be sent, and the git tip will be tagged as
my-feature-v1 in case you need to refer back to it in the future.

Sending v2:

.. code-block:: shell

   $ git checkout my-feature # same topic branch
   $ # making changes to the commits (using 'git rebase', for example)
   $ git publish

Your patch series will be sent with a 'v2' tag in the subject and the git
tip will be tagged as my-feature-v2.

Bug reporting
=============

The QEMU project uses GitLab issues to track bugs. Bugs
found when running code built from QEMU git or upstream released sources
should be reported via:

* `<https://gitlab.com/qemu-project/qemu/-/issues>`_

If using QEMU via an operating system vendor pre-built binary package, it
is preferable to report bugs to the vendor's own bug tracker first. If
the bug is also known to affect the latest upstream code, it can also be
reported via GitLab.

For additional information on bug reporting consult:

* `<https://wiki.qemu.org/Contribute/ReportABug>`_


ChangeLog
=========

For version history and release notes, please visit
`<https://wiki.qemu.org/ChangeLog/>`_ or look at the git history for
more detailed information.


Contact
=======

The QEMU community can be contacted in a number of ways, with the two
main methods being email and IRC:

* `<mailto:qemu-devel@nongnu.org>`_
* `<https://lists.nongnu.org/mailman/listinfo/qemu-devel>`_
* #qemu on irc.oftc.net

Information on additional methods of contacting the community can be
found online via the QEMU website:

* `<https://wiki.qemu.org/Contribute/StartHere>`_
XEMU_BRANCH (new file, 1 line)
@@ -0,0 +1 @@
master
XEMU_COMMIT (new file, 1 line)
@@ -0,0 +1 @@
86344a8543194f4e21d3777a382c608fc4fff4f2
XEMU_VERSION (new file, 1 line)
@@ -0,0 +1 @@
0.8.15
accel/Kconfig (new file, 19 lines)
@@ -0,0 +1,19 @@
config WHPX
    bool

config NVMM
    bool

config HVF
    bool

config TCG
    bool

config KVM
    bool

config XEN
    bool
    select FSDEV_9P if VIRTFS
    select XEN_BUS
accel/accel-blocker.c (new file, 155 lines)
@@ -0,0 +1,155 @@
|
||||
/*
|
||||
* Lock to inhibit accelerator ioctls
|
||||
*
|
||||
* Copyright (c) 2022 Red Hat Inc.
|
||||
*
|
||||
* Author: Emanuele Giuseppe Esposito <eesposit@redhat.com>
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/lockcnt.h"
|
||||
#include "qemu/thread.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "sysemu/accel-blocker.h"
|
||||
|
||||
static QemuLockCnt accel_in_ioctl_lock;
|
||||
static QemuEvent accel_in_ioctl_event;
|
||||
|
||||
void accel_blocker_init(void)
|
||||
{
|
||||
qemu_lockcnt_init(&accel_in_ioctl_lock);
|
||||
qemu_event_init(&accel_in_ioctl_event, false);
|
||||
}
|
||||
|
||||
void accel_ioctl_begin(void)
|
||||
{
|
||||
if (likely(bql_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* block if lock is taken in kvm_ioctl_inhibit_begin() */
|
||||
qemu_lockcnt_inc(&accel_in_ioctl_lock);
|
||||
}
|
||||
|
||||
void accel_ioctl_end(void)
|
||||
{
|
||||
if (likely(bql_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
qemu_lockcnt_dec(&accel_in_ioctl_lock);
|
||||
/* change event to SET. If event was BUSY, wake up all waiters */
|
||||
qemu_event_set(&accel_in_ioctl_event);
|
||||
}
|
||||
|
||||
void accel_cpu_ioctl_begin(CPUState *cpu)
|
||||
{
|
||||
if (unlikely(bql_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
/* block if lock is taken in kvm_ioctl_inhibit_begin() */
|
||||
qemu_lockcnt_inc(&cpu->in_ioctl_lock);
|
||||
}
|
||||
|
||||
void accel_cpu_ioctl_end(CPUState *cpu)
|
||||
{
|
||||
if (unlikely(bql_locked())) {
|
||||
return;
|
||||
}
|
||||
|
||||
qemu_lockcnt_dec(&cpu->in_ioctl_lock);
|
||||
/* change event to SET. If event was BUSY, wake up all waiters */
|
||||
qemu_event_set(&accel_in_ioctl_event);
|
||||
}
|
||||
|
||||
static bool accel_has_to_wait(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
bool needs_to_wait = false;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
if (qemu_lockcnt_count(&cpu->in_ioctl_lock)) {
|
||||
/* exit the ioctl, if vcpu is running it */
|
||||
qemu_cpu_kick(cpu);
|
||||
needs_to_wait = true;
|
||||
}
|
||||
}
|
||||
|
||||
return needs_to_wait || qemu_lockcnt_count(&accel_in_ioctl_lock);
|
||||
}
|
||||
|
||||
void accel_ioctl_inhibit_begin(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
/*
|
||||
* We allow to inhibit only when holding the BQL, so we can identify
|
||||
* when an inhibitor wants to issue an ioctl easily.
|
||||
*/
|
||||
g_assert(bql_locked());
|
||||
|
||||
/* Block further invocations of the ioctls outside the BQL. */
|
||||
CPU_FOREACH(cpu) {
|
||||
qemu_lockcnt_lock(&cpu->in_ioctl_lock);
|
||||
}
|
||||
qemu_lockcnt_lock(&accel_in_ioctl_lock);
|
||||
|
||||
/* Keep waiting until there are running ioctls */
|
||||
while (true) {
|
||||
|
||||
/* Reset event to FREE. */
|
||||
qemu_event_reset(&accel_in_ioctl_event);
|
||||
|
||||
if (accel_has_to_wait()) {
|
||||
/*
|
||||
* If event is still FREE, and there are ioctls still in progress,
|
||||
* wait.
|
||||
*
|
||||
* If an ioctl finishes before qemu_event_wait(), it will change
|
||||
* the event state to SET. This will prevent qemu_event_wait() from
|
||||
* blocking, but it's not a problem because if other ioctls are
|
||||
* still running the loop will iterate once more and reset the event
|
||||
* status to FREE so that it can wait properly.
|
||||
*
|
||||
* If an ioctls finishes while qemu_event_wait() is blocking, then
|
||||
* it will be waken up, but also here the while loop makes sure
|
||||
* to re-enter the wait if there are other running ioctls.
|
||||
*/
|
||||
qemu_event_wait(&accel_in_ioctl_event);
|
||||
} else {
|
||||
/* No ioctl is running */
|
||||
return;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void accel_ioctl_inhibit_end(void)
|
||||
{
|
||||
CPUState *cpu;
|
||||
|
||||
qemu_lockcnt_unlock(&accel_in_ioctl_lock);
|
||||
CPU_FOREACH(cpu) {
|
||||
qemu_lockcnt_unlock(&cpu->in_ioctl_lock);
|
||||
}
|
||||
}
|
||||
|
104
accel/accel-system.c
Normal file
104
accel/accel-system.c
Normal file
@ -0,0 +1,104 @@
|
||||
/*
|
||||
* QEMU accel class, system emulation components
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "hw/boards.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "accel-system.h"
|
||||
|
||||
int accel_init_machine(AccelState *accel, MachineState *ms)
|
||||
{
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
int ret;
|
||||
ms->accelerator = accel;
|
||||
*(acc->allowed) = true;
|
||||
ret = acc->init_machine(ms);
|
||||
if (ret < 0) {
|
||||
ms->accelerator = NULL;
|
||||
*(acc->allowed) = false;
|
||||
object_unref(OBJECT(accel));
|
||||
} else {
|
||||
object_set_accelerator_compat_props(acc->compat_props);
|
||||
}
|
||||
return ret;
|
||||
}
|
||||
|
||||
AccelState *current_accel(void)
|
||||
{
|
||||
return current_machine->accelerator;
|
||||
}
|
||||
|
||||
void accel_setup_post(MachineState *ms)
|
||||
{
|
||||
AccelState *accel = ms->accelerator;
|
||||
AccelClass *acc = ACCEL_GET_CLASS(accel);
|
||||
if (acc->setup_post) {
|
||||
acc->setup_post(ms, accel);
|
||||
}
|
||||
}
|
||||
|
||||
/* initialize the arch-independent accel operation interfaces */
|
||||
void accel_system_init_ops_interfaces(AccelClass *ac)
|
||||
{
|
||||
const char *ac_name;
|
||||
char *ops_name;
|
||||
ObjectClass *oc;
|
||||
AccelOpsClass *ops;
|
||||
|
||||
ac_name = object_class_get_name(OBJECT_CLASS(ac));
|
||||
g_assert(ac_name != NULL);
|
||||
|
||||
ops_name = g_strdup_printf("%s" ACCEL_OPS_SUFFIX, ac_name);
|
||||
oc = module_object_class_by_name(ops_name);
|
||||
if (!oc) {
|
||||
error_report("fatal: could not load module for type '%s'", ops_name);
|
||||
exit(1);
|
||||
}
|
||||
g_free(ops_name);
|
||||
/*
|
||||
* all accelerators need to define ops, providing at least a mandatory
|
||||
* non-NULL create_vcpu_thread operation.
|
||||
*/
|
||||
ops = ACCEL_OPS_CLASS(oc);
|
||||
if (ops->ops_init) {
|
||||
ops->ops_init(ops);
|
||||
}
|
||||
cpus_register_accel(ops);
|
||||
}
|
||||
|
||||
static const TypeInfo accel_ops_type_info = {
|
||||
.name = TYPE_ACCEL_OPS,
|
||||
.parent = TYPE_OBJECT,
|
||||
.abstract = true,
|
||||
.class_size = sizeof(AccelOpsClass),
|
||||
};
|
||||
|
||||
static void accel_system_register_types(void)
|
||||
{
|
||||
type_register_static(&accel_ops_type_info);
|
||||
}
|
||||
type_init(accel_system_register_types);
|
15
accel/accel-system.h
Normal file
15
accel/accel-system.h
Normal file
@ -0,0 +1,15 @@
|
||||
/*
|
||||
* QEMU System Emulation accel internal functions
|
||||
*
|
||||
* Copyright 2021 SUSE LLC
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
#ifndef ACCEL_SYSTEM_H
|
||||
#define ACCEL_SYSTEM_H
|
||||
|
||||
void accel_system_init_ops_interfaces(AccelClass *ac);
|
||||
|
||||
#endif /* ACCEL_SYSTEM_H */
|
176
accel/accel-target.c
Normal file
176
accel/accel-target.c
Normal file
@ -0,0 +1,176 @@
|
||||
/*
|
||||
* QEMU accel class, components common to system emulation and user mode
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
* Copyright (c) 2014 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/accel.h"

#include "cpu.h"
#include "hw/core/accel-cpu.h"

#ifndef CONFIG_USER_ONLY
#include "accel-system.h"
#endif /* !CONFIG_USER_ONLY */

static const TypeInfo accel_type = {
    .name = TYPE_ACCEL,
    .parent = TYPE_OBJECT,
    .class_size = sizeof(AccelClass),
    .instance_size = sizeof(AccelState),
};

/* Lookup AccelClass from opt_name. Returns NULL if not found */
AccelClass *accel_find(const char *opt_name)
{
    char *class_name = g_strdup_printf(ACCEL_CLASS_NAME("%s"), opt_name);
    AccelClass *ac = ACCEL_CLASS(module_object_class_by_name(class_name));
    g_free(class_name);
    return ac;
}

/* Return the name of the current accelerator */
const char *current_accel_name(void)
{
    AccelClass *ac = ACCEL_GET_CLASS(current_accel());

    return ac->name;
}

static void accel_init_cpu_int_aux(ObjectClass *klass, void *opaque)
{
    CPUClass *cc = CPU_CLASS(klass);
    AccelCPUClass *accel_cpu = opaque;

    /*
     * The first callback allows accel-cpu to run initializations
     * for the CPU, customizing CPU behavior according to the accelerator.
     *
     * The second one allows the CPU to customize the accel-cpu
     * behavior according to the CPU.
     *
     * The second is currently only used by TCG, to specialize the
     * TCGCPUOps depending on the CPU type.
     */
    cc->accel_cpu = accel_cpu;
    if (accel_cpu->cpu_class_init) {
        accel_cpu->cpu_class_init(cc);
    }
    if (cc->init_accel_cpu) {
        cc->init_accel_cpu(accel_cpu, cc);
    }
}

/* initialize the arch-specific accel CpuClass interfaces */
static void accel_init_cpu_interfaces(AccelClass *ac)
{
    const char *ac_name; /* AccelClass name */
    char *acc_name;      /* AccelCPUClass name */
    ObjectClass *acc;    /* AccelCPUClass */

    ac_name = object_class_get_name(OBJECT_CLASS(ac));
    g_assert(ac_name != NULL);

    acc_name = g_strdup_printf("%s-%s", ac_name, CPU_RESOLVING_TYPE);
    acc = object_class_by_name(acc_name);
    g_free(acc_name);

    if (acc) {
        object_class_foreach(accel_init_cpu_int_aux,
                             CPU_RESOLVING_TYPE, false, acc);
    }
}

void accel_init_interfaces(AccelClass *ac)
{
#ifndef CONFIG_USER_ONLY
    accel_system_init_ops_interfaces(ac);
#endif /* !CONFIG_USER_ONLY */

    accel_init_cpu_interfaces(ac);
}

void accel_cpu_instance_init(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->accel_cpu && cc->accel_cpu->cpu_instance_init) {
        cc->accel_cpu->cpu_instance_init(cpu);
    }
}

bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    AccelState *accel = current_accel();
    AccelClass *acc = ACCEL_GET_CLASS(accel);

    /* target specific realization */
    if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
        && !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
        return false;
    }

    /* generic realization */
    if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
        return false;
    }

    return true;
}

void accel_cpu_common_unrealize(CPUState *cpu)
{
    AccelState *accel = current_accel();
    AccelClass *acc = ACCEL_GET_CLASS(accel);

    /* generic unrealization */
    if (acc->cpu_common_unrealize) {
        acc->cpu_common_unrealize(cpu);
    }
}

int accel_supported_gdbstub_sstep_flags(void)
{
    AccelState *accel = current_accel();
    AccelClass *acc = ACCEL_GET_CLASS(accel);
    if (acc->gdbstub_supported_sstep_flags) {
        return acc->gdbstub_supported_sstep_flags();
    }
    return 0;
}

static const TypeInfo accel_cpu_type = {
    .name = TYPE_ACCEL_CPU,
    .parent = TYPE_OBJECT,
    .abstract = true,
    .class_size = sizeof(AccelCPUClass),
};

static void register_accel_types(void)
{
    type_register_static(&accel_type);
    type_register_static(&accel_cpu_type);
}

type_init(register_accel_types);
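The accel_init_cpu_int_aux() wiring above is a plain optional-callback dispatch: the accelerator's AccelCPUClass may customize the CPUClass, and the CPUClass may in turn customize behavior for that accelerator. Below is a minimal standalone sketch of that two-way hook pattern, for illustration only; the struct and function names are hypothetical and not part of this commit or of the QEMU API.

/* Illustrative sketch of the two-way hook dispatch used above. */
struct example_cpu_class;

struct example_accel_cpu_class {
    /* accelerator-side hook: customize the CPU class */
    void (*cpu_class_init)(struct example_cpu_class *cc);
};

struct example_cpu_class {
    const struct example_accel_cpu_class *accel_cpu;
    /* CPU-side hook: customize behavior for this accelerator */
    void (*init_accel_cpu)(const struct example_accel_cpu_class *acc,
                           struct example_cpu_class *cc);
};

static void wire_up(struct example_cpu_class *cc,
                    const struct example_accel_cpu_class *acc)
{
    cc->accel_cpu = acc;
    if (acc->cpu_class_init) {      /* hook is optional */
        acc->cpu_class_init(cc);
    }
    if (cc->init_accel_cpu) {       /* so is the reverse one */
        cc->init_accel_cpu(acc, cc);
    }
}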
24
accel/accel-user.c
Normal file
24
accel/accel-user.c
Normal file
@ -0,0 +1,24 @@
/*
 * QEMU accel class, user-mode components
 *
 * Copyright 2021 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/accel.h"

AccelState *current_accel(void)
{
    static AccelState *accel;

    if (!accel) {
        AccelClass *ac = accel_find("tcg");

        g_assert(ac != NULL);
        accel = ACCEL(object_new_with_class(OBJECT_CLASS(ac)));
    }
    return accel;
}
78
accel/dummy-cpus.c
Normal file
78
accel/dummy-cpus.c
Normal file
@ -0,0 +1,78 @@
|
||||
/*
|
||||
* Dummy cpu thread code
|
||||
*
|
||||
* Copyright IBM, Corp. 2011
|
||||
*
|
||||
* Authors:
|
||||
* Anthony Liguori <aliguori@us.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/rcu.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
|
||||
static void *dummy_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
||||
rcu_register_thread();
|
||||
|
||||
bql_lock();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
current_cpu = cpu;
|
||||
|
||||
#ifndef _WIN32
|
||||
sigset_t waitset;
|
||||
int r;
|
||||
|
||||
sigemptyset(&waitset);
|
||||
sigaddset(&waitset, SIG_IPI);
|
||||
#endif
|
||||
|
||||
/* signal CPU creation */
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
do {
|
||||
bql_unlock();
|
||||
#ifndef _WIN32
|
||||
do {
|
||||
int sig;
|
||||
r = sigwait(&waitset, &sig);
|
||||
} while (r == -1 && (errno == EAGAIN || errno == EINTR));
|
||||
if (r == -1) {
|
||||
perror("sigwait");
|
||||
exit(1);
|
||||
}
|
||||
#else
|
||||
qemu_sem_wait(&cpu->sem);
|
||||
#endif
|
||||
bql_lock();
|
||||
qemu_wait_io_event(cpu);
|
||||
} while (!cpu->unplug);
|
||||
|
||||
bql_unlock();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
void dummy_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,
|
||||
QEMU_THREAD_JOINABLE);
|
||||
#ifdef _WIN32
|
||||
qemu_sem_init(&cpu->sem, 0);
|
||||
#endif
|
||||
}
|
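The dummy vCPU thread above parks each CPU in sigwait() until it is kicked or unplugged. The following is a self-contained POSIX sketch of the same park-until-signalled loop, using plain pthreads and SIGUSR1 in place of SIG_IPI; it is an illustration, not part of this commit.

#include <pthread.h>
#include <signal.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_bool unplug;

static void *parked_thread(void *arg)
{
    sigset_t waitset;
    sigemptyset(&waitset);
    sigaddset(&waitset, SIGUSR1);           /* stands in for SIG_IPI */

    while (!atomic_load(&unplug)) {
        int sig;
        int r = sigwait(&waitset, &sig);    /* block until kicked */
        if (r != 0) {
            fprintf(stderr, "sigwait failed: %d\n", r);
            break;
        }
    }
    return NULL;
}

int main(void)
{
    sigset_t set;
    sigemptyset(&set);
    sigaddset(&set, SIGUSR1);
    pthread_sigmask(SIG_BLOCK, &set, NULL); /* must be blocked for sigwait */

    pthread_t t;
    pthread_create(&t, NULL, parked_thread, NULL);

    atomic_store(&unplug, true);
    pthread_kill(t, SIGUSR1);               /* "kick" the parked thread */
    pthread_join(t, NULL);
    return 0;
}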
8
accel/hvf/entitlements.plist
Normal file
8
accel/hvf/entitlements.plist
Normal file
@ -0,0 +1,8 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>com.apple.security.hypervisor</key>
    <true/>
</dict>
</plist>
609
accel/hvf/hvf-accel-ops.c
Normal file
609
accel/hvf/hvf-accel-ops.c
Normal file
@ -0,0 +1,609 @@
|
||||
/*
|
||||
* Copyright 2008 IBM Corporation
|
||||
* 2008 Red Hat, Inc.
|
||||
* Copyright 2011 Intel Corporation
|
||||
* Copyright 2016 Veertu, Inc.
|
||||
* Copyright 2017 The Android Open Source Project
|
||||
*
|
||||
* QEMU Hypervisor.framework support
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of version 2 of the GNU General Public
|
||||
* License as published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, see <http://www.gnu.org/licenses/>.
|
||||
*
|
||||
* This file contain code under public domain from the hvdos project:
|
||||
* https://github.com/mist64/hvdos
|
||||
*
|
||||
* Parts Copyright (c) 2011 NetApp, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice, this list of conditions and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
|
||||
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||
* ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
|
||||
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
|
||||
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
|
||||
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
|
||||
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
|
||||
* SUCH DAMAGE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "exec/address-spaces.h"
|
||||
#include "exec/exec-all.h"
|
||||
#include "gdbstub/enums.h"
|
||||
#include "hw/boards.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/hvf_int.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "qemu/guest-random.h"
|
||||
|
||||
HVFState *hvf_state;
|
||||
|
||||
/* Memory slots */
|
||||
|
||||
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
|
||||
{
|
||||
hvf_slot *slot;
|
||||
int x;
|
||||
for (x = 0; x < hvf_state->num_slots; ++x) {
|
||||
slot = &hvf_state->slots[x];
|
||||
if (slot->size && start < (slot->start + slot->size) &&
|
||||
(start + size) > slot->start) {
|
||||
return slot;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
struct mac_slot {
|
||||
int present;
|
||||
uint64_t size;
|
||||
uint64_t gpa_start;
|
||||
uint64_t gva;
|
||||
};
|
||||
|
||||
struct mac_slot mac_slots[32];
|
||||
|
||||
static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
|
||||
{
|
||||
struct mac_slot *macslot;
|
||||
hv_return_t ret;
|
||||
|
||||
macslot = &mac_slots[slot->slot_id];
|
||||
|
||||
if (macslot->present) {
|
||||
if (macslot->size != slot->size) {
|
||||
macslot->present = 0;
|
||||
ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
|
||||
assert_hvf_ok(ret);
|
||||
}
|
||||
}
|
||||
|
||||
if (!slot->size) {
|
||||
return 0;
|
||||
}
|
||||
|
||||
macslot->present = 1;
|
||||
macslot->gpa_start = slot->start;
|
||||
macslot->size = slot->size;
|
||||
ret = hv_vm_map(slot->mem, slot->start, slot->size, flags);
|
||||
assert_hvf_ok(ret);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
|
||||
{
|
||||
hvf_slot *mem;
|
||||
MemoryRegion *area = section->mr;
|
||||
bool writable = !area->readonly && !area->rom_device;
|
||||
hv_memory_flags_t flags;
|
||||
uint64_t page_size = qemu_real_host_page_size();
|
||||
|
||||
if (!memory_region_is_ram(area)) {
|
||||
if (writable) {
|
||||
return;
|
||||
} else if (!memory_region_is_romd(area)) {
|
||||
/*
|
||||
* If the memory device is not in romd_mode, then we actually want
|
||||
* to remove the hvf memory slot so all accesses will trap.
|
||||
*/
|
||||
add = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (!QEMU_IS_ALIGNED(int128_get64(section->size), page_size) ||
|
||||
!QEMU_IS_ALIGNED(section->offset_within_address_space, page_size)) {
|
||||
/* Not page aligned, so we can not map as RAM */
|
||||
add = false;
|
||||
}
|
||||
|
||||
mem = hvf_find_overlap_slot(
|
||||
section->offset_within_address_space,
|
||||
int128_get64(section->size));
|
||||
|
||||
if (mem && add) {
|
||||
if (mem->size == int128_get64(section->size) &&
|
||||
mem->start == section->offset_within_address_space &&
|
||||
mem->mem == (memory_region_get_ram_ptr(area) +
|
||||
section->offset_within_region)) {
|
||||
return; /* Same region was attempted to register, go away. */
|
||||
}
|
||||
}
|
||||
|
||||
/* Region needs to be reset. set the size to 0 and remap it. */
|
||||
if (mem) {
|
||||
mem->size = 0;
|
||||
if (do_hvf_set_memory(mem, 0)) {
|
||||
error_report("Failed to reset overlapping slot");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
if (!add) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (area->readonly ||
|
||||
(!memory_region_is_ram(area) && memory_region_is_romd(area))) {
|
||||
flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
|
||||
} else {
|
||||
flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
|
||||
}
|
||||
|
||||
/* Now make a new slot. */
|
||||
int x;
|
||||
|
||||
for (x = 0; x < hvf_state->num_slots; ++x) {
|
||||
mem = &hvf_state->slots[x];
|
||||
if (!mem->size) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (x == hvf_state->num_slots) {
|
||||
error_report("No free slots");
|
||||
abort();
|
||||
}
|
||||
|
||||
mem->size = int128_get64(section->size);
|
||||
mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
|
||||
mem->start = section->offset_within_address_space;
|
||||
mem->region = area;
|
||||
|
||||
if (do_hvf_set_memory(mem, flags)) {
|
||||
error_report("Error registering new memory slot");
|
||||
abort();
|
||||
}
|
||||
}
|
||||
|
||||
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
|
||||
{
|
||||
if (!cpu->accel->dirty) {
|
||||
hvf_get_registers(cpu);
|
||||
cpu->accel->dirty = true;
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_cpu_synchronize_state(CPUState *cpu)
|
||||
{
|
||||
if (!cpu->accel->dirty) {
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
|
||||
}
|
||||
}
|
||||
|
||||
static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
|
||||
run_on_cpu_data arg)
|
||||
{
|
||||
/* QEMU state is the reference, push it to HVF now and on next entry */
|
||||
cpu->accel->dirty = true;
|
||||
}
|
||||
|
||||
static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
|
||||
{
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
|
||||
}
|
||||
|
||||
static void hvf_cpu_synchronize_post_init(CPUState *cpu)
|
||||
{
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
|
||||
}
|
||||
|
||||
static void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
|
||||
{
|
||||
run_on_cpu(cpu, do_hvf_cpu_synchronize_set_dirty, RUN_ON_CPU_NULL);
|
||||
}
|
||||
|
||||
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
|
||||
{
|
||||
hvf_slot *slot;
|
||||
|
||||
slot = hvf_find_overlap_slot(
|
||||
section->offset_within_address_space,
|
||||
int128_get64(section->size));
|
||||
|
||||
/* protect region against writes; begin tracking it */
|
||||
if (on) {
|
||||
slot->flags |= HVF_SLOT_LOG;
|
||||
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
|
||||
HV_MEMORY_READ | HV_MEMORY_EXEC);
|
||||
/* stop tracking region*/
|
||||
} else {
|
||||
slot->flags &= ~HVF_SLOT_LOG;
|
||||
hv_vm_protect((uintptr_t)slot->start, (size_t)slot->size,
|
||||
HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_log_start(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (old != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_log_stop(MemoryListener *listener,
|
||||
MemoryRegionSection *section, int old, int new)
|
||||
{
|
||||
if (new != 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
hvf_set_dirty_tracking(section, 0);
|
||||
}
|
||||
|
||||
static void hvf_log_sync(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
/*
|
||||
* sync of dirty pages is handled elsewhere; just make sure we keep
|
||||
* tracking the region.
|
||||
*/
|
||||
hvf_set_dirty_tracking(section, 1);
|
||||
}
|
||||
|
||||
static void hvf_region_add(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, true);
|
||||
}
|
||||
|
||||
static void hvf_region_del(MemoryListener *listener,
|
||||
MemoryRegionSection *section)
|
||||
{
|
||||
hvf_set_phys_mem(section, false);
|
||||
}
|
||||
|
||||
static MemoryListener hvf_memory_listener = {
|
||||
.name = "hvf",
|
||||
.priority = MEMORY_LISTENER_PRIORITY_ACCEL,
|
||||
.region_add = hvf_region_add,
|
||||
.region_del = hvf_region_del,
|
||||
.log_start = hvf_log_start,
|
||||
.log_stop = hvf_log_stop,
|
||||
.log_sync = hvf_log_sync,
|
||||
};
|
||||
|
||||
static void dummy_signal(int sig)
|
||||
{
|
||||
}
|
||||
|
||||
bool hvf_allowed;
|
||||
|
||||
static int hvf_accel_init(MachineState *ms)
|
||||
{
|
||||
int x;
|
||||
hv_return_t ret;
|
||||
HVFState *s;
|
||||
int pa_range = 36;
|
||||
MachineClass *mc = MACHINE_GET_CLASS(ms);
|
||||
|
||||
if (mc->hvf_get_physical_address_range) {
|
||||
pa_range = mc->hvf_get_physical_address_range(ms);
|
||||
if (pa_range < 0) {
|
||||
return -EINVAL;
|
||||
}
|
||||
}
|
||||
|
||||
ret = hvf_arch_vm_create(ms, (uint32_t)pa_range);
|
||||
assert_hvf_ok(ret);
|
||||
|
||||
s = g_new0(HVFState, 1);
|
||||
|
||||
s->num_slots = ARRAY_SIZE(s->slots);
|
||||
for (x = 0; x < s->num_slots; ++x) {
|
||||
s->slots[x].size = 0;
|
||||
s->slots[x].slot_id = x;
|
||||
}
|
||||
|
||||
QTAILQ_INIT(&s->hvf_sw_breakpoints);
|
||||
|
||||
hvf_state = s;
|
||||
memory_listener_register(&hvf_memory_listener, &address_space_memory);
|
||||
|
||||
return hvf_arch_init();
|
||||
}
|
||||
|
||||
static inline int hvf_gdbstub_sstep_flags(void)
|
||||
{
|
||||
return SSTEP_ENABLE | SSTEP_NOIRQ;
|
||||
}
|
||||
|
||||
static void hvf_accel_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelClass *ac = ACCEL_CLASS(oc);
|
||||
ac->name = "HVF";
|
||||
ac->init_machine = hvf_accel_init;
|
||||
ac->allowed = &hvf_allowed;
|
||||
ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;
|
||||
}
|
||||
|
||||
static const TypeInfo hvf_accel_type = {
|
||||
.name = TYPE_HVF_ACCEL,
|
||||
.parent = TYPE_ACCEL,
|
||||
.class_init = hvf_accel_class_init,
|
||||
};
|
||||
|
||||
static void hvf_type_init(void)
|
||||
{
|
||||
type_register_static(&hvf_accel_type);
|
||||
}
|
||||
|
||||
type_init(hvf_type_init);
|
||||
|
||||
static void hvf_vcpu_destroy(CPUState *cpu)
|
||||
{
|
||||
hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
|
||||
assert_hvf_ok(ret);
|
||||
|
||||
hvf_arch_vcpu_destroy(cpu);
|
||||
g_free(cpu->accel);
|
||||
cpu->accel = NULL;
|
||||
}
|
||||
|
||||
static int hvf_init_vcpu(CPUState *cpu)
|
||||
{
|
||||
int r;
|
||||
|
||||
cpu->accel = g_new0(AccelCPUState, 1);
|
||||
|
||||
/* init cpu signals */
|
||||
struct sigaction sigact;
|
||||
|
||||
memset(&sigact, 0, sizeof(sigact));
|
||||
sigact.sa_handler = dummy_signal;
|
||||
sigaction(SIG_IPI, &sigact, NULL);
|
||||
|
||||
pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
|
||||
sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);
|
||||
|
||||
#ifdef __aarch64__
|
||||
r = hv_vcpu_create(&cpu->accel->fd,
|
||||
(hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
|
||||
#else
|
||||
r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT);
|
||||
#endif
|
||||
cpu->accel->dirty = true;
|
||||
assert_hvf_ok(r);
|
||||
|
||||
cpu->accel->guest_debug_enabled = false;
|
||||
|
||||
return hvf_arch_init_vcpu(cpu);
|
||||
}
|
||||
|
||||
/*
|
||||
* The HVF-specific vCPU thread function. This one should only run when the host
|
||||
* CPU supports the VMX "unrestricted guest" feature.
|
||||
*/
|
||||
static void *hvf_cpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
|
||||
int r;
|
||||
|
||||
assert(hvf_enabled());
|
||||
|
||||
rcu_register_thread();
|
||||
|
||||
bql_lock();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
current_cpu = cpu;
|
||||
|
||||
hvf_init_vcpu(cpu);
|
||||
|
||||
/* signal CPU creation */
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
do {
|
||||
if (cpu_can_run(cpu)) {
|
||||
r = hvf_vcpu_exec(cpu);
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
qemu_wait_io_event(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
hvf_vcpu_destroy(cpu);
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
bql_unlock();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void hvf_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
||||
/*
|
||||
* HVF currently does not support TCG, and only runs in
|
||||
* unrestricted-guest mode.
|
||||
*/
|
||||
assert(hvf_enabled());
|
||||
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
}
|
||||
|
||||
static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
int err;
|
||||
|
||||
if (type == GDB_BREAKPOINT_SW) {
|
||||
bp = hvf_find_sw_breakpoint(cpu, addr);
|
||||
if (bp) {
|
||||
bp->use_count++;
|
||||
return 0;
|
||||
}
|
||||
|
||||
bp = g_new(struct hvf_sw_breakpoint, 1);
|
||||
bp->pc = addr;
|
||||
bp->use_count = 1;
|
||||
err = hvf_arch_insert_sw_breakpoint(cpu, bp);
|
||||
if (err) {
|
||||
g_free(bp);
|
||||
return err;
|
||||
}
|
||||
|
||||
QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry);
|
||||
} else {
|
||||
err = hvf_arch_insert_hw_breakpoint(addr, len, type);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
err = hvf_update_guest_debug(cpu);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
int err;
|
||||
|
||||
if (type == GDB_BREAKPOINT_SW) {
|
||||
bp = hvf_find_sw_breakpoint(cpu, addr);
|
||||
if (!bp) {
|
||||
return -ENOENT;
|
||||
}
|
||||
|
||||
if (bp->use_count > 1) {
|
||||
bp->use_count--;
|
||||
return 0;
|
||||
}
|
||||
|
||||
err = hvf_arch_remove_sw_breakpoint(cpu, bp);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
|
||||
QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
|
||||
g_free(bp);
|
||||
} else {
|
||||
err = hvf_arch_remove_hw_breakpoint(addr, len, type);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
err = hvf_update_guest_debug(cpu);
|
||||
if (err) {
|
||||
return err;
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void hvf_remove_all_breakpoints(CPUState *cpu)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp, *next;
|
||||
CPUState *tmpcpu;
|
||||
|
||||
QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) {
|
||||
if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) {
|
||||
/* Try harder to find a CPU that currently sees the breakpoint. */
|
||||
CPU_FOREACH(tmpcpu)
|
||||
{
|
||||
if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
|
||||
g_free(bp);
|
||||
}
|
||||
hvf_arch_remove_all_hw_breakpoints();
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
hvf_update_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
|
||||
static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
|
||||
|
||||
ops->create_vcpu_thread = hvf_start_vcpu_thread;
|
||||
ops->kick_vcpu_thread = hvf_kick_vcpu_thread;
|
||||
|
||||
ops->synchronize_post_reset = hvf_cpu_synchronize_post_reset;
|
||||
ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
|
||||
ops->synchronize_state = hvf_cpu_synchronize_state;
|
||||
ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
|
||||
|
||||
ops->insert_breakpoint = hvf_insert_breakpoint;
|
||||
ops->remove_breakpoint = hvf_remove_breakpoint;
|
||||
ops->remove_all_breakpoints = hvf_remove_all_breakpoints;
|
||||
ops->update_guest_debug = hvf_update_guest_debug;
|
||||
ops->supports_guest_debug = hvf_arch_supports_guest_debug;
|
||||
};
|
||||
static const TypeInfo hvf_accel_ops_type = {
|
||||
.name = ACCEL_OPS_NAME("hvf"),
|
||||
|
||||
.parent = TYPE_ACCEL_OPS,
|
||||
.class_init = hvf_accel_ops_class_init,
|
||||
.abstract = true,
|
||||
};
|
||||
static void hvf_accel_ops_register_types(void)
|
||||
{
|
||||
type_register_static(&hvf_accel_ops_type);
|
||||
}
|
||||
type_init(hvf_accel_ops_register_types);
|
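hvf_find_overlap_slot() above uses the standard half-open interval test: [start, start+size) intersects a slot iff start < slot_end and start+size > slot_start. A small standalone check of that predicate follows; the slot struct here is a hypothetical stand-in, not the HVF one.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

struct example_slot {
    uint64_t start;
    uint64_t size;   /* size == 0 means the slot is unused */
};

/* Same overlap predicate as hvf_find_overlap_slot(). */
static bool slot_overlaps(const struct example_slot *s,
                          uint64_t start, uint64_t size)
{
    return s->size &&
           start < s->start + s->size &&
           start + size > s->start;
}

int main(void)
{
    struct example_slot s = { .start = 0x1000, .size = 0x1000 };

    assert(slot_overlaps(&s, 0x0800, 0x1000));   /* straddles the start */
    assert(slot_overlaps(&s, 0x1800, 0x1000));   /* straddles the end */
    assert(!slot_overlaps(&s, 0x2000, 0x1000));  /* adjacent, no overlap */
    return 0;
}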
65
accel/hvf/hvf-all.c
Normal file
65
accel/hvf/hvf-all.c
Normal file
@ -0,0 +1,65 @@
|
||||
/*
|
||||
* QEMU Hypervisor.framework support
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2. See
|
||||
* the COPYING file in the top-level directory.
|
||||
*
|
||||
* Contributions after 2012-01-13 are licensed under the terms of the
|
||||
* GNU GPL, version 2 or (at your option) any later version.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "sysemu/hvf.h"
|
||||
#include "sysemu/hvf_int.h"
|
||||
|
||||
const char *hvf_return_string(hv_return_t ret)
|
||||
{
|
||||
switch (ret) {
|
||||
case HV_SUCCESS: return "HV_SUCCESS";
|
||||
case HV_ERROR: return "HV_ERROR";
|
||||
case HV_BUSY: return "HV_BUSY";
|
||||
case HV_BAD_ARGUMENT: return "HV_BAD_ARGUMENT";
|
||||
case HV_NO_RESOURCES: return "HV_NO_RESOURCES";
|
||||
case HV_NO_DEVICE: return "HV_NO_DEVICE";
|
||||
case HV_UNSUPPORTED: return "HV_UNSUPPORTED";
|
||||
case HV_DENIED: return "HV_DENIED";
|
||||
default: return "[unknown hv_return value]";
|
||||
}
|
||||
}
|
||||
|
||||
void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
|
||||
const char *exp)
|
||||
{
|
||||
if (ret == HV_SUCCESS) {
|
||||
return;
|
||||
}
|
||||
|
||||
error_report("Error: %s = %s (0x%x, at %s:%u)",
|
||||
exp, hvf_return_string(ret), ret, file, line);
|
||||
|
||||
abort();
|
||||
}
|
||||
|
||||
struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
|
||||
{
|
||||
struct hvf_sw_breakpoint *bp;
|
||||
|
||||
QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
|
||||
if (bp->pc == pc) {
|
||||
return bp;
|
||||
}
|
||||
}
|
||||
return NULL;
|
||||
}
|
||||
|
||||
int hvf_sw_breakpoints_active(CPUState *cpu)
|
||||
{
|
||||
return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
|
||||
}
|
||||
|
||||
int hvf_update_guest_debug(CPUState *cpu)
|
||||
{
|
||||
hvf_arch_update_guest_debug(cpu);
|
||||
return 0;
|
||||
}
|
7
accel/hvf/meson.build
Normal file
7
accel/hvf/meson.build
Normal file
@ -0,0 +1,7 @@
hvf_ss = ss.source_set()
hvf_ss.add(files(
  'hvf-all.c',
  'hvf-accel-ops.c',
))

specific_ss.add_all(when: 'CONFIG_HVF', if_true: hvf_ss)
125
accel/kvm/kvm-accel-ops.c
Normal file
125
accel/kvm/kvm-accel-ops.c
Normal file
@ -0,0 +1,125 @@
|
||||
/*
|
||||
* QEMU KVM support
|
||||
*
|
||||
* Copyright IBM, Corp. 2008
|
||||
* Red Hat, Inc. 2008
|
||||
*
|
||||
* Authors:
|
||||
* Anthony Liguori <aliguori@us.ibm.com>
|
||||
* Glauber Costa <gcosta@redhat.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "sysemu/kvm_int.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "qapi/error.h"
|
||||
|
||||
#include <linux/kvm.h>
|
||||
#include "kvm-cpus.h"
|
||||
|
||||
static void *kvm_vcpu_thread_fn(void *arg)
|
||||
{
|
||||
CPUState *cpu = arg;
|
||||
int r;
|
||||
|
||||
rcu_register_thread();
|
||||
|
||||
bql_lock();
|
||||
qemu_thread_get_self(cpu->thread);
|
||||
cpu->thread_id = qemu_get_thread_id();
|
||||
current_cpu = cpu;
|
||||
|
||||
r = kvm_init_vcpu(cpu, &error_fatal);
|
||||
kvm_init_cpu_signals(cpu);
|
||||
|
||||
/* signal CPU creation */
|
||||
cpu_thread_signal_created(cpu);
|
||||
qemu_guest_random_seed_thread_part2(cpu->random_seed);
|
||||
|
||||
do {
|
||||
if (cpu_can_run(cpu)) {
|
||||
r = kvm_cpu_exec(cpu);
|
||||
if (r == EXCP_DEBUG) {
|
||||
cpu_handle_guest_debug(cpu);
|
||||
}
|
||||
}
|
||||
qemu_wait_io_event(cpu);
|
||||
} while (!cpu->unplug || cpu_can_run(cpu));
|
||||
|
||||
kvm_destroy_vcpu(cpu);
|
||||
cpu_thread_signal_destroyed(cpu);
|
||||
bql_unlock();
|
||||
rcu_unregister_thread();
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static void kvm_start_vcpu_thread(CPUState *cpu)
|
||||
{
|
||||
char thread_name[VCPU_THREAD_NAME_SIZE];
|
||||
|
||||
snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
|
||||
cpu->cpu_index);
|
||||
qemu_thread_create(cpu->thread, thread_name, kvm_vcpu_thread_fn,
|
||||
cpu, QEMU_THREAD_JOINABLE);
|
||||
}
|
||||
|
||||
static bool kvm_vcpu_thread_is_idle(CPUState *cpu)
|
||||
{
|
||||
return !kvm_halt_in_kernel();
|
||||
}
|
||||
|
||||
static bool kvm_cpus_are_resettable(void)
|
||||
{
|
||||
return !kvm_enabled() || !kvm_state->guest_state_protected;
|
||||
}
|
||||
|
||||
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
|
||||
static int kvm_update_guest_debug_ops(CPUState *cpu)
|
||||
{
|
||||
return kvm_update_guest_debug(cpu, 0);
|
||||
}
|
||||
#endif
|
||||
|
||||
static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
|
||||
|
||||
ops->create_vcpu_thread = kvm_start_vcpu_thread;
|
||||
ops->cpu_thread_is_idle = kvm_vcpu_thread_is_idle;
|
||||
ops->cpus_are_resettable = kvm_cpus_are_resettable;
|
||||
ops->synchronize_post_reset = kvm_cpu_synchronize_post_reset;
|
||||
ops->synchronize_post_init = kvm_cpu_synchronize_post_init;
|
||||
ops->synchronize_state = kvm_cpu_synchronize_state;
|
||||
ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;
|
||||
|
||||
#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
|
||||
ops->update_guest_debug = kvm_update_guest_debug_ops;
|
||||
ops->supports_guest_debug = kvm_supports_guest_debug;
|
||||
ops->insert_breakpoint = kvm_insert_breakpoint;
|
||||
ops->remove_breakpoint = kvm_remove_breakpoint;
|
||||
ops->remove_all_breakpoints = kvm_remove_all_breakpoints;
|
||||
#endif
|
||||
}
|
||||
|
||||
static const TypeInfo kvm_accel_ops_type = {
|
||||
.name = ACCEL_OPS_NAME("kvm"),
|
||||
|
||||
.parent = TYPE_ACCEL_OPS,
|
||||
.class_init = kvm_accel_ops_class_init,
|
||||
.abstract = true,
|
||||
};
|
||||
|
||||
static void kvm_accel_ops_register_types(void)
|
||||
{
|
||||
type_register_static(&kvm_accel_ops_type);
|
||||
}
|
||||
type_init(kvm_accel_ops_register_types);
|
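kvm_accel_ops_class_init() above only fills in a table of function pointers; the generic cpus layer calls whichever hooks are non-NULL and falls back to defaults otherwise. The sketch below illustrates that ops-table idea with hypothetical names; it is not the QEMU AccelOpsClass and not part of this commit.

#include <stdbool.h>
#include <stdio.h>

struct example_accel_ops {
    void (*create_vcpu_thread)(int cpu_index);
    bool (*cpu_thread_is_idle)(int cpu_index);   /* optional hook */
};

static void kvm_like_create_vcpu_thread(int cpu_index)
{
    printf("would start vCPU thread %d\n", cpu_index);
}

/* The class_init step: fill in only the hooks this accelerator provides. */
static void kvm_like_ops_init(struct example_accel_ops *ops)
{
    ops->create_vcpu_thread = kvm_like_create_vcpu_thread;
    /* cpu_thread_is_idle left NULL: caller falls back to a default */
}

int main(void)
{
    struct example_accel_ops ops = {0};
    kvm_like_ops_init(&ops);

    ops.create_vcpu_thread(0);
    bool idle = ops.cpu_thread_is_idle ? ops.cpu_thread_is_idle(0) : true;
    printf("idle: %d\n", idle);
    return 0;
}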
4435
accel/kvm/kvm-all.c
Normal file
4435
accel/kvm/kvm-all.c
Normal file
File diff suppressed because it is too large
Load Diff
25
accel/kvm/kvm-cpus.h
Normal file
25
accel/kvm/kvm-cpus.h
Normal file
@ -0,0 +1,25 @@
/*
 * Accelerator CPUS Interface
 *
 * Copyright 2020 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef KVM_CPUS_H
#define KVM_CPUS_H

#include "sysemu/cpus.h"

int kvm_init_vcpu(CPUState *cpu, Error **errp);
int kvm_cpu_exec(CPUState *cpu);
void kvm_destroy_vcpu(CPUState *cpu);
void kvm_cpu_synchronize_post_reset(CPUState *cpu);
void kvm_cpu_synchronize_post_init(CPUState *cpu);
void kvm_cpu_synchronize_pre_loadvm(CPUState *cpu);
bool kvm_supports_guest_debug(void);
int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
void kvm_remove_all_breakpoints(CPUState *cpu);
#endif /* KVM_CPUS_H */
7
accel/kvm/meson.build
Normal file
7
accel/kvm/meson.build
Normal file
@ -0,0 +1,7 @@
kvm_ss = ss.source_set()
kvm_ss.add(files(
  'kvm-all.c',
  'kvm-accel-ops.c',
))

specific_ss.add_all(when: 'CONFIG_KVM', if_true: kvm_ss)
39
accel/kvm/trace-events
Normal file
39
accel/kvm/trace-events
Normal file
@ -0,0 +1,39 @@
|
||||
# See docs/devel/tracing.rst for syntax documentation.
|
||||
|
||||
# kvm-all.c
|
||||
kvm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p"
|
||||
kvm_vm_ioctl(unsigned long type, void *arg) "type 0x%lx, arg %p"
|
||||
kvm_vcpu_ioctl(int cpu_index, unsigned long type, void *arg) "cpu_index %d, type 0x%lx, arg %p"
|
||||
kvm_run_exit(int cpu_index, uint32_t reason) "cpu_index %d, reason %d"
|
||||
kvm_device_ioctl(int fd, unsigned long type, void *arg) "dev fd %d, type 0x%lx, arg %p"
|
||||
kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
|
||||
kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
|
||||
kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id, int kvm_fd) "index: %d, id: %lu, kvm fd: %d"
|
||||
kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
|
||||
kvm_unpark_vcpu(unsigned long arch_cpu_id, const char *msg) "id: %lu %s"
|
||||
kvm_irqchip_commit_routes(void) ""
|
||||
kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
|
||||
kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
|
||||
kvm_irqchip_release_virq(int virq) "virq %d"
|
||||
kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
|
||||
kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
|
||||
kvm_set_user_memory(uint16_t as, uint16_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint32_t fd, uint64_t fd_offset, int ret) "AddrSpace#%d Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " guest_memfd=%d" " guest_memfd_offset=0x%" PRIx64 " ret=%d"
|
||||
kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
|
||||
kvm_resample_fd_notify(int gsi) "gsi %d"
|
||||
kvm_dirty_ring_full(int id) "vcpu %d"
|
||||
kvm_dirty_ring_reap_vcpu(int id) "vcpu %d"
|
||||
kvm_dirty_ring_page(int vcpu, uint32_t slot, uint64_t offset) "vcpu %d fetch %"PRIu32" offset 0x%"PRIx64
|
||||
kvm_dirty_ring_reaper(const char *s) "%s"
|
||||
kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
|
||||
kvm_dirty_ring_reaper_kick(const char *reason) "%s"
|
||||
kvm_dirty_ring_flush(int finished) "%d"
|
||||
kvm_failed_get_vcpu_mmap_size(void) ""
|
||||
kvm_cpu_exec(void) ""
|
||||
kvm_interrupt_exit_request(void) ""
|
||||
kvm_io_window_exit(void) ""
|
||||
kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32
|
||||
kvm_convert_memory(uint64_t start, uint64_t size, const char *msg) "start 0x%" PRIx64 " size 0x%" PRIx64 " %s"
|
||||
kvm_memory_fault(uint64_t start, uint64_t size, uint64_t flags) "start 0x%" PRIx64 " size 0x%" PRIx64 " flags 0x%" PRIx64
|
||||
kvm_slots_grow(unsigned int old, unsigned int new) "%u -> %u"
|
1
accel/kvm/trace.h
Normal file
1
accel/kvm/trace.h
Normal file
@ -0,0 +1 @@
#include "trace/trace-accel_kvm.h"
15
accel/meson.build
Normal file
15
accel/meson.build
Normal file
@ -0,0 +1,15 @@
specific_ss.add(files('accel-target.c'))
system_ss.add(files('accel-system.c', 'accel-blocker.c'))
user_ss.add(files('accel-user.c'))

subdir('tcg')
if have_system
  subdir('hvf')
  subdir('qtest')
  subdir('kvm')
  subdir('xen')
  subdir('stubs')
endif

# qtest
system_ss.add(files('dummy-cpus.c'))
1
accel/qtest/meson.build
Normal file
1
accel/qtest/meson.build
Normal file
@ -0,0 +1 @@
qtest_module_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: files('qtest.c'))
85
accel/qtest/qtest.c
Normal file
85
accel/qtest/qtest.c
Normal file
@ -0,0 +1,85 @@
|
||||
/*
|
||||
* QTest accelerator code
|
||||
*
|
||||
* Copyright IBM, Corp. 2011
|
||||
*
|
||||
* Authors:
|
||||
* Anthony Liguori <aliguori@us.ibm.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/rcu.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/module.h"
|
||||
#include "qemu/option.h"
|
||||
#include "qemu/config-file.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "sysemu/qtest.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "qemu/guest-random.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "hw/core/cpu.h"
|
||||
|
||||
static int64_t qtest_clock_counter;
|
||||
|
||||
static int64_t qtest_get_virtual_clock(void)
|
||||
{
|
||||
return qatomic_read_i64(&qtest_clock_counter);
|
||||
}
|
||||
|
||||
static void qtest_set_virtual_clock(int64_t count)
|
||||
{
|
||||
qatomic_set_i64(&qtest_clock_counter, count);
|
||||
}
|
||||
|
||||
static int qtest_init_accel(MachineState *ms)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void qtest_accel_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelClass *ac = ACCEL_CLASS(oc);
|
||||
ac->name = "QTest";
|
||||
ac->init_machine = qtest_init_accel;
|
||||
ac->allowed = &qtest_allowed;
|
||||
}
|
||||
|
||||
#define TYPE_QTEST_ACCEL ACCEL_CLASS_NAME("qtest")
|
||||
|
||||
static const TypeInfo qtest_accel_type = {
|
||||
.name = TYPE_QTEST_ACCEL,
|
||||
.parent = TYPE_ACCEL,
|
||||
.class_init = qtest_accel_class_init,
|
||||
};
|
||||
module_obj(TYPE_QTEST_ACCEL);
|
||||
|
||||
static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)
|
||||
{
|
||||
AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
|
||||
|
||||
ops->create_vcpu_thread = dummy_start_vcpu_thread;
|
||||
ops->get_virtual_clock = qtest_get_virtual_clock;
|
||||
ops->set_virtual_clock = qtest_set_virtual_clock;
|
||||
};
|
||||
|
||||
static const TypeInfo qtest_accel_ops_type = {
|
||||
.name = ACCEL_OPS_NAME("qtest"),
|
||||
|
||||
.parent = TYPE_ACCEL_OPS,
|
||||
.class_init = qtest_accel_ops_class_init,
|
||||
.abstract = true,
|
||||
};
|
||||
module_obj(ACCEL_OPS_NAME("qtest"));
|
||||
|
||||
static void qtest_type_init(void)
|
||||
{
|
||||
type_register_static(&qtest_accel_type);
|
||||
type_register_static(&qtest_accel_ops_type);
|
||||
}
|
||||
|
||||
type_init(qtest_type_init);
|
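The qtest accelerator above keeps its virtual clock in a single 64-bit counter accessed with qatomic_read_i64()/qatomic_set_i64(). The same idea expressed with C11 atomics, as a standalone sketch (not part of this commit):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* 64-bit virtual clock, advanced by the test driver, read by timer code. */
static _Atomic int64_t clock_counter;

static int64_t clock_get(void)
{
    return atomic_load_explicit(&clock_counter, memory_order_relaxed);
}

static void clock_set(int64_t count)
{
    atomic_store_explicit(&clock_counter, count, memory_order_relaxed);
}

int main(void)
{
    clock_set(1000000);                 /* advance the clock by 1 ms in ns */
    printf("%lld\n", (long long)clock_get());
    return 0;
}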
136
accel/stubs/kvm-stub.c
Normal file
136
accel/stubs/kvm-stub.c
Normal file
@ -0,0 +1,136 @@
|
||||
/*
|
||||
* QEMU KVM stub
|
||||
*
|
||||
* Copyright Red Hat, Inc. 2010
|
||||
*
|
||||
* Author: Paolo Bonzini <pbonzini@redhat.com>
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "sysemu/kvm.h"
|
||||
#include "hw/pci/msi.h"
|
||||
|
||||
KVMState *kvm_state;
|
||||
bool kvm_kernel_irqchip;
|
||||
bool kvm_async_interrupts_allowed;
|
||||
bool kvm_resamplefds_allowed;
|
||||
bool kvm_msi_via_irqfd_allowed;
|
||||
bool kvm_gsi_routing_allowed;
|
||||
bool kvm_gsi_direct_mapping;
|
||||
bool kvm_allowed;
|
||||
bool kvm_readonly_mem_allowed;
|
||||
bool kvm_msi_use_devid;
|
||||
|
||||
void kvm_flush_coalesced_mmio_buffer(void)
|
||||
{
|
||||
}
|
||||
|
||||
void kvm_cpu_synchronize_state(CPUState *cpu)
|
||||
{
|
||||
}
|
||||
|
||||
bool kvm_has_sync_mmu(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
int kvm_on_sigbus(int code, void *addr)
|
||||
{
|
||||
return 1;
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_msi_route(KVMRouteChange *c, int vector, PCIDevice *dev)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
void kvm_init_irq_routing(KVMState *s)
|
||||
{
|
||||
}
|
||||
|
||||
void kvm_irqchip_release_virq(KVMState *s, int virq)
|
||||
{
|
||||
}
|
||||
|
||||
int kvm_irqchip_update_msi_route(KVMState *s, int virq, MSIMessage msg,
|
||||
PCIDevice *dev)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
void kvm_irqchip_commit_routes(KVMState *s)
|
||||
{
|
||||
}
|
||||
|
||||
void kvm_irqchip_add_change_notifier(Notifier *n)
|
||||
{
|
||||
}
|
||||
|
||||
void kvm_irqchip_remove_change_notifier(Notifier *n)
|
||||
{
|
||||
}
|
||||
|
||||
void kvm_irqchip_change_notify(void)
|
||||
{
|
||||
}
|
||||
|
||||
int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
||||
EventNotifier *rn, int virq)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
|
||||
int virq)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_max_memslots(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
unsigned int kvm_get_free_memslots(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
void kvm_init_cpu_signals(CPUState *cpu)
|
||||
{
|
||||
abort();
|
||||
}
|
||||
|
||||
bool kvm_arm_supports_user_irq(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
bool kvm_dirty_ring_enabled(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
uint32_t kvm_dirty_ring_size(void)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
bool kvm_hwpoisoned_mem(void)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
|
||||
{
|
||||
return -ENOSYS;
|
||||
}
|
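kvm-stub.c above exists so that builds without KVM still link: every entry point is a no-op or returns a "not supported" value such as -ENOSYS, and callers already handle that. A minimal standalone illustration of the same link-time stubbing idea, with a hypothetical API (not part of this commit):

#include <errno.h>
#include <stdio.h>

/* Hypothetical backend API; a real build links either the full
 * implementation or this stub translation unit, never both. */
int backend_add_route(int vector);
void backend_flush(void);

/* Stub versions: keep callers linking, report "not supported" at runtime. */
int backend_add_route(int vector)
{
    (void)vector;
    return -ENOSYS;
}

void backend_flush(void)
{
    /* nothing to do without the backend */
}

int main(void)
{
    if (backend_add_route(5) == -ENOSYS) {
        printf("backend not compiled in, falling back\n");
    }
    backend_flush();
    return 0;
}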
6
accel/stubs/meson.build
Normal file
6
accel/stubs/meson.build
Normal file
@ -0,0 +1,6 @@
system_stubs_ss = ss.source_set()
system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))

specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss)
29
accel/stubs/tcg-stub.c
Normal file
29
accel/stubs/tcg-stub.c
Normal file
@ -0,0 +1,29 @@
/*
 * QEMU TCG accelerator stub
 *
 * Copyright Red Hat, Inc. 2013
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "exec/tb-flush.h"
#include "exec/exec-all.h"

void tb_flush(CPUState *cpu)
{
}

G_NORETURN void cpu_loop_exit(CPUState *cpu)
{
    g_assert_not_reached();
}

G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
{
    g_assert_not_reached();
}
16
accel/stubs/xen-stub.c
Normal file
16
accel/stubs/xen-stub.c
Normal file
@ -0,0 +1,16 @@
/*
 * Copyright (C) 2014 Citrix Systems UK Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "sysemu/xen.h"
#include "qapi/qapi-commands-migration.h"

bool xen_allowed;

void qmp_xen_set_global_dirty_log(bool enable, Error **errp)
{
}
126
accel/tcg/atomic_common.c.inc
Normal file
126
accel/tcg/atomic_common.c.inc
Normal file
@ -0,0 +1,126 @@
|
||||
/*
|
||||
* Common Atomic Helper Functions
|
||||
*
|
||||
* This file should be included before the various instantiations of
|
||||
* the atomic_template.h helpers.
|
||||
*
|
||||
* Copyright (c) 2019 Linaro
|
||||
* Written by Alex Bennée <alex.bennee@linaro.org>
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
|
||||
static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
|
||||
uint64_t read_value_low,
|
||||
uint64_t read_value_high,
|
||||
uint64_t write_value_low,
|
||||
uint64_t write_value_high,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
read_value_low, read_value_high,
|
||||
oi, QEMU_PLUGIN_MEM_R);
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
write_value_low, write_value_high,
|
||||
oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Atomic helpers callable from TCG.
|
||||
* These have a common interface and all defer to cpu_atomic_*
|
||||
* using the host return address from GETPC().
|
||||
*/
|
||||
|
||||
#define CMPXCHG_HELPER(OP, TYPE) \
|
||||
TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr, \
|
||||
TYPE oldv, TYPE newv, uint32_t oi) \
|
||||
{ return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }
|
||||
|
||||
CMPXCHG_HELPER(cmpxchgb, uint32_t)
|
||||
CMPXCHG_HELPER(cmpxchgw_be, uint32_t)
|
||||
CMPXCHG_HELPER(cmpxchgw_le, uint32_t)
|
||||
CMPXCHG_HELPER(cmpxchgl_be, uint32_t)
|
||||
CMPXCHG_HELPER(cmpxchgl_le, uint32_t)
|
||||
|
||||
#ifdef CONFIG_ATOMIC64
|
||||
CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
|
||||
CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
|
||||
#endif
|
||||
|
||||
#if HAVE_CMPXCHG128
|
||||
CMPXCHG_HELPER(cmpxchgo_be, Int128)
|
||||
CMPXCHG_HELPER(cmpxchgo_le, Int128)
|
||||
#endif
|
||||
|
||||
#undef CMPXCHG_HELPER
|
||||
|
||||
Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
|
||||
Int128 cmpv, Int128 newv, uint32_t oi)
|
||||
{
|
||||
#if TCG_TARGET_REG_BITS == 32
|
||||
uintptr_t ra = GETPC();
|
||||
Int128 oldv;
|
||||
|
||||
oldv = cpu_ld16_mmu(env, addr, oi, ra);
|
||||
if (int128_eq(oldv, cmpv)) {
|
||||
cpu_st16_mmu(env, addr, newv, oi, ra);
|
||||
} else {
|
||||
/* Even with comparison failure, still need a write cycle. */
|
||||
probe_write(env, addr, 16, get_mmuidx(oi), ra);
|
||||
}
|
||||
return oldv;
|
||||
#else
|
||||
g_assert_not_reached();
|
||||
#endif
|
||||
}
|
||||
|
||||
#define ATOMIC_HELPER(OP, TYPE) \
|
||||
TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr, \
|
||||
TYPE val, uint32_t oi) \
|
||||
{ return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }
|
||||
|
||||
#ifdef CONFIG_ATOMIC64
|
||||
#define GEN_ATOMIC_HELPERS(OP) \
|
||||
ATOMIC_HELPER(glue(OP,b), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,l_le), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,q_be), uint64_t) \
|
||||
ATOMIC_HELPER(glue(OP,q_le), uint64_t)
|
||||
#else
|
||||
#define GEN_ATOMIC_HELPERS(OP) \
|
||||
ATOMIC_HELPER(glue(OP,b), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,w_be), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,w_le), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,l_be), uint32_t) \
|
||||
ATOMIC_HELPER(glue(OP,l_le), uint32_t)
|
||||
#endif
|
||||
|
||||
GEN_ATOMIC_HELPERS(fetch_add)
|
||||
GEN_ATOMIC_HELPERS(fetch_and)
|
||||
GEN_ATOMIC_HELPERS(fetch_or)
|
||||
GEN_ATOMIC_HELPERS(fetch_xor)
|
||||
GEN_ATOMIC_HELPERS(fetch_smin)
|
||||
GEN_ATOMIC_HELPERS(fetch_umin)
|
||||
GEN_ATOMIC_HELPERS(fetch_smax)
|
||||
GEN_ATOMIC_HELPERS(fetch_umax)
|
||||
|
||||
GEN_ATOMIC_HELPERS(add_fetch)
|
||||
GEN_ATOMIC_HELPERS(and_fetch)
|
||||
GEN_ATOMIC_HELPERS(or_fetch)
|
||||
GEN_ATOMIC_HELPERS(xor_fetch)
|
||||
GEN_ATOMIC_HELPERS(smin_fetch)
|
||||
GEN_ATOMIC_HELPERS(umin_fetch)
|
||||
GEN_ATOMIC_HELPERS(smax_fetch)
|
||||
GEN_ATOMIC_HELPERS(umax_fetch)
|
||||
|
||||
GEN_ATOMIC_HELPERS(xchg)
|
||||
|
||||
#undef ATOMIC_HELPER
|
||||
#undef GEN_ATOMIC_HELPERS
|
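The CMPXCHG_HELPER and GEN_ATOMIC_HELPERS macros above stamp out one thin wrapper function per operation and width via token pasting. A standalone sketch of that generation pattern with toy operations (not the TCG helpers, not part of this commit):

#include <stdint.h>
#include <stdio.h>

#define glue(a, b) a##b

/* Generate one wrapper per operation name, all with the same shape. */
#define GEN_WRAPPER(OP)                                   \
    static uint32_t glue(do_, OP)(uint32_t a, uint32_t b) \
    { return glue(impl_, OP)(a, b); }

static uint32_t impl_add(uint32_t a, uint32_t b) { return a + b; }
static uint32_t impl_and(uint32_t a, uint32_t b) { return a & b; }

GEN_WRAPPER(add)
GEN_WRAPPER(and)

#undef GEN_WRAPPER

int main(void)
{
    printf("%u %u\n", do_add(2, 3), do_and(6, 3));
    return 0;
}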
333
accel/tcg/atomic_template.h
Normal file
333
accel/tcg/atomic_template.h
Normal file
@ -0,0 +1,333 @@
|
||||
/*
|
||||
* Atomic helper templates
|
||||
* Included from tcg-runtime.c and cputlb.c.
|
||||
*
|
||||
* Copyright (c) 2016 Red Hat, Inc
|
||||
*
|
||||
* This library is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* This library is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with this library; if not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "qemu/plugin.h"
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
# define SUFFIX o
|
||||
# define DATA_TYPE Int128
|
||||
# define BSWAP bswap128
|
||||
# define SHIFT 4
|
||||
#elif DATA_SIZE == 8
|
||||
# define SUFFIX q
|
||||
# define DATA_TYPE aligned_uint64_t
|
||||
# define SDATA_TYPE aligned_int64_t
|
||||
# define BSWAP bswap64
|
||||
# define SHIFT 3
|
||||
#elif DATA_SIZE == 4
|
||||
# define SUFFIX l
|
||||
# define DATA_TYPE uint32_t
|
||||
# define SDATA_TYPE int32_t
|
||||
# define BSWAP bswap32
|
||||
# define SHIFT 2
|
||||
#elif DATA_SIZE == 2
|
||||
# define SUFFIX w
|
||||
# define DATA_TYPE uint16_t
|
||||
# define SDATA_TYPE int16_t
|
||||
# define BSWAP bswap16
|
||||
# define SHIFT 1
|
||||
#elif DATA_SIZE == 1
|
||||
# define SUFFIX b
|
||||
# define DATA_TYPE uint8_t
|
||||
# define SDATA_TYPE int8_t
|
||||
# define BSWAP
|
||||
# define SHIFT 0
|
||||
#else
|
||||
# error unsupported data size
|
||||
#endif
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
# define VALUE_LOW(val) int128_getlo(val)
|
||||
# define VALUE_HIGH(val) int128_gethi(val)
|
||||
#else
|
||||
# define VALUE_LOW(val) val
|
||||
# define VALUE_HIGH(val) 0
|
||||
#endif
|
||||
|
||||
#if DATA_SIZE >= 4
|
||||
# define ABI_TYPE DATA_TYPE
|
||||
#else
|
||||
# define ABI_TYPE uint32_t
|
||||
#endif
|
||||
|
||||
/* Define host-endian atomic operations. Note that END is used within
|
||||
the ATOMIC_NAME macro, and redefined below. */
|
||||
#if DATA_SIZE == 1
|
||||
# define END
|
||||
#elif HOST_BIG_ENDIAN
|
||||
# define END _be
|
||||
#else
|
||||
# define END _le
|
||||
#endif
|
||||
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
ABI_TYPE cmpv, ABI_TYPE newv,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
#if DATA_SIZE == 16
|
||||
ret = atomic16_cmpxchg(haddr, cmpv, newv);
|
||||
#else
|
||||
ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
|
||||
#endif
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(newv),
|
||||
VALUE_HIGH(newv),
|
||||
oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#if DATA_SIZE < 16
|
||||
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
|
||||
DATA_SIZE, retaddr);
|
||||
DATA_TYPE ret;
|
||||
|
||||
ret = qatomic_xchg__nocheck(haddr, val);
|
||||
ATOMIC_MMU_CLEANUP;
|
||||
atomic_trace_rmw_post(env, addr,
|
||||
VALUE_LOW(ret),
|
||||
VALUE_HIGH(ret),
|
||||
VALUE_LOW(val),
|
||||
VALUE_HIGH(val),
|
||||
oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
#define GEN_ATOMIC_HELPER(X) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
DATA_TYPE *haddr, ret; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
ret = qatomic_##X(haddr, val); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(ret), \
|
||||
VALUE_HIGH(ret), \
|
||||
VALUE_LOW(val), \
|
||||
VALUE_HIGH(val), \
|
||||
oi); \
|
||||
return ret; \
|
||||
}
|
||||
|
||||
GEN_ATOMIC_HELPER(fetch_add)
|
||||
GEN_ATOMIC_HELPER(fetch_and)
|
||||
GEN_ATOMIC_HELPER(fetch_or)
|
||||
GEN_ATOMIC_HELPER(fetch_xor)
|
||||
GEN_ATOMIC_HELPER(add_fetch)
|
||||
GEN_ATOMIC_HELPER(and_fetch)
|
||||
GEN_ATOMIC_HELPER(or_fetch)
|
||||
GEN_ATOMIC_HELPER(xor_fetch)
|
||||
|
||||
#undef GEN_ATOMIC_HELPER
|
||||
|
||||
/*
|
||||
* These helpers are, as a whole, full barriers. Within the helper,
|
||||
* the leading barrier is explicit and the trailing barrier is within
|
||||
* cmpxchg primitive.
|
||||
*
|
||||
* Trace this load + RMW loop as a single RMW op. This way, regardless
|
||||
* of CF_PARALLEL's value, we'll trace just a read and a write.
|
||||
*/
|
||||
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
|
||||
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
|
||||
ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
|
||||
{ \
|
||||
XDATA_TYPE *haddr, cmp, old, new, val = xval; \
|
||||
haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
|
||||
smp_mb(); \
|
||||
cmp = qatomic_read__nocheck(haddr); \
|
||||
do { \
|
||||
old = cmp; new = FN(old, val); \
|
||||
cmp = qatomic_cmpxchg__nocheck(haddr, old, new); \
|
||||
} while (cmp != old); \
|
||||
ATOMIC_MMU_CLEANUP; \
|
||||
atomic_trace_rmw_post(env, addr, \
|
||||
VALUE_LOW(old), \
|
||||
VALUE_HIGH(old), \
|
||||
VALUE_LOW(xval), \
|
||||
VALUE_HIGH(xval), \
|
||||
oi); \
|
||||
return RET; \
|
||||
}
|
||||
|
||||
GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
|
||||
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
|
||||
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
|
||||
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)
|
||||
|
||||
GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
|
||||
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
|
||||
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
|
||||
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)
|
||||
|
||||
#undef GEN_ATOMIC_HELPER_FN
|
||||
#endif /* DATA SIZE < 16 */
|
||||
|
||||
#undef END
|
||||
|
||||
#if DATA_SIZE > 1
|
||||
|
||||
/* Define reverse-host-endian atomic operations. Note that END is used
|
||||
within the ATOMIC_NAME macro. */
|
||||
#if HOST_BIG_ENDIAN
|
||||
# define END _le
|
||||
#else
|
||||
# define END _be
|
||||
#endif
|
||||
|
||||
ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
|
||||
ABI_TYPE cmpv, ABI_TYPE newv,
|
||||
                              MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
                                         DATA_SIZE, retaddr);
    DATA_TYPE ret;

#if DATA_SIZE == 16
    ret = atomic16_cmpxchg(haddr, BSWAP(cmpv), BSWAP(newv));
#else
    ret = qatomic_cmpxchg__nocheck(haddr, BSWAP(cmpv), BSWAP(newv));
#endif
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr,
                          VALUE_LOW(ret),
                          VALUE_HIGH(ret),
                          VALUE_LOW(newv),
                          VALUE_HIGH(newv),
                          oi);
    return BSWAP(ret);
}

#if DATA_SIZE < 16
ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                           MemOpIdx oi, uintptr_t retaddr)
{
    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
                                         DATA_SIZE, retaddr);
    ABI_TYPE ret;

    ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
    ATOMIC_MMU_CLEANUP;
    atomic_trace_rmw_post(env, addr,
                          VALUE_LOW(ret),
                          VALUE_HIGH(ret),
                          VALUE_LOW(val),
                          VALUE_HIGH(val),
                          oi);
    return BSWAP(ret);
}

#define GEN_ATOMIC_HELPER(X) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
                        ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
{ \
    DATA_TYPE *haddr, ret; \
    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
    ret = qatomic_##X(haddr, BSWAP(val)); \
    ATOMIC_MMU_CLEANUP; \
    atomic_trace_rmw_post(env, addr, \
                          VALUE_LOW(ret), \
                          VALUE_HIGH(ret), \
                          VALUE_LOW(val), \
                          VALUE_HIGH(val), \
                          oi); \
    return BSWAP(ret); \
}

GEN_ATOMIC_HELPER(fetch_and)
GEN_ATOMIC_HELPER(fetch_or)
GEN_ATOMIC_HELPER(fetch_xor)
GEN_ATOMIC_HELPER(and_fetch)
GEN_ATOMIC_HELPER(or_fetch)
GEN_ATOMIC_HELPER(xor_fetch)

#undef GEN_ATOMIC_HELPER

/* These helpers are, as a whole, full barriers. Within the helper,
 * the leading barrier is explicit and the trailing barrier is within
 * cmpxchg primitive.
 *
 * Trace this load + RMW loop as a single RMW op. This way, regardless
 * of CF_PARALLEL's value, we'll trace just a read and a write.
 */
#define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET) \
ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr, \
                        ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
{ \
    XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval; \
    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr); \
    smp_mb(); \
    ldn = qatomic_read__nocheck(haddr); \
    do { \
        ldo = ldn; old = BSWAP(ldo); new = FN(old, val); \
        ldn = qatomic_cmpxchg__nocheck(haddr, ldo, BSWAP(new)); \
    } while (ldo != ldn); \
    ATOMIC_MMU_CLEANUP; \
    atomic_trace_rmw_post(env, addr, \
                          VALUE_LOW(old), \
                          VALUE_HIGH(old), \
                          VALUE_LOW(xval), \
                          VALUE_HIGH(xval), \
                          oi); \
    return RET; \
}

GEN_ATOMIC_HELPER_FN(fetch_smin, MIN, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umin, MIN, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_smax, MAX, SDATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(fetch_umax, MAX, DATA_TYPE, old)

GEN_ATOMIC_HELPER_FN(smin_fetch, MIN, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umin_fetch, MIN, DATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
GEN_ATOMIC_HELPER_FN(umax_fetch, MAX, DATA_TYPE, new)

/* Note that for addition, we need to use a separate cmpxchg loop instead
   of bswaps for the reverse-host-endian helpers. */
#define ADD(X, Y) (X + Y)
GEN_ATOMIC_HELPER_FN(fetch_add, ADD, DATA_TYPE, old)
GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
#undef ADD

#undef GEN_ATOMIC_HELPER_FN
#endif /* DATA_SIZE < 16 */

#undef END
#endif /* DATA_SIZE > 1 */

#undef BSWAP
#undef ABI_TYPE
#undef DATA_TYPE
#undef SDATA_TYPE
#undef SUFFIX
#undef DATA_SIZE
#undef SHIFT
#undef VALUE_LOW
#undef VALUE_HIGH
58
accel/tcg/cpu-exec-common.c
Normal file
58
accel/tcg/cpu-exec-common.c
Normal file
@ -0,0 +1,58 @@
/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "sysemu/cpus.h"
#include "sysemu/tcg.h"
#include "qemu/plugin.h"
#include "internal-common.h"

bool tcg_allowed;

/* exit the current TB, but without causing any exception to be raised */
void cpu_loop_exit_noexc(CPUState *cpu)
{
    cpu->exception_index = -1;
    cpu_loop_exit(cpu);
}

void cpu_loop_exit(CPUState *cpu)
{
    /* Undo the setting in cpu_tb_exec. */
    cpu->neg.can_do_io = true;
    /* Undo any setting in generated code. */
    qemu_plugin_disable_mem_helpers(cpu);
    siglongjmp(cpu->jmp_env, 1);
}

void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
{
    if (pc) {
        cpu_restore_state(cpu, pc);
    }
    cpu_loop_exit(cpu);
}

void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
{
    /* Prevent looping if already executing in a serial context. */
    g_assert(!cpu_in_serial_context(cpu));
    cpu->exception_index = EXCP_ATOMIC;
    cpu_loop_exit_restore(cpu, pc);
}
1130
accel/tcg/cpu-exec.c
Normal file
1130
accel/tcg/cpu-exec.c
Normal file
File diff suppressed because it is too large
2968
accel/tcg/cputlb.c
Normal file
2968
accel/tcg/cputlb.c
Normal file
File diff suppressed because it is too large
501
accel/tcg/icount-common.c
Normal file
501
accel/tcg/icount-common.c
Normal file
@ -0,0 +1,501 @@
|
||||
/*
|
||||
* QEMU System Emulator
|
||||
*
|
||||
* Copyright (c) 2003-2008 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/cutils.h"
|
||||
#include "migration/vmstate.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qemu/error-report.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/qtest.h"
|
||||
#include "qemu/main-loop.h"
|
||||
#include "qemu/option.h"
|
||||
#include "qemu/seqlock.h"
|
||||
#include "sysemu/replay.h"
|
||||
#include "sysemu/runstate.h"
|
||||
#include "hw/core/cpu.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "sysemu/cpu-timers-internal.h"
|
||||
|
||||
/*
|
||||
* ICOUNT: Instruction Counter
|
||||
*
|
||||
* this module is split off from cpu-timers because the icount part
|
||||
* is TCG-specific, and does not need to be built for other accels.
|
||||
*/
|
||||
static bool icount_sleep = true;
|
||||
/* Arbitrarily pick 1MIPS as the minimum allowable speed. */
|
||||
#define MAX_ICOUNT_SHIFT 10
|
||||
|
||||
/* Do not count executed instructions */
|
||||
ICountMode use_icount = ICOUNT_DISABLED;
|
||||
|
||||
static void icount_enable_precise(void)
|
||||
{
|
||||
/* Fixed conversion of insn to ns via "shift" option */
|
||||
use_icount = ICOUNT_PRECISE;
|
||||
}
|
||||
|
||||
static void icount_enable_adaptive(void)
|
||||
{
|
||||
/* Runtime adaptive algorithm to compute shift */
|
||||
use_icount = ICOUNT_ADAPTATIVE;
|
||||
}
|
||||
|
||||
/*
|
||||
* The current number of executed instructions is based on what we
|
||||
* originally budgeted minus the current state of the decrementing
|
||||
* icount counters in extra/u16.low.
|
||||
*/
|
||||
static int64_t icount_get_executed(CPUState *cpu)
|
||||
{
|
||||
return (cpu->icount_budget -
|
||||
(cpu->neg.icount_decr.u16.low + cpu->icount_extra));
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the global shared timer_state.qemu_icount to take into
|
||||
* account executed instructions. This is done by the TCG vCPU
|
||||
* thread so the main-loop can see time has moved forward.
|
||||
*/
|
||||
static void icount_update_locked(CPUState *cpu)
|
||||
{
|
||||
int64_t executed = icount_get_executed(cpu);
|
||||
cpu->icount_budget -= executed;
|
||||
|
||||
qatomic_set_i64(&timers_state.qemu_icount,
|
||||
timers_state.qemu_icount + executed);
|
||||
}
|
||||
|
||||
/*
|
||||
* Update the global shared timer_state.qemu_icount to take into
|
||||
* account executed instructions. This is done by the TCG vCPU
|
||||
* thread so the main-loop can see time has moved forward.
|
||||
*/
|
||||
void icount_update(CPUState *cpu)
|
||||
{
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
icount_update_locked(cpu);
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
}
|
||||
|
||||
static int64_t icount_get_raw_locked(void)
|
||||
{
|
||||
CPUState *cpu = current_cpu;
|
||||
|
||||
if (cpu && cpu->running) {
|
||||
if (!cpu->neg.can_do_io) {
|
||||
error_report("Bad icount read");
|
||||
exit(1);
|
||||
}
|
||||
/* Take into account what has run */
|
||||
icount_update_locked(cpu);
|
||||
}
|
||||
/* The read is protected by the seqlock, but needs atomic64 to avoid UB */
|
||||
return qatomic_read_i64(&timers_state.qemu_icount);
|
||||
}
|
||||
|
||||
static int64_t icount_get_locked(void)
|
||||
{
|
||||
int64_t icount = icount_get_raw_locked();
|
||||
return qatomic_read_i64(&timers_state.qemu_icount_bias) +
|
||||
icount_to_ns(icount);
|
||||
}
|
||||
|
||||
int64_t icount_get_raw(void)
|
||||
{
|
||||
int64_t icount;
|
||||
unsigned start;
|
||||
|
||||
do {
|
||||
start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||
icount = icount_get_raw_locked();
|
||||
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
|
||||
|
||||
return icount;
|
||||
}
|
||||
|
||||
/* Return the virtual CPU time, based on the instruction counter. */
|
||||
int64_t icount_get(void)
|
||||
{
|
||||
int64_t icount;
|
||||
unsigned start;
|
||||
|
||||
do {
|
||||
start = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||
icount = icount_get_locked();
|
||||
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, start));
|
||||
|
||||
return icount;
|
||||
}
|
||||
|
||||
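/* Each executed instruction accounts for 2^icount_time_shift virtual nanoseconds. */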
int64_t icount_to_ns(int64_t icount)
|
||||
{
|
||||
return icount << qatomic_read(&timers_state.icount_time_shift);
|
||||
}
|
||||
|
||||
/*
|
||||
* Correlation between real and virtual time is always going to be
|
||||
* fairly approximate, so ignore small variation.
|
||||
* When the guest is idle real and virtual time will be aligned in
|
||||
* the IO wait loop.
|
||||
*/
|
||||
#define ICOUNT_WOBBLE (NANOSECONDS_PER_SECOND / 10)
|
||||
|
||||
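/*
 * Compare virtual (icount) time against real time and nudge
 * icount_time_shift up or down so that emulated time keeps tracking
 * wall-clock time.
 */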
static void icount_adjust(void)
|
||||
{
|
||||
int64_t cur_time;
|
||||
int64_t cur_icount;
|
||||
int64_t delta;
|
||||
|
||||
/* If the VM is not running, then do nothing. */
|
||||
if (!runstate_is_running()) {
|
||||
return;
|
||||
}
|
||||
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
cur_time = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
|
||||
cpu_get_clock_locked());
|
||||
cur_icount = icount_get_locked();
|
||||
|
||||
delta = cur_icount - cur_time;
|
||||
/* FIXME: This is a very crude algorithm, somewhat prone to oscillation. */
|
||||
if (delta > 0
|
||||
&& timers_state.last_delta + ICOUNT_WOBBLE < delta * 2
|
||||
&& timers_state.icount_time_shift > 0) {
|
||||
/* The guest is getting too far ahead. Slow time down. */
|
||||
qatomic_set(&timers_state.icount_time_shift,
|
||||
timers_state.icount_time_shift - 1);
|
||||
}
|
||||
if (delta < 0
|
||||
&& timers_state.last_delta - ICOUNT_WOBBLE > delta * 2
|
||||
&& timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
|
||||
/* The guest is getting too far behind. Speed time up. */
|
||||
qatomic_set(&timers_state.icount_time_shift,
|
||||
timers_state.icount_time_shift + 1);
|
||||
}
|
||||
timers_state.last_delta = delta;
|
||||
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||
cur_icount - (timers_state.qemu_icount
|
||||
<< timers_state.icount_time_shift));
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
}
|
||||
|
||||
static void icount_adjust_rt(void *opaque)
|
||||
{
|
||||
timer_mod(timers_state.icount_rt_timer,
|
||||
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
|
||||
icount_adjust();
|
||||
}
|
||||
|
||||
static void icount_adjust_vm(void *opaque)
|
||||
{
|
||||
timer_mod(timers_state.icount_vm_timer,
|
||||
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||
NANOSECONDS_PER_SECOND / 10);
|
||||
icount_adjust();
|
||||
}
|
||||
|
||||
int64_t icount_round(int64_t count)
|
||||
{
|
||||
int shift = qatomic_read(&timers_state.icount_time_shift);
|
||||
return (count + (1 << shift) - 1) >> shift;
|
||||
}
|
||||
|
||||
static void icount_warp_rt(void)
|
||||
{
|
||||
unsigned seq;
|
||||
int64_t warp_start;
|
||||
|
||||
/*
|
||||
* The icount_warp_timer is rescheduled soon after vm_clock_warp_start
|
||||
* changes from -1 to another value, so the race here is okay.
|
||||
*/
|
||||
do {
|
||||
seq = seqlock_read_begin(&timers_state.vm_clock_seqlock);
|
||||
warp_start = timers_state.vm_clock_warp_start;
|
||||
} while (seqlock_read_retry(&timers_state.vm_clock_seqlock, seq));
|
||||
|
||||
if (warp_start == -1) {
|
||||
return;
|
||||
}
|
||||
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
if (runstate_is_running()) {
|
||||
int64_t clock = REPLAY_CLOCK_LOCKED(REPLAY_CLOCK_VIRTUAL_RT,
|
||||
cpu_get_clock_locked());
|
||||
int64_t warp_delta;
|
||||
|
||||
warp_delta = clock - timers_state.vm_clock_warp_start;
|
||||
if (icount_enabled() == ICOUNT_ADAPTATIVE) {
|
||||
/*
|
||||
* In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far
|
||||
* ahead of real time (it might already be ahead so careful not
|
||||
* to go backwards).
|
||||
*/
|
||||
int64_t cur_icount = icount_get_locked();
|
||||
int64_t delta = clock - cur_icount;
|
||||
|
||||
if (delta < 0) {
|
||||
delta = 0;
|
||||
}
|
||||
warp_delta = MIN(warp_delta, delta);
|
||||
}
|
||||
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||
timers_state.qemu_icount_bias + warp_delta);
|
||||
}
|
||||
timers_state.vm_clock_warp_start = -1;
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
|
||||
if (qemu_clock_expired(QEMU_CLOCK_VIRTUAL)) {
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
}
|
||||
|
||||
static void icount_timer_cb(void *opaque)
|
||||
{
|
||||
/*
|
||||
* No need for a checkpoint because the timer already synchronizes
|
||||
* with CHECKPOINT_CLOCK_VIRTUAL_RT.
|
||||
*/
|
||||
icount_warp_rt();
|
||||
}
|
||||
|
||||
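/*
 * When the vCPUs go idle, warp QEMU_CLOCK_VIRTUAL forward (or schedule a
 * warp) to the next timer deadline, so the guest does not stall waiting
 * for a timer interrupt that virtual time would never reach.
 */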
void icount_start_warp_timer(void)
|
||||
{
|
||||
int64_t clock;
|
||||
int64_t deadline;
|
||||
|
||||
assert(icount_enabled());
|
||||
|
||||
/*
|
||||
* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
|
||||
* do not fire, so computing the deadline does not make sense.
|
||||
*/
|
||||
if (!runstate_is_running()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (replay_mode != REPLAY_MODE_PLAY) {
|
||||
if (!all_cpu_threads_idle()) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (qtest_enabled()) {
|
||||
/* When testing, qtest commands advance icount. */
|
||||
return;
|
||||
}
|
||||
|
||||
replay_checkpoint(CHECKPOINT_CLOCK_WARP_START);
|
||||
} else {
|
||||
/* warp clock deterministically in record/replay mode */
|
||||
if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_START)) {
|
||||
/*
|
||||
* vCPU is sleeping and warp can't be started.
|
||||
* It is probably a race condition: notification sent
|
||||
* to vCPU was processed in advance and vCPU went to sleep.
|
||||
* Therefore we have to wake it up for doing something.
|
||||
*/
|
||||
if (replay_has_event()) {
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
/* We want to use the earliest deadline from ALL vm_clocks */
|
||||
clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
|
||||
deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
|
||||
~QEMU_TIMER_ATTR_EXTERNAL);
|
||||
if (deadline < 0) {
|
||||
if (!icount_sleep) {
|
||||
warn_report_once("icount sleep disabled and no active timers");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (deadline > 0) {
|
||||
/*
|
||||
* Ensure QEMU_CLOCK_VIRTUAL proceeds even when the virtual CPU goes to
|
||||
* sleep. Otherwise, the CPU might be waiting for a future timer
|
||||
* interrupt to wake it up, but the interrupt never comes because
|
||||
* the vCPU isn't running any insns and thus doesn't advance the
|
||||
* QEMU_CLOCK_VIRTUAL.
|
||||
*/
|
||||
if (!icount_sleep) {
|
||||
/*
|
||||
* We never let VCPUs sleep in no sleep icount mode.
|
||||
* If there is a pending QEMU_CLOCK_VIRTUAL timer we just advance
|
||||
* to the next QEMU_CLOCK_VIRTUAL event and notify it.
|
||||
* It is useful when we want a deterministic execution time,
|
||||
* isolated from host latencies.
|
||||
*/
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
qatomic_set_i64(&timers_state.qemu_icount_bias,
|
||||
timers_state.qemu_icount_bias + deadline);
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
} else {
|
||||
/*
|
||||
* We do stop VCPUs and only advance QEMU_CLOCK_VIRTUAL after some
|
||||
* "real" time, (related to the time left until the next event) has
|
||||
* passed. The QEMU_CLOCK_VIRTUAL_RT clock will do this.
|
||||
* This avoids that the warps are visible externally; for example,
|
||||
* you will not be sending network packets continuously instead of
|
||||
* every 100ms.
|
||||
*/
|
||||
seqlock_write_lock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
if (timers_state.vm_clock_warp_start == -1
|
||||
|| timers_state.vm_clock_warp_start > clock) {
|
||||
timers_state.vm_clock_warp_start = clock;
|
||||
}
|
||||
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
|
||||
&timers_state.vm_clock_lock);
|
||||
timer_mod_anticipate(timers_state.icount_warp_timer,
|
||||
clock + deadline);
|
||||
}
|
||||
} else if (deadline == 0) {
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
}
|
||||
|
||||
void icount_account_warp_timer(void)
|
||||
{
|
||||
if (!icount_sleep) {
|
||||
return;
|
||||
}
|
||||
|
||||
/*
|
||||
* Nothing to do if the VM is stopped: QEMU_CLOCK_VIRTUAL timers
|
||||
* do not fire, so computing the deadline does not make sense.
|
||||
*/
|
||||
if (!runstate_is_running()) {
|
||||
return;
|
||||
}
|
||||
|
||||
replay_async_events();
|
||||
|
||||
/* warp clock deterministically in record/replay mode */
|
||||
if (!replay_checkpoint(CHECKPOINT_CLOCK_WARP_ACCOUNT)) {
|
||||
return;
|
||||
}
|
||||
|
||||
timer_del(timers_state.icount_warp_timer);
|
||||
icount_warp_rt();
|
||||
}
|
||||
|
||||
bool icount_configure(QemuOpts *opts, Error **errp)
|
||||
{
|
||||
const char *option = qemu_opt_get(opts, "shift");
|
||||
bool sleep = qemu_opt_get_bool(opts, "sleep", true);
|
||||
bool align = qemu_opt_get_bool(opts, "align", false);
|
||||
long time_shift = -1;
|
||||
|
||||
if (!option) {
|
||||
if (qemu_opt_get(opts, "align") != NULL) {
|
||||
error_setg(errp, "Please specify shift option when using align");
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
if (align && !sleep) {
|
||||
error_setg(errp, "align=on and sleep=off are incompatible");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (strcmp(option, "auto") != 0) {
|
||||
if (qemu_strtol(option, NULL, 0, &time_shift) < 0
|
||||
|| time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
|
||||
error_setg(errp, "icount: Invalid shift value");
|
||||
return false;
|
||||
}
|
||||
} else if (icount_align_option) {
|
||||
error_setg(errp, "shift=auto and align=on are incompatible");
|
||||
return false;
|
||||
} else if (!icount_sleep) {
|
||||
error_setg(errp, "shift=auto and sleep=off are incompatible");
|
||||
return false;
|
||||
}
|
||||
|
||||
icount_sleep = sleep;
|
||||
if (icount_sleep) {
|
||||
timers_state.icount_warp_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL_RT,
|
||||
icount_timer_cb, NULL);
|
||||
}
|
||||
|
||||
icount_align_option = align;
|
||||
|
||||
if (time_shift >= 0) {
|
||||
timers_state.icount_time_shift = time_shift;
|
||||
icount_enable_precise();
|
||||
return true;
|
||||
}
|
||||
|
||||
icount_enable_adaptive();
|
||||
|
||||
/*
|
||||
* 125MIPS seems a reasonable initial guess at the guest speed.
|
||||
* It will be corrected fairly quickly anyway.
|
||||
*/
|
||||
timers_state.icount_time_shift = 3;
|
||||
|
||||
/*
|
||||
* Have both realtime and virtual time triggers for speed adjustment.
|
||||
* The realtime trigger catches emulated time passing too slowly,
|
||||
* the virtual time trigger catches emulated time passing too fast.
|
||||
* Realtime triggers occur even when idle, so use them less frequently
|
||||
* than VM triggers.
|
||||
*/
|
||||
timers_state.vm_clock_warp_start = -1;
|
||||
timers_state.icount_rt_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL_RT,
|
||||
icount_adjust_rt, NULL);
|
||||
timer_mod(timers_state.icount_rt_timer,
|
||||
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL_RT) + 1000);
|
||||
timers_state.icount_vm_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
|
||||
icount_adjust_vm, NULL);
|
||||
timer_mod(timers_state.icount_vm_timer,
|
||||
qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
|
||||
NANOSECONDS_PER_SECOND / 10);
|
||||
return true;
|
||||
}
|
||||
|
||||
void icount_notify_exit(void)
|
||||
{
|
||||
assert(icount_enabled());
|
||||
|
||||
if (current_cpu) {
|
||||
qemu_cpu_kick(current_cpu);
|
||||
qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
|
||||
}
|
||||
}
|
59
accel/tcg/internal-common.h
Normal file
59
accel/tcg/internal-common.h
Normal file
@ -0,0 +1,59 @@
/*
 * Internal execution defines for qemu (target agnostic)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_COMMON_H
#define ACCEL_TCG_INTERNAL_COMMON_H

#include "exec/cpu-common.h"
#include "exec/translation-block.h"

extern int64_t max_delay;
extern int64_t max_advance;

extern bool one_insn_per_tb;

/*
 * Return true if CS is not running in parallel with other cpus, either
 * because there are no other cpus or we are within an exclusive context.
 */
static inline bool cpu_in_serial_context(CPUState *cs)
{
    return !tcg_cflags_has(cs, CF_PARALLEL) || cpu_in_exclusive_context(cs);
}

/**
 * cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
 * @cs: CPUState pointer
 *
 * The memory callbacks are installed if a plugin has instrumented an
 * instruction for memory. This can be useful to know if you want to
 * force a slow path for a series of memory accesses.
 */
static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
{
#ifdef CONFIG_PLUGIN
    return !!cpu->neg.plugin_mem_cbs;
#else
    return false;
#endif
}

TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
                              uint64_t cs_base, uint32_t flags,
                              int cflags);
void page_init(void);
void tb_htable_init(void);
void tb_reset_jump(TranslationBlock *tb, int n);
TranslationBlock *tb_link_page(TranslationBlock *tb);
void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                               uintptr_t host_pc);

bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
void tcg_exec_unrealizefn(CPUState *cpu);

#endif
118
accel/tcg/internal-target.h
Normal file
118
accel/tcg/internal-target.h
Normal file
@ -0,0 +1,118 @@
/*
 * Internal execution defines for qemu (target specific)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1-or-later
 */

#ifndef ACCEL_TCG_INTERNAL_TARGET_H
#define ACCEL_TCG_INTERNAL_TARGET_H

#include "exec/exec-all.h"
#include "exec/translate-all.h"

/*
 * Access to the various translations structures need to be serialised
 * via locks for consistency. In user-mode emulation access to the
 * memory related structures are protected with mmap_lock.
 * In !user-mode we use per-page locks.
 */
#ifdef CONFIG_USER_ONLY
#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
#else
#define assert_memory_lock()
#endif

#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void) { }
#endif

#ifdef CONFIG_USER_ONLY
static inline void page_table_config_init(void) { }
#else
void page_table_config_init(void);
#endif

#ifdef CONFIG_USER_ONLY
/*
 * For user-only, page_protect sets the page read-only.
 * Since most execution is already on read-only pages, and we'd need to
 * account for other TBs on the same page, defer undoing any page protection
 * until we receive the write fault.
 */
static inline void tb_lock_page0(tb_page_addr_t p0)
{
    page_protect(p0);
}

static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
{
    page_protect(p1);
}

static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
static inline void tb_unlock_pages(TranslationBlock *tb) { }
#else
void tb_lock_page0(tb_page_addr_t);
void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
void tb_unlock_pages(TranslationBlock *);
#endif

#ifdef CONFIG_SOFTMMU
void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
                                   unsigned size,
                                   uintptr_t retaddr);
G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
#endif /* CONFIG_SOFTMMU */

bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);

/* Return the current PC from CPU, which may be cached in TB. */
static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
{
    if (tb_cflags(tb) & CF_PCREL) {
        return cpu->cc->get_pc(cpu);
    } else {
        return tb->pc;
    }
}

/**
 * tcg_req_mo:
 * @type: TCGBar
 *
 * Filter @type to the barrier that is required for the guest
 * memory ordering vs the host memory ordering. A non-zero
 * result indicates that some barrier is required.
 *
 * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
 * guest requires strict ordering.
 *
 * This is a macro so that it's constant even without optimization.
 */
#ifdef TCG_GUEST_DEFAULT_MO
# define tcg_req_mo(type) \
    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
#else
# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
#endif

/**
 * cpu_req_mo:
 * @type: TCGBar
 *
 * If tcg_req_mo indicates a barrier for @type is required
 * for the guest memory model, issue a host memory barrier.
 */
#define cpu_req_mo(type)          \
    do {                          \
        if (tcg_req_mo(type)) {   \
            smp_mb();             \
        }                         \
    } while (0)

#endif /* ACCEL_TCG_INTERNAL_H */
1120
accel/tcg/ldst_atomicity.c.inc
Normal file
1120
accel/tcg/ldst_atomicity.c.inc
Normal file
File diff suppressed because it is too large
560
accel/tcg/ldst_common.c.inc
Normal file
560
accel/tcg/ldst_common.c.inc
Normal file
@ -0,0 +1,560 @@
|
||||
/*
|
||||
* Routines common to user and system emulation of load/store.
|
||||
*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* SPDX-License-Identifier: GPL-2.0-or-later
|
||||
*
|
||||
* This work is licensed under the terms of the GNU GPL, version 2 or later.
|
||||
* See the COPYING file in the top-level directory.
|
||||
*/
|
||||
/*
|
||||
* Load helpers for tcg-ldst.h
|
||||
*/
|
||||
|
||||
tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
|
||||
return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
|
||||
}
|
||||
|
||||
/*
|
||||
* Provide signed versions of the load routines as well. We can of course
|
||||
* avoid this for 64-bit data, or for 32-bit data on 32-bit host.
|
||||
*/
|
||||
|
||||
tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
|
||||
}
|
||||
|
||||
Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
|
||||
}
|
||||
|
||||
Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
|
||||
{
|
||||
return helper_ld16_mmu(env, addr, oi, GETPC());
|
||||
}
|
||||
|
||||
/*
|
||||
* Store helpers for tcg-ldst.h
|
||||
*/
|
||||
|
||||
void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
|
||||
do_st1_mmu(env_cpu(env), addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
}
|
||||
|
||||
void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
|
||||
{
|
||||
helper_st16_mmu(env, addr, val, oi, GETPC());
|
||||
}
|
||||
|
||||
/*
|
||||
* Load helpers for cpu_ldst.h
|
||||
*/
|
||||
|
||||
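/* Report a completed guest load to any registered plugin memory callbacks. */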
static void plugin_load_cb(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t value_low,
|
||||
uint64_t value_high,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
value_low, value_high,
|
||||
oi, QEMU_PLUGIN_MEM_R);
|
||||
}
|
||||
}
|
||||
|
||||
uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint8_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
|
||||
ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint16_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint32_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
uint64_t ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
|
||||
plugin_load_cb(env, addr, ret, 0, oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
|
||||
MemOpIdx oi, uintptr_t ra)
|
||||
{
|
||||
Int128 ret;
|
||||
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
|
||||
plugin_load_cb(env, addr, int128_getlo(ret), int128_gethi(ret), oi);
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* Store helpers for cpu_ldst.h
|
||||
*/
|
||||
|
||||
static void plugin_store_cb(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t value_low,
|
||||
uint64_t value_high,
|
||||
MemOpIdx oi)
|
||||
{
|
||||
if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
|
||||
qemu_plugin_vcpu_mem_cb(env_cpu(env), addr,
|
||||
value_low, value_high,
|
||||
oi, QEMU_PLUGIN_MEM_W);
|
||||
}
|
||||
}
|
||||
|
||||
void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
helper_stb_mmu(env, addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
}
|
||||
|
||||
void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
|
||||
do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
}
|
||||
|
||||
void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
|
||||
do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
}
|
||||
|
||||
void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
|
||||
do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, val, 0, oi);
|
||||
}
|
||||
|
||||
void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
|
||||
MemOpIdx oi, uintptr_t retaddr)
|
||||
{
|
||||
tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
|
||||
do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
|
||||
plugin_store_cb(env, addr, int128_getlo(val), int128_gethi(val), oi);
|
||||
}
|
||||
|
||||
/*
|
||||
* Wrappers of the above
|
||||
*/
|
||||
|
||||
uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
|
||||
return cpu_ldb_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
int cpu_ldsb_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
return (int8_t)cpu_ldub_mmuidx_ra(env, addr, mmu_idx, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
|
||||
return cpu_ldw_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
return (int16_t)cpu_lduw_be_mmuidx_ra(env, addr, mmu_idx, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
|
||||
return cpu_ldl_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
|
||||
return cpu_ldq_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
|
||||
return cpu_ldw_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
return (int16_t)cpu_lduw_le_mmuidx_ra(env, addr, mmu_idx, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
|
||||
return cpu_ldl_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
|
||||
return cpu_ldq_mmu(env, addr, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);
|
||||
cpu_stb_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
|
||||
cpu_stw_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
|
||||
cpu_stl_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
|
||||
cpu_stq_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
|
||||
cpu_stw_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
|
||||
cpu_stl_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
|
||||
int mmu_idx, uintptr_t ra)
|
||||
{
|
||||
MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
|
||||
cpu_stq_mmu(env, addr, val, oi, ra);
|
||||
}
|
||||
|
||||
/*--------------------------*/
|
||||
|
||||
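/* The *_data_ra variants below use the CPU's current data MMU index. */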
uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
}
|
||||
|
||||
int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
return (int8_t)cpu_ldub_data_ra(env, addr, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
}
|
||||
|
||||
int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
return (int16_t)cpu_lduw_be_data_ra(env, addr, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
}
|
||||
|
||||
int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
return (int16_t)cpu_lduw_le_data_ra(env, addr, ra);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
|
||||
}
|
||||
|
||||
void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
}
|
||||
|
||||
void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
}
|
||||
|
||||
void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
}
|
||||
|
||||
void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
}
|
||||
|
||||
void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
}
|
||||
|
||||
void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint32_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
}
|
||||
|
||||
void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
|
||||
uint64_t val, uintptr_t ra)
|
||||
{
|
||||
int mmu_index = cpu_mmu_index(env_cpu(env), false);
|
||||
cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
|
||||
}
|
||||
|
||||
/*--------------------------*/
|
||||
|
||||
uint32_t cpu_ldub_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldub_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
int cpu_ldsb_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return (int8_t)cpu_ldub_data(env, addr);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_be_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_lduw_be_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
int cpu_ldsw_be_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return (int16_t)cpu_lduw_be_data(env, addr);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_be_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldl_be_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_be_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldq_be_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
uint32_t cpu_lduw_le_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_lduw_le_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
int cpu_ldsw_le_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return (int16_t)cpu_lduw_le_data(env, addr);
|
||||
}
|
||||
|
||||
uint32_t cpu_ldl_le_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldl_le_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
uint64_t cpu_ldq_le_data(CPUArchState *env, abi_ptr addr)
|
||||
{
|
||||
return cpu_ldq_le_data_ra(env, addr, 0);
|
||||
}
|
||||
|
||||
void cpu_stb_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stb_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stw_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stw_be_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stl_be_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stl_be_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stq_be_data(CPUArchState *env, abi_ptr addr, uint64_t val)
|
||||
{
|
||||
cpu_stq_be_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stw_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stw_le_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stl_le_data(CPUArchState *env, abi_ptr addr, uint32_t val)
|
||||
{
|
||||
cpu_stl_le_data_ra(env, addr, val, 0);
|
||||
}
|
||||
|
||||
void cpu_stq_le_data(CPUArchState *env, abi_ptr addr, uint64_t val)
|
||||
{
|
||||
cpu_stq_le_data_ra(env, addr, val, 0);
|
||||
}
|
37
accel/tcg/meson.build
Normal file
37
accel/tcg/meson.build
Normal file
@ -0,0 +1,37 @@
common_ss.add(when: 'CONFIG_TCG', if_true: files(
  'cpu-exec-common.c',
))
tcg_specific_ss = ss.source_set()
tcg_specific_ss.add(files(
  'tcg-all.c',
  'cpu-exec.c',
  'tb-maint.c',
  'tcg-runtime-gvec.c',
  'tcg-runtime.c',
  'translate-all.c',
  'translator.c',
))
tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
tcg_specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
tcg_specific_ss.add(when: 'CONFIG_VTUNE_JITPROFILING', if_true: [vtune_jitprofiling])
if get_option('plugins')
  tcg_specific_ss.add(files('plugin-gen.c'))
endif
specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss)

specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
  'cputlb.c',
  'watchpoint.c',
))

system_ss.add(when: ['CONFIG_TCG'], if_true: files(
  'icount-common.c',
  'monitor.c',
))

tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
  'tcg-accel-ops.c',
  'tcg-accel-ops-mttcg.c',
  'tcg-accel-ops-icount.c',
  'tcg-accel-ops-rr.c',
))
244
accel/tcg/monitor.c
Normal file
244
accel/tcg/monitor.c
Normal file
@ -0,0 +1,244 @@
|
||||
/*
|
||||
* SPDX-License-Identifier: LGPL-2.1-or-later
|
||||
*
|
||||
* QEMU TCG monitor
|
||||
*
|
||||
* Copyright (c) 2003-2005 Fabrice Bellard
|
||||
*/
|
||||
|
||||
#include "qemu/osdep.h"
|
||||
#include "qemu/accel.h"
|
||||
#include "qemu/qht.h"
|
||||
#include "qapi/error.h"
|
||||
#include "qapi/type-helpers.h"
|
||||
#include "qapi/qapi-commands-machine.h"
|
||||
#include "monitor/monitor.h"
|
||||
#include "sysemu/cpus.h"
|
||||
#include "sysemu/cpu-timers.h"
|
||||
#include "sysemu/tcg.h"
|
||||
#include "tcg/tcg.h"
|
||||
#include "internal-common.h"
|
||||
#include "tb-context.h"
|
||||
|
||||
|
||||
static void dump_drift_info(GString *buf)
|
||||
{
|
||||
if (!icount_enabled()) {
|
||||
return;
|
||||
}
|
||||
|
||||
g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
|
||||
(cpu_get_clock() - icount_get()) / SCALE_MS);
|
||||
if (icount_align_option) {
|
||||
g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
|
||||
-max_delay / SCALE_MS);
|
||||
g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
|
||||
max_advance / SCALE_MS);
|
||||
} else {
|
||||
g_string_append_printf(buf, "Max guest delay NA\n");
|
||||
g_string_append_printf(buf, "Max guest advance NA\n");
|
||||
}
|
||||
}
|
||||
|
||||
static void dump_accel_info(GString *buf)
|
||||
{
|
||||
AccelState *accel = current_accel();
|
||||
bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
|
||||
"one-insn-per-tb",
|
||||
&error_fatal);
|
||||
|
||||
g_string_append_printf(buf, "Accelerator settings:\n");
|
||||
g_string_append_printf(buf, "one-insn-per-tb: %s\n\n",
|
||||
one_insn_per_tb ? "on" : "off");
|
||||
}
|
||||
|
||||
static void print_qht_statistics(struct qht_stats hst, GString *buf)
|
||||
{
|
||||
uint32_t hgram_opts;
|
||||
size_t hgram_bins;
|
||||
char *hgram;
|
||||
|
||||
if (!hst.head_buckets) {
|
||||
return;
|
||||
}
|
||||
g_string_append_printf(buf, "TB hash buckets %zu/%zu "
|
||||
"(%0.2f%% head buckets used)\n",
|
||||
hst.used_head_buckets, hst.head_buckets,
|
||||
(double)hst.used_head_buckets /
|
||||
hst.head_buckets * 100);
|
||||
|
||||
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
|
||||
hgram_opts |= QDIST_PR_100X | QDIST_PR_PERCENT;
|
||||
if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
|
||||
hgram_opts |= QDIST_PR_NODECIMAL;
|
||||
}
|
||||
hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
|
||||
g_string_append_printf(buf, "TB hash occupancy %0.2f%% avg chain occ. "
|
||||
"Histogram: %s\n",
|
||||
qdist_avg(&hst.occupancy) * 100, hgram);
|
||||
g_free(hgram);
|
||||
|
||||
hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
|
||||
hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
|
||||
if (hgram_bins > 10) {
|
||||
hgram_bins = 10;
|
||||
} else {
|
||||
hgram_bins = 0;
|
||||
hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
|
||||
}
|
||||
hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
|
||||
g_string_append_printf(buf, "TB hash avg chain %0.3f buckets. "
|
||||
"Histogram: %s\n",
|
||||
qdist_avg(&hst.chain), hgram);
|
||||
g_free(hgram);
|
||||
}
|
||||
|
||||
struct tb_tree_stats {
|
||||
size_t nb_tbs;
|
||||
size_t host_size;
|
||||
size_t target_size;
|
||||
size_t max_target_size;
|
||||
size_t direct_jmp_count;
|
||||
size_t direct_jmp2_count;
|
||||
size_t cross_page;
|
||||
};
|
||||
|
||||
static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
|
||||
{
|
||||
const TranslationBlock *tb = value;
|
||||
struct tb_tree_stats *tst = data;
|
||||
|
||||
tst->nb_tbs++;
|
||||
tst->host_size += tb->tc.size;
|
||||
tst->target_size += tb->size;
|
||||
if (tb->size > tst->max_target_size) {
|
||||
tst->max_target_size = tb->size;
|
||||
}
|
||||
if (tb->page_addr[1] != -1) {
|
||||
tst->cross_page++;
|
||||
}
|
||||
if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
|
||||
tst->direct_jmp_count++;
|
||||
if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
|
||||
tst->direct_jmp2_count++;
|
||||
}
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
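/* Sum the per-vCPU TLB flush counters for the "info jit" report. */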
static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
|
||||
{
|
||||
CPUState *cpu;
|
||||
size_t full = 0, part = 0, elide = 0;
|
||||
|
||||
CPU_FOREACH(cpu) {
|
||||
full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
|
||||
part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
|
||||
elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
|
||||
}
|
||||
*pfull = full;
|
||||
*ppart = part;
|
||||
*pelide = elide;
|
||||
}
|
||||
|
||||
static void tcg_dump_info(GString *buf)
|
||||
{
|
||||
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
|
||||
}
|
||||
|
||||
static void dump_exec_info(GString *buf)
|
||||
{
|
||||
struct tb_tree_stats tst = {};
|
||||
struct qht_stats hst;
|
||||
size_t nb_tbs, flush_full, flush_part, flush_elide;
|
||||
|
||||
tcg_tb_foreach(tb_tree_stats_iter, &tst);
|
||||
nb_tbs = tst.nb_tbs;
|
||||
/* XXX: avoid using doubles ? */
|
||||
g_string_append_printf(buf, "Translation buffer state:\n");
|
||||
/*
|
||||
* Report total code size including the padding and TB structs;
|
||||
* otherwise users might think "-accel tcg,tb-size" is not honoured.
|
||||
* For avg host size we use the precise numbers from tb_tree_stats though.
|
||||
*/
|
||||
g_string_append_printf(buf, "gen code size %zu/%zu\n",
|
||||
tcg_code_size(), tcg_code_capacity());
|
||||
g_string_append_printf(buf, "TB count %zu\n", nb_tbs);
|
||||
g_string_append_printf(buf, "TB avg target size %zu max=%zu bytes\n",
|
||||
nb_tbs ? tst.target_size / nb_tbs : 0,
|
||||
tst.max_target_size);
|
||||
g_string_append_printf(buf, "TB avg host size %zu bytes "
|
||||
"(expansion ratio: %0.1f)\n",
|
||||
nb_tbs ? tst.host_size / nb_tbs : 0,
|
||||
tst.target_size ?
|
||||
(double)tst.host_size / tst.target_size : 0);
|
||||
g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
|
||||
tst.cross_page,
|
||||
nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
|
||||
g_string_append_printf(buf, "direct jump count %zu (%zu%%) "
|
||||
"(2 jumps=%zu %zu%%)\n",
|
||||
tst.direct_jmp_count,
|
||||
nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
|
||||
tst.direct_jmp2_count,
|
||||
nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
|
||||
|
||||
qht_statistics_init(&tb_ctx.htable, &hst);
|
||||
print_qht_statistics(hst, buf);
|
||||
qht_statistics_destroy(&hst);
|
||||
|
||||
g_string_append_printf(buf, "\nStatistics:\n");
|
||||
g_string_append_printf(buf, "TB flush count %u\n",
|
||||
qatomic_read(&tb_ctx.tb_flush_count));
|
||||
g_string_append_printf(buf, "TB invalidate count %u\n",
|
||||
qatomic_read(&tb_ctx.tb_phys_invalidate_count));
|
||||
|
||||
tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
|
||||
g_string_append_printf(buf, "TLB full flushes %zu\n", flush_full);
|
||||
g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
|
||||
g_string_append_printf(buf, "TLB elided flushes %zu\n", flush_elide);
|
||||
tcg_dump_info(buf);
|
||||
}
|
||||
|
||||
HumanReadableText *qmp_x_query_jit(Error **errp)
|
||||
{
|
||||
g_autoptr(GString) buf = g_string_new("");
|
||||
|
||||
if (!tcg_enabled()) {
|
||||
error_setg(errp, "JIT information is only available with accel=tcg");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
dump_accel_info(buf);
|
||||
dump_exec_info(buf);
|
||||
dump_drift_info(buf);
|
||||
|
||||
return human_readable_text_from_str(buf);
|
||||
}
|
||||
|
||||
static void tcg_dump_op_count(GString *buf)
|
||||
{
|
||||
g_string_append_printf(buf, "[TCG profiler not compiled]\n");
|
||||
}
|
||||
|
||||
HumanReadableText *qmp_x_query_opcount(Error **errp)
|
||||
{
|
||||
g_autoptr(GString) buf = g_string_new("");
|
||||
|
||||
if (!tcg_enabled()) {
|
||||
error_setg(errp,
|
||||
"Opcode count information is only available with accel=tcg");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
tcg_dump_op_count(buf);
|
||||
|
||||
return human_readable_text_from_str(buf);
|
||||
}
|
||||
|
||||
static void hmp_tcg_register(void)
|
||||
{
|
||||
monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);
|
||||
monitor_register_hmp_info_hrt("opcount", qmp_x_query_opcount);
|
||||
}
|
||||
|
||||
type_init(hmp_tcg_register);
|
474
accel/tcg/plugin-gen.c
Normal file
@@ -0,0 +1,474 @@
/*
 * plugin-gen.c - TCG-related bits of plugin infrastructure
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * We support instrumentation at an instruction granularity. That is,
 * if a plugin wants to instrument the memory accesses performed by a
 * particular instruction, it can just do that instead of instrumenting
 * all memory accesses. Thus, in order to do this we first have to
 * translate a TB, so that plugins can decide what/where to instrument.
 *
 * Injecting the desired instrumentation could be done with a second
 * translation pass that combined the instrumentation requests, but that
 * would be ugly and inefficient since we would decode the guest code twice.
 * Instead, during TB translation we add "plugin_cb" marker opcodes
 * for all possible instrumentation events, and then once we collect the
 * instrumentation requests from plugins, we generate code for those markers
 * or remove them if they have no requests.
 */
#include "qemu/osdep.h"
#include "qemu/plugin.h"
#include "qemu/log.h"
#include "cpu.h"
#include "tcg/tcg.h"
#include "tcg/tcg-temp-internal.h"
#include "tcg/tcg-op.h"
#include "exec/exec-all.h"
#include "exec/plugin-gen.h"
#include "exec/translator.h"

enum plugin_gen_from {
    PLUGIN_GEN_FROM_TB,
    PLUGIN_GEN_FROM_INSN,
    PLUGIN_GEN_AFTER_INSN,
    PLUGIN_GEN_AFTER_TB,
};

/* called before finishing a TB with exit_tb, goto_tb or goto_ptr */
void plugin_gen_disable_mem_helpers(void)
{
    if (tcg_ctx->plugin_insn) {
        tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_TB);
    }
}

static void gen_enable_mem_helper(struct qemu_plugin_tb *ptb,
                                  struct qemu_plugin_insn *insn)
{
    GArray *arr;
    size_t len;

    /*
     * Tracking memory accesses performed from helpers requires extra work.
     * If an instruction is emulated with helpers, we do two things:
     * (1) copy the CB descriptors, and keep track of it so that they can be
     * freed later on, and (2) point CPUState.neg.plugin_mem_cbs to the
     * descriptors, so that we can read them at run-time
     * (i.e. when the helper executes).
     * This run-time access is performed from qemu_plugin_vcpu_mem_cb.
     *
     * Note that plugin_gen_disable_mem_helpers undoes (2). Since it
     * is possible that the code we generate after the instruction is
     * dead, we also add checks before generating tb_exit etc.
     */
    if (!insn->calls_helpers) {
        return;
    }

    if (!insn->mem_cbs || !insn->mem_cbs->len) {
        insn->mem_helper = false;
        return;
    }
    insn->mem_helper = true;
    ptb->mem_helper = true;

    /*
     * TODO: It seems like we should be able to use ref/unref
     * to avoid needing to actually copy this array.
     * Alternately, perhaps we could allocate new memory adjacent
     * to the TranslationBlock itself, so that we do not have to
     * actively manage the lifetime after this.
     */
    len = insn->mem_cbs->len;
    arr = g_array_sized_new(false, false,
                            sizeof(struct qemu_plugin_dyn_cb), len);
    g_array_append_vals(arr, insn->mem_cbs->data, len);
    qemu_plugin_add_dyn_cb_arr(arr);

    tcg_gen_st_ptr(tcg_constant_ptr((intptr_t)arr), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

static void gen_disable_mem_helper(void)
{
    tcg_gen_st_ptr(tcg_constant_ptr(0), tcg_env,
                   offsetof(CPUState, neg.plugin_mem_cbs) -
                   offsetof(ArchCPU, env));
}

static TCGv_i32 gen_cpu_index(void)
{
    TCGv_i32 cpu_index = tcg_temp_ebb_new_i32();
    tcg_gen_ld_i32(cpu_index, tcg_env,
                   -offsetof(ArchCPU, env) + offsetof(CPUState, cpu_index));
    return cpu_index;
}

static void gen_udata_cb(struct qemu_plugin_regular_cb *cb)
{
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

static TCGv_ptr gen_plugin_u64_ptr(qemu_plugin_u64 entry)
{
    TCGv_ptr ptr = tcg_temp_ebb_new_ptr();

    GArray *arr = entry.score->data;
    char *base_ptr = arr->data + entry.offset;
    size_t entry_size = g_array_get_element_size(arr);

    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_muli_i32(cpu_index, cpu_index, entry_size);
    tcg_gen_ext_i32_ptr(ptr, cpu_index);
    tcg_temp_free_i32(cpu_index);
    tcg_gen_addi_ptr(ptr, ptr, (intptr_t) base_ptr);

    return ptr;
}

static TCGCond plugin_cond_to_tcgcond(enum qemu_plugin_cond cond)
{
    switch (cond) {
    case QEMU_PLUGIN_COND_EQ:
        return TCG_COND_EQ;
    case QEMU_PLUGIN_COND_NE:
        return TCG_COND_NE;
    case QEMU_PLUGIN_COND_LT:
        return TCG_COND_LTU;
    case QEMU_PLUGIN_COND_LE:
        return TCG_COND_LEU;
    case QEMU_PLUGIN_COND_GT:
        return TCG_COND_GTU;
    case QEMU_PLUGIN_COND_GE:
        return TCG_COND_GEU;
    default:
        /* ALWAYS and NEVER conditions should never reach */
        g_assert_not_reached();
    }
}

static void gen_udata_cond_cb(struct qemu_plugin_conditional_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();
    TCGLabel *after_cb = gen_new_label();

    /* Condition should be negated, as calling the cb is the "else" path */
    TCGCond cond = tcg_invert_cond(plugin_cond_to_tcgcond(cb->cond));

    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_brcondi_i64(cond, val, cb->imm, after_cb);
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call2(cb->f.vcpu_udata, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
    gen_set_label(after_cb);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

static void gen_inline_add_u64_cb(struct qemu_plugin_inline_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_temp_ebb_new_i64();

    tcg_gen_ld_i64(val, ptr, 0);
    tcg_gen_addi_i64(val, val, cb->imm);
    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_i64(val);
    tcg_temp_free_ptr(ptr);
}

static void gen_inline_store_u64_cb(struct qemu_plugin_inline_cb *cb)
{
    TCGv_ptr ptr = gen_plugin_u64_ptr(cb->entry);
    TCGv_i64 val = tcg_constant_i64(cb->imm);

    tcg_gen_st_i64(val, ptr, 0);

    tcg_temp_free_ptr(ptr);
}

static void gen_mem_cb(struct qemu_plugin_regular_cb *cb,
                       qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    TCGv_i32 cpu_index = gen_cpu_index();
    tcg_gen_call4(cb->f.vcpu_mem, cb->info, NULL,
                  tcgv_i32_temp(cpu_index),
                  tcgv_i32_temp(tcg_constant_i32(meminfo)),
                  tcgv_i64_temp(addr),
                  tcgv_ptr_temp(tcg_constant_ptr(cb->userp)));
    tcg_temp_free_i32(cpu_index);
}

static void inject_cb(struct qemu_plugin_dyn_cb *cb)

{
    switch (cb->type) {
    case PLUGIN_CB_REGULAR:
        gen_udata_cb(&cb->regular);
        break;
    case PLUGIN_CB_COND:
        gen_udata_cond_cb(&cb->cond);
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
        gen_inline_add_u64_cb(&cb->inline_insn);
        break;
    case PLUGIN_CB_INLINE_STORE_U64:
        gen_inline_store_u64_cb(&cb->inline_insn);
        break;
    default:
        g_assert_not_reached();
    }
}

static void inject_mem_cb(struct qemu_plugin_dyn_cb *cb,
                          enum qemu_plugin_mem_rw rw,
                          qemu_plugin_meminfo_t meminfo, TCGv_i64 addr)
{
    switch (cb->type) {
    case PLUGIN_CB_MEM_REGULAR:
        if (rw & cb->regular.rw) {
            gen_mem_cb(&cb->regular, meminfo, addr);
        }
        break;
    case PLUGIN_CB_INLINE_ADD_U64:
    case PLUGIN_CB_INLINE_STORE_U64:
        if (rw & cb->inline_insn.rw) {
            inject_cb(cb);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

static void plugin_gen_inject(struct qemu_plugin_tb *plugin_tb)
{
    TCGOp *op, *next;
    int insn_idx = -1;

    if (unlikely(qemu_loglevel_mask(LOG_TB_OP_PLUGIN)
                 && qemu_log_in_addr_range(tcg_ctx->plugin_db->pc_first))) {
        FILE *logfile = qemu_log_trylock();
        if (logfile) {
            fprintf(logfile, "OP before plugin injection:\n");
            tcg_dump_ops(tcg_ctx, logfile, false);
            fprintf(logfile, "\n");
            qemu_log_unlock(logfile);
        }
    }

    /*
     * While injecting code, we cannot afford to reuse any ebb temps
     * that might be live within the existing opcode stream.
     * The simplest solution is to release them all and create new.
     */
    tcg_temp_ebb_reset_freed(tcg_ctx);

    QTAILQ_FOREACH_SAFE(op, &tcg_ctx->ops, link, next) {
        switch (op->opc) {
        case INDEX_op_insn_start:
            insn_idx++;
            break;

        case INDEX_op_plugin_cb:
        {
            enum plugin_gen_from from = op->args[0];
            struct qemu_plugin_insn *insn = NULL;
            const GArray *cbs;
            int i, n;

            if (insn_idx >= 0) {
                insn = g_ptr_array_index(plugin_tb->insns, insn_idx);
            }

            tcg_ctx->emit_before_op = op;

            switch (from) {
            case PLUGIN_GEN_AFTER_TB:
                if (plugin_tb->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_AFTER_INSN:
                assert(insn != NULL);
                if (insn->mem_helper) {
                    gen_disable_mem_helper();
                }
                break;

            case PLUGIN_GEN_FROM_TB:
                assert(insn == NULL);

                cbs = plugin_tb->cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            case PLUGIN_GEN_FROM_INSN:
                assert(insn != NULL);

                gen_enable_mem_helper(plugin_tb, insn);

                cbs = insn->insn_cbs;
                for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                    inject_cb(
                        &g_array_index(cbs, struct qemu_plugin_dyn_cb, i));
                }
                break;

            default:
                g_assert_not_reached();
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        case INDEX_op_plugin_mem_cb:
        {
            TCGv_i64 addr = temp_tcgv_i64(arg_temp(op->args[0]));
            qemu_plugin_meminfo_t meminfo = op->args[1];
            enum qemu_plugin_mem_rw rw =
                (qemu_plugin_mem_is_store(meminfo)
                 ? QEMU_PLUGIN_MEM_W : QEMU_PLUGIN_MEM_R);
            struct qemu_plugin_insn *insn;
            const GArray *cbs;
            int i, n;

            assert(insn_idx >= 0);
            insn = g_ptr_array_index(plugin_tb->insns, insn_idx);

            tcg_ctx->emit_before_op = op;

            cbs = insn->mem_cbs;
            for (i = 0, n = (cbs ? cbs->len : 0); i < n; i++) {
                inject_mem_cb(&g_array_index(cbs, struct qemu_plugin_dyn_cb, i),
                              rw, meminfo, addr);
            }

            tcg_ctx->emit_before_op = NULL;
            tcg_op_remove(tcg_ctx, op);
            break;
        }

        default:
            /* plugins don't care about any other ops */
            break;
        }
    }
}

bool plugin_gen_tb_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb;

    if (!test_bit(QEMU_PLUGIN_EV_VCPU_TB_TRANS,
                  cpu->plugin_state->event_mask)) {
        return false;
    }

    tcg_ctx->plugin_db = db;
    tcg_ctx->plugin_insn = NULL;
    ptb = tcg_ctx->plugin_tb;

    if (ptb) {
        /* Reset callbacks */
        if (ptb->cbs) {
            g_array_set_size(ptb->cbs, 0);
        }
        ptb->n = 0;
        ptb->mem_helper = false;
    } else {
        ptb = g_new0(struct qemu_plugin_tb, 1);
        tcg_ctx->plugin_tb = ptb;
        ptb->insns = g_ptr_array_new();
    }

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_TB);
    return true;
}

void plugin_gen_insn_start(CPUState *cpu, const DisasContextBase *db)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;
    struct qemu_plugin_insn *insn;
    size_t n = db->num_insns;
    vaddr pc;

    assert(n >= 1);
    ptb->n = n;
    if (n <= ptb->insns->len) {
        insn = g_ptr_array_index(ptb->insns, n - 1);
    } else {
        assert(n - 1 == ptb->insns->len);
        insn = g_new0(struct qemu_plugin_insn, 1);
        g_ptr_array_add(ptb->insns, insn);
    }

    tcg_ctx->plugin_insn = insn;
    insn->calls_helpers = false;
    insn->mem_helper = false;
    if (insn->insn_cbs) {
        g_array_set_size(insn->insn_cbs, 0);
    }
    if (insn->mem_cbs) {
        g_array_set_size(insn->mem_cbs, 0);
    }

    pc = db->pc_next;
    insn->vaddr = pc;

    tcg_gen_plugin_cb(PLUGIN_GEN_FROM_INSN);
}

void plugin_gen_insn_end(void)
{
    const DisasContextBase *db = tcg_ctx->plugin_db;
    struct qemu_plugin_insn *pinsn = tcg_ctx->plugin_insn;

    pinsn->len = db->fake_insn ? db->record_len : db->pc_next - pinsn->vaddr;

    tcg_gen_plugin_cb(PLUGIN_GEN_AFTER_INSN);
}

/*
 * There are cases where we never get to finalise a translation - for
 * example a page fault during translation. As a result we shouldn't
 * do any clean-up here and make sure things are reset in
 * plugin_gen_tb_start.
 */
void plugin_gen_tb_end(CPUState *cpu, size_t num_insns)
{
    struct qemu_plugin_tb *ptb = tcg_ctx->plugin_tb;

    /* translator may have removed instructions, update final count */
    g_assert(num_insns <= ptb->n);
    ptb->n = num_insns;

    /* collect instrumentation requests */
    qemu_plugin_tb_trans_cb(cpu, ptb);

    /* inject the instrumentation at the appropriate places */
    plugin_gen_inject(ptb);

    /* reset plugin translation state (plugin_tb is reused between blocks) */
    tcg_ctx->plugin_db = NULL;
    tcg_ctx->plugin_insn = NULL;
}
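plugin-gen.c is the translation-time half of the TCG plugin interface: the insn_cbs and mem_cbs arrays that plugin_gen_inject() expands into TCG ops are filled in by loadable plugins during the tb_trans event. A minimal sketch of such a plugin, written against the public qemu-plugin.h API as found in contemporary QEMU (the callback body is illustrative only), looks like this:

/*
 * Illustrative sketch, not part of this commit: a plugin that installs a
 * per-instruction execution callback, which plugin_gen_inject() above
 * turns into generated code at each PLUGIN_GEN_FROM_INSN marker.
 */
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static void insn_exec(unsigned int vcpu_index, void *userdata)
{
    /* runs once per executed instruction */
}

static void tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n = qemu_plugin_tb_n_insns(tb);

    for (size_t i = 0; i < n; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        qemu_plugin_register_vcpu_insn_exec_cb(insn, insn_exec,
                                               QEMU_PLUGIN_CB_NO_REGS, NULL);
    }
}

QEMU_PLUGIN_EXPORT
int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                        int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, tb_trans);
    return 0;
}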
43
accel/tcg/tb-context.h
Normal file
@@ -0,0 +1,43 @@
/*
 * Internal structs that QEMU exports to TCG
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef QEMU_TB_CONTEXT_H
#define QEMU_TB_CONTEXT_H

#include "qemu/thread.h"
#include "qemu/qht.h"

#define CODE_GEN_HTABLE_BITS     15
#define CODE_GEN_HTABLE_SIZE     (1 << CODE_GEN_HTABLE_BITS)

typedef struct TBContext TBContext;

struct TBContext {

    struct qht htable;
    struct qht inv_htable;

    /* statistics */
    unsigned tb_flush_count;
    unsigned tb_phys_invalidate_count;
};

extern TBContext tb_ctx;

#endif
81
accel/tcg/tb-hash.h
Normal file
@@ -0,0 +1,81 @@
/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_TB_HASH_H
#define EXEC_TB_HASH_H

#include "exec/cpu-defs.h"
#include "exec/cpu_ldst.h"
#include "exec/exec-all.h"
#include "qemu/xxhash.h"
#include "qemu/fast-hash.h"
#include "tb-jmp-cache.h"

#ifdef CONFIG_SOFTMMU

/* Only the bottom TB_JMP_PAGE_BITS of the jump cache hash bits vary for
   addresses on the same page.  The top bits are the same.  This allows
   TLB invalidation to quickly clear a subset of the hash table.  */
#define TB_JMP_PAGE_BITS (TB_JMP_CACHE_BITS / 2)
#define TB_JMP_PAGE_SIZE (1 << TB_JMP_PAGE_BITS)
#define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
#define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
{
    vaddr tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
}

static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
{
    vaddr tmp;
    tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
    return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
}

#else

/* In user-mode we can get better hashing because we do not have a TLB */
static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
{
    return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}

#endif /* CONFIG_SOFTMMU */

static inline
uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
                      uint32_t flags, uint64_t flags2, uint32_t cf_mask)
{
    return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);
}

static inline
uint64_t tb_code_hash_func(CPUArchState *env, target_ulong pc, size_t size)
{
    assert(size < 4096);
    uint8_t code[size];
    cpu_ld_code(env, pc, size, code); /* Speed, error handling */
    return fast_hash(code, size);
}

#endif
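The split between TB_JMP_PAGE_MASK and TB_JMP_ADDR_MASK in the softmmu variant above is what lets TLB invalidation clear just one page's slice of the jump cache. A small sketch (not part of this commit; it assumes the softmmu definitions above are in scope and uses an arbitrary 4-byte stride) that checks the property the comment states: every pc inside one guest page lands in the same TB_JMP_PAGE_MASK group.

/* Illustrative check, not part of this commit. */
static inline void example_assert_same_page_group(vaddr pc)
{
    unsigned int group = tb_jmp_cache_hash_page(pc);

    for (vaddr off = 0; off < TARGET_PAGE_SIZE; off += 4) {
        vaddr probe = (pc & TARGET_PAGE_MASK) | off;

        /* the page-group part of the hash never changes within a page */
        assert(tb_jmp_cache_hash_page(probe) == group);
        assert((tb_jmp_cache_hash_func(probe) & TB_JMP_PAGE_MASK) == group);
    }
}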
33
accel/tcg/tb-jmp-cache.h
Normal file
@@ -0,0 +1,33 @@
/*
 * The per-CPU TranslationBlock jump cache.
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#ifndef ACCEL_TCG_TB_JMP_CACHE_H
#define ACCEL_TCG_TB_JMP_CACHE_H

#include "qemu/rcu.h"
#include "exec/cpu-common.h"

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

/*
 * Invalidated in parallel; all accesses to 'tb' must be atomic.
 * A valid entry is read/written by a single CPU, therefore there is
 * no need for qatomic_rcu_read() and pc is always consistent with a
 * non-NULL value of 'tb'.  Strictly speaking pc is only needed for
 * CF_PCREL, but it's used always for simplicity.
 */
typedef struct CPUJumpCache {
    struct rcu_head rcu;
    struct {
        TranslationBlock *tb;
        vaddr pc;
    } array[TB_JMP_CACHE_SIZE];
} CPUJumpCache;

#endif /* ACCEL_TCG_TB_JMP_CACHE_H */
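CPUJumpCache is consulted on the hot path of cpu_exec() before falling back to the qht declared in tb-context.h. A simplified sketch of that lookup follows, shaped after tb_lookup() in cpu-exec.c; it is not part of this commit, the cs_base/flags/cflags checks of the real function are omitted, and the field name cpu->tb_jmp_cache is assumed from current QEMU.

/* Illustrative sketch, not part of this commit. */
static inline TranslationBlock *example_jmp_cache_lookup(CPUState *cpu, vaddr pc)
{
    CPUJumpCache *jc = cpu->tb_jmp_cache;
    unsigned int hash = tb_jmp_cache_hash_func(pc);

    /* 'tb' may be invalidated by another thread, so read it atomically */
    TranslationBlock *tb = qatomic_read(&jc->array[hash].tb);

    if (tb == NULL || jc->array[hash].pc != pc) {
        return NULL;            /* miss: fall back to the qht lookup */
    }
    return tb;                  /* hit (flags/cflags checks omitted here) */
}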
1247
accel/tcg/tb-maint.c
Normal file
File diff suppressed because it is too large
160
accel/tcg/tcg-accel-ops-icount.c
Normal file
@@ -0,0 +1,160 @@
/*
 * QEMU TCG Single Threaded vCPUs implementation using instruction counting
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2014 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "sysemu/replay.h"
#include "sysemu/cpu-timers.h"
#include "qemu/main-loop.h"
#include "qemu/guest-random.h"
#include "exec/exec-all.h"

#include "tcg-accel-ops.h"
#include "tcg-accel-ops-icount.h"
#include "tcg-accel-ops-rr.h"

static int64_t icount_get_limit(void)
{
    int64_t deadline;

    if (replay_mode != REPLAY_MODE_PLAY) {
        /*
         * Include all the timers, because they may need attention.
         * Overly long CPU execution may create unnecessary delay in the UI.
         */
        deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                              QEMU_TIMER_ATTR_ALL);
        /* Check realtime timers, because they help with input processing */
        deadline = qemu_soonest_timeout(deadline,
                qemu_clock_deadline_ns_all(QEMU_CLOCK_REALTIME,
                                           QEMU_TIMER_ATTR_ALL));

        /*
         * Maintain prior (possibly buggy) behaviour where if no deadline
         * was set (as there is no QEMU_CLOCK_VIRTUAL timer) or it is more than
         * INT32_MAX nanoseconds ahead, we still use INT32_MAX
         * nanoseconds.
         */
        if ((deadline < 0) || (deadline > INT32_MAX)) {
            deadline = INT32_MAX;
        }

        return icount_round(deadline);
    } else {
        return replay_get_instructions();
    }
}

static void icount_notify_aio_contexts(void)
{
    /* Wake up other AioContexts. */
    qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
    qemu_clock_run_timers(QEMU_CLOCK_VIRTUAL);
}

void icount_handle_deadline(void)
{
    assert(qemu_in_vcpu_thread());
    int64_t deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                                  QEMU_TIMER_ATTR_ALL);

    /*
     * Instructions, interrupts, and exceptions are processed in cpu-exec.
     * Don't interrupt the cpu thread when these events are waiting
     * (i.e., there is no checkpoint).
     */
    if (deadline == 0) {
        icount_notify_aio_contexts();
    }
}

/* Distribute the budget evenly across all CPUs */
int64_t icount_percpu_budget(int cpu_count)
{
    int64_t limit = icount_get_limit();
    int64_t timeslice = limit / cpu_count;

    if (timeslice == 0) {
        timeslice = limit;
    }

    return timeslice;
}

void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
{
    int insns_left;

    /*
     * These should always be cleared by icount_process_data after
     * each vCPU execution. However u16.high can be raised
     * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt.
     */
    g_assert(cpu->neg.icount_decr.u16.low == 0);
    g_assert(cpu->icount_extra == 0);

    replay_mutex_lock();

    cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu->neg.icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    if (cpu->icount_budget == 0) {
        /*
         * We're called without the BQL, so must take it while
         * we're calling timer handlers.
         */
        bql_lock();
        icount_notify_aio_contexts();
        bql_unlock();
    }
}

void icount_process_data(CPUState *cpu)
{
    /* Account for executed instructions */
    icount_update(cpu);

    /* Reset the counters */
    cpu->neg.icount_decr.u16.low = 0;
    cpu->icount_extra = 0;
    cpu->icount_budget = 0;

    replay_account_executed_instructions();

    replay_mutex_unlock();
}

void icount_handle_interrupt(CPUState *cpu, int mask)
{
    int old_mask = cpu->interrupt_request;

    tcg_handle_interrupt(cpu, mask);
    if (qemu_cpu_is_self(cpu) &&
        !cpu->neg.can_do_io
        && (mask & ~old_mask) != 0) {
        cpu_abort(cpu, "Raised interrupt while not in I/O function");
    }
}
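These helpers are consumed by the single-threaded round-robin scheduler in tcg-accel-ops-rr.c. The sketch below, which is not part of this commit, shows the intended sequencing of one scheduling round; the surrounding loop and its names are illustrative, and the vCPU execution step stands in for what the real loop does via tcg_cpu_exec().

/* Illustrative sketch, not part of this commit. */
static void example_icount_round(CPUState **cpus, int cpu_count)
{
    /* run virtual timers that are already due before executing guest code */
    icount_handle_deadline();

    /* split the instruction budget for this round across all vCPUs */
    int64_t budget = icount_percpu_budget(cpu_count);

    for (int i = 0; i < cpu_count; i++) {
        CPUState *cpu = cpus[i];

        /* arms icount_decr/icount_extra and takes the replay mutex */
        icount_prepare_for_run(cpu, budget);

        /* ... the vCPU executes here, e.g. via tcg_cpu_exec(cpu) ... */

        /* accounts executed instructions and drops the replay mutex */
        icount_process_data(cpu);
    }
}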
20
accel/tcg/tcg-accel-ops-icount.h
Normal file
@@ -0,0 +1,20 @@
/*
 * QEMU TCG Single Threaded vCPUs implementation using instruction counting
 *
 * Copyright 2020 SUSE LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef TCG_ACCEL_OPS_ICOUNT_H
#define TCG_ACCEL_OPS_ICOUNT_H

void icount_handle_deadline(void);
void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget);
int64_t icount_percpu_budget(int cpu_count);
void icount_process_data(CPUState *cpu);

void icount_handle_interrupt(CPUState *cpu, int mask);

#endif /* TCG_ACCEL_OPS_ICOUNT_H */
Some files were not shown because too many files have changed in this diff.