mirror of
https://github.com/FEX-Emu/linux.git
synced 2024-12-17 14:30:00 +00:00
arm64 updates for 4.8:

 - Kexec support for arm64
 - Kprobes support
 - Expose MIDR_EL1 and REVIDR_EL1 CPU identification registers to sysfs
 - Trapping of user space cache maintenance operations and emulation in
   the kernel (CPU errata workaround)
 - Clean-up of the early page tables creation (kernel linear mapping,
   EFI run-time maps) to avoid splitting larger blocks (e.g. pmds) into
   smaller ones (e.g. ptes)
 - VDSO support for CLOCK_MONOTONIC_RAW in clock_gettime()
 - ARCH_HAS_KCOV enabled for arm64
 - Optimise IP checksum helpers
 - SWIOTLB optimisation to only allocate/initialise the buffer if the
   available RAM is beyond the 32-bit mask
 - Properly handle the "nosmp" command line argument
 - Fix for the initialisation of the CPU debug state during early boot
 - vdso-offsets.h build dependency workaround
 - Build fix when RANDOMIZE_BASE is enabled with MODULES off

-----BEGIN PGP SIGNATURE-----
Version: GnuPG v1

iQIcBAABAgAGBQJXmF/UAAoJEGvWsS0AyF7x+jwP/2fErtX6FTXmdG0c3HBkTpuy
gEuzN2ByWbP6Io+unLC6NvbQQb1q6c73PTqjsoeMHUx2o8YK3jgWEBcC+7AuepoZ
YGl3r08e75a/fGrgNwEQQC1lNlgjpog4kzVDh5ji6oRXNq+OkjJGUtRPe3gBoqxv
NAjviciID/MegQaq4SaMd26AmnjuUGKogo5vlIaXK0SemX9it+ytW7eLAXuVY+gW
EvO3Nxk0Y5oZKJF8qRw6oLSmw1bwn2dD26OgfXfCiI30QBookRyWIoXRedUOZmJq
D0+Tipd7muO4PbjlxS8aY/wd/alfnM5+TJ6HpGDo+Y1BDauXfiXMf3ktDFE5QvJB
KgtICmC0stWwbDT35dHvz8sETsrCMA2Q/IMrnyxG+nj9BxVQU7rbNrxfCXesJy7Q
4EsQbcTyJwu+ECildBezfoei99XbFZyWk2vKSkTCFKzgwXpftGFaffgZ3DIzBAHH
IjecDqIFENC8ymrjyAgrGjeFG+2WB/DBgoSS3Baiz6xwQqC4wFMnI3jPECtJjb/U
6e13f+onXu5lF1YFKAiRjGmqa/G1ZMr+uKZFsembuGqsZdAPkzzUHyAE9g4JVO8p
t3gc3/M3T7oLSHuw4xi1/Ow5VGb2UvbslFrp7OpuFZ7CJAvhKlHL5rPe385utsFE
7++5WHXHAegeJCDNAKY2
=iJOY
-----END PGP SIGNATURE-----

Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 updates from Catalin Marinas:

 - Kexec support for arm64
 - Kprobes support
 - Expose MIDR_EL1 and REVIDR_EL1 CPU identification registers to sysfs
 - Trapping of user space cache maintenance operations and emulation in
   the kernel (CPU errata workaround)
 - Clean-up of the early page tables creation (kernel linear mapping,
   EFI run-time maps) to avoid splitting larger blocks (e.g. pmds) into
   smaller ones (e.g. ptes)
 - VDSO support for CLOCK_MONOTONIC_RAW in clock_gettime()
 - ARCH_HAS_KCOV enabled for arm64
 - Optimise IP checksum helpers
 - SWIOTLB optimisation to only allocate/initialise the buffer if the
   available RAM is beyond the 32-bit mask
 - Properly handle the "nosmp" command line argument
 - Fix for the initialisation of the CPU debug state during early boot
 - vdso-offsets.h build dependency workaround
 - Build fix when RANDOMIZE_BASE is enabled with MODULES off

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (64 commits)
  arm64: arm: Fix-up the removal of the arm64 regs_query_register_name() prototype
  arm64: Only select ARM64_MODULE_PLTS if MODULES=y
  arm64: mm: run pgtable_page_ctor() on non-swapper translation table pages
  arm64: mm: make create_mapping_late() non-allocating
  arm64: Honor nosmp kernel command line option
  arm64: Fix incorrect per-cpu usage for boot CPU
  arm64: kprobes: Add KASAN instrumentation around stack accesses
  arm64: kprobes: Cleanup jprobe_return
  arm64: kprobes: Fix overflow when saving stack
  arm64: kprobes: WARN if attempting to step with PSTATE.D=1
  arm64: debug: remove unused local_dbg_{enable, disable} macros
  arm64: debug: remove redundant spsr manipulation
  arm64: debug: unmask PSTATE.D earlier
  arm64: localise Image objcopy flags
  arm64: ptrace: remove extra define for CPSR's E bit
  kprobes: Add arm64 case in kprobe example module
  arm64: Add kernel return probes support (kretprobes)
  arm64: Add trampoline code for kretprobes
  arm64: kprobes instruction simulation support
  arm64: Treat all entry code as non-kprobe-able
  ...
This commit is contained in:
commit e831101a73
@@ -340,3 +340,13 @@ Description:	POWERNV CPUFreq driver's frequency throttle stats directory and
		'policyX/throttle_stats' directory and all the attributes are same as
		the /sys/devices/system/cpu/cpuX/cpufreq/throttle_stats directory and
		attributes which give the frequency throttle information of the chip.

What:		/sys/devices/system/cpu/cpuX/regs/
		/sys/devices/system/cpu/cpuX/regs/identification/
		/sys/devices/system/cpu/cpuX/regs/identification/midr_el1
		/sys/devices/system/cpu/cpuX/regs/identification/revidr_el1
Date:		June 2016
Contact:	Linux ARM Kernel Mailing list <linux-arm-kernel@lists.infradead.org>
Description:	AArch64 CPU registers
		'identification' directory exposes the CPU ID registers for
		identifying model and revision of the CPU.
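As a quick illustration (not part of this commit), a user-space program could consume the new ABI like the minimal sketch below; the cpu0 path follows the documented cpuX pattern and the field decoding follows the MIDR_EL1 layout:

	/* Minimal sketch (not from the commit): read MIDR_EL1 via the new sysfs files. */
	#include <stdio.h>

	int main(void)
	{
		FILE *f = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1", "r");
		unsigned long long midr;

		if (!f || fscanf(f, "%llx", &midr) != 1) {
			perror("midr_el1");
			return 1;
		}
		fclose(f);
		/* MIDR_EL1: implementer [31:24], part number [15:4], revision [3:0] */
		printf("implementer=0x%02llx part=0x%03llx rev=%llu\n",
		       (midr >> 24) & 0xff, (midr >> 4) & 0xfff, midr & 0xf);
		return 0;
	}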
@@ -13,14 +13,14 @@ For ACPI on arm64, tables also fall into the following categories:

       -- Required: DSDT, FADT, GTDT, MADT, MCFG, RSDP, SPCR, XSDT

       -- Recommended: BERT, EINJ, ERST, HEST, SSDT
       -- Recommended: BERT, EINJ, ERST, HEST, PCCT, SSDT

       -- Optional: BGRT, CPEP, CSRT, DRTM, ECDT, FACS, FPDT, MCHI, MPST,
          MSCT, RASF, SBST, SLIT, SPMI, SRAT, TCPA, TPM2, UEFI

       -- Not supported: BOOT, DBG2, DBGP, DMAR, ETDT, HPET, IBFT, IVRS,
          LPIT, MSDM, RSDT, SLIC, WAET, WDAT, WDRT, WPBT
       -- Optional: BGRT, CPEP, CSRT, DBG2, DRTM, ECDT, FACS, FPDT, IORT,
          MCHI, MPST, MSCT, NFIT, PMTT, RASF, SBST, SLIT, SPMI, SRAT, STAO,
          TCPA, TPM2, UEFI, XENV

       -- Not supported: BOOT, DBGP, DMAR, ETDT, HPET, IBFT, IVRS, LPIT,
          MSDM, OEMx, PSDT, RSDT, SLIC, WAET, WDAT, WDRT, WPBT

Table  Usage for ARMv8 Linux
-----  ----------------------------------------------------------------
@@ -50,7 +50,8 @@ CSRT  Signature Reserved (signature == "CSRT")

DBG2  Signature Reserved (signature == "DBG2")
       == DeBuG port table 2 ==
       Microsoft only table, will not be supported.
       License has changed and should be usable.  Optional if used instead
       of earlycon=<device> on the command line.

DBGP  Signature Reserved (signature == "DBGP")
       == DeBuG Port table ==
@@ -133,10 +134,11 @@ GTDT  Section 5.2.24 (signature == "GTDT")

HEST  Section 18.3.2 (signature == "HEST")
       == Hardware Error Source Table ==
       Until further error source types are defined, use only types 6 (AER
       Root Port), 7 (AER Endpoint), 8 (AER Bridge), or 9 (Generic Hardware
       Error Source).  Firmware first error handling is possible if and only
       if Trusted Firmware is being used on arm64.
       ARM-specific error sources have been defined; please use those or the
       PCI types such as type 6 (AER Root Port), 7 (AER Endpoint), or 8 (AER
       Bridge), or use type 9 (Generic Hardware Error Source).  Firmware first
       error handling is possible if and only if Trusted Firmware is being
       used on arm64.

       Must be supplied if RAS support is provided by the platform.  It
       is recommended this table be supplied.
@@ -149,20 +151,30 @@ IBFT  Signature Reserved (signature == "IBFT")
       == iSCSI Boot Firmware Table ==
       Microsoft defined table, support TBD.

IORT  Signature Reserved (signature == "IORT")
       == Input Output Remapping Table ==
       arm64 only table, required in order to describe IO topology, SMMUs,
       and GIC ITSs, and how those various components are connected together,
       such as identifying which components are behind which SMMUs/ITSs.
       This table will only be required on certain SBSA platforms (e.g.,
       when using GICv3-ITS and an SMMU); on SBSA Level 0 platforms, it
       remains optional.

IVRS  Signature Reserved (signature == "IVRS")
       == I/O Virtualization Reporting Structure ==
       x86_64 (AMD) only table, will not be supported.

LPIT  Signature Reserved (signature == "LPIT")
       == Low Power Idle Table ==
       x86 only table as of ACPI 5.1; future versions have been adapted for
       use with ARM and will be recommended in order to support ACPI power
       management.
       x86 only table as of ACPI 5.1; starting with ACPI 6.0, processor
       descriptions and power states on ARM platforms should use the DSDT
       and define processor container devices (_HID ACPI0010, Section 8.4,
       and more specifically 8.4.3 and 8.4.4).

MADT  Section 5.2.12 (signature == "APIC")
       == Multiple APIC Description Table ==
       Required for arm64.  Only the GIC interrupt controller structures
       should be used (types 0xA - 0xE).
       should be used (types 0xA - 0xF).

MCFG  Signature Reserved (signature == "MCFG")
       == Memory-mapped ConFiGuration space ==
@@ -176,14 +188,38 @@ MPST  Section 5.2.21 (signature == "MPST")
       == Memory Power State Table ==
       Optional, not currently supported.

MSCT  Section 5.2.19 (signature == "MSCT")
       == Maximum System Characteristic Table ==
       Optional, not currently supported.

MSDM  Signature Reserved (signature == "MSDM")
       == Microsoft Data Management table ==
       Microsoft only table, will not be supported.

MSCT  Section 5.2.19 (signature == "MSCT")
       == Maximum System Characteristic Table ==
NFIT  Section 5.2.25 (signature == "NFIT")
       == NVDIMM Firmware Interface Table ==
       Optional, not currently supported.

OEMx  Signature of "OEMx" only
       == OEM Specific Tables ==
       All tables starting with a signature of "OEM" are reserved for OEM
       use.  Since these are not meant to be of general use but are limited
       to very specific end users, they are not recommended for use and are
       not supported by the kernel for arm64.

PCCT  Section 14.1 (signature == "PCCT")
       == Platform Communications Channel Table ==
       Recommended for use on arm64; use of PCC is recommended when using
       CPPC to control performance and power for platform processors.

PMTT  Section 5.2.21.12 (signature == "PMTT")
       == Platform Memory Topology Table ==
       Optional, not currently supported.

PSDT  Section 5.2.11.3 (signature == "PSDT")
       == Persistent System Description Table ==
       Obsolete table, will not be supported.

RASF  Section 5.2.20 (signature == "RASF")
       == RAS Feature table ==
       Optional, not currently supported.
@@ -195,7 +231,7 @@ RSDP  Section 5.2.5 (signature == "RSD PTR")

RSDT  Section 5.2.7 (signature == "RSDT")
       == Root System Description Table ==
       Since this table can only provide 32-bit addresses, it is deprecated
       on arm64, and will not be used.
       on arm64, and will not be used.  If provided, it will be ignored.

SBST  Section 5.2.14 (signature == "SBST")
       == Smart Battery Subsystem Table ==

@@ -220,7 +256,7 @@ SPMI  Signature Reserved (signature == "SPMI")

SRAT  Section 5.2.16 (signature == "SRAT")
       == System Resource Affinity Table ==
       Optional, but if used, only the GICC Affinity structures are read.
       To support NUMA, this table is required.
       To support arm64 NUMA, this table is required.

SSDT  Section 5.2.11.2 (signature == "SSDT")
       == Secondary System Description Table ==

@@ -235,6 +271,11 @@ SSDT  Section 5.2.11.2 (signature == "SSDT")
       These tables are optional, however.  ACPI tables should contain only
       one DSDT but can contain many SSDTs.

STAO  Signature Reserved (signature == "STAO")
       == _STA Override table ==
       Optional, but only necessary in virtualized environments in order to
       hide devices from guest OSs.

TCPA  Signature Reserved (signature == "TCPA")
       == Trusted Computing Platform Alliance table ==
       Optional, not currently supported, and may need changes to fully

@@ -266,6 +307,10 @@ WPBT  Signature Reserved (signature == "WPBT")
       == Windows Platform Binary Table ==
       Microsoft only table, will not be supported.

XENV  Signature Reserved (signature == "XENV")
       == Xen project table ==
       Optional, used only by Xen at present.

XSDT  Section 5.2.8 (signature == "XSDT")
       == eXtended System Description Table ==
       Required for arm64.
@@ -273,44 +318,46 @@ XSDT  Section 5.2.8 (signature == "XSDT")

ACPI Objects
------------
The expectations on individual ACPI objects are discussed in the list that
follows:
The expectations on individual ACPI objects that are likely to be used are
shown in the list that follows; any object not explicitly mentioned below
should be used as needed for a particular platform or particular subsystem,
such as power management or PCI.

Name   Section      Usage for ARMv8 Linux
----   ------------ -------------------------------------------------
_ADR   6.1.1        Use as needed.
_CCA   6.2.17       This method must be defined for all bus masters
                    on arm64 -- there are no assumptions made about
                    whether such devices are cache coherent or not.
                    The _CCA value is inherited by all descendants of
                    these devices so it does not need to be repeated.
                    Without _CCA on arm64, the kernel does not know what
                    to do about setting up DMA for the device.

_BBN   6.5.5        Use as needed; PCI-specific.
                    NB: this method provides default cache coherency
                    attributes; the presence of an SMMU can be used to
                    modify that, however.  For example, a master could
                    default to non-coherent, but be made coherent with
                    the appropriate SMMU configuration (see Table 17 of
                    the IORT specification, ARM Document DEN 0049B).

_BDN   6.5.3        Optional; not likely to be used on arm64.
_CID   6.1.2        Use as needed, see also _HID.

_CCA   6.2.17       This method should be defined for all bus masters
                    on arm64.  While cache coherency is assumed, making
                    it explicit ensures the kernel will set up DMA as
                    it should.
_CLS   6.1.3        Use as needed, see also _HID.

_CDM   6.2.1        Optional, to be used only for processor devices.

_CID   6.1.2        Use as needed.

_CLS   6.1.3        Use as needed.
_CPC   8.4.7.1      Use as needed, power management specific.  CPPC is
                    recommended on arm64.

_CRS   6.2.2        Required on arm64.

_DCK   6.5.2        Optional; not likely to be used on arm64.
_CSD   8.4.2.2      Use as needed, used only in conjunction with _CST.

_CST   8.4.2.1      Low power idle states (8.4.4) are recommended instead
                    of C-states.

_DDN   6.1.4        This field can be used for a device name.  However,
                    it is meant for DOS device names (e.g., COM1), so be
                    careful of its use across OSes.

_DEP   6.5.8        Use as needed.

_DIS   6.2.3        Optional, for power management use.

_DLM   5.7.5        Optional.

_DMA   6.2.4        Optional.

_DSD   6.2.5        To be used with caution.  If this object is used, try
                    to use it within the constraints already defined by the
                    Device Properties UUID.  Only in rare circumstances
@@ -325,20 +372,10 @@ _DSD   6.2.5        To be used with caution.  If this object is used, try
                    with the UEFI Forum; this may cause some iteration as
                    more than one OS will be registering entries.

_DSM                Do not use this method.  It is not standardized, the
_DSM   9.1.1        Do not use this method.  It is not standardized, the
                    return values are not well documented, and it is
                    currently a frequent source of error.

_DSW   7.2.1        Use as needed; power management specific.

_EDL   6.3.1        Optional.

_EJD   6.3.2        Optional.

_EJx   6.3.3        Optional.

_FIX   6.2.7        x86 specific, not used on arm64.

\_GL   5.7.1        This object is not to be used in hardware reduced
                    mode, and therefore should not be used on arm64.

@@ -349,35 +386,22 @@ _GLK   6.5.7        This object requires a global lock be defined; there

\_GPE  5.3.1        This namespace is for x86 use only.  Do not use it
                    on arm64.

_GSB   6.2.7        Optional.

_HID   6.1.5        Use as needed.  This is the primary object to use in
                    device probing, though _CID and _CLS may also be used.

_HPP   6.2.8        Optional, PCI specific.

_HPX   6.2.9        Optional, PCI specific.

_HRV   6.1.6        Optional, use as needed to clarify device behavior; in
                    some cases, this may be easier to use than _DSD.
_HID   6.1.5        This is the primary object to use in device probing,
                    though _CID and _CLS may also be used.

_INI   6.5.1        Not required, but can be useful in setting up devices
                    when UEFI leaves them in a state that may not be what
                    the driver expects before it starts probing.

_IRC   7.2.15       Use as needed; power management specific.
_LPI   8.4.4.3      Recommended for use with processor definitions (_HID
                    ACPI0010) on arm64.  See also _RDI.

_LCK   6.3.4        Optional.
_MLS   6.1.7        Highly recommended for use in internationalization.

_MAT   6.2.10       Optional; see also the MADT.

_MLS   6.1.7        Optional, but highly recommended for use in
                    internationalization.

_OFF   7.1.2        It is recommended to define this method for any device
_OFF   7.2.2        It is recommended to define this method for any device
                    that can be turned on or off.

_ON    7.1.3        It is recommended to define this method for any device
_ON    7.2.3        It is recommended to define this method for any device
                    that can be turned on or off.

\_OS   5.7.3        This method will return "Linux" by default (this is
@@ -398,122 +422,107 @@ _OSC   6.2.11       This method can be a global method in ACPI (i.e.,
                    by the kernel community, then register it with the
                    UEFI Forum.

\_OSI  5.7.2        Deprecated on ARM64.  Any invocation of this method
                    will print a warning on the console and return false.
                    That is, as far as ACPI firmware is concerned, _OSI
                    cannot be used to determine what sort of system is
                    being used or what functionality is provided.  The
                    _OSC method is to be used instead.

_OST   6.3.5        Optional.
\_OSI  5.7.2        Deprecated on ARM64.  As far as ACPI firmware is
                    concerned, _OSI is not to be used to determine what
                    sort of system is being used or what functionality
                    is provided.  The _OSC method is to be used instead.

_PDC   8.4.1        Deprecated, do not use on arm64.

\_PIC  5.8.1        The method should not be used.  On arm64, the only
                    interrupt model available is GIC.

_PLD   6.1.8        Optional.

\_PR   5.3.1        This namespace is for x86 use only on legacy systems.
                    Do not use it on arm64.

_PRS   6.2.12       Optional.

_PRT   6.2.13       Required as part of the definition of all PCI root
                    devices.

_PRW   7.2.13       Use as needed; power management specific.

_PRx   7.2.8-11     Use as needed; power management specific.  If _PR0 is
_PRx   7.3.8-11     Use as needed; power management specific.  If _PR0 is
                    defined, _PR3 must also be defined.

_PSC   7.2.6        Use as needed; power management specific.

_PSE   7.2.7        Use as needed; power management specific.

_PSW   7.2.14       Use as needed; power management specific.

_PSx   7.2.2-5      Use as needed; power management specific.  If _PS0 is
_PSx   7.3.2-5      Use as needed; power management specific.  If _PS0 is
                    defined, _PS3 must also be defined.  If clocks or
                    regulators need adjusting to be consistent with power
                    usage, change them in these methods.

\_PTS  7.3.1        Use as needed; power management specific.

_PXM   6.2.14       Optional.

_REG   6.5.4        Use as needed.
_RDI   8.4.4.4      Recommended for use with processor definitions (_HID
                    ACPI0010) on arm64.  This should only be used in
                    conjunction with _LPI.

\_REV  5.7.4        Always returns the latest version of ACPI supported.

_RMV   6.3.6        Optional.

\_SB   5.3.1        Required on arm64; all devices must be defined in this
                    namespace.

_SEG   6.5.6        Use as needed; PCI-specific.

\_SI   5.3.1,       Optional.
       9.1

_SLI   6.2.15       Optional; recommended when SLIT table is in use.
_SLI   6.2.15       Use is recommended when SLIT table is in use.

_STA   6.3.7,       It is recommended to define this method for any device
       7.1.4        that can be turned on or off.
       7.2.4        that can be turned on or off.  See also the STAO table
                    that provides overrides to hide devices in virtualized
                    environments.

_SRS   6.2.16       Optional; see also _PRS.
_SRS   6.2.16       Use as needed; see also _PRS.

_STR   6.1.10       Recommended for conveying device names to end users;
                    this is preferred over using _DDN.

_SUB   6.1.9        Use as needed; _HID or _CID are preferred.

_SUN   6.1.11       Optional.
_SUN   6.1.11       Use as needed, but recommended.

\_Sx   7.3.2        Use as needed; power management specific.

_SxD   7.2.16-19    Use as needed; power management specific.

_SxW   7.2.20-24    Use as needed; power management specific.

_SWS   7.3.3        Use as needed; power management specific; this may
_SWS   7.4.3        Use as needed; power management specific; this may
                    require specification changes for use on arm64.

\_TTS  7.3.4        Use as needed; power management specific.

\_TZ   5.3.1        Optional.

_UID   6.1.12       Recommended for distinguishing devices of the same
                    class; define it if at all possible.

\_WAK  7.3.5        Use as needed; power management specific.
ACPI Event Model
----------------
Do not use GPE block devices; these are not supported in the hardware reduced
profile used by arm64.  Since there are no GPE blocks defined for use on ARM
platforms, GPIO-signaled interrupts should be used for creating system events.
platforms, ACPI events must be signaled differently.

There are two options: GPIO-signaled interrupts (Section 5.6.5), and
interrupt-signaled events (Section 5.6.9).  Interrupt-signaled events are a
new feature in the ACPI 6.1 specification.  Either -- or both -- can be used
on a given platform, and which to use may be dependent on limitations in any
given SoC.  If possible, interrupt-signaled events are recommended.
ACPI Processor Control
----------------------
Section 8 of the ACPI specification is currently undergoing change that
should be completed in the 6.0 version of the specification.  Processor
performance control will be handled differently for arm64 at that point
in time.  Processor aggregator devices (section 8.5) will not be used,
for example, but another similar mechanism instead.
Section 8 of the ACPI specification changed significantly in version 6.0.
Processors should now be defined as Device objects with _HID ACPI0007; do
not use the deprecated Processor statement in ASL.  All multiprocessor systems
should also define a hierarchy of processors, done with Processor Container
Devices (see Section 8.4.3.1, _HID ACPI0010); do not use processor aggregator
devices (Section 8.5) to describe processor topology.  Section 8.4 of the
specification describes the semantics of these object definitions and how
they interrelate.

While UEFI constrains what we can say until the release of 6.0, it is
recommended that CPPC (8.4.5) be used as the primary model.  This will
still be useful into the future.  C-states and P-states will still be
provided, but most of the current design work appears to favor CPPC.
Most importantly, the processor hierarchy defined also defines the low power
idle states that are available to the platform, along with the rules for
determining which processors can be turned on or off and the circumstances
that control that.  Without this information, the processors will run in
whatever power state they were left in by UEFI.

Note too, that the processor Device objects defined and the entries in the
MADT for GICs are expected to be in synchronization.  The _UID of the Device
object must correspond to processor IDs used in the MADT.

It is recommended that CPPC (8.4.5) be used as the primary model for processor
performance control on arm64.  C-states and P-states may become available at
some point in the future, but most current design work appears to favor CPPC.

Further, it is essential that the ARMv8 SoC provide a fully functional
implementation of PSCI; this will be the only mechanism supported by ACPI
to control CPU power state (including secondary CPU booting).

More details will be provided on the release of the ACPI 6.0 specification.
to control CPU power state.  Booting of secondary CPUs using the ACPI
parking protocol is possible, but discouraged, since only PSCI is supported
for ARM servers.
ACPI System Address Map Interfaces

@@ -535,21 +544,25 @@ used to indicate fatal errors that cannot be corrected, and require immediate
attention.

Since there is no direct equivalent of the x86 SCI or NMI, arm64 handles
these slightly differently.  The SCI is handled as a normal GPIO-signaled
interrupt; given that these are corrected (or correctable) errors being
reported, this is sufficient.  The NMI is emulated as the highest priority
GPIO-signaled interrupt possible.  This implies some caution must be used
since there could be interrupts at higher privilege levels or even interrupts
at the same priority as the emulated NMI.  In Linux, this should not be the
case but one should be aware it could happen.
these slightly differently.  The SCI is handled as a high priority interrupt;
given that these are corrected (or correctable) errors being reported, this
is sufficient.  The NMI is emulated as the highest priority interrupt
possible.  This implies some caution must be used since there could be
interrupts at higher privilege levels or even interrupts at the same priority
as the emulated NMI.  In Linux, this should not be the case but one should
be aware it could happen.
ACPI Objects Not Supported on ARM64
-----------------------------------
While this may change in the future, there are several classes of objects
that can be defined, but are not currently of general interest to ARM servers.
Some of these objects have x86 equivalents, and may actually make sense in ARM
servers.  However, there is either no hardware available at present, or there
may not even be a non-ARM implementation yet.  Hence, they are not currently
supported.

These are not supported:
The following classes of objects are not supported:

       -- Section 9.2: ambient light sensor devices

@@ -571,16 +584,6 @@ These are not supported:

       -- Section 9.18: time and alarm devices (see 9.15)
ACPI Objects Not Yet Implemented
--------------------------------
While these objects have x86 equivalents, and they do make some sense in ARM
servers, there is either no hardware available at present, or in some cases
there may not yet be a non-ARM implementation.  Hence, they are currently not
implemented though that may change in the future.

Not yet implemented are:

       -- Section 10: power source and power meter devices

       -- Section 11: thermal management

@@ -589,5 +592,31 @@ Not yet implemented are:

       -- Section 13: SMBus interfaces

       -- Section 17: NUMA support (prototypes have been submitted for
          review)

This also means that there is no support for the following objects:

Name   Section       Name   Section
----   ------------  ----   ------------
_ALC   9.3.4         _FDM   9.10.3
_ALI   9.3.2         _FIX   6.2.7
_ALP   9.3.6         _GAI   10.4.5
_ALR   9.3.5         _GHL   10.4.7
_ALT   9.3.3         _GTM   9.9.2.1.1
_BCT   10.2.2.10     _LID   9.5.1
_BDN   6.5.3         _PAI   10.4.4
_BIF   10.2.2.1      _PCL   10.3.2
_BIX   10.2.2.1      _PIF   10.3.3
_BLT   9.2.3         _PMC   10.4.1
_BMA   10.2.2.4      _PMD   10.4.8
_BMC   10.2.2.12     _PMM   10.4.3
_BMD   10.2.2.11     _PRL   10.3.4
_BMS   10.2.2.5      _PSR   10.3.1
_BST   10.2.2.6      _PTP   10.4.2
_BTH   10.2.2.7      _SBS   10.1.3
_BTM   10.2.2.9      _SHL   10.4.6
_BTP   10.2.2.8      _STM   9.9.2.1.1
_DCK   6.5.2         _UPD   9.16.1
_EC    12.12         _UPP   9.16.2
_FDE   9.10.1        _WPC   10.5.2
_FDI   9.10.2        _WPP   10.5.3
@@ -34,7 +34,7 @@ of the summary text almost directly, to be honest.

The short form of the rationale for ACPI on ARM is:

-- ACPI’s bytecode (AML) allows the platform to encode hardware behavior,
-- ACPI’s byte code (AML) allows the platform to encode hardware behavior,
   while DT explicitly does not support this.  For hardware vendors, being
   able to encode behavior is a key tool used in supporting operating
   system releases on new hardware.

@@ -57,11 +57,11 @@ The short form of the rationale for ACPI on ARM is:

-- The new ACPI governance process works well and Linux is now at the same
   table as hardware vendors and other OS vendors.  In fact, there is no
   longer any reason to feel that ACPI is only belongs to Windows or that
   longer any reason to feel that ACPI only belongs to Windows or that
   Linux is in any way secondary to Microsoft in this arena.  The move of
   ACPI governance into the UEFI forum has significantly opened up the
   specification development process, and currently, a large portion of the
   changes being made to ACPI is being driven by Linux.
   changes being made to ACPI are being driven by Linux.

Key to the use of ACPI is the support model.  For servers in general, the
responsibility for hardware behaviour cannot solely be the domain of the
@@ -110,7 +110,7 @@ ACPI support in drivers and subsystems for ARMv8 should never be mutually
exclusive with DT support at compile time.

At boot time the kernel will only use one description method depending on
parameters passed from the bootloader (including kernel bootargs).
parameters passed from the boot loader (including kernel bootargs).

Regardless of whether DT or ACPI is used, the kernel must always be capable
of booting with either scheme (in kernels with both schemes enabled at compile

@@ -159,7 +159,7 @@ Further, the ACPI core will only use the 64-bit address fields in the FADT
(Fixed ACPI Description Table).  Any 32-bit address fields in the FADT will
be ignored on arm64.

Hardware reduced mode (see Section 4.1 of the ACPI 5.1 specification) will
Hardware reduced mode (see Section 4.1 of the ACPI 6.1 specification) will
be enforced by the ACPI core on arm64.  Doing so allows the ACPI core to
run less complex code since it no longer has to provide support for legacy
hardware from other architectures.  Any fields that are not to be used for

@@ -167,7 +167,7 @@ hardware reduced mode must be set to zero.

For the ACPI core to operate properly, and in turn provide the information
the kernel needs to configure devices, it expects to find the following
tables (all section numbers refer to the ACPI 5.1 specfication):
tables (all section numbers refer to the ACPI 6.1 specification):

       -- RSDP (Root System Description Pointer), section 5.2.5
@@ -185,9 +185,23 @@ tables (all section numbers refer to the ACPI 5.1 specfication):

       -- If PCI is supported, the MCFG (Memory mapped ConFiGuration
          Table), section 5.2.6, specifically Table 5-31.

       -- If booting without a console=<device> kernel parameter is
          supported, the SPCR (Serial Port Console Redirection table),
          section 5.2.6, specifically Table 5-31.

       -- If necessary to describe the I/O topology, SMMUs and GIC ITSs,
          the IORT (Input Output Remapping Table, section 5.2.6, specifically
          Table 5-31).

       -- If NUMA is supported, the SRAT (System Resource Affinity Table)
          and SLIT (System Locality distance Information Table), sections
          5.2.16 and 5.2.17, respectively.

If the above tables are not all present, the kernel may or may not be
able to boot properly since it may not be able to configure all of the
devices available.
devices available.  This list of tables is not meant to be all inclusive;
in some environments other tables may be needed (e.g., any of the APEI
tables from section 18) to support specific functionality.
ACPI Detection

@@ -198,7 +212,7 @@ the device structure.  This is detailed further in the "Driver
Recommendations" section.

In non-driver code, if the presence of ACPI needs to be detected at
runtime, then check the value of acpi_disabled.  If CONFIG_ACPI is not
run time, then check the value of acpi_disabled.  If CONFIG_ACPI is not
set, acpi_disabled will always be 1.
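For illustration only (not part of the commit), such a run-time check could look like the sketch below; acpi_disabled is the real flag named above, while the two probe helpers are hypothetical placeholders:

	/* Minimal sketch (not from the commit): pick a probing path at run time. */
	#include <linux/acpi.h>
	#include <linux/device.h>

	static int example_setup(struct device *dev)
	{
		if (acpi_disabled)
			return example_probe_dt(dev);	/* hypothetical DT fall-back */
		return example_probe_acpi(dev);		/* hypothetical ACPI path */
	}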
@@ -233,7 +247,7 @@ that looks like this: Name(KEY0, "value0").  An ACPI device driver would
then retrieve the value of the property by evaluating the KEY0 object.
However, using Name() this way has multiple problems: (1) ACPI limits
names ("KEY0") to four characters unlike DT; (2) there is no industry
wide registry that maintains a list of names, minimzing re-use; (3)
wide registry that maintains a list of names, minimizing re-use; (3)
there is also no registry for the definition of property values ("value0"),
again making re-use difficult; and (4) how does one maintain backward
compatibility as new hardware comes out?  The _DSD method was created
@@ -434,7 +448,8 @@ The ACPI specification changes regularly.  During the year 2014, for instance,
version 5.1 was released and version 6.0 substantially completed, with most of
the changes being driven by ARM-specific requirements.  Proposed changes are
presented and discussed in the ASWG (ACPI Specification Working Group) which
is a part of the UEFI Forum.
is a part of the UEFI Forum.  The current version of the ACPI specification
is 6.1, released in January 2016.

Participation in this group is open to all UEFI members.  Please see
http://www.uefi.org/workinggroup for details on group membership.
@@ -443,7 +458,7 @@ It is the intent of the ARMv8 ACPI kernel code to follow the ACPI specification
as closely as possible, and to only implement functionality that complies with
the released standards from UEFI ASWG.  As a practical matter, there will be
vendors that provide bad ACPI tables or violate the standards in some way.
If this is because of errors, quirks and fixups may be necessary, but will
If this is because of errors, quirks and fix-ups may be necessary, but will
be avoided if possible.  If there are features missing from ACPI that preclude
it from being used on a platform, ECRs (Engineering Change Requests) should be
submitted to ASWG and go through the normal approval process; for those that
@@ -480,8 +495,7 @@ References

    Software on ARM Platforms", dated 16 Aug 2014

[2] http://www.secretlab.ca/archives/151, 10 Jan 2015, Copyright (c) 2015,
    Linaro Ltd., written by Grant Likely.  A copy of the verbatim text (apart
    from formatting) is also in Documentation/arm64/why_use_acpi.txt.
    Linaro Ltd., written by Grant Likely.

[3] AMD ACPI for Seattle platform documentation:
    http://amd-dev.wpengine.netdna-cdn.com/wordpress/media/2012/10/Seattle_ACPI_Guide.pdf
@@ -39,7 +39,9 @@ Optional properties:

                     When using a PPI, specifies a list of phandles to CPU
                     nodes corresponding to the set of CPUs which have
                     a PMU of this type signalling the PPI listed in the
                     interrupts property.
                     interrupts property, unless this is already specified
                     by the PPI interrupt specifier itself (in which case
                     the interrupt-affinity property shouldn't be present).

                     This property should be present when there is more than
                     a single SPI.
@@ -8,6 +8,7 @@ config ARM64
	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	select ARCH_HAS_ELF_RANDOMIZE
	select ARCH_HAS_GCOV_PROFILE_ALL
	select ARCH_HAS_KCOV
	select ARCH_HAS_SG_CHAIN
	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
	select ARCH_USE_CMPXCHG_LOCKREF

@@ -86,8 +87,11 @@ config ARM64
	select HAVE_PERF_EVENTS
	select HAVE_PERF_REGS
	select HAVE_PERF_USER_STACK_DUMP
	select HAVE_REGS_AND_STACK_ACCESS_API
	select HAVE_RCU_TABLE_FREE
	select HAVE_SYSCALL_TRACEPOINTS
	select HAVE_KPROBES
	select HAVE_KRETPROBES if HAVE_KPROBES
	select IOMMU_DMA if IOMMU_SUPPORT
	select IRQ_DOMAIN
	select IRQ_FORCED_THREADING

@@ -665,6 +669,16 @@ config PARAVIRT_TIME_ACCOUNTING

	  If in doubt, say N here.

config KEXEC
	depends on PM_SLEEP_SMP
	select KEXEC_CORE
	bool "kexec system call"
	---help---
	  kexec is a system call that implements the ability to shutdown your
	  current kernel, and to start another kernel.  It is like a reboot
	  but it is independent of the system firmware.  And like a reboot
	  you can start any kernel with it, not just Linux.
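Usage note (not part of the commit): once a target kernel has been staged with kexec_load() (typically via the kexec-tools utility), user space jumps into it through the reboot() syscall. A minimal sketch:

	/* Minimal sketch (not from the commit): boot the previously staged kexec
	 * kernel; requires CAP_SYS_BOOT and a prior kexec_load(). */
	#include <unistd.h>
	#include <sys/syscall.h>
	#include <linux/reboot.h>

	int main(void)
	{
		return syscall(SYS_reboot, LINUX_REBOOT_MAGIC1, LINUX_REBOOT_MAGIC2,
			       LINUX_REBOOT_CMD_KEXEC, NULL);
	}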
config XEN_DOM0
	def_bool y
	depends on XEN

@@ -873,7 +887,7 @@ config RELOCATABLE

config RANDOMIZE_BASE
	bool "Randomize the address of the kernel image"
	select ARM64_MODULE_PLTS
	select ARM64_MODULE_PLTS if MODULES
	select RELOCATABLE
	help
	  Randomizes the virtual address at which the kernel image is
@@ -12,7 +12,6 @@

LDFLAGS_vmlinux	:=-p --no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
OBJCOPYFLAGS	:=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS		:=-9

ifneq ($(CONFIG_RELOCATABLE),)

@@ -121,6 +120,16 @@ archclean:
	$(Q)$(MAKE) $(clean)=$(boot)
	$(Q)$(MAKE) $(clean)=$(boot)/dts

# We need to generate vdso-offsets.h before compiling certain files in kernel/.
# In order to do that, we should use the archprepare target, but we can't since
# asm-offsets.h is included in some files used to generate vdso-offsets.h, and
# asm-offsets.h is built in prepare0, for which archprepare is a dependency.
# Therefore we need to generate the header after prepare0 has been made, hence
# this hack.
prepare: vdso_prepare
vdso_prepare: prepare0
	$(Q)$(MAKE) $(build)=arch/arm64/kernel/vdso include/generated/vdso-offsets.h

define archhelp
  echo  '* Image.gz      - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
  echo  '  Image         - Uncompressed kernel image (arch/$(ARCH)/boot/Image)'
@@ -14,6 +14,8 @@
# Based on the ia64 boot/Makefile.
#

OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S

targets := Image Image.gz

$(obj)/Image: vmlinux FORCE
@@ -70,6 +70,7 @@ CONFIG_KSM=y
CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CMA=y
CONFIG_XEN=y
CONFIG_KEXEC=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
CONFIG_CPU_IDLE=y

@@ -1,6 +1,5 @@
generic-y += bug.h
generic-y += bugs.h
generic-y += checksum.h
generic-y += clkdev.h
generic-y += cputime.h
generic-y += current.h
@@ -95,13 +95,11 @@ void apply_alternatives(void *start, size_t length);
 * The code that follows this macro will be assembled and linked as
 * normal. There are no restrictions on this code.
 */
.macro alternative_if_not cap, enable = 1
	.if \enable
.macro alternative_if_not cap
	.pushsection .altinstructions, "a"
	altinstruction_entry 661f, 663f, \cap, 662f-661f, 664f-663f
	.popsection
661:
	.endif
.endm

/*
@@ -118,27 +116,27 @@ void apply_alternatives(void *start, size_t length);
 * alternative sequence it is defined in (branches into an
 * alternative sequence are not fixed up).
 */
.macro alternative_else, enable = 1
	.if \enable
.macro alternative_else
662:	.pushsection .altinstr_replacement, "ax"
663:
	.endif
.endm

/*
 * Complete an alternative code sequence.
 */
.macro alternative_endif, enable = 1
	.if \enable
.macro alternative_endif
664:	.popsection
	.org	. - (664b-663b) + (662b-661b)
	.org	. - (662b-661b) + (664b-663b)
	.endif
.endm

#define _ALTERNATIVE_CFG(insn1, insn2, cap, cfg, ...)	\
	alternative_insn insn1, insn2, cap, IS_ENABLED(cfg)

.macro user_alt, label, oldinstr, newinstr, cond
9999:	alternative_insn "\oldinstr", "\newinstr", \cond
	_ASM_EXTABLE 9999b, \label
.endm

/*
 * Generate the assembly for UAO alternatives with exception table entries.
@@ -24,6 +24,7 @@
#define __ASM_ASSEMBLER_H

#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/page.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>

@@ -261,7 +262,16 @@ lr	.req	x30		// link register
	add	\size, \kaddr, \size
	sub	\tmp2, \tmp1, #1
	bic	\kaddr, \kaddr, \tmp2
9998:	dc	\op, \kaddr
9998:
	.if	(\op == cvau || \op == cvac)
alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
	dc	\op, \kaddr
alternative_else
	dc	civac, \kaddr
alternative_endif
	.else
	dc	\op, \kaddr
	.endif
	add	\kaddr, \kaddr, \tmp1
	cmp	\kaddr, \size
	b.lo	9998b
arch/arm64/include/asm/checksum.h (new file, 51 lines)
@@ -0,0 +1,51 @@
/*
 * Copyright (C) 2016 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_CHECKSUM_H
#define __ASM_CHECKSUM_H

#include <linux/types.h>

static inline __sum16 csum_fold(__wsum csum)
{
	u32 sum = (__force u32)csum;
	sum += (sum >> 16) | (sum << 16);
	return ~(__force __sum16)(sum >> 16);
}
#define csum_fold csum_fold

static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
	__uint128_t tmp;
	u64 sum;

	tmp = *(const __uint128_t *)iph;
	iph += 16;
	ihl -= 4;
	tmp += ((tmp >> 64) | (tmp << 64));
	sum = tmp >> 64;
	do {
		sum += *(const u32 *)iph;
		iph += 4;
	} while (--ihl);

	sum += ((sum >> 32) | (sum << 32));
	return csum_fold(sum >> 32);
}
#define ip_fast_csum ip_fast_csum

#include <asm-generic/checksum.h>

#endif	/* __ASM_CHECKSUM_H */
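To make the fold above concrete (this example is not part of the commit), here is the same 16-bit ones'-complement fold as stand-alone user-space code; adding the rotated value accumulates both halves, with the end-around carry, in the top 16 bits:

	/* Stand-alone sketch (not from the commit) of the csum_fold() logic above. */
	#include <stdint.h>
	#include <stdio.h>

	static uint16_t fold16(uint32_t csum)
	{
		/* Rotate-and-add: the high half of the result is the ones'-complement
		 * sum of the two 16-bit halves, end-around carry included. */
		csum += (csum >> 16) | (csum << 16);
		return (uint16_t)~(csum >> 16);
	}

	int main(void)
	{
		/* 0x0001 + 0xffff wraps to 0x0001; inverted gives 0xfffe. */
		printf("0x%04x\n", fold16(0x0001ffffu));
		return 0;
	}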
@@ -25,10 +25,12 @@
 */
struct cpuinfo_arm64 {
	struct cpu	cpu;
	struct kobject	kobj;
	u32		reg_ctr;
	u32		reg_cntfrq;
	u32		reg_dczid;
	u32		reg_midr;
	u32		reg_revidr;

	u64		reg_id_aa64dfr0;
	u64		reg_id_aa64dfr1;
@@ -191,7 +191,9 @@ void __init setup_cpu_features(void);

void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
			    const char *info);
void enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps);
void check_local_cpu_errata(void);
void __init enable_errata_workarounds(void);

void verify_local_cpu_errata(void);
void verify_local_cpu_capabilities(void);
@@ -66,6 +66,11 @@

#define CACHE_FLUSH_IS_SAFE		1

/* kprobes BRK opcodes with ESR encoding  */
#define BRK64_ESR_MASK		0xFFFF
#define BRK64_ESR_KPROBES	0x0004
#define BRK64_OPCODE_KPROBES	(AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5))

/* AArch32 */
#define DBG_ESR_EVT_BKPT	0x4
#define DBG_ESR_EVT_VECC	0x5
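Worked example (not part of the commit): AARCH64_BREAK_MON is the BRK #0 monitor encoding 0xd4200000 from this same header, and the BRK immediate occupies bits [20:5], so the macro expands as follows:

	/* Sketch (not from the commit): expanding BRK64_OPCODE_KPROBES by hand. */
	#include <stdio.h>

	#define AARCH64_BREAK_MON	0xd4200000u	/* BRK #0 encoding */
	#define BRK64_ESR_KPROBES	0x0004u

	int main(void)
	{
		/* BRK imm16 lives at bits [20:5]: 0xd4200000 | (4 << 5) */
		printf("0x%08x\n", AARCH64_BREAK_MON | (BRK64_ESR_KPROBES << 5));
		return 0;	/* prints 0xd4200080, i.e. BRK #4 */
	}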
@@ -14,8 +14,7 @@ extern void efi_init(void);
#endif

int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);

#define efi_set_mapping_permissions	efi_create_mapping
int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);

#define arch_efi_call_virt_setup()					\
({									\
@@ -74,6 +74,7 @@

#define ESR_ELx_EC_SHIFT	(26)
#define ESR_ELx_EC_MASK		(UL(0x3F) << ESR_ELx_EC_SHIFT)
#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

#define ESR_ELx_IL		(UL(1) << 25)
#define ESR_ELx_ISS_MASK	(ESR_ELx_IL - 1)
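The new ESR_ELx_EC() accessor replaces open-coded shifts at call sites (the kvm_emulate.h hunk further down is the in-tree conversion). A stand-alone sketch of the same extraction, not part of the commit:

	/* Sketch (not from the commit): pull the exception class out of an ESR value. */
	#include <stdint.h>

	#define ESR_ELx_EC_SHIFT	26
	#define ESR_ELx_EC_MASK		(0x3FULL << ESR_ELx_EC_SHIFT)
	#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)

	static uint8_t esr_class(uint64_t esr)
	{
		return ESR_ELx_EC(esr);	/* e.g. 0x24 for a data abort from a lower EL */
	}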
@@ -120,6 +120,29 @@ enum aarch64_insn_register {
	AARCH64_INSN_REG_SP = 31	/* Stack pointer: as load/store base reg */
};

enum aarch64_insn_special_register {
	AARCH64_INSN_SPCLREG_SPSR_EL1	= 0xC200,
	AARCH64_INSN_SPCLREG_ELR_EL1	= 0xC201,
	AARCH64_INSN_SPCLREG_SP_EL0	= 0xC208,
	AARCH64_INSN_SPCLREG_SPSEL	= 0xC210,
	AARCH64_INSN_SPCLREG_CURRENTEL	= 0xC212,
	AARCH64_INSN_SPCLREG_DAIF	= 0xDA11,
	AARCH64_INSN_SPCLREG_NZCV	= 0xDA10,
	AARCH64_INSN_SPCLREG_FPCR	= 0xDA20,
	AARCH64_INSN_SPCLREG_DSPSR_EL0	= 0xDA28,
	AARCH64_INSN_SPCLREG_DLR_EL0	= 0xDA29,
	AARCH64_INSN_SPCLREG_SPSR_EL2	= 0xE200,
	AARCH64_INSN_SPCLREG_ELR_EL2	= 0xE201,
	AARCH64_INSN_SPCLREG_SP_EL1	= 0xE208,
	AARCH64_INSN_SPCLREG_SPSR_INQ	= 0xE218,
	AARCH64_INSN_SPCLREG_SPSR_ABT	= 0xE219,
	AARCH64_INSN_SPCLREG_SPSR_UND	= 0xE21A,
	AARCH64_INSN_SPCLREG_SPSR_FIQ	= 0xE21B,
	AARCH64_INSN_SPCLREG_SPSR_EL3	= 0xF200,
	AARCH64_INSN_SPCLREG_ELR_EL3	= 0xF201,
	AARCH64_INSN_SPCLREG_SP_EL2	= 0xF210
};

enum aarch64_insn_variant {
	AARCH64_INSN_VARIANT_32BIT,
	AARCH64_INSN_VARIANT_64BIT
};

@@ -223,8 +246,15 @@ static __always_inline bool aarch64_insn_is_##abbr(u32 code) \
static __always_inline u32 aarch64_insn_get_##abbr##_value(void) \
{ return (val); }

__AARCH64_INSN_FUNCS(adr_adrp,	0x1F000000, 0x10000000)
__AARCH64_INSN_FUNCS(prfm_lit,	0xFF000000, 0xD8000000)
__AARCH64_INSN_FUNCS(str_reg,	0x3FE0EC00, 0x38206800)
__AARCH64_INSN_FUNCS(ldr_reg,	0x3FE0EC00, 0x38606800)
__AARCH64_INSN_FUNCS(ldr_lit,	0xBF000000, 0x18000000)
__AARCH64_INSN_FUNCS(ldrsw_lit,	0xFF000000, 0x98000000)
__AARCH64_INSN_FUNCS(exclusive,	0x3F800000, 0x08000000)
__AARCH64_INSN_FUNCS(load_ex,	0x3F400000, 0x08400000)
__AARCH64_INSN_FUNCS(store_ex,	0x3F400000, 0x08000000)
__AARCH64_INSN_FUNCS(stp_post,	0x7FC00000, 0x28800000)
__AARCH64_INSN_FUNCS(ldp_post,	0x7FC00000, 0x28C00000)
__AARCH64_INSN_FUNCS(stp_pre,	0x7FC00000, 0x29800000)

@@ -273,10 +303,15 @@ __AARCH64_INSN_FUNCS(svc,	0xFFE0001F, 0xD4000001)
__AARCH64_INSN_FUNCS(hvc,	0xFFE0001F, 0xD4000002)
__AARCH64_INSN_FUNCS(smc,	0xFFE0001F, 0xD4000003)
__AARCH64_INSN_FUNCS(brk,	0xFFE0001F, 0xD4200000)
__AARCH64_INSN_FUNCS(exception,	0xFF000000, 0xD4000000)
__AARCH64_INSN_FUNCS(hint,	0xFFFFF01F, 0xD503201F)
__AARCH64_INSN_FUNCS(br,	0xFFFFFC1F, 0xD61F0000)
__AARCH64_INSN_FUNCS(blr,	0xFFFFFC1F, 0xD63F0000)
__AARCH64_INSN_FUNCS(ret,	0xFFFFFC1F, 0xD65F0000)
__AARCH64_INSN_FUNCS(eret,	0xFFFFFFFF, 0xD69F03E0)
__AARCH64_INSN_FUNCS(mrs,	0xFFF00000, 0xD5300000)
__AARCH64_INSN_FUNCS(msr_imm,	0xFFF8F01F, 0xD500401F)
__AARCH64_INSN_FUNCS(msr_reg,	0xFFF00000, 0xD5100000)

#undef __AARCH64_INSN_FUNCS

@@ -286,6 +321,8 @@ bool aarch64_insn_is_branch_imm(u32 insn);
int aarch64_insn_read(void *addr, u32 *insnp);
int aarch64_insn_write(void *addr, u32 insn);
enum aarch64_insn_encoding_class aarch64_get_insn_class(u32 insn);
bool aarch64_insn_uses_literal(u32 insn);
bool aarch64_insn_is_branch(u32 insn);
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn);
u32 aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm);

@@ -367,9 +404,13 @@ bool aarch32_insn_is_wide(u32 insn);
#define A32_RT_OFFSET	12
#define A32_RT2_OFFSET	 0

u32 aarch64_insn_extract_system_reg(u32 insn);
u32 aarch32_insn_extract_reg_num(u32 insn, int offset);
u32 aarch32_insn_mcr_extract_opc2(u32 insn);
u32 aarch32_insn_mcr_extract_crm(u32 insn);

typedef bool (pstate_check_t)(unsigned long);
extern pstate_check_t * const aarch32_opcode_cond_checks[16];
#endif /* __ASSEMBLY__ */

#endif /* __ASM_INSN_H */
@@ -110,8 +110,5 @@ static inline int arch_irqs_disabled_flags(unsigned long flags)
		: : "r" (flags) : "memory");				\
	} while (0)

#define local_dbg_enable()	asm("msr	daifclr, #8" : : : "memory")
#define local_dbg_disable()	asm("msr	daifset, #8" : : : "memory")

#endif
#endif
arch/arm64/include/asm/kexec.h (new file, 48 lines)
@@ -0,0 +1,48 @@
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ARM64_KEXEC_H
#define _ARM64_KEXEC_H

/* Maximum physical address we can use pages from */

#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)

/* Maximum address we can reach in physical address mode */

#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)

/* Maximum address we can use for the control code buffer */

#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)

#define KEXEC_CONTROL_PAGE_SIZE 4096

#define KEXEC_ARCH KEXEC_ARCH_AARCH64

#ifndef __ASSEMBLY__

/**
 * crash_setup_regs() - save registers for the panic kernel
 *
 * @newregs: registers are saved here
 * @oldregs: registers to be saved (may be %NULL)
 */

static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	/* Empty routine needed to avoid build errors. */
}

#endif /* __ASSEMBLY__ */

#endif
arch/arm64/include/asm/kprobes.h (new file, 62 lines)
@@ -0,0 +1,62 @@
/*
 * arch/arm64/include/asm/kprobes.h
 *
 * Copyright (C) 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#ifndef _ARM_KPROBES_H
#define _ARM_KPROBES_H

#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/percpu.h>

#define __ARCH_WANT_KPROBES_INSN_SLOT
#define MAX_INSN_SIZE			1
#define MAX_STACK_SIZE			128

#define flush_insn_slot(p)		do { } while (0)
#define kretprobe_blacklist_size	0

#include <asm/probes.h>

struct prev_kprobe {
	struct kprobe *kp;
	unsigned int status;
};

/* Single step context for kprobe */
struct kprobe_step_ctx {
	unsigned long ss_pending;
	unsigned long match_addr;
};

/* per-cpu kprobe control block */
struct kprobe_ctlblk {
	unsigned int kprobe_status;
	unsigned long saved_irqflag;
	struct prev_kprobe prev_kprobe;
	struct kprobe_step_ctx ss_ctx;
	struct pt_regs jprobe_saved_regs;
	char jprobes_stack[MAX_STACK_SIZE];
};

void arch_remove_kprobe(struct kprobe *);
int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr);
int kprobe_exceptions_notify(struct notifier_block *self,
			     unsigned long val, void *data);
int kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr);
int kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr);
void kretprobe_trampoline(void);
void __kprobes *trampoline_probe_handler(struct pt_regs *regs);

#endif /* _ARM_KPROBES_H */
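For context (not part of the commit), this header slots into the generic kprobes API, which a module uses roughly as in the sketch below; the probed symbol is an arbitrary example for a v4.8-era kernel:

	/* Minimal module sketch (not from the commit). */
	#include <linux/module.h>
	#include <linux/kprobes.h>

	static int pre_handler(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("kprobe hit %s at %p\n", p->symbol_name,
			(void *)instruction_pointer(regs));
		return 0;
	}

	static struct kprobe kp = {
		.symbol_name	= "do_fork",	/* arbitrary example symbol */
		.pre_handler	= pre_handler,
	};

	static int __init kp_init(void)  { return register_kprobe(&kp); }
	static void __exit kp_exit(void) { unregister_kprobe(&kp); }

	module_init(kp_init);
	module_exit(kp_exit);
	MODULE_LICENSE("GPL");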
@@ -210,7 +210,7 @@ static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
	return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
@@ -34,7 +34,7 @@ extern void __iomem *early_io_map(phys_addr_t phys, unsigned long virt);
extern void init_mem_pgprot(void);
extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot);
			       pgprot_t prot, bool allow_block_mappings);
extern void *fixmap_remap_fdt(phys_addr_t dt_phys);

#endif
arch/arm64/include/asm/probes.h (new file, 35 lines)
@@ -0,0 +1,35 @@
/*
 * arch/arm64/include/asm/probes.h
 *
 * Copyright (C) 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#ifndef _ARM_PROBES_H
#define _ARM_PROBES_H

#include <asm/opcodes.h>

struct kprobe;
struct arch_specific_insn;

typedef u32 kprobe_opcode_t;
typedef void (kprobes_handler_t) (u32 opcode, long addr, struct pt_regs *);

/* architecture specific copy of original instruction */
struct arch_specific_insn {
	kprobe_opcode_t *insn;
	pstate_check_t *pstate_cc;
	kprobes_handler_t *handler;
	/* restore address after step xol */
	unsigned long restore;
};

#endif
@@ -192,5 +192,6 @@ static inline void spin_lock_prefetch(const void *ptr)
 
 void cpu_enable_pan(void *__unused);
 void cpu_enable_uao(void *__unused);
+void cpu_enable_cache_maint_trap(void *__unused);
 
 #endif /* __ASM_PROCESSOR_H */
arch/arm64/include/asm/ptdump.h (new file, 44 lines)
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2014 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#ifndef __ASM_PTDUMP_H
+#define __ASM_PTDUMP_H
+
+#ifdef CONFIG_ARM64_PTDUMP
+
+#include <linux/mm_types.h>
+
+struct addr_marker {
+	unsigned long start_address;
+	char *name;
+};
+
+struct ptdump_info {
+	struct mm_struct		*mm;
+	const struct addr_marker	*markers;
+	unsigned long			base_addr;
+	unsigned long			max_addr;
+};
+
+int ptdump_register(struct ptdump_info *info, const char *name);
+
+#else
+static inline int ptdump_register(struct ptdump_info *info, const char *name)
+{
+	return 0;
+}
+#endif /* CONFIG_ARM64_PTDUMP */
+
+#endif /* __ASM_PTDUMP_H */
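A minimal sketch of how a dump target might be registered against this interface; the marker addresses and the debugfs file name are made up for illustration, and the availability of init_mm at the call site is assumed.

#include <linux/mm.h>
#include <asm/ptdump.h>

/* Hypothetical markers; real users pass their actual VA layout. */
static const struct addr_marker example_markers[] = {
	{ 0xffff000000000000UL,	"Example region start" },
	{ -1UL,			NULL },
};

static struct ptdump_info example_info = {
	.mm		= &init_mm,		/* assumed table owner */
	.markers	= example_markers,
	.base_addr	= 0xffff000000000000UL,
	.max_addr	= ~0UL,
};

static int __init example_ptdump_init(void)
{
	/* Creates a debugfs dump file with the given name. */
	return ptdump_register(&example_info, "example_page_tables");
}
device_initcall(example_ptdump_init);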
@@ -46,7 +46,6 @@
 #define COMPAT_PSR_MODE_UND	0x0000001b
 #define COMPAT_PSR_MODE_SYS	0x0000001f
 #define COMPAT_PSR_T_BIT	0x00000020
-#define COMPAT_PSR_E_BIT	0x00000200
 #define COMPAT_PSR_F_BIT	0x00000040
 #define COMPAT_PSR_I_BIT	0x00000080
 #define COMPAT_PSR_A_BIT	0x00000100
@@ -74,6 +73,7 @@
 #define COMPAT_PT_DATA_ADDR		0x10004
 #define COMPAT_PT_TEXT_END_ADDR		0x10008
 #ifndef __ASSEMBLY__
+#include <linux/bug.h>
 
 /* sizeof(struct user) for AArch32 */
 #define COMPAT_USER_SZ	296
@@ -121,6 +121,8 @@ struct pt_regs {
		u64 unused;	// maintain 16 byte alignment
	};
 
+#define MAX_REG_OFFSET offsetof(struct pt_regs, pstate)
+
 #define arch_has_single_step()	(1)
 
 #ifdef CONFIG_COMPAT
@@ -146,9 +148,58 @@
 #define fast_interrupts_enabled(regs) \
	(!((regs)->pstate & PSR_F_BIT))
 
-#define user_stack_pointer(regs) \
+#define GET_USP(regs) \
	(!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp)
 
+#define SET_USP(ptregs, value) \
+	(!compat_user_mode(regs) ? ((regs)->sp = value) : ((regs)->compat_sp = value))
+
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+					       unsigned int n);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs:   pt_regs from which register value is gotten
+ * @offset: offset of the register.
+ *
+ * regs_get_register returns the value of the register at the given @offset
+ * from @regs. The @offset is the offset of the register in struct pt_regs.
+ * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
+ */
+static inline u64 regs_get_register(struct pt_regs *regs, unsigned int offset)
+{
+	u64 val = 0;
+
+	WARN_ON(offset & 7);
+
+	offset >>= 3;
+	switch (offset) {
+	case 0 ... 30:
+		val = regs->regs[offset];
+		break;
+	case offsetof(struct pt_regs, sp) >> 3:
+		val = regs->sp;
+		break;
+	case offsetof(struct pt_regs, pc) >> 3:
+		val = regs->pc;
+		break;
+	case offsetof(struct pt_regs, pstate) >> 3:
+		val = regs->pstate;
+		break;
+	default:
+		val = 0;
+	}
+
+	return val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+	return regs->sp;
+}
+
 static inline unsigned long regs_return_value(struct pt_regs *regs)
 {
	return regs->regs[0];
@@ -158,8 +209,15 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
 struct task_struct;
 int valid_user_regs(struct user_pt_regs *regs, struct task_struct *task);
 
-#define instruction_pointer(regs)	((unsigned long)(regs)->pc)
+#define GET_IP(regs)		((unsigned long)(regs)->pc)
+#define SET_IP(regs, value)	((regs)->pc = ((u64) (value)))
+
+#define GET_FP(ptregs)		((unsigned long)(ptregs)->regs[29])
+#define SET_FP(ptregs, value)	((ptregs)->regs[29] = ((u64) (value)))
+
+#include <asm-generic/ptrace.h>
+
+#undef profile_pc
 extern unsigned long profile_pc(struct pt_regs *regs);
 
 #endif /* __ASSEMBLY__ */
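A short sketch of how the new offset API above can be consumed, for example from a kprobe pre-handler; the register name string is assumed to follow the "x0".."x30"/"sp"/"pc" naming used by the matching ptrace lookup table.

#include <linux/kprobes.h>
#include <asm/ptrace.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	int off = regs_query_register_offset("x0");	/* name assumed */
	u64 x0 = 0, sp;

	if (off >= 0)
		x0 = regs_get_register(regs, off);
	/* sp's offset can also be computed directly from struct pt_regs. */
	sp = regs_get_register(regs, offsetof(struct pt_regs, sp));

	pr_debug("x0=%llx sp=%llx ip=%lx\n", x0, sp,
		 instruction_pointer(regs));
	return 0;
}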
@@ -98,11 +98,11 @@
			 SCTLR_ELx_SA | SCTLR_ELx_I)
 
 /* SCTLR_EL1 specific flags. */
+#define SCTLR_EL1_UCI		(1 << 26)
 #define SCTLR_EL1_SPAN		(1 << 23)
 #define SCTLR_EL1_SED		(1 << 8)
 #define SCTLR_EL1_CP15BEN	(1 << 5)
 
 /* id_aa64isar0 */
 #define ID_AA64ISAR0_RDM_SHIFT		28
 #define ID_AA64ISAR0_ATOMICS_SHIFT	20
@@ -34,6 +34,8 @@ struct undef_hook {
 void register_undef_hook(struct undef_hook *hook);
 void unregister_undef_hook(struct undef_hook *hook);
 
+void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr);
+
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static inline int __in_irqentry_text(unsigned long ptr)
 {
@@ -21,6 +21,7 @@
 /*
  * User space memory access functions
  */
+#include <linux/kasan-checks.h>
 #include <linux/string.h>
 #include <linux/thread_info.h>
 
@@ -256,15 +257,29 @@ do {									\
		-EFAULT;						\
 })
 
-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n);
+extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n);
 extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n);
 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
 
+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+	kasan_check_write(to, n);
+	return __arch_copy_from_user(to, from, n);
+}
+
+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+	kasan_check_read(from, n);
+	return __arch_copy_to_user(to, from, n);
+}
+
 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	kasan_check_write(to, n);
+
	if (access_ok(VERIFY_READ, from, n))
-		n = __copy_from_user(to, from, n);
+		n = __arch_copy_from_user(to, from, n);
	else /* security hole - plug it */
		memset(to, 0, n);
	return n;
@@ -272,8 +287,10 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
 
 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
 {
+	kasan_check_read(from, n);
+
	if (access_ok(VERIFY_WRITE, to, n))
-		n = __copy_to_user(to, from, n);
+		n = __arch_copy_to_user(to, from, n);
	return n;
 }
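A brief usage sketch: with the rewiring above, a caller of copy_from_user() now gets a KASAN check on the destination buffer before the raw access routine runs. The function below is illustrative only.

#include <linux/uaccess.h>
#include <linux/slab.h>

/* Hypothetical helper: bounded copy of user data into a kernel buffer. */
static long example_take_from_user(const void __user *uptr, size_t len)
{
	char *buf;

	if (len > PAGE_SIZE)
		return -EINVAL;
	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* KASAN validates 'buf' here before __arch_copy_from_user runs. */
	if (copy_from_user(buf, uptr, len)) {
		kfree(buf);
		return -EFAULT;
	}
	/* ... use buf ... */
	kfree(buf);
	return 0;
}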
@@ -22,6 +22,8 @@
 
 struct vdso_data {
	__u64 cs_cycle_last;	/* Timebase at clocksource init */
+	__u64 raw_time_sec;	/* Raw time */
+	__u64 raw_time_nsec;
	__u64 xtime_clock_sec;	/* Kernel time */
	__u64 xtime_clock_nsec;
	__u64 xtime_coarse_sec;	/* Coarse time */
@@ -29,8 +31,10 @@ struct vdso_data {
	__u64 wtm_clock_sec;	/* Wall to monotonic time */
	__u64 wtm_clock_nsec;
	__u32 tb_seq_count;	/* Timebase sequence counter */
-	__u32 cs_mult;		/* Clocksource multiplier */
-	__u32 cs_shift;		/* Clocksource shift */
+	/* cs_* members must be adjacent and in this order (ldp accesses) */
+	__u32 cs_mono_mult;	/* NTP-adjusted clocksource multiplier */
+	__u32 cs_shift;		/* Clocksource shift (mono = raw) */
+	__u32 cs_raw_mult;	/* Raw clocksource multiplier */
	__u32 tz_minuteswest;	/* Whacky timezone stuff */
	__u32 tz_dsttime;
	__u32 use_syscall;
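For readers unfamiliar with how tb_seq_count is consumed: the vDSO reads this page locklessly with a sequence-counter protocol (the real reader is assembler). A simplified C rendering of that protocol, under the assumption that a counter-read callback is supplied and the second/nanosecond accumulation is elided:

#include <linux/compiler.h>

/* Sketch only: retry until tb_seq_count is even and stable. */
static __u64 example_read_mono_ns(const struct vdso_data *d,
				  __u64 (*read_cntvct)(void))
{
	__u32 seq;
	__u64 cycles, nsec;

	for (;;) {
		seq = READ_ONCE(d->tb_seq_count);
		if (seq & 1)
			continue;		/* update in progress */
		smp_rmb();
		cycles = read_cntvct() - d->cs_cycle_last;
		nsec = (cycles * d->cs_mono_mult) >> d->cs_shift;
		nsec += d->xtime_clock_nsec;	/* simplified accumulation */
		smp_rmb();
		if (READ_ONCE(d->tb_seq_count) == seq)
			return nsec;
	}
}

Swapping cs_mono_mult for cs_raw_mult in the same loop is what the new CLOCK_MONOTONIC_RAW path amounts to.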
@@ -34,6 +34,11 @@
  */
 #define HVC_SET_VECTORS 1
 
+/*
+ * HVC_SOFT_RESTART - CPU soft reset, used by the cpu_soft_restart routine.
+ */
+#define HVC_SOFT_RESTART 2
+
 #define BOOT_CPU_MODE_EL1	(0xe11)
 #define BOOT_CPU_MODE_EL2	(0xe12)
@@ -26,8 +26,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE
	$(call if_changed,objcopy)
 
 arm64-obj-$(CONFIG_COMPAT)		+= sys32.o kuser32.o signal32.o	\
-					   sys_compat.o entry32.o	\
-					   ../../arm/kernel/opcodes.o
+					   sys_compat.o entry32.o
 arm64-obj-$(CONFIG_FUNCTION_TRACER)	+= ftrace.o entry-ftrace.o
 arm64-obj-$(CONFIG_MODULES)		+= arm64ksyms.o module.o
 arm64-obj-$(CONFIG_ARM64_MODULE_PLTS)	+= module-plts.o
@@ -47,12 +46,10 @@ arm64-obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
 arm64-obj-$(CONFIG_PARAVIRT)		+= paravirt.o
 arm64-obj-$(CONFIG_RANDOMIZE_BASE)	+= kaslr.o
 arm64-obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
+arm64-obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o	\
+					   cpu-reset.o
 
-obj-y					+= $(arm64-obj-y) vdso/
+obj-y					+= $(arm64-obj-y) vdso/ probes/
 obj-m					+= $(arm64-obj-m)
 head-y					:= head.o
 extra-y					+= $(head-y) vmlinux.lds
-
-# vDSO - this must be built first to generate the symbol offsets
-$(call objectify,$(arm64-obj-y)): $(obj)/vdso/vdso-offsets.h
-$(obj)/vdso/vdso-offsets.h: $(obj)/vdso
@@ -27,6 +27,7 @@
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/arm-smccc.h>
+#include <linux/kprobes.h>
 
 #include <asm/checksum.h>
 
@@ -34,8 +35,8 @@ EXPORT_SYMBOL(copy_page);
 EXPORT_SYMBOL(clear_page);
 
 /* user mem (segment) */
-EXPORT_SYMBOL(__copy_from_user);
-EXPORT_SYMBOL(__copy_to_user);
+EXPORT_SYMBOL(__arch_copy_from_user);
+EXPORT_SYMBOL(__arch_copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 EXPORT_SYMBOL(__copy_in_user);
 
@@ -68,6 +69,7 @@ EXPORT_SYMBOL(test_and_change_bit);
 
 #ifdef CONFIG_FUNCTION_TRACER
 EXPORT_SYMBOL(_mcount);
+NOKPROBE_SYMBOL(_mcount);
 #endif
 
 /* arm-smccc */
@@ -316,28 +316,6 @@ static void __init register_insn_emulation_sysctl(struct ctl_table *table)
  */
 #define TYPE_SWPB (1 << 22)
 
-/*
- * Set up process info to signal segmentation fault - called on access error.
- */
-static void set_segfault(struct pt_regs *regs, unsigned long addr)
-{
-	siginfo_t info;
-
-	down_read(&current->mm->mmap_sem);
-	if (find_vma(current->mm, addr) == NULL)
-		info.si_code = SEGV_MAPERR;
-	else
-		info.si_code = SEGV_ACCERR;
-	up_read(&current->mm->mmap_sem);
-
-	info.si_signo = SIGSEGV;
-	info.si_errno = 0;
-	info.si_addr  = (void *) instruction_pointer(regs);
-
-	pr_debug("SWP{B} emulation: access caused memory abort!\n");
-	arm64_notify_die("Illegal memory access", regs, &info, 0);
-}
-
 static int emulate_swpX(unsigned int address, unsigned int *data,
			unsigned int type)
 {
@@ -366,6 +344,21 @@ static int emulate_swpX(unsigned int address, unsigned int *data,
	return res;
 }
 
+#define ARM_OPCODE_CONDITION_UNCOND 0xf
+
+static unsigned int __kprobes aarch32_check_condition(u32 opcode, u32 psr)
+{
+	u32 cc_bits  = opcode >> 28;
+
+	if (cc_bits != ARM_OPCODE_CONDITION_UNCOND) {
+		if ((*aarch32_opcode_cond_checks[cc_bits])(psr))
+			return ARM_OPCODE_CONDTEST_PASS;
+		else
+			return ARM_OPCODE_CONDTEST_FAIL;
+	}
+	return ARM_OPCODE_CONDTEST_UNCOND;
+}
+
 /*
  * swp_handler logs the id of calling process, dissects the instruction, sanity
  * checks the memory location, calls emulate_swpX for the actual operation and
@@ -380,7 +373,7 @@ static int swp_handler(struct pt_regs *regs, u32 instr)
 
	type = instr & TYPE_SWPB;
 
-	switch (arm_check_condition(instr, regs->pstate)) {
+	switch (aarch32_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
@@ -430,7 +423,8 @@ ret:
	return 0;
 
 fault:
-	set_segfault(regs, address);
+	pr_debug("SWP{B} emulation: access caused memory abort!\n");
+	arm64_notify_segfault(regs, address);
 
	return 0;
 }
@@ -461,7 +455,7 @@ static int cp15barrier_handler(struct pt_regs *regs, u32 instr)
 {
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, regs->pc);
 
-	switch (arm_check_condition(instr, regs->pstate)) {
+	switch (aarch32_check_condition(instr, regs->pstate)) {
	case ARM_OPCODE_CONDTEST_PASS:
		break;
	case ARM_OPCODE_CONDTEST_FAIL:
@@ -51,6 +51,17 @@ int main(void)
   DEFINE(S_X5,			offsetof(struct pt_regs, regs[5]));
   DEFINE(S_X6,			offsetof(struct pt_regs, regs[6]));
   DEFINE(S_X7,			offsetof(struct pt_regs, regs[7]));
+  DEFINE(S_X8,			offsetof(struct pt_regs, regs[8]));
+  DEFINE(S_X10,			offsetof(struct pt_regs, regs[10]));
+  DEFINE(S_X12,			offsetof(struct pt_regs, regs[12]));
+  DEFINE(S_X14,			offsetof(struct pt_regs, regs[14]));
+  DEFINE(S_X16,			offsetof(struct pt_regs, regs[16]));
+  DEFINE(S_X18,			offsetof(struct pt_regs, regs[18]));
+  DEFINE(S_X20,			offsetof(struct pt_regs, regs[20]));
+  DEFINE(S_X22,			offsetof(struct pt_regs, regs[22]));
+  DEFINE(S_X24,			offsetof(struct pt_regs, regs[24]));
+  DEFINE(S_X26,			offsetof(struct pt_regs, regs[26]));
+  DEFINE(S_X28,			offsetof(struct pt_regs, regs[28]));
   DEFINE(S_LR,			offsetof(struct pt_regs, regs[30]));
   DEFINE(S_SP,			offsetof(struct pt_regs, sp));
 #ifdef CONFIG_COMPAT
@@ -78,6 +89,7 @@ int main(void)
   BLANK();
   DEFINE(CLOCK_REALTIME,	CLOCK_REALTIME);
   DEFINE(CLOCK_MONOTONIC,	CLOCK_MONOTONIC);
+  DEFINE(CLOCK_MONOTONIC_RAW,	CLOCK_MONOTONIC_RAW);
   DEFINE(CLOCK_REALTIME_RES,	MONOTONIC_RES_NSEC);
   DEFINE(CLOCK_REALTIME_COARSE,	CLOCK_REALTIME_COARSE);
   DEFINE(CLOCK_MONOTONIC_COARSE,CLOCK_MONOTONIC_COARSE);
@@ -85,6 +97,8 @@ int main(void)
   DEFINE(NSEC_PER_SEC,		NSEC_PER_SEC);
   BLANK();
   DEFINE(VDSO_CS_CYCLE_LAST,	offsetof(struct vdso_data, cs_cycle_last));
+  DEFINE(VDSO_RAW_TIME_SEC,	offsetof(struct vdso_data, raw_time_sec));
+  DEFINE(VDSO_RAW_TIME_NSEC,	offsetof(struct vdso_data, raw_time_nsec));
   DEFINE(VDSO_XTIME_CLK_SEC,	offsetof(struct vdso_data, xtime_clock_sec));
   DEFINE(VDSO_XTIME_CLK_NSEC,	offsetof(struct vdso_data, xtime_clock_nsec));
   DEFINE(VDSO_XTIME_CRS_SEC,	offsetof(struct vdso_data, xtime_coarse_sec));
@@ -92,7 +106,8 @@ int main(void)
   DEFINE(VDSO_WTM_CLK_SEC,	offsetof(struct vdso_data, wtm_clock_sec));
   DEFINE(VDSO_WTM_CLK_NSEC,	offsetof(struct vdso_data, wtm_clock_nsec));
   DEFINE(VDSO_TB_SEQ_COUNT,	offsetof(struct vdso_data, tb_seq_count));
-  DEFINE(VDSO_CS_MULT,		offsetof(struct vdso_data, cs_mult));
+  DEFINE(VDSO_CS_MONO_MULT,	offsetof(struct vdso_data, cs_mono_mult));
+  DEFINE(VDSO_CS_RAW_MULT,	offsetof(struct vdso_data, cs_raw_mult));
   DEFINE(VDSO_CS_SHIFT,		offsetof(struct vdso_data, cs_shift));
   DEFINE(VDSO_TZ_MINWEST,	offsetof(struct vdso_data, tz_minuteswest));
   DEFINE(VDSO_TZ_DSTTIME,	offsetof(struct vdso_data, tz_dsttime));
arch/arm64/kernel/cpu-reset.S (new file, 54 lines)
@@ -0,0 +1,54 @@
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2001 Deep Blue Solutions Ltd.
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/sysreg.h>
+#include <asm/virt.h>
+
+.text
+.pushsection    .idmap.text, "ax"
+
+/*
+ * __cpu_soft_restart(el2_switch, entry, arg0, arg1, arg2) - Helper for
+ * cpu_soft_restart.
+ *
+ * @el2_switch: Flag to indicate a switch to EL2 is needed.
+ * @entry: Location to jump to for soft reset.
+ * arg0: First argument passed to @entry.
+ * arg1: Second argument passed to @entry.
+ * arg2: Third argument passed to @entry.
+ *
+ * Put the CPU into the same state as it would be if it had been reset, and
+ * branch to what would be the reset vector. It must be executed with the
+ * flat identity mapping.
+ */
+ENTRY(__cpu_soft_restart)
+	/* Clear sctlr_el1 flags. */
+	mrs	x12, sctlr_el1
+	ldr	x13, =SCTLR_ELx_FLAGS
+	bic	x12, x12, x13
+	msr	sctlr_el1, x12
+	isb
+
+	cbz	x0, 1f				// el2_switch?
+	mov	x0, #HVC_SOFT_RESTART
+	hvc	#0				// no return
+
+1:	mov	x18, x1				// entry
+	mov	x0, x2				// arg0
+	mov	x1, x3				// arg1
+	mov	x2, x4				// arg2
+	br	x18
+ENDPROC(__cpu_soft_restart)
+
+.popsection
arch/arm64/kernel/cpu-reset.h (new file, 34 lines)
@@ -0,0 +1,34 @@
+/*
+ * CPU reset routines
+ *
+ * Copyright (C) 2015 Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _ARM64_CPU_RESET_H
+#define _ARM64_CPU_RESET_H
+
+#include <asm/virt.h>
+
+void __cpu_soft_restart(unsigned long el2_switch, unsigned long entry,
+	unsigned long arg0, unsigned long arg1, unsigned long arg2);
+
+static inline void __noreturn cpu_soft_restart(unsigned long el2_switch,
+	unsigned long entry, unsigned long arg0, unsigned long arg1,
+	unsigned long arg2)
+{
+	typeof(__cpu_soft_restart) *restart;
+
+	el2_switch = el2_switch && !is_kernel_in_hyp_mode() &&
+		is_hyp_mode_available();
+	restart = (void *)virt_to_phys(__cpu_soft_restart);
+
+	cpu_install_idmap();
+	restart(el2_switch, entry, arg0, arg1, arg2);
+	unreachable();
+}
+
+#endif
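A usage sketch mirroring the machine_kexec() call site further below: the caller passes a physical entry point plus up to three arguments, and requests the EL2 switch where available. The wrapper function here is hypothetical.

/* Sketch: hand control to physically addressed reset code. */
static void __noreturn example_restart(phys_addr_t entry_phys,
				       unsigned long head,
				       unsigned long start)
{
	/*
	 * el2_switch=1 asks for the HVC_SOFT_RESTART path; the inline
	 * helper above downgrades it if EL2 is unusable.
	 */
	cpu_soft_restart(1, entry_phys, head, start, 0);
}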
@@ -46,6 +46,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
+		.enable = cpu_enable_cache_maint_trap,
	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_819472
@@ -54,6 +55,7 @@ const struct arm64_cpu_capabilities arm64_errata[] = {
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
+		.enable = cpu_enable_cache_maint_trap,
	},
 #endif
 #ifdef CONFIG_ARM64_ERRATUM_832075
@@ -133,3 +135,8 @@ void check_local_cpu_errata(void)
 {
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
 }
+
+void __init enable_errata_workarounds(void)
+{
+	enable_cpu_capabilities(arm64_errata);
+}
@@ -913,8 +913,7 @@ void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
  * Run through the enabled capabilities and enable() it on all active
  * CPUs
  */
-static void __init
-enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
+void __init enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
 {
	for (; caps->matches; caps++)
		if (caps->enable && cpus_have_cap(caps->capability))
@@ -1036,6 +1035,7 @@ void __init setup_cpu_features(void)
 
	/* Set the CPU feature capabilies */
	setup_feature_capabilities();
+	enable_errata_workarounds();
	setup_elf_hwcaps(arm64_elf_hwcaps);
 
	if (system_supports_32bit_el0())
@@ -183,6 +183,123 @@ const struct seq_operations cpuinfo_op = {
	.show	= c_show
 };
 
+
+static struct kobj_type cpuregs_kobj_type = {
+	.sysfs_ops = &kobj_sysfs_ops,
+};
+
+/*
+ * The ARM ARM uses the phrase "32-bit register" to describe a register
+ * whose upper 32 bits are RES0 (per C5.1.1, ARM DDI 0487A.i), however
+ * no statement is made as to whether the upper 32 bits will or will not
+ * be made use of in future, and between ARM DDI 0487A.c and ARM DDI
+ * 0487A.d CLIDR_EL1 was expanded from 32-bit to 64-bit.
+ *
+ * Thus, while both MIDR_EL1 and REVIDR_EL1 are described as 32-bit
+ * registers, we expose them both as 64 bit values to cater for possible
+ * future expansion without an ABI break.
+ */
+#define kobj_to_cpuinfo(kobj)	container_of(kobj, struct cpuinfo_arm64, kobj)
+#define CPUREGS_ATTR_RO(_name, _field)					\
+	static ssize_t _name##_show(struct kobject *kobj,		\
+			struct kobj_attribute *attr, char *buf)		\
+	{								\
+		struct cpuinfo_arm64 *info = kobj_to_cpuinfo(kobj);	\
+									\
+		if (info->reg_midr)					\
+			return sprintf(buf, "0x%016x\n", info->reg_##_field);	\
+		else							\
+			return 0;					\
+	}								\
+	static struct kobj_attribute cpuregs_attr_##_name = __ATTR_RO(_name)
+
+CPUREGS_ATTR_RO(midr_el1, midr);
+CPUREGS_ATTR_RO(revidr_el1, revidr);
+
+static struct attribute *cpuregs_id_attrs[] = {
+	&cpuregs_attr_midr_el1.attr,
+	&cpuregs_attr_revidr_el1.attr,
+	NULL
+};
+
+static struct attribute_group cpuregs_attr_group = {
+	.attrs = cpuregs_id_attrs,
+	.name = "identification"
+};
+
+static int cpuid_add_regs(int cpu)
+{
+	int rc;
+	struct device *dev;
+	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+	dev = get_cpu_device(cpu);
+	if (!dev) {
+		rc = -ENODEV;
+		goto out;
+	}
+	rc = kobject_add(&info->kobj, &dev->kobj, "regs");
+	if (rc)
+		goto out;
+	rc = sysfs_create_group(&info->kobj, &cpuregs_attr_group);
+	if (rc)
+		kobject_del(&info->kobj);
+out:
+	return rc;
+}
+
+static int cpuid_remove_regs(int cpu)
+{
+	struct device *dev;
+	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+	dev = get_cpu_device(cpu);
+	if (!dev)
+		return -ENODEV;
+	if (info->kobj.parent) {
+		sysfs_remove_group(&info->kobj, &cpuregs_attr_group);
+		kobject_del(&info->kobj);
+	}
+
+	return 0;
+}
+
+static int cpuid_callback(struct notifier_block *nb,
+			  unsigned long action, void *hcpu)
+{
+	int rc = 0;
+	unsigned long cpu = (unsigned long)hcpu;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
+	case CPU_ONLINE:
+		rc = cpuid_add_regs(cpu);
+		break;
+	case CPU_DEAD:
+		rc = cpuid_remove_regs(cpu);
+		break;
+	}
+
+	return notifier_from_errno(rc);
+}
+
+static int __init cpuinfo_regs_init(void)
+{
+	int cpu;
+
+	cpu_notifier_register_begin();
+
+	for_each_possible_cpu(cpu) {
+		struct cpuinfo_arm64 *info = &per_cpu(cpu_data, cpu);
+
+		kobject_init(&info->kobj, &cpuregs_kobj_type);
+		if (cpu_online(cpu))
+			cpuid_add_regs(cpu);
+	}
+	__hotcpu_notifier(cpuid_callback, 0);
+
+	cpu_notifier_register_done();
+	return 0;
+}
 static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
 {
	unsigned int cpu = smp_processor_id();
@@ -212,6 +329,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();
+	info->reg_revidr = read_cpuid(REVIDR_EL1);
 
	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
@@ -264,3 +382,5 @@ void __init cpuinfo_store_boot_cpu(void)
	boot_cpu_data = *info;
	init_cpu_features(&boot_cpu_data);
 }
+
+device_initcall(cpuinfo_regs_init);
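From user space, the registers exported above appear under the per-CPU device directory; the path below follows the "regs" kobject and "identification" group names in this patch. A small user-space sketch:

#include <stdio.h>

int main(void)
{
	char buf[32];
	FILE *f;

	f = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1",
		  "r");
	if (!f)
		return 1;
	if (!fgets(buf, sizeof(buf), f)) {
		fclose(f);
		return 1;
	}
	printf("cpu0 MIDR_EL1: %s", buf);	/* e.g. 0x...410fd034 */
	fclose(f);
	return 0;
}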
@@ -23,6 +23,7 @@
 #include <linux/hardirq.h>
 #include <linux/init.h>
 #include <linux/ptrace.h>
+#include <linux/kprobes.h>
 #include <linux/stat.h>
 #include <linux/uaccess.h>
 
@@ -48,6 +49,7 @@ static void mdscr_write(u32 mdscr)
	asm volatile("msr mdscr_el1, %0" :: "r" (mdscr));
	local_dbg_restore(flags);
 }
+NOKPROBE_SYMBOL(mdscr_write);
 
 static u32 mdscr_read(void)
 {
@@ -55,6 +57,7 @@ static u32 mdscr_read(void)
	asm volatile("mrs %0, mdscr_el1" : "=r" (mdscr));
	return mdscr;
 }
+NOKPROBE_SYMBOL(mdscr_read);
 
 /*
  * Allow root to disable self-hosted debug from userspace.
@@ -103,6 +106,7 @@ void enable_debug_monitors(enum dbg_active_el el)
		mdscr_write(mdscr);
	}
 }
+NOKPROBE_SYMBOL(enable_debug_monitors);
 
 void disable_debug_monitors(enum dbg_active_el el)
 {
@@ -123,6 +127,7 @@ void disable_debug_monitors(enum dbg_active_el el)
		mdscr_write(mdscr);
	}
 }
+NOKPROBE_SYMBOL(disable_debug_monitors);
 
 /*
  * OS lock clearing.
@@ -151,7 +156,6 @@ static int debug_monitors_init(void)
	/* Clear the OS lock. */
	on_each_cpu(clear_os_lock, NULL, 1);
	isb();
-	local_dbg_enable();
 
	/* Register hotplug handler. */
	__register_cpu_notifier(&os_lock_nb);
@@ -166,22 +170,15 @@ postcore_initcall(debug_monitors_init);
  */
 static void set_regs_spsr_ss(struct pt_regs *regs)
 {
-	unsigned long spsr;
-
-	spsr = regs->pstate;
-	spsr &= ~DBG_SPSR_SS;
-	spsr |= DBG_SPSR_SS;
-	regs->pstate = spsr;
+	regs->pstate |= DBG_SPSR_SS;
 }
+NOKPROBE_SYMBOL(set_regs_spsr_ss);
 
 static void clear_regs_spsr_ss(struct pt_regs *regs)
 {
-	unsigned long spsr;
-
-	spsr = regs->pstate;
-	spsr &= ~DBG_SPSR_SS;
-	regs->pstate = spsr;
+	regs->pstate &= ~DBG_SPSR_SS;
 }
+NOKPROBE_SYMBOL(clear_regs_spsr_ss);
 
 /* EL1 Single Step Handler hooks */
 static LIST_HEAD(step_hook);
@@ -225,6 +222,7 @@ static int call_step_hook(struct pt_regs *regs, unsigned int esr)
 
	return retval;
 }
+NOKPROBE_SYMBOL(call_step_hook);
 
 static void send_user_sigtrap(int si_code)
 {
@@ -266,6 +264,10 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
		 */
		user_rewind_single_step(current);
	} else {
+#ifdef CONFIG_KPROBES
+		if (kprobe_single_step_handler(regs, esr) == DBG_HOOK_HANDLED)
+			return 0;
+#endif
		if (call_step_hook(regs, esr) == DBG_HOOK_HANDLED)
			return 0;
 
@@ -279,6 +281,7 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
 
	return 0;
 }
+NOKPROBE_SYMBOL(single_step_handler);
 
 /*
  * Breakpoint handler is re-entrant as another breakpoint can
@@ -316,19 +319,28 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
 
	return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
 }
+NOKPROBE_SYMBOL(call_break_hook);
 
 static int brk_handler(unsigned long addr, unsigned int esr,
		       struct pt_regs *regs)
 {
	if (user_mode(regs)) {
		send_user_sigtrap(TRAP_BRKPT);
-	} else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
-		pr_warning("Unexpected kernel BRK exception at EL1\n");
-	}
+	}
+#ifdef CONFIG_KPROBES
+	else if ((esr & BRK64_ESR_MASK) == BRK64_ESR_KPROBES) {
+		if (kprobe_breakpoint_handler(regs, esr) != DBG_HOOK_HANDLED)
+			return -EFAULT;
+	}
+#endif
+	else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
+		pr_warn("Unexpected kernel BRK exception at EL1\n");
		return -EFAULT;
	}
 
	return 0;
 }
+NOKPROBE_SYMBOL(brk_handler);
 
 int aarch32_break_handler(struct pt_regs *regs)
 {
@@ -365,6 +377,7 @@ int aarch32_break_handler(struct pt_regs *regs)
	send_user_sigtrap(TRAP_BRKPT);
	return 0;
 }
+NOKPROBE_SYMBOL(aarch32_break_handler);
 
 static int __init debug_traps_init(void)
 {
@@ -386,6 +399,7 @@ void user_rewind_single_step(struct task_struct *task)
	if (test_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP))
		set_regs_spsr_ss(task_pt_regs(task));
 }
+NOKPROBE_SYMBOL(user_rewind_single_step);
 
 void user_fastforward_single_step(struct task_struct *task)
 {
@@ -401,6 +415,7 @@ void kernel_enable_single_step(struct pt_regs *regs)
	mdscr_write(mdscr_read() | DBG_MDSCR_SS);
	enable_debug_monitors(DBG_ACTIVE_EL1);
 }
+NOKPROBE_SYMBOL(kernel_enable_single_step);
 
 void kernel_disable_single_step(void)
 {
@@ -408,12 +423,14 @@ void kernel_disable_single_step(void)
	mdscr_write(mdscr_read() & ~DBG_MDSCR_SS);
	disable_debug_monitors(DBG_ACTIVE_EL1);
 }
+NOKPROBE_SYMBOL(kernel_disable_single_step);
 
 int kernel_active_single_step(void)
 {
	WARN_ON(!irqs_disabled());
	return mdscr_read() & DBG_MDSCR_SS;
 }
+NOKPROBE_SYMBOL(kernel_active_single_step);
 
 /* ptrace API */
 void user_enable_single_step(struct task_struct *task)
@@ -421,8 +438,10 @@ void user_enable_single_step(struct task_struct *task)
	set_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
	set_regs_spsr_ss(task_pt_regs(task));
 }
+NOKPROBE_SYMBOL(user_enable_single_step);
 
 void user_disable_single_step(struct task_struct *task)
 {
	clear_ti_thread_flag(task_thread_info(task), TIF_SINGLESTEP);
 }
+NOKPROBE_SYMBOL(user_disable_single_step);
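For orientation, kernel BRK users other than kprobes go through the break_hook path dispatched above. A minimal sketch of such a hook; the ESR value here is a made-up example, not one reserved by this patch.

#include <asm/debug-monitors.h>

static int example_brk_fn(struct pt_regs *regs, unsigned int esr)
{
	pr_info("example BRK at %lx\n", instruction_pointer(regs));
	return DBG_HOOK_HANDLED;	/* claim it; skip the pr_warn path */
}

static struct break_hook example_brk_hook = {
	.esr_val	= 0xf2000123,	/* hypothetical BRK immediate ESR */
	.esr_mask	= 0xffffffff,
	.fn		= example_brk_fn,
};

static int __init example_hook_init(void)
{
	register_break_hook(&example_brk_hook);
	return 0;
}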
@@ -62,13 +62,61 @@ struct screen_info screen_info __section(.data);
 int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
 {
	pteval_t prot_val = create_mapping_protection(md);
+	bool allow_block_mappings = (md->type != EFI_RUNTIME_SERVICES_CODE &&
+				     md->type != EFI_RUNTIME_SERVICES_DATA);
+
+	if (!PAGE_ALIGNED(md->phys_addr) ||
+	    !PAGE_ALIGNED(md->num_pages << EFI_PAGE_SHIFT)) {
+		/*
+		 * If the end address of this region is not aligned to page
+		 * size, the mapping is rounded up, and may end up sharing a
+		 * page frame with the next UEFI memory region. If we create
+		 * a block entry now, we may need to split it again when mapping
+		 * the next region, and support for that is going to be removed
+		 * from the MMU routines. So avoid block mappings altogether in
+		 * that case.
+		 */
+		allow_block_mappings = false;
+	}
 
	create_pgd_mapping(mm, md->phys_addr, md->virt_addr,
			   md->num_pages << EFI_PAGE_SHIFT,
-			   __pgprot(prot_val | PTE_NG));
+			   __pgprot(prot_val | PTE_NG), allow_block_mappings);
	return 0;
 }
 
+static int __init set_permissions(pte_t *ptep, pgtable_t token,
+				  unsigned long addr, void *data)
+{
+	efi_memory_desc_t *md = data;
+	pte_t pte = *ptep;
+
+	if (md->attribute & EFI_MEMORY_RO)
+		pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
+	if (md->attribute & EFI_MEMORY_XP)
+		pte = set_pte_bit(pte, __pgprot(PTE_PXN));
+	set_pte(ptep, pte);
+	return 0;
+}
+
+int __init efi_set_mapping_permissions(struct mm_struct *mm,
+				       efi_memory_desc_t *md)
+{
+	BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
+	       md->type != EFI_RUNTIME_SERVICES_DATA);
+
+	/*
+	 * Calling apply_to_page_range() is only safe on regions that are
+	 * guaranteed to be mapped down to pages. Since we are only called
+	 * for regions that have been mapped using efi_create_mapping() above
+	 * (and this is checked by the generic Memory Attributes table parsing
+	 * routines), there is no need to check that again here.
+	 */
+	return apply_to_page_range(mm, md->virt_addr,
+				   md->num_pages << EFI_PAGE_SHIFT,
+				   set_permissions, md);
+}
+
 static int __init arm64_dmi_init(void)
 {
	/*
@@ -258,6 +258,7 @@ tsk	.req	x28		// current thread_info
 /*
  * Exception vectors.
  */
+	.pushsection ".entry.text", "ax"
 
	.align	11
 ENTRY(vectors)
@@ -466,7 +467,7 @@ el0_sync:
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
-	b.eq	el0_undef
+	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp_pc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
@@ -547,7 +548,7 @@ el0_ia:
	enable_dbg_and_irq
	ct_user_exit
	mov	x0, x26
-	orr	x1, x25, #1 << 24		// use reserved ISS bit for instruction aborts
+	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
@@ -594,6 +595,16 @@ el0_undef:
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
+el0_sys:
+	/*
+	 * System instructions, for trapped cache maintenance instructions
+	 */
+	enable_dbg_and_irq
+	ct_user_exit
+	mov	x0, x25
+	mov	x1, sp
+	bl	do_sysinstr
+	b	ret_to_user
 el0_dbg:
	/*
	 * Debug exception handling
@@ -789,6 +800,8 @@ __ni_sys_trace:
	bl	do_ni_syscall
	b	__sys_trace_return
 
+	.popsection				// .entry.text
+
 /*
  * Special system call wrappers.
  */
@@ -24,6 +24,7 @@
 #include <linux/cpu_pm.h>
 #include <linux/errno.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/kprobes.h>
 #include <linux/perf_event.h>
 #include <linux/ptrace.h>
 #include <linux/smp.h>
@@ -127,6 +128,7 @@ static u64 read_wb_reg(int reg, int n)
 
	return val;
 }
+NOKPROBE_SYMBOL(read_wb_reg);
 
 static void write_wb_reg(int reg, int n, u64 val)
 {
@@ -140,6 +142,7 @@ static void write_wb_reg(int reg, int n, u64 val)
	}
	isb();
 }
+NOKPROBE_SYMBOL(write_wb_reg);
 
 /*
  * Convert a breakpoint privilege level to the corresponding exception
@@ -157,6 +160,7 @@ static enum dbg_active_el debug_exception_level(int privilege)
		return -EINVAL;
	}
 }
+NOKPROBE_SYMBOL(debug_exception_level);
 
 enum hw_breakpoint_ops {
	HW_BREAKPOINT_INSTALL,
@@ -575,6 +579,7 @@ static void toggle_bp_registers(int reg, enum dbg_active_el el, int enable)
		write_wb_reg(reg, i, ctrl);
	}
 }
+NOKPROBE_SYMBOL(toggle_bp_registers);
 
 /*
  * Debug exception handlers.
@@ -654,6 +659,7 @@ unlock:
 
	return 0;
 }
+NOKPROBE_SYMBOL(breakpoint_handler);
 
 static int watchpoint_handler(unsigned long addr, unsigned int esr,
			      struct pt_regs *regs)
@@ -756,6 +762,7 @@ unlock:
 
	return 0;
 }
+NOKPROBE_SYMBOL(watchpoint_handler);
 
 /*
  * Handle single-step exception.
@@ -813,6 +820,7 @@ int reinstall_suspended_bps(struct pt_regs *regs)
 
	return !handled_exception;
 }
+NOKPROBE_SYMBOL(reinstall_suspended_bps);
 
 /*
  * Context-switcher for restoring suspended breakpoints.
@@ -71,8 +71,16 @@ el1_sync:
	msr	vbar_el2, x1
	b	9f
 
+2:	cmp	x0, #HVC_SOFT_RESTART
+	b.ne	3f
+	mov	x0, x2
+	mov	x2, x4
+	mov	x4, x1
+	mov	x1, x3
+	br	x4				// no return
+
	/* Someone called kvm_call_hyp() against the hyp-stub... */
-2:	mov	x0, #ARM_EXCEPTION_HYP_GONE
+3:	mov	x0, #ARM_EXCEPTION_HYP_GONE
 
 9:	eret
 ENDPROC(el1_sync)
@@ -30,6 +30,7 @@
 #include <asm/cacheflush.h>
 #include <asm/debug-monitors.h>
 #include <asm/fixmap.h>
+#include <asm/opcodes.h>
 #include <asm/insn.h>
 
 #define AARCH64_INSN_SF_BIT	BIT(31)
@@ -162,6 +163,32 @@ static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
	       aarch64_insn_is_nop(insn);
 }
 
+bool __kprobes aarch64_insn_uses_literal(u32 insn)
+{
+	/* ldr/ldrsw (literal), prfm */
+
+	return aarch64_insn_is_ldr_lit(insn) ||
+		aarch64_insn_is_ldrsw_lit(insn) ||
+		aarch64_insn_is_adr_adrp(insn) ||
+		aarch64_insn_is_prfm_lit(insn);
+}
+
+bool __kprobes aarch64_insn_is_branch(u32 insn)
+{
+	/* b, bl, cb*, tb*, b.cond, br, blr */
+
+	return aarch64_insn_is_b(insn) ||
+		aarch64_insn_is_bl(insn) ||
+		aarch64_insn_is_cbz(insn) ||
+		aarch64_insn_is_cbnz(insn) ||
+		aarch64_insn_is_tbz(insn) ||
+		aarch64_insn_is_tbnz(insn) ||
+		aarch64_insn_is_ret(insn) ||
+		aarch64_insn_is_br(insn) ||
+		aarch64_insn_is_blr(insn) ||
+		aarch64_insn_is_bcond(insn);
+}
+
 /*
  * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
  * Section B2.6.5 "Concurrent modification and execution of instructions":
@@ -1175,6 +1202,14 @@ u32 aarch64_set_branch_offset(u32 insn, s32 offset)
	BUG();
 }
 
+/*
+ * Extract the Op/CR data from a msr/mrs instruction.
+ */
+u32 aarch64_insn_extract_system_reg(u32 insn)
+{
+	return (insn & 0x1FFFE0) >> 5;
+}
+
 bool aarch32_insn_is_wide(u32 insn)
 {
	return insn >= 0xe800;
@@ -1200,3 +1235,101 @@ u32 aarch32_insn_mcr_extract_crm(u32 insn)
 {
	return insn & CRM_MASK;
 }
+
+static bool __kprobes __check_eq(unsigned long pstate)
+{
+	return (pstate & PSR_Z_BIT) != 0;
+}
+
+static bool __kprobes __check_ne(unsigned long pstate)
+{
+	return (pstate & PSR_Z_BIT) == 0;
+}
+
+static bool __kprobes __check_cs(unsigned long pstate)
+{
+	return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_cc(unsigned long pstate)
+{
+	return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_mi(unsigned long pstate)
+{
+	return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_pl(unsigned long pstate)
+{
+	return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_vs(unsigned long pstate)
+{
+	return (pstate & PSR_V_BIT) != 0;
+}
+
+static bool __kprobes __check_vc(unsigned long pstate)
+{
+	return (pstate & PSR_V_BIT) == 0;
+}
+
+static bool __kprobes __check_hi(unsigned long pstate)
+{
+	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
+	return (pstate & PSR_C_BIT) != 0;
+}
+
+static bool __kprobes __check_ls(unsigned long pstate)
+{
+	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
+	return (pstate & PSR_C_BIT) == 0;
+}
+
+static bool __kprobes __check_ge(unsigned long pstate)
+{
+	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
+	return (pstate & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_lt(unsigned long pstate)
+{
+	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
+	return (pstate & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_gt(unsigned long pstate)
+{
+	/*PSR_N_BIT ^= PSR_V_BIT */
+	unsigned long temp = pstate ^ (pstate << 3);
+
+	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
+	return (temp & PSR_N_BIT) == 0;
+}
+
+static bool __kprobes __check_le(unsigned long pstate)
+{
+	/*PSR_N_BIT ^= PSR_V_BIT */
+	unsigned long temp = pstate ^ (pstate << 3);
+
+	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
+	return (temp & PSR_N_BIT) != 0;
+}
+
+static bool __kprobes __check_al(unsigned long pstate)
+{
+	return true;
+}
+
+/*
+ * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
+ * it behaves identically to 0b1110 ("al").
+ */
+pstate_check_t * const aarch32_opcode_cond_checks[16] = {
+	__check_eq, __check_ne, __check_cs, __check_cc,
+	__check_mi, __check_pl, __check_vs, __check_vc,
+	__check_hi, __check_ls, __check_ge, __check_lt,
+	__check_gt, __check_le, __check_al, __check_al
+};
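The table above is indexed by the four condition bits of an AArch32 opcode; a short sketch of the dispatch, equivalent in spirit to aarch32_check_condition() in armv8_deprecated.c earlier in this series (the wrapper name here is hypothetical):

static bool example_cond_passes(u32 opcode, u32 pstate)
{
	u32 cc = opcode >> 28;		/* condition field, bits [31:28] */

	if (cc == 0xf)			/* 0b1111 behaves as "al" */
		return true;
	return aarch32_opcode_cond_checks[cc](pstate);
}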
@@ -22,6 +22,7 @@
 #include <linux/irq.h>
 #include <linux/kdebug.h>
 #include <linux/kgdb.h>
+#include <linux/kprobes.h>
 #include <asm/traps.h>
 
 struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -230,6 +231,7 @@ static int kgdb_brk_fn(struct pt_regs *regs, unsigned int esr)
	kgdb_handle_exception(1, SIGTRAP, 0, regs);
	return 0;
 }
+NOKPROBE_SYMBOL(kgdb_brk_fn)
 
 static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
@@ -238,12 +240,14 @@ static int kgdb_compiled_brk_fn(struct pt_regs *regs, unsigned int esr)
 
	return 0;
 }
+NOKPROBE_SYMBOL(kgdb_compiled_brk_fn);
 
 static int kgdb_step_brk_fn(struct pt_regs *regs, unsigned int esr)
 {
	kgdb_handle_exception(1, SIGTRAP, 0, regs);
	return 0;
 }
+NOKPROBE_SYMBOL(kgdb_step_brk_fn);
 
 static struct break_hook kgdb_brkpt_hook = {
	.esr_mask	= 0xffffffff,
arch/arm64/kernel/machine_kexec.c (new file, 212 lines)
@@ -0,0 +1,212 @@
+/*
+ * kexec for arm64
+ *
+ * Copyright (C) Linaro.
+ * Copyright (C) Huawei Futurewei Technologies.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kexec.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/cpu_ops.h>
+#include <asm/mmu_context.h>
+
+#include "cpu-reset.h"
+
+/* Global variables for the arm64_relocate_new_kernel routine. */
+extern const unsigned char arm64_relocate_new_kernel[];
+extern const unsigned long arm64_relocate_new_kernel_size;
+
+static unsigned long kimage_start;
+
+/**
+ * kexec_image_info - For debugging output.
+ */
+#define kexec_image_info(_i) _kexec_image_info(__func__, __LINE__, _i)
+static void _kexec_image_info(const char *func, int line,
+	const struct kimage *kimage)
+{
+	unsigned long i;
+
+	pr_debug("%s:%d:\n", func, line);
+	pr_debug("  kexec kimage info:\n");
+	pr_debug("    type:        %d\n", kimage->type);
+	pr_debug("    start:       %lx\n", kimage->start);
+	pr_debug("    head:        %lx\n", kimage->head);
+	pr_debug("    nr_segments: %lu\n", kimage->nr_segments);
+
+	for (i = 0; i < kimage->nr_segments; i++) {
+		pr_debug("      segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+			i,
+			kimage->segment[i].mem,
+			kimage->segment[i].mem + kimage->segment[i].memsz,
+			kimage->segment[i].memsz,
+			kimage->segment[i].memsz / PAGE_SIZE);
+	}
+}
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+	/* Empty routine needed to avoid build errors. */
+}
+
+/**
+ * machine_kexec_prepare - Prepare for a kexec reboot.
+ *
+ * Called from the core kexec code when a kernel image is loaded.
+ * Forbid loading a kexec kernel if we have no way of hotplugging cpus or cpus
+ * are stuck in the kernel. This avoids a panic once we hit machine_kexec().
+ */
+int machine_kexec_prepare(struct kimage *kimage)
+{
+	kimage_start = kimage->start;
+
+	kexec_image_info(kimage);
+
+	if (kimage->type != KEXEC_TYPE_CRASH && cpus_are_stuck_in_kernel()) {
+		pr_err("Can't kexec: CPUs are stuck in the kernel.\n");
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+/**
+ * kexec_list_flush - Helper to flush the kimage list and source pages to PoC.
+ */
+static void kexec_list_flush(struct kimage *kimage)
+{
+	kimage_entry_t *entry;
+
+	for (entry = &kimage->head; ; entry++) {
+		unsigned int flag;
+		void *addr;
+
+		/* flush the list entries. */
+		__flush_dcache_area(entry, sizeof(kimage_entry_t));
+
+		flag = *entry & IND_FLAGS;
+		if (flag == IND_DONE)
+			break;
+
+		addr = phys_to_virt(*entry & PAGE_MASK);
+
+		switch (flag) {
+		case IND_INDIRECTION:
+			/* Set entry point just before the new list page. */
+			entry = (kimage_entry_t *)addr - 1;
+			break;
+		case IND_SOURCE:
+			/* flush the source pages. */
+			__flush_dcache_area(addr, PAGE_SIZE);
+			break;
+		case IND_DESTINATION:
+			break;
+		default:
+			BUG();
+		}
+	}
+}
+
+/**
+ * kexec_segment_flush - Helper to flush the kimage segments to PoC.
+ */
+static void kexec_segment_flush(const struct kimage *kimage)
+{
+	unsigned long i;
+
+	pr_debug("%s:\n", __func__);
+
+	for (i = 0; i < kimage->nr_segments; i++) {
+		pr_debug("  segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+			i,
+			kimage->segment[i].mem,
+			kimage->segment[i].mem + kimage->segment[i].memsz,
+			kimage->segment[i].memsz,
+			kimage->segment[i].memsz / PAGE_SIZE);
+
+		__flush_dcache_area(phys_to_virt(kimage->segment[i].mem),
+			kimage->segment[i].memsz);
+	}
+}
+
+/**
+ * machine_kexec - Do the kexec reboot.
+ *
+ * Called from the core kexec code for a sys_reboot with LINUX_REBOOT_CMD_KEXEC.
+ */
+void machine_kexec(struct kimage *kimage)
+{
+	phys_addr_t reboot_code_buffer_phys;
+	void *reboot_code_buffer;
+
+	/*
+	 * New cpus may have become stuck_in_kernel after we loaded the image.
+	 */
+	BUG_ON(cpus_are_stuck_in_kernel() || (num_online_cpus() > 1));
+
+	reboot_code_buffer_phys = page_to_phys(kimage->control_code_page);
+	reboot_code_buffer = phys_to_virt(reboot_code_buffer_phys);
+
+	kexec_image_info(kimage);
+
+	pr_debug("%s:%d: control_code_page:        %p\n", __func__, __LINE__,
+		kimage->control_code_page);
+	pr_debug("%s:%d: reboot_code_buffer_phys:  %pa\n", __func__, __LINE__,
+		&reboot_code_buffer_phys);
+	pr_debug("%s:%d: reboot_code_buffer:       %p\n", __func__, __LINE__,
+		reboot_code_buffer);
+	pr_debug("%s:%d: relocate_new_kernel:      %p\n", __func__, __LINE__,
+		arm64_relocate_new_kernel);
+	pr_debug("%s:%d: relocate_new_kernel_size: 0x%lx(%lu) bytes\n",
+		__func__, __LINE__, arm64_relocate_new_kernel_size,
+		arm64_relocate_new_kernel_size);
+
+	/*
+	 * Copy arm64_relocate_new_kernel to the reboot_code_buffer for use
+	 * after the kernel is shut down.
+	 */
+	memcpy(reboot_code_buffer, arm64_relocate_new_kernel,
+		arm64_relocate_new_kernel_size);
+
+	/* Flush the reboot_code_buffer in preparation for its execution. */
+	__flush_dcache_area(reboot_code_buffer, arm64_relocate_new_kernel_size);
+	flush_icache_range((uintptr_t)reboot_code_buffer,
+		arm64_relocate_new_kernel_size);
+
+	/* Flush the kimage list and its buffers. */
+	kexec_list_flush(kimage);
+
+	/* Flush the new image if already in place. */
+	if (kimage->head & IND_DONE)
+		kexec_segment_flush(kimage);
+
+	pr_info("Bye!\n");
+
+	/* Disable all DAIF exceptions. */
+	asm volatile ("msr daifset, #0xf" : : : "memory");
+
+	/*
+	 * cpu_soft_restart will shutdown the MMU, disable data caches, then
+	 * transfer control to the reboot_code_buffer which contains a copy of
+	 * the arm64_relocate_new_kernel routine.  arm64_relocate_new_kernel
+	 * uses physical addressing to relocate the new image to its final
+	 * position and transfers control to the image entry point when the
+	 * relocation is complete.
+	 */
+
+	cpu_soft_restart(1, reboot_code_buffer_phys, kimage->head,
+		kimage_start, 0);
+
+	BUG(); /* Should never get here. */
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+	/* Empty routine needed to avoid build errors. */
+}
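The copy-and-flush sequence in machine_kexec() follows a general "stage a relocation stub" pattern: copy position-independent code into a buffer, clean the data cache for that range to the point of coherency, then maintain the instruction cache before branching to it. A generic sketch of that pattern (the helper name is made up; note that flush_icache_range() takes start and end addresses):

static void *example_stage_stub(void *buf, const void *stub, size_t size)
{
	memcpy(buf, stub, size);
	__flush_dcache_area(buf, size);
	flush_icache_range((uintptr_t)buf, (uintptr_t)buf + size);
	return buf;	/* safe to branch to, via its physical alias */
}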
arch/arm64/kernel/probes/Makefile (new file, 3 lines)
@@ -0,0 +1,3 @@
+obj-$(CONFIG_KPROBES)		+= kprobes.o decode-insn.o	\
+				   kprobes_trampoline.o		\
+				   simulate-insn.o
arch/arm64/kernel/probes/decode-insn.c (new file, 174 lines)
@@ -0,0 +1,174 @@
|
||||
/*
|
||||
* arch/arm64/kernel/probes/decode-insn.c
|
||||
*
|
||||
* Copyright (C) 2013 Linaro Limited.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* General Public License for more details.
|
||||
*/
|
||||
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/kprobes.h>
|
||||
#include <linux/module.h>
|
||||
#include <asm/kprobes.h>
|
||||
#include <asm/insn.h>
|
||||
#include <asm/sections.h>
|
||||
|
||||
#include "decode-insn.h"
|
||||
#include "simulate-insn.h"
|
||||
|
||||
static bool __kprobes aarch64_insn_is_steppable(u32 insn)
|
||||
{
|
||||
/*
|
||||
* Branch instructions will write a new value into the PC which is
|
||||
* likely to be relative to the XOL address and therefore invalid.
|
||||
* Deliberate generation of an exception during stepping is also not
|
||||
* currently safe. Lastly, MSR instructions can do any number of nasty
|
||||
* things we can't handle during single-stepping.
|
||||
*/
|
||||
if (aarch64_get_insn_class(insn) == AARCH64_INSN_CLS_BR_SYS) {
|
||||
if (aarch64_insn_is_branch(insn) ||
|
||||
aarch64_insn_is_msr_imm(insn) ||
|
||||
aarch64_insn_is_msr_reg(insn) ||
|
||||
aarch64_insn_is_exception(insn) ||
|
||||
aarch64_insn_is_eret(insn))
|
||||
return false;
|
||||
|
||||
/*
|
||||
* The MRS instruction may not return a correct value when
|
||||
 * executing in the single-stepping environment. We do make one
 * exception, for reading the DAIF bits.
 */
		if (aarch64_insn_is_mrs(insn))
			return aarch64_insn_extract_system_reg(insn)
			     != AARCH64_INSN_SPCLREG_DAIF;

		/*
		 * The HINT instruction is problematic when single-stepping,
		 * except for the NOP case.
		 */
		if (aarch64_insn_is_hint(insn))
			return aarch64_insn_is_nop(insn);

		return true;
	}

	/*
	 * Instructions which load PC relative literals are not going to work
	 * when executed from an XOL slot. Instructions doing an exclusive
	 * load/store are not going to complete successfully when single-step
	 * exception handling happens in the middle of the sequence.
	 */
	if (aarch64_insn_uses_literal(insn) ||
	    aarch64_insn_is_exclusive(insn))
		return false;

	return true;
}

/* Return:
 *   INSN_REJECTED     If instruction is one not allowed to kprobe,
 *   INSN_GOOD         If instruction is supported and uses instruction slot,
 *   INSN_GOOD_NO_SLOT If instruction is supported but doesn't use its slot.
 */
static enum kprobe_insn __kprobes
arm_probe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi)
{
	/*
	 * Instructions reading or modifying the PC won't work from the XOL
	 * slot.
	 */
	if (aarch64_insn_is_steppable(insn))
		return INSN_GOOD;

	if (aarch64_insn_is_bcond(insn)) {
		asi->handler = simulate_b_cond;
	} else if (aarch64_insn_is_cbz(insn) ||
	    aarch64_insn_is_cbnz(insn)) {
		asi->handler = simulate_cbz_cbnz;
	} else if (aarch64_insn_is_tbz(insn) ||
	    aarch64_insn_is_tbnz(insn)) {
		asi->handler = simulate_tbz_tbnz;
	} else if (aarch64_insn_is_adr_adrp(insn)) {
		asi->handler = simulate_adr_adrp;
	} else if (aarch64_insn_is_b(insn) ||
	    aarch64_insn_is_bl(insn)) {
		asi->handler = simulate_b_bl;
	} else if (aarch64_insn_is_br(insn) ||
	    aarch64_insn_is_blr(insn) ||
	    aarch64_insn_is_ret(insn)) {
		asi->handler = simulate_br_blr_ret;
	} else if (aarch64_insn_is_ldr_lit(insn)) {
		asi->handler = simulate_ldr_literal;
	} else if (aarch64_insn_is_ldrsw_lit(insn)) {
		asi->handler = simulate_ldrsw_literal;
	} else {
		/*
		 * Instruction cannot be stepped out-of-line and we don't
		 * (yet) simulate it.
		 */
		return INSN_REJECTED;
	}

	return INSN_GOOD_NO_SLOT;
}

static bool __kprobes
is_probed_address_atomic(kprobe_opcode_t *scan_start, kprobe_opcode_t *scan_end)
{
	while (scan_start > scan_end) {
		/*
		 * An atomic region starts with an exclusive load and ends
		 * with an exclusive store.
		 */
		if (aarch64_insn_is_store_ex(le32_to_cpu(*scan_start)))
			return false;
		else if (aarch64_insn_is_load_ex(le32_to_cpu(*scan_start)))
			return true;
		scan_start--;
	}

	return false;
}

enum kprobe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi)
{
	enum kprobe_insn decoded;
	kprobe_opcode_t insn = le32_to_cpu(*addr);
	kprobe_opcode_t *scan_start = addr - 1;
	kprobe_opcode_t *scan_end = addr - MAX_ATOMIC_CONTEXT_SIZE;
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	struct module *mod;
#endif

	if (addr >= (kprobe_opcode_t *)_text &&
	    scan_end < (kprobe_opcode_t *)_text)
		scan_end = (kprobe_opcode_t *)_text;
#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
	else {
		preempt_disable();
		mod = __module_address((unsigned long)addr);
		if (mod && within_module_init((unsigned long)addr, mod) &&
			!within_module_init((unsigned long)scan_end, mod))
			scan_end = (kprobe_opcode_t *)mod->init_layout.base;
		else if (mod && within_module_core((unsigned long)addr, mod) &&
			!within_module_core((unsigned long)scan_end, mod))
			scan_end = (kprobe_opcode_t *)mod->core_layout.base;
		preempt_enable();
	}
#endif
	decoded = arm_probe_decode_insn(insn, asi);

	if (decoded == INSN_REJECTED ||
	    is_probed_address_atomic(scan_start, scan_end))
		return INSN_REJECTED;

	return decoded;
}
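The exclusive-monitor scan above is easiest to see against the LL/SC loops the kernel's atomics emit. A minimal sketch, modelled on the kernel's ATOMIC_OP inline-asm pattern (the function name is illustrative, not part of this patch):

static inline void atomic_add_sketch(int i, int *v)
{
	unsigned long tmp;
	int result;

	asm volatile(
	"1:	ldxr	%w0, %2\n"	/* exclusive load: opens the region */
	"	add	%w0, %w0, %w3\n"	/* probing here -> INSN_REJECTED */
	"	stxr	%w1, %w0, %2\n"	/* exclusive store: closes the region */
	"	cbnz	%w1, 1b\n"	/* retry if the monitor was lost */
	: "=&r" (result), "=&r" (tmp), "+Q" (*v)
	: "Ir" (i));
}

Scanning backwards from the add, is_probed_address_atomic() meets the ldxr before any stxr and refuses the probe: a brk or single-step exception inside the region would clear the exclusive monitor on every pass, so the stxr could never succeed and the loop would retry forever.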
arch/arm64/kernel/probes/decode-insn.h (new file, 35 lines)
@@ -0,0 +1,35 @@
/*
 * arch/arm64/kernel/probes/decode-insn.h
 *
 * Copyright (C) 2013 Linaro Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#ifndef _ARM_KERNEL_KPROBES_ARM64_H
#define _ARM_KERNEL_KPROBES_ARM64_H

/*
 * ARM strongly recommends a limit of 128 bytes between LoadExcl and
 * StoreExcl instructions in a single thread of execution. So keep the
 * max atomic context size as 32 (128 bytes / 4 bytes per instruction).
 */
#define MAX_ATOMIC_CONTEXT_SIZE	(128 / sizeof(kprobe_opcode_t))

enum kprobe_insn {
	INSN_REJECTED,
	INSN_GOOD_NO_SLOT,
	INSN_GOOD,
};

enum kprobe_insn __kprobes
arm_kprobe_decode_insn(kprobe_opcode_t *addr, struct arch_specific_insn *asi);

#endif /* _ARM_KERNEL_KPROBES_ARM64_H */
arch/arm64/kernel/probes/kprobes.c (new file, 686 lines)
@@ -0,0 +1,686 @@
/*
 * arch/arm64/kernel/probes/kprobes.c
 *
 * Kprobes support for ARM64
 *
 * Copyright (C) 2013 Linaro Limited.
 * Author: Sandeepa Prabhu <sandeepa.prabhu@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 */
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>
#include <linux/stringify.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/cacheflush.h>
#include <asm/debug-monitors.h>
#include <asm/system_misc.h>
#include <asm/insn.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
#include <asm-generic/sections.h>

#include "decode-insn.h"

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);

static inline unsigned long min_stack_size(unsigned long addr)
{
	unsigned long size;

	if (on_irq_stack(addr, raw_smp_processor_id()))
		size = IRQ_STACK_PTR(raw_smp_processor_id()) - addr;
	else
		size = (unsigned long)current_thread_info() + THREAD_START_SP - addr;

	return min(size, FIELD_SIZEOF(struct kprobe_ctlblk, jprobes_stack));
}

static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
{
	/* prepare insn slot */
	p->ainsn.insn[0] = cpu_to_le32(p->opcode);

	flush_icache_range((uintptr_t) (p->ainsn.insn),
			   (uintptr_t) (p->ainsn.insn) +
			   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));

	/*
	 * The return address needs restoring after stepping out-of-line.
	 */
	p->ainsn.restore = (unsigned long) p->addr +
	  sizeof(kprobe_opcode_t);
}

static void __kprobes arch_prepare_simulate(struct kprobe *p)
{
	/* This instruction is not executed out-of-line; no PC adjustment needed. */
	p->ainsn.restore = 0;
}

static void __kprobes arch_simulate_insn(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (p->ainsn.handler)
		p->ainsn.handler((u32)p->opcode, (long)p->addr, regs);

	/* single step simulated, now go for post processing */
	post_kprobe_handler(kcb, regs);
}

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	unsigned long probe_addr = (unsigned long)p->addr;
	extern char __start_rodata[];
	extern char __end_rodata[];

	if (probe_addr & 0x3)
		return -EINVAL;

	/* copy instruction */
	p->opcode = le32_to_cpu(*p->addr);

	if (in_exception_text(probe_addr))
		return -EINVAL;
	if (probe_addr >= (unsigned long) __start_rodata &&
	    probe_addr <= (unsigned long) __end_rodata)
		return -EINVAL;

	/* decode instruction */
	switch (arm_kprobe_decode_insn(p->addr, &p->ainsn)) {
	case INSN_REJECTED:	/* insn not supported */
		return -EINVAL;

	case INSN_GOOD_NO_SLOT:	/* insn needs simulation */
		p->ainsn.insn = NULL;
		break;

	case INSN_GOOD:	/* instruction uses slot */
		p->ainsn.insn = get_insn_slot();
		if (!p->ainsn.insn)
			return -ENOMEM;
		break;
	}

	/* prepare the instruction */
	if (p->ainsn.insn)
		arch_prepare_ss_slot(p);
	else
		arch_prepare_simulate(p);

	return 0;
}

static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
{
	void *addrs[1];
	u32 insns[1];

	addrs[0] = (void *)addr;
	insns[0] = (u32)opcode;

	return aarch64_insn_patch_text(addrs, insns, 1);
}

/* arm kprobe: install breakpoint in text */
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, BRK64_OPCODE_KPROBES);
}

/* disarm kprobe: remove breakpoint from text */
void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	patch_text(p->addr, p->opcode);
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	if (p->ainsn.insn) {
		free_insn_slot(p->ainsn.insn, 0);
		p->ainsn.insn = NULL;
	}
}

static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

/*
 * The D-flag (Debug mask) is set (masked) upon debug exception entry.
 * Kprobes needs to clear (unmask) the D-flag -ONLY- in case of a recursive
 * probe, i.e. when a probe is hit from kprobe handler context while
 * executing the pre/post handlers. In this case we return with the
 * D-flag clear so that single-stepping can be carried out.
 *
 * Leave the D-flag set in all other cases.
 */
static void __kprobes
spsr_set_debug_flag(struct pt_regs *regs, int mask)
{
	unsigned long spsr = regs->pstate;

	if (mask)
		spsr |= PSR_D_BIT;
	else
		spsr &= ~PSR_D_BIT;

	regs->pstate = spsr;
}

/*
 * Interrupts need to be disabled before single-step mode is set, and not
 * reenabled until after single-step mode ends.
 * Without disabling interrupts on the local CPU, an interrupt could fire
 * between the exception return and the start of the out-of-line single
 * step, and we would wrongly single-step into the interrupt handler.
 */
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	kcb->saved_irqflag = regs->pstate;
	regs->pstate |= PSR_I_BIT;
}

static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
						struct pt_regs *regs)
{
	if (kcb->saved_irqflag & PSR_I_BIT)
		regs->pstate |= PSR_I_BIT;
	else
		regs->pstate &= ~PSR_I_BIT;
}

static void __kprobes
set_ss_context(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	kcb->ss_ctx.ss_pending = true;
	kcb->ss_ctx.match_addr = addr + sizeof(kprobe_opcode_t);
}

static void __kprobes clear_ss_context(struct kprobe_ctlblk *kcb)
{
	kcb->ss_ctx.ss_pending = false;
	kcb->ss_ctx.match_addr = 0;
}

static void __kprobes setup_singlestep(struct kprobe *p,
				       struct pt_regs *regs,
				       struct kprobe_ctlblk *kcb, int reenter)
{
	unsigned long slot;

	if (reenter) {
		save_previous_kprobe(kcb);
		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_REENTER;
	} else {
		kcb->kprobe_status = KPROBE_HIT_SS;
	}

	if (p->ainsn.insn) {
		/* prepare for single stepping */
		slot = (unsigned long)p->ainsn.insn;

		set_ss_context(kcb, slot);	/* mark pending ss */

		if (kcb->kprobe_status == KPROBE_REENTER)
			spsr_set_debug_flag(regs, 0);
		else
			WARN_ON(regs->pstate & PSR_D_BIT);

		/* IRQs and single stepping do not mix well. */
		kprobes_save_local_irqflag(kcb, regs);
		kernel_enable_single_step(regs);
		instruction_pointer_set(regs, slot);
	} else {
		/* insn simulation */
		arch_simulate_insn(p, regs);
	}
}

static int __kprobes reenter_kprobe(struct kprobe *p,
				    struct pt_regs *regs,
				    struct kprobe_ctlblk *kcb)
{
	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SSDONE:
	case KPROBE_HIT_ACTIVE:
		kprobes_inc_nmissed_count(p);
		setup_singlestep(p, regs, kcb, 1);
		break;
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		pr_warn("Unrecoverable kprobe detected at %p.\n", p->addr);
		dump_kprobe(p);
		BUG();
		break;
	default:
		WARN_ON(1);
		return 0;
	}

	return 1;
}

static void __kprobes
post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();

	if (!cur)
		return;

	/* return addr restore if non-branching insn */
	if (cur->ainsn.restore != 0)
		instruction_pointer_set(regs, cur->ainsn.restore);

	/* restore back original saved kprobe variables and continue */
	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		return;
	}
	/* call post handler */
	kcb->kprobe_status = KPROBE_HIT_SSDONE;
	if (cur->post_handler) {
		/*
		 * The post_handler can hit a breakpoint and single step
		 * again, so we keep the D-flag set for the recursive
		 * exception.
		 */
		cur->post_handler(cur, regs, 0);
	}

	reset_current_kprobe();
}

int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single
		 * stepped caused a page fault. We reset the current
		 * kprobe, point the ip back at the probe address and
		 * allow the page fault handler to continue as a
		 * normal page fault.
		 */
		instruction_pointer_set(regs, (unsigned long) cur->addr);
		if (!instruction_pointer(regs))
			BUG();

		kernel_disable_single_step();
		if (kcb->kprobe_status == KPROBE_REENTER)
			spsr_set_debug_flag(regs, 1);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		break;
	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We increment the nmissed count for accounting;
		 * the npre/npostfault counts could also be used to
		 * account for these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because instructions in the pre/post
		 * handler caused the page fault. This could happen
		 * if the handler accesses user space via
		 * copy_from_user(), get_user() etc. Let the
		 * user-specified handler try to fix it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, fsr))
			return 1;

		/*
		 * In case the user-specified fault handler returned
		 * zero, try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;
	}
	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	return NOTIFY_DONE;
}

static void __kprobes kprobe_handler(struct pt_regs *regs)
{
	struct kprobe *p, *cur_kprobe;
	struct kprobe_ctlblk *kcb;
	unsigned long addr = instruction_pointer(regs);

	kcb = get_kprobe_ctlblk();
	cur_kprobe = kprobe_running();

	p = get_kprobe((kprobe_opcode_t *) addr);

	if (p) {
		if (cur_kprobe) {
			if (reenter_kprobe(p, regs, kcb))
				return;
		} else {
			/* Probe hit */
			set_current_kprobe(p);
			kcb->kprobe_status = KPROBE_HIT_ACTIVE;

			/*
			 * If we have no pre-handler or it returned 0, we
			 * continue with normal processing.  If we have a
			 * pre-handler and it returned non-zero, it prepped
			 * for calling the break_handler below on re-entry,
			 * so get out doing nothing more here.
			 *
			 * The pre_handler can hit a breakpoint and single
			 * step before it returns, so keep the PSTATE
			 * D-flag enabled until the pre_handler returns.
			 */
			if (!p->pre_handler || !p->pre_handler(p, regs)) {
				setup_singlestep(p, regs, kcb, 0);
				return;
			}
		}
	} else if ((le32_to_cpu(*(kprobe_opcode_t *) addr) ==
	    BRK64_OPCODE_KPROBES) && cur_kprobe) {
		/* We probably hit a jprobe.  Call its break handler. */
		if (cur_kprobe->break_handler &&
		     cur_kprobe->break_handler(cur_kprobe, regs)) {
			setup_singlestep(cur_kprobe, regs, kcb, 0);
			return;
		}
	}
	/*
	 * The breakpoint instruction was removed right
	 * after we hit it. Another cpu has removed
	 * either a probepoint or a debugger breakpoint
	 * at this address. In either case, no further
	 * handling of this interrupt is appropriate.
	 * Return back to original instruction, and continue.
	 */
}

static int __kprobes
kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
{
	if ((kcb->ss_ctx.ss_pending)
	    && (kcb->ss_ctx.match_addr == addr)) {
		clear_ss_context(kcb);	/* clear pending ss */
		return DBG_HOOK_HANDLED;
	}
	/* not ours, kprobes should ignore it */
	return DBG_HOOK_ERROR;
}

int __kprobes
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	int retval;

	/* return error if this is not our step */
	retval = kprobe_ss_hit(kcb, instruction_pointer(regs));

	if (retval == DBG_HOOK_HANDLED) {
		kprobes_restore_local_irqflag(kcb, regs);
		kernel_disable_single_step();

		if (kcb->kprobe_status == KPROBE_REENTER)
			spsr_set_debug_flag(regs, 1);

		post_kprobe_handler(kcb, regs);
	}

	return retval;
}

int __kprobes
kprobe_breakpoint_handler(struct pt_regs *regs, unsigned int esr)
{
	kprobe_handler(regs);
	return DBG_HOOK_HANDLED;
}

int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_ptr = kernel_stack_pointer(regs);

	kcb->jprobe_saved_regs = *regs;
	/*
	 * As Linus pointed out, gcc assumes that the callee
	 * owns the argument space and could overwrite it, e.g.
	 * tailcall optimization. So, to be absolutely safe
	 * we also save and restore enough stack bytes to cover
	 * the argument area.
	 */
	kasan_disable_current();
	memcpy(kcb->jprobes_stack, (void *)stack_ptr,
	       min_stack_size(stack_ptr));
	kasan_enable_current();

	instruction_pointer_set(regs, (unsigned long) jp->entry);
	preempt_disable();
	pause_graph_tracing();
	return 1;
}

void __kprobes jprobe_return(void)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	/*
	 * A jprobe handler returns by entering a break exception,
	 * encoded the same as for a kprobe, but with two differences:
	 * - a special PC to identify it from the other kprobes;
	 * - the stack pointer is restored from the saved pt_regs.
	 */
	asm volatile("				mov sp, %0	\n"
		     "jprobe_return_break:	brk %1		\n"
		     :
		     : "r" (kcb->jprobe_saved_regs.sp),
		       "I" (BRK64_ESR_KPROBES)
		     : "memory");

	unreachable();
}

int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
{
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
	long stack_addr = kcb->jprobe_saved_regs.sp;
	long orig_sp = kernel_stack_pointer(regs);
	struct jprobe *jp = container_of(p, struct jprobe, kp);
	extern const char jprobe_return_break[];

	if (instruction_pointer(regs) != (u64) jprobe_return_break)
		return 0;

	if (orig_sp != stack_addr) {
		struct pt_regs *saved_regs =
			(struct pt_regs *)kcb->jprobe_saved_regs.sp;
		pr_err("current sp %lx does not match saved sp %lx\n",
		       orig_sp, stack_addr);
		pr_err("Saved registers for jprobe %p\n", jp);
		show_regs(saved_regs);
		pr_err("Current registers\n");
		show_regs(regs);
		BUG();
	}
	unpause_graph_tracing();
	*regs = kcb->jprobe_saved_regs;
	kasan_disable_current();
	memcpy((void *)stack_addr, kcb->jprobes_stack,
	       min_stack_size(stack_addr));
	kasan_enable_current();
	preempt_enable_no_resched();
	return 1;
}
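/*
 * For reference, the setjmp/longjmp pair above is what drives a jprobe
 * client. A sketch (the target symbol "_do_fork" and its prototype are
 * hypothetical; a real handler must mirror the probed function's
 * signature and must finish with jprobe_return()):
 *
 *	static long handler(unsigned long clone_flags, unsigned long stack_start)
 *	{
 *		pr_info("clone_flags=0x%lx\n", clone_flags);
 *		jprobe_return();	// re-enters longjmp_break_handler()
 *		return 0;		// never reached
 *	}
 *
 *	static struct jprobe jp = {
 *		.entry		= handler,
 *		.kp.symbol_name	= "_do_fork",
 *	};
 *
 * register_jprobe(&jp) arms it; unregister_jprobe(&jp) removes it.
 */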
bool arch_within_kprobe_blacklist(unsigned long addr)
{
	extern char __idmap_text_start[], __idmap_text_end[];
	extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];

	if ((addr >= (unsigned long)__kprobes_text_start &&
	    addr < (unsigned long)__kprobes_text_end) ||
	    (addr >= (unsigned long)__entry_text_start &&
	    addr < (unsigned long)__entry_text_end) ||
	    (addr >= (unsigned long)__idmap_text_start &&
	    addr < (unsigned long)__idmap_text_end) ||
	    !!search_exception_tables(addr))
		return true;

	if (!is_kernel_in_hyp_mode()) {
		if ((addr >= (unsigned long)__hyp_text_start &&
		    addr < (unsigned long)__hyp_text_end) ||
		    (addr >= (unsigned long)__hyp_idmap_text_start &&
		    addr < (unsigned long)__hyp_idmap_text_end))
			return true;
	}

	return false;
}

void __kprobes __used *trampoline_probe_handler(struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address =
		(unsigned long)&kretprobe_trampoline;
	kprobe_opcode_t *correct_ret_addr = NULL;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have
	 * return probes installed on them, and/or more than one
	 * return probe was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always pushed into the head of the list
	 *     - when multiple return probes are registered for the same
	 *	 function, the (chronologically) first instance's ret_addr
	 *	 will be the real return address, and all the rest will
	 *	 point to kretprobe_trampoline.
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);

	correct_ret_addr = ri->ret_addr;
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		orig_ret_address = (unsigned long)ri->ret_addr;
		if (ri->rp && ri->rp->handler) {
			__this_cpu_write(current_kprobe, &ri->rp->kp);
			get_kprobe_ctlblk()->kprobe_status = KPROBE_HIT_ACTIVE;
			ri->ret_addr = correct_ret_addr;
			ri->rp->handler(ri, regs);
			__this_cpu_write(current_kprobe, NULL);
		}

		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address)
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
	}

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}
	return (void *)orig_ret_address;
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *)regs->regs[30];

	/* replace return addr (x30) with trampoline */
	regs->regs[30] = (long)&kretprobe_trampoline;
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	return 0;
}

int __init arch_init_kprobes(void)
{
	return 0;
}
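Everything above is the arch plumbing behind the generic kretprobes API. For context, a minimal client module might look like this (a sketch: the probed symbol is a hypothetical choice and error handling is elided):

#include <linux/module.h>
#include <linux/kprobes.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* on arm64 the return value comes back in x0 */
	pr_info("%s returned 0x%llx\n", ri->rp->kp.symbol_name, regs->regs[0]);
	return 0;
}

static struct kretprobe rp = {
	.handler	= ret_handler,
	.kp.symbol_name	= "_do_fork",	/* hypothetical target */
	.maxactive	= 20,		/* concurrent instances to track */
};

static int __init rp_init(void)
{
	return register_kretprobe(&rp);
}

static void __exit rp_exit(void)
{
	unregister_kretprobe(&rp);
}

module_init(rp_init);
module_exit(rp_exit);
MODULE_LICENSE("GPL");

At function entry, arch_prepare_kretprobe() swaps x30 for the trampoline; when the probed function returns, kretprobe_trampoline calls trampoline_probe_handler(), which runs ret_handler() and hands back the real return address.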
arch/arm64/kernel/probes/kprobes_trampoline.S (new file, 81 lines)
@@ -0,0 +1,81 @@
/*
 * trampoline entry and return code for kretprobes.
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>

	.text

	.macro	save_all_base_regs
	stp x0, x1, [sp, #S_X0]
	stp x2, x3, [sp, #S_X2]
	stp x4, x5, [sp, #S_X4]
	stp x6, x7, [sp, #S_X6]
	stp x8, x9, [sp, #S_X8]
	stp x10, x11, [sp, #S_X10]
	stp x12, x13, [sp, #S_X12]
	stp x14, x15, [sp, #S_X14]
	stp x16, x17, [sp, #S_X16]
	stp x18, x19, [sp, #S_X18]
	stp x20, x21, [sp, #S_X20]
	stp x22, x23, [sp, #S_X22]
	stp x24, x25, [sp, #S_X24]
	stp x26, x27, [sp, #S_X26]
	stp x28, x29, [sp, #S_X28]
	add x0, sp, #S_FRAME_SIZE
	stp lr, x0, [sp, #S_LR]
	/*
	 * Construct a useful saved PSTATE
	 */
	mrs x0, nzcv
	mrs x1, daif
	orr x0, x0, x1
	mrs x1, CurrentEL
	orr x0, x0, x1
	mrs x1, SPSel
	orr x0, x0, x1
	stp xzr, x0, [sp, #S_PC]
	.endm
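	/*
	 * Note: the plain ORs above work because each source register
	 * already holds its field in its architectural PSTATE position:
	 * NZCV in bits [31:28], DAIF in [9:6], CurrentEL in [3:2] and
	 * SPSel in bit [0], so combining them yields a pt_regs-style
	 * pstate value.
	 */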
	.macro	restore_all_base_regs
	ldr x0, [sp, #S_PSTATE]
	and x0, x0, #(PSR_N_BIT | PSR_Z_BIT | PSR_C_BIT | PSR_V_BIT)
	msr nzcv, x0
	ldp x0, x1, [sp, #S_X0]
	ldp x2, x3, [sp, #S_X2]
	ldp x4, x5, [sp, #S_X4]
	ldp x6, x7, [sp, #S_X6]
	ldp x8, x9, [sp, #S_X8]
	ldp x10, x11, [sp, #S_X10]
	ldp x12, x13, [sp, #S_X12]
	ldp x14, x15, [sp, #S_X14]
	ldp x16, x17, [sp, #S_X16]
	ldp x18, x19, [sp, #S_X18]
	ldp x20, x21, [sp, #S_X20]
	ldp x22, x23, [sp, #S_X22]
	ldp x24, x25, [sp, #S_X24]
	ldp x26, x27, [sp, #S_X26]
	ldp x28, x29, [sp, #S_X28]
	.endm

ENTRY(kretprobe_trampoline)
	sub sp, sp, #S_FRAME_SIZE

	save_all_base_regs

	mov x0, sp
	bl trampoline_probe_handler
	/*
	 * Replace trampoline address in lr with actual orig_ret_addr return
	 * address.
	 */
	mov lr, x0

	restore_all_base_regs

	add sp, sp, #S_FRAME_SIZE
	ret

ENDPROC(kretprobe_trampoline)
arch/arm64/kernel/probes/simulate-insn.c (new file, 217 lines)
@@ -0,0 +1,217 @@
/*
 * arch/arm64/kernel/probes/simulate-insn.c
 *
 * Copyright (C) 2013 Linaro Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/kprobes.h>

#include "simulate-insn.h"

#define sign_extend(x, signbit)		\
	((x) | (0 - ((x) & (1 << (signbit)))))

#define bbl_displacement(insn)		\
	sign_extend(((insn) & 0x3ffffff) << 2, 27)

#define bcond_displacement(insn)	\
	sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)

#define cbz_displacement(insn)	\
	sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)

#define tbz_displacement(insn)	\
	sign_extend(((insn >> 5) & 0x3fff) << 2, 15)

#define ldr_displacement(insn)	\
	sign_extend(((insn >> 5) & 0x7ffff) << 2, 20)

static inline void set_x_reg(struct pt_regs *regs, int reg, u64 val)
{
	if (reg < 31)
		regs->regs[reg] = val;
}

static inline void set_w_reg(struct pt_regs *regs, int reg, u64 val)
{
	if (reg < 31)
		regs->regs[reg] = lower_32_bits(val);
}

static inline u64 get_x_reg(struct pt_regs *regs, int reg)
{
	if (reg < 31)
		return regs->regs[reg];
	else
		return 0;
}

static inline u32 get_w_reg(struct pt_regs *regs, int reg)
{
	if (reg < 31)
		return lower_32_bits(regs->regs[reg]);
	else
		return 0;
}

static bool __kprobes check_cbz(u32 opcode, struct pt_regs *regs)
{
	int xn = opcode & 0x1f;

	return (opcode & (1 << 31)) ?
	    (get_x_reg(regs, xn) == 0) : (get_w_reg(regs, xn) == 0);
}

static bool __kprobes check_cbnz(u32 opcode, struct pt_regs *regs)
{
	int xn = opcode & 0x1f;

	return (opcode & (1 << 31)) ?
	    (get_x_reg(regs, xn) != 0) : (get_w_reg(regs, xn) != 0);
}

static bool __kprobes check_tbz(u32 opcode, struct pt_regs *regs)
{
	int xn = opcode & 0x1f;
	int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f);

	return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) == 0;
}

static bool __kprobes check_tbnz(u32 opcode, struct pt_regs *regs)
{
	int xn = opcode & 0x1f;
	int bit_pos = ((opcode & (1 << 31)) >> 26) | ((opcode >> 19) & 0x1f);

	return ((get_x_reg(regs, xn) >> bit_pos) & 0x1) != 0;
}

/*
 * instruction simulation functions
 */
void __kprobes
simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs)
{
	long imm, xn, val;

	xn = opcode & 0x1f;
	imm = ((opcode >> 3) & 0x1ffffc) | ((opcode >> 29) & 0x3);
	imm = sign_extend(imm, 20);
	if (opcode & 0x80000000)
		val = (imm<<12) + (addr & 0xfffffffffffff000);
	else
		val = imm + addr;

	set_x_reg(regs, xn, val);

	instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}

void __kprobes
simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs)
{
	int disp = bbl_displacement(opcode);

	/* Link register is x30 */
	if (opcode & (1 << 31))
		set_x_reg(regs, 30, addr + 4);

	instruction_pointer_set(regs, addr + disp);
}

void __kprobes
simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs)
{
	int disp = 4;

	if (aarch32_opcode_cond_checks[opcode & 0xf](regs->pstate & 0xffffffff))
		disp = bcond_displacement(opcode);

	instruction_pointer_set(regs, addr + disp);
}

void __kprobes
simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs)
{
	int xn = (opcode >> 5) & 0x1f;

	/* update pc first in case we're doing a "blr lr" */
	instruction_pointer_set(regs, get_x_reg(regs, xn));

	/* Link register is x30 */
	if (((opcode >> 21) & 0x3) == 1)
		set_x_reg(regs, 30, addr + 4);
}

void __kprobes
simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs)
{
	int disp = 4;

	if (opcode & (1 << 24)) {
		if (check_cbnz(opcode, regs))
			disp = cbz_displacement(opcode);
	} else {
		if (check_cbz(opcode, regs))
			disp = cbz_displacement(opcode);
	}
	instruction_pointer_set(regs, addr + disp);
}

void __kprobes
simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs)
{
	int disp = 4;

	if (opcode & (1 << 24)) {
		if (check_tbnz(opcode, regs))
			disp = tbz_displacement(opcode);
	} else {
		if (check_tbz(opcode, regs))
			disp = tbz_displacement(opcode);
	}
	instruction_pointer_set(regs, addr + disp);
}

void __kprobes
simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs)
{
	u64 *load_addr;
	int xn = opcode & 0x1f;
	int disp;

	disp = ldr_displacement(opcode);
	load_addr = (u64 *) (addr + disp);

	if (opcode & (1 << 30))	/* x0-x30 */
		set_x_reg(regs, xn, *load_addr);
	else			/* w0-w30 */
		set_w_reg(regs, xn, *load_addr);

	instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}

void __kprobes
simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs)
{
	s32 *load_addr;
	int xn = opcode & 0x1f;
	int disp;

	disp = ldr_displacement(opcode);
	load_addr = (s32 *) (addr + disp);

	set_x_reg(regs, xn, *load_addr);

	instruction_pointer_set(regs, instruction_pointer(regs) + 4);
}
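The displacement macros are easy to sanity-check in isolation; for instance, a backwards branch of one instruction must decode to -4 once the imm26 field is shifted and sign-extended from bit 27. A stand-alone user-space sketch (not part of the patch) that exercises bbl_displacement():

#include <assert.h>

#define sign_extend(x, signbit) \
	((x) | (0 - ((x) & (1 << (signbit)))))

#define bbl_displacement(insn) \
	sign_extend(((insn) & 0x3ffffff) << 2, 27)

int main(void)
{
	assert(bbl_displacement(0x14000000) == 0);	/* b .     */
	assert(bbl_displacement(0x14000001) == 4);	/* b . + 4 */
	assert(bbl_displacement(0x17ffffff) == -4);	/* b . - 4 */
	return 0;
}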
arch/arm64/kernel/probes/simulate-insn.h (new file, 28 lines)
@@ -0,0 +1,28 @@
/*
 * arch/arm64/kernel/probes/simulate-insn.h
 *
 * Copyright (C) 2013 Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#ifndef _ARM_KERNEL_KPROBES_SIMULATE_INSN_H
#define _ARM_KERNEL_KPROBES_SIMULATE_INSN_H

void simulate_adr_adrp(u32 opcode, long addr, struct pt_regs *regs);
void simulate_b_bl(u32 opcode, long addr, struct pt_regs *regs);
void simulate_b_cond(u32 opcode, long addr, struct pt_regs *regs);
void simulate_br_blr_ret(u32 opcode, long addr, struct pt_regs *regs);
void simulate_cbz_cbnz(u32 opcode, long addr, struct pt_regs *regs);
void simulate_tbz_tbnz(u32 opcode, long addr, struct pt_regs *regs);
void simulate_ldr_literal(u32 opcode, long addr, struct pt_regs *regs);
void simulate_ldrsw_literal(u32 opcode, long addr, struct pt_regs *regs);

#endif /* _ARM_KERNEL_KPROBES_SIMULATE_INSN_H */
arch/arm64/kernel/ptrace.c:
@@ -48,6 +48,107 @@
#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) {.name = #r, .offset = offsetof(struct pt_regs, r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}
#define GPR_OFFSET_NAME(r) \
	{.name = "x" #r, .offset = offsetof(struct pt_regs, regs[r])}

static const struct pt_regs_offset regoffset_table[] = {
	GPR_OFFSET_NAME(0),
	GPR_OFFSET_NAME(1),
	GPR_OFFSET_NAME(2),
	GPR_OFFSET_NAME(3),
	GPR_OFFSET_NAME(4),
	GPR_OFFSET_NAME(5),
	GPR_OFFSET_NAME(6),
	GPR_OFFSET_NAME(7),
	GPR_OFFSET_NAME(8),
	GPR_OFFSET_NAME(9),
	GPR_OFFSET_NAME(10),
	GPR_OFFSET_NAME(11),
	GPR_OFFSET_NAME(12),
	GPR_OFFSET_NAME(13),
	GPR_OFFSET_NAME(14),
	GPR_OFFSET_NAME(15),
	GPR_OFFSET_NAME(16),
	GPR_OFFSET_NAME(17),
	GPR_OFFSET_NAME(18),
	GPR_OFFSET_NAME(19),
	GPR_OFFSET_NAME(20),
	GPR_OFFSET_NAME(21),
	GPR_OFFSET_NAME(22),
	GPR_OFFSET_NAME(23),
	GPR_OFFSET_NAME(24),
	GPR_OFFSET_NAME(25),
	GPR_OFFSET_NAME(26),
	GPR_OFFSET_NAME(27),
	GPR_OFFSET_NAME(28),
	GPR_OFFSET_NAME(29),
	GPR_OFFSET_NAME(30),
	{.name = "lr", .offset = offsetof(struct pt_regs, regs[30])},
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(pstate),
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
 * If @addr is within the kernel stack, it returns true. If not, returns false.
 */
static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
		on_irq_stack(addr, raw_smp_processor_id());
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
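/*
 * With HAVE_REGS_AND_STACK_ACCESS_API in place, callers such as the
 * kprobe event tracer can fetch registers by name. A sketch (the
 * pre-handler context is hypothetical; regs_get_register() is the
 * accompanying asm/ptrace.h helper added by this series):
 */
static int __maybe_unused show_x2(struct kprobe *p, struct pt_regs *regs)
{
	int offset = regs_query_register_offset("x2");

	if (offset >= 0)
		pr_info("x2 = 0x%llx\n", regs_get_register(regs, offset));
	return 0;
}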

/*
 * TODO: does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
arch/arm64/kernel/relocate_kernel.S (new file, 130 lines)
@@ -0,0 +1,130 @@
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>

/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To ensure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * control_code_page, a special page which has been set up to be preserved
 * during the copy operation.
 */
ENTRY(arm64_relocate_new_kernel)

	/* Setup the list loop variables. */
	mov	x17, x1			/* x17 = kimage_start */
	mov	x16, x0			/* x16 = kimage_head */
	dcache_line_size x15, x0	/* x15 = dcache line size */
	mov	x14, xzr		/* x14 = entry ptr */
	mov	x13, xzr		/* x13 = copy dest */

	/* Clear the sctlr_el2 flags. */
	mrs	x0, CurrentEL
	cmp	x0, #CurrentEL_EL2
	b.ne	1f
	mrs	x0, sctlr_el2
	ldr	x1, =SCTLR_ELx_FLAGS
	bic	x0, x0, x1
	msr	sctlr_el2, x0
	isb
1:

	/* Check if the new image needs relocation. */
	tbnz	x16, IND_DONE_BIT, .Ldone

.Lloop:
	and	x12, x16, PAGE_MASK	/* x12 = addr */

	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Invalidate dest page to PoC. */
	mov	x0, x13
	add	x20, x0, #PAGE_SIZE
	sub	x1, x15, #1
	bic	x0, x0, x1
2:	dc	ivac, x0
	add	x0, x0, x15
	cmp	x0, x20
	b.lo	2b
	dsb	sy

	mov	x20, x13
	mov	x21, x12
	copy_page x20, x21, x0, x1, x2, x3, x4, x5, x6, x7

	/* dest += PAGE_SIZE */
	add	x13, x13, PAGE_SIZE
	b	.Lnext

.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination

	/* ptr = addr */
	mov	x14, x12
	b	.Lnext

.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext

	/* dest = addr */
	mov	x13, x12

.Lnext:
	/* entry = *ptr++ */
	ldr	x16, [x14], #8

	/* while (!(entry & DONE)) */
	tbz	x16, IND_DONE_BIT, .Lloop

.Ldone:
	/* wait for writes from copy_page to finish */
	dsb	nsh
	ic	iallu
	dsb	nsh
	isb

	/* Start new image. */
	mov	x0, xzr
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	br	x17

ENDPROC(arm64_relocate_new_kernel)

.ltorg

.align 3	/* To keep the 64-bit values below naturally aligned. */

.Lcopy_end:
.org	KEXEC_CONTROL_PAGE_SIZE

/*
 * arm64_relocate_new_kernel_size - Number of bytes to copy to the
 * control_code_page.
 */
.globl arm64_relocate_new_kernel_size
arm64_relocate_new_kernel_size:
	.quad	.Lcopy_end - arm64_relocate_new_kernel
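The flag-driven list walk reads more naturally in C. Roughly the same logic, as a sketch (locals and the function itself are hypothetical; the IND_* flags and copy_page() come from the kexec core):

static void relocate_sketch(unsigned long head, unsigned long kimage_start)
{
	unsigned long entry = head, *ptr = NULL;
	void *dest = NULL;

	while (!(entry & IND_DONE)) {
		void *addr = (void *)(entry & PAGE_MASK);

		if (entry & IND_SOURCE) {		/* copy one page */
			copy_page(dest, addr);
			dest += PAGE_SIZE;
		} else if (entry & IND_INDIRECTION) {	/* ptr = addr */
			ptr = (unsigned long *)addr;
		} else if (entry & IND_DESTINATION) {	/* dest = addr */
			dest = addr;
		}
		entry = *ptr++;				/* entry = *ptr++ */
	}
	((void (*)(void))kimage_start)();		/* br x17 */
}

The first entry in practice is an indirection, so ptr is set before the first dereference; the assembly adds the cache invalidation and barriers that C cannot express.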
arch/arm64/kernel/setup.c:
@@ -202,7 +202,7 @@ static void __init request_standard_resources(void)
	struct resource *res;

	kernel_code.start   = virt_to_phys(_text);
	kernel_code.end     = virt_to_phys(_etext - 1);
	kernel_code.end     = virt_to_phys(__init_begin - 1);
	kernel_data.start   = virt_to_phys(_sdata);
	kernel_data.end     = virt_to_phys(_end - 1);

arch/arm64/kernel/smp.c:
@@ -267,7 +267,6 @@ asmlinkage void secondary_start_kernel(void)
	set_cpu_online(cpu, true);
	complete(&cpu_running);

	local_dbg_enable();
	local_irq_enable();
	local_async_enable();

@@ -437,9 +436,9 @@ void __init smp_cpus_done(unsigned int max_cpus)

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	cpuinfo_store_boot_cpu();
	save_boot_cpu_run_el();
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
}

static u64 __init of_get_cpu_mpidr(struct device_node *dn)

@@ -695,6 +694,13 @@ void __init smp_prepare_cpus(unsigned int max_cpus)

	smp_store_cpu_info(smp_processor_id());

	/*
	 * If UP is mandated by "nosmp" (which implies "maxcpus=0"), don't set
	 * secondary CPUs present.
	 */
	if (max_cpus == 0)
		return;

	/*
	 * Initialise the present map (which describes the set of CPUs
	 * actually populated at the present time) and release the

arch/arm64/kernel/traps.c:
@@ -41,6 +41,7 @@
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

static const char *handler[]= {
	"Synchronous Abort",

@@ -52,15 +53,14 @@ static const char *handler[]= {
int show_unhandled_signals = 1;

/*
 * Dump out the contents of some memory nicely...
 * Dump out the contents of some kernel memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
		     unsigned long top, bool compat)
		     unsigned long top)
{
	unsigned long first;
	mm_segment_t fs;
	int i;
	unsigned int width = compat ? 4 : 8;

	/*
	 * We need to switch to kernel mode so that we can use __get_user

@@ -78,22 +78,15 @@ static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
	memset(str, ' ', sizeof(str));
	str[sizeof(str) - 1] = '\0';

	for (p = first, i = 0; i < (32 / width)
			&& p < top; i++, p += width) {
	for (p = first, i = 0; i < (32 / 8)
			&& p < top; i++, p += 8) {
		if (p >= bottom && p < top) {
			unsigned long val;

			if (width == 8) {
				if (__get_user(val, (unsigned long *)p) == 0)
					sprintf(str + i * 17, " %016lx", val);
				else
					sprintf(str + i * 17, " ????????????????");
			} else {
				if (__get_user(val, (unsigned int *)p) == 0)
					sprintf(str + i * 9, " %08lx", val);
				else
					sprintf(str + i * 9, " ????????");
			}
			if (__get_user(val, (unsigned long *)p) == 0)
				sprintf(str + i * 17, " %016lx", val);
			else
				sprintf(str + i * 17, " ????????????????");
		}
	}
	printk("%s%04lx:%s\n", lvl, first & 0xffff, str);

@@ -216,7 +209,7 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
			stack = IRQ_STACK_TO_TASK_STACK(irq_stack_ptr);

			dump_mem("", "Exception stack", stack,
				 stack + sizeof(struct pt_regs), false);
				 stack + sizeof(struct pt_regs));
		}
	}
}

@@ -254,10 +247,9 @@ static int __die(const char *str, int err, struct thread_info *thread,
	pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
		 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), thread + 1);

	if (!user_mode(regs) || in_interrupt()) {
	if (!user_mode(regs)) {
		dump_mem(KERN_EMERG, "Stack: ", regs->sp,
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk),
			 compat_user_mode(regs));
			 THREAD_SIZE + (unsigned long)task_stack_page(tsk));
		dump_backtrace(regs, tsk);
		dump_instr(KERN_EMERG, regs);
	}

@@ -373,11 +365,59 @@ exit:
	return fn ? fn(regs, instr) : 1;
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
static void force_signal_inject(int signal, int code, struct pt_regs *regs,
				unsigned long address)
{
	siginfo_t info;
	void __user *pc = (void __user *)instruction_pointer(regs);
	const char *desc;

	switch (signal) {
	case SIGILL:
		desc = "undefined instruction";
		break;
	case SIGSEGV:
		desc = "illegal memory access";
		break;
	default:
		desc = "bad mode";
		break;
	}

	if (unhandled_signal(current, signal) &&
	    show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: %s: pc=%p\n",
			current->comm, task_pid_nr(current), desc, pc);
		dump_instr(KERN_INFO, regs);
	}

	info.si_signo = signal;
	info.si_errno = 0;
	info.si_code  = code;
	info.si_addr  = pc;

	arm64_notify_die(desc, regs, &info, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(struct pt_regs *regs, unsigned long addr)
{
	int code;

	down_read(&current->mm->mmap_sem);
	if (find_vma(current->mm, addr) == NULL)
		code = SEGV_MAPERR;
	else
		code = SEGV_ACCERR;
	up_read(&current->mm->mmap_sem);

	force_signal_inject(SIGSEGV, code, regs, addr);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
	/* check for AArch32 breakpoint instructions */
	if (!aarch32_break_handler(regs))
		return;

@@ -385,18 +425,66 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
	if (call_undef_hook(regs) == 0)
		return;

	if (unhandled_signal(current, SIGILL) && show_unhandled_signals_ratelimited()) {
		pr_info("%s[%d]: undefined instruction: pc=%p\n",
			current->comm, task_pid_nr(current), pc);
		dump_instr(KERN_INFO, regs);
	force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
	}
}

void cpu_enable_cache_maint_trap(void *__unused)
{
	config_sctlr_el1(SCTLR_EL1_UCI, 0);
}

#define __user_cache_maint(insn, address, res)			\
	asm volatile (						\
		"1:	" insn ", %1\n"				\
		"	mov	%w0, #0\n"			\
		"2:\n"						\
		"	.pushsection .fixup,\"ax\"\n"		\
		"	.align	2\n"				\
		"3:	mov	%w0, %w2\n"			\
		"	b	2b\n"				\
		"	.popsection\n"				\
		_ASM_EXTABLE(1b, 3b)				\
		: "=r" (res)					\
		: "r" (address), "i" (-EFAULT) )

asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
	unsigned long address;
	int ret;

	/* if this is a write with: Op0=1, Op2=1, Op1=3, CRn=7 */
	if ((esr & 0x01fffc01) == 0x0012dc00) {
		int rt = (esr >> 5) & 0x1f;
		int crm = (esr >> 1) & 0x0f;

		address = (rt == 31) ? 0 : regs->regs[rt];

		switch (crm) {
		case 11:		/* DC CVAU, gets promoted */
			__user_cache_maint("dc civac", address, ret);
			break;
		case 10:		/* DC CVAC, gets promoted */
			__user_cache_maint("dc civac", address, ret);
			break;
		case 14:		/* DC CIVAC */
			__user_cache_maint("dc civac", address, ret);
			break;
		case 5:			/* IC IVAU */
			__user_cache_maint("ic ivau", address, ret);
			break;
		default:
			force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
			return;
		}
	} else {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
		return;
	}

	info.si_signo = SIGILL;
	info.si_errno = 0;
	info.si_code  = ILL_ILLOPC;
	info.si_addr  = pc;

	arm64_notify_die("Oops - undefined instruction", regs, &info, 0);
	if (ret)
		arm64_notify_segfault(regs, address);
	else
		regs->pc += 4;
}
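/*
 * For reference, the ISS layout for a trapped system instruction
 * (ESR_ELx.EC == 0x18) is:
 *
 *   Op0[21:20] Op2[19:17] Op1[16:14] CRn[13:10] Rt[9:5] CRm[4:1] Dir[0]
 *
 * so the mask 0x01fffc01 selects Op0/Op2/Op1/CRn plus the direction
 * bit, and 0x0012dc00 matches Op0=1, Op2=1, Op1=3, CRn=7, Dir=0
 * (a write), i.e. the user-space DC/IC cache-maintenance encodings
 * handled by the switch above.
 */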
long compat_arm_syscall(struct pt_regs *regs);

@@ -465,7 +553,7 @@ static const char *esr_class_str[] = {

const char *esr_get_class_string(u32 esr)
{
	return esr_class_str[esr >> ESR_ELx_EC_SHIFT];
	return esr_class_str[ESR_ELx_EC(esr)];
}

/*
arch/arm64/kernel/vdso.c:
@@ -214,10 +214,16 @@ void update_vsyscall(struct timekeeper *tk)
	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;

	if (!use_syscall) {
		/* tkr_mono.cycle_last == tkr_raw.cycle_last */
		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
		vdso_data->raw_time_sec		= tk->raw_time.tv_sec;
		vdso_data->raw_time_nsec	= tk->raw_time.tv_nsec;
		vdso_data->xtime_clock_sec	= tk->xtime_sec;
		vdso_data->xtime_clock_nsec	= tk->tkr_mono.xtime_nsec;
		vdso_data->cs_mult		= tk->tkr_mono.mult;
		/* tkr_raw.xtime_nsec == 0 */
		vdso_data->cs_mono_mult		= tk->tkr_mono.mult;
		vdso_data->cs_raw_mult		= tk->tkr_raw.mult;
		/* tkr_mono.shift == tkr_raw.shift */
		vdso_data->cs_shift		= tk->tkr_mono.shift;
	}
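With cs_raw_mult and raw_time_{sec,nsec} exported, the vdso fast path for CLOCK_MONOTONIC_RAW can do the usual clocksource arithmetic entirely in user space. In C the computation is roughly (a sketch; the counter-read helper is hypothetical):

static u64 monotonic_raw_ns_sketch(const struct vdso_data *vd)
{
	/* 56 usable bits of delta, as in get_clock_shifted_nsec below */
	u64 delta = (read_cntvct() - vd->cs_cycle_last) & 0x00ffffffffffffffUL;

	/* caller then adds vd->raw_time_{sec,nsec} and normalises */
	return (delta * vd->cs_raw_mult) >> vd->cs_shift;
}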
arch/arm64/kernel/vdso/Makefile:
@@ -23,7 +23,7 @@ GCOV_PROFILE := n
ccflags-y += -Wl,-shared

obj-y += vdso.o
extra-y += vdso.lds vdso-offsets.h
extra-y += vdso.lds
CPPFLAGS_vdso.lds += -P -C -U$(ARCH)

# Force dependency (incbin is bad)
@@ -42,11 +42,10 @@ $(obj)/%.so: $(obj)/%.so.dbg FORCE
gen-vdsosym := $(srctree)/$(src)/gen_vdso_offsets.sh
quiet_cmd_vdsosym = VDSOSYM $@
define cmd_vdsosym
	$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@ && \
	cp $@ include/generated/
	$(NM) $< | $(gen-vdsosym) | LC_ALL=C sort > $@
endef

$(obj)/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
include/generated/vdso-offsets.h: $(obj)/vdso.so.dbg FORCE
	$(call if_changed,vdsosym)

# Assembly rules for the .S files
arch/arm64/kernel/vdso/gettimeofday.S:
@@ -26,24 +26,109 @@
#define NSEC_PER_SEC_HI16	0x3b9a

vdso_data	.req	x6
use_syscall	.req	w7
seqcnt		.req	w8
seqcnt		.req	w7
w_tmp		.req	w8
x_tmp		.req	x8

/*
 * Conventions for macro arguments:
 * - An argument is write-only if its name starts with "res".
 * - All other arguments are read-only, unless otherwise specified.
 */

	.macro	seqcnt_acquire
9999:	ldr	seqcnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
	tbnz	seqcnt, #0, 9999b
	dmb	ishld
	ldr	use_syscall, [vdso_data, #VDSO_USE_SYSCALL]
	.endm

	.macro	seqcnt_read, cnt
	.macro	seqcnt_check fail
	dmb	ishld
	ldr	\cnt, [vdso_data, #VDSO_TB_SEQ_COUNT]
	ldr	w_tmp, [vdso_data, #VDSO_TB_SEQ_COUNT]
	cmp	w_tmp, seqcnt
	b.ne	\fail
	.endm

	.macro	seqcnt_check, cnt, fail
	cmp	\cnt, seqcnt
	b.ne	\fail
	.macro	syscall_check fail
	ldr	w_tmp, [vdso_data, #VDSO_USE_SYSCALL]
	cbnz	w_tmp, \fail
	.endm

	.macro	get_nsec_per_sec res
	mov	\res, #NSEC_PER_SEC_LO16
	movk	\res, #NSEC_PER_SEC_HI16, lsl #16
	.endm

	/*
	 * Returns the clock delta, in nanoseconds left-shifted by the clock
	 * shift.
	 */
	.macro	get_clock_shifted_nsec res, cycle_last, mult
	/* Read the virtual counter. */
	isb
	mrs	x_tmp, cntvct_el0
	/* Calculate cycle delta and convert to ns. */
	sub	\res, x_tmp, \cycle_last
	/* We can only guarantee 56 bits of precision. */
	movn	x_tmp, #0xff00, lsl #48
	and	\res, x_tmp, \res
	mul	\res, \res, \mult
	.endm

	/*
	 * Returns in res_{sec,nsec} the REALTIME timespec, based on the
	 * "wall time" (xtime) and the clock_mono delta.
	 */
	.macro	get_ts_realtime res_sec, res_nsec, \
		clock_nsec, xtime_sec, xtime_nsec, nsec_to_sec
	add	\res_nsec, \clock_nsec, \xtime_nsec
	udiv	x_tmp, \res_nsec, \nsec_to_sec
	add	\res_sec, \xtime_sec, x_tmp
	msub	\res_nsec, x_tmp, \nsec_to_sec, \res_nsec
	.endm

	/*
	 * Returns in res_{sec,nsec} the timespec based on the clock_raw delta,
	 * used for CLOCK_MONOTONIC_RAW.
	 */
	.macro	get_ts_clock_raw res_sec, res_nsec, clock_nsec, nsec_to_sec
	udiv	\res_sec, \clock_nsec, \nsec_to_sec
	msub	\res_nsec, \res_sec, \nsec_to_sec, \clock_nsec
	.endm

	/* sec and nsec are modified in place. */
	.macro	add_ts sec, nsec, ts_sec, ts_nsec, nsec_to_sec
	/* Add timespec. */
	add	\sec, \sec, \ts_sec
	add	\nsec, \nsec, \ts_nsec

	/* Normalise the new timespec. */
	cmp	\nsec, \nsec_to_sec
	b.lt	9999f
	sub	\nsec, \nsec, \nsec_to_sec
	add	\sec, \sec, #1
9999:
	cmp	\nsec, #0
	b.ge	9998f
	add	\nsec, \nsec, \nsec_to_sec
	sub	\sec, \sec, #1
9998:
	.endm

	.macro	clock_gettime_return, shift=0
	.if \shift == 1
	lsr	x11, x11, x12
	.endif
	stp	x10, x11, [x1, #TSPEC_TV_SEC]
	mov	x0, xzr
	ret
	.endm

	.macro	jump_slot jumptable, index, label
	.if (. - \jumptable) != 4 * (\index)
	.error "Jump slot index mismatch"
	.endif
	b	\label
	.endm

	.text
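/*
 * Together, seqcnt_acquire and seqcnt_check implement the reader side
 * of a seqlock against the kernel's update_vsyscall() writer. In C
 * terms (a sketch):
 *
 *	do {
 *		seq = READ_ONCE(vd->tb_seq_count);	// odd => writer active
 *		smp_rmb();
 *		... load the vdso_data fields ...
 *		smp_rmb();
 *	} while ((seq & 1) || READ_ONCE(vd->tb_seq_count) != seq);
 */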
@@ -51,18 +136,25 @@ seqcnt .req w8
/* int __kernel_gettimeofday(struct timeval *tv, struct timezone *tz); */
ENTRY(__kernel_gettimeofday)
	.cfi_startproc
	mov	x2, x30
	.cfi_register x30, x2

	/* Acquire the sequence counter and get the timespec. */
	adr	vdso_data, _vdso_data
1:	seqcnt_acquire
	cbnz	use_syscall, 4f

	/* If tv is NULL, skip to the timezone code. */
	cbz	x0, 2f
	bl	__do_get_tspec
	seqcnt_check w9, 1b

	/* Compute the time of day. */
1:	seqcnt_acquire
	syscall_check fail=4f
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	/* w11 = cs_mono_mult, w12 = cs_shift */
	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
	seqcnt_check fail=1b

	get_nsec_per_sec res=x9
	lsl	x9, x9, x12

	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
	get_ts_realtime res_sec=x10, res_nsec=x11, \
		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9

	/* Convert ns to us. */
	mov	x13, #1000
@@ -76,95 +168,126 @@ ENTRY(__kernel_gettimeofday)
	stp	w4, w5, [x1, #TZ_MINWEST]
3:
	mov	x0, xzr
	ret	x2
	ret
4:
	/* Syscall fallback. */
	mov	x8, #__NR_gettimeofday
	svc	#0
	ret	x2
	ret
	.cfi_endproc
ENDPROC(__kernel_gettimeofday)

#define JUMPSLOT_MAX CLOCK_MONOTONIC_COARSE

/* int __kernel_clock_gettime(clockid_t clock_id, struct timespec *tp); */
ENTRY(__kernel_clock_gettime)
	.cfi_startproc
	cmp	w0, #CLOCK_REALTIME
	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
	b.ne	2f

	mov	x2, x30
	.cfi_register x30, x2

	/* Get kernel timespec. */
	cmp	w0, #JUMPSLOT_MAX
	b.hi	syscall
	adr	vdso_data, _vdso_data
1:	seqcnt_acquire
	cbnz	use_syscall, 7f
	adr	x_tmp, jumptable
	add	x_tmp, x_tmp, w0, uxtw #2
	br	x_tmp

	bl	__do_get_tspec
	seqcnt_check w9, 1b
	ALIGN
jumptable:
	jump_slot jumptable, CLOCK_REALTIME, realtime
	jump_slot jumptable, CLOCK_MONOTONIC, monotonic
	b	syscall
	b	syscall
	jump_slot jumptable, CLOCK_MONOTONIC_RAW, monotonic_raw
	jump_slot jumptable, CLOCK_REALTIME_COARSE, realtime_coarse
	jump_slot jumptable, CLOCK_MONOTONIC_COARSE, monotonic_coarse

	mov	x30, x2
	.if (. - jumptable) != 4 * (JUMPSLOT_MAX + 1)
	.error "Wrong jumptable size"
	.endif

	cmp	w0, #CLOCK_MONOTONIC
	b.ne	6f
	ALIGN
realtime:
	seqcnt_acquire
	syscall_check fail=syscall
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	/* w11 = cs_mono_mult, w12 = cs_shift */
	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
	seqcnt_check fail=realtime

	/* Get wtm timespec. */
	ldp	x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
	/* All computations are done with left-shifted nsecs. */
	get_nsec_per_sec res=x9
	lsl	x9, x9, x12

	/* Check the sequence counter. */
	seqcnt_read w9
	seqcnt_check w9, 1b
	b	4f
2:
	cmp	w0, #CLOCK_REALTIME_COARSE
	ccmp	w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
	b.ne	8f
	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
	get_ts_realtime res_sec=x10, res_nsec=x11, \
		clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
	clock_gettime_return, shift=1

	/* xtime_coarse_nsec is already right-shifted */
	mov	x12, #0
	ALIGN
monotonic:
	seqcnt_acquire
	syscall_check fail=syscall
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	/* w11 = cs_mono_mult, w12 = cs_shift */
	ldp	w11, w12, [vdso_data, #VDSO_CS_MONO_MULT]
	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
	ldp	x3, x4, [vdso_data, #VDSO_WTM_CLK_SEC]
	seqcnt_check fail=monotonic

	/* Get coarse timespec. */
	adr	vdso_data, _vdso_data
3:	seqcnt_acquire
	ldp	x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
	/* All computations are done with left-shifted nsecs. */
	lsl	x4, x4, x12
	get_nsec_per_sec res=x9
	lsl	x9, x9, x12

	/* Get wtm timespec. */
	ldp	x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
	get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
|
||||
get_ts_realtime res_sec=x10, res_nsec=x11, \
|
||||
clock_nsec=x15, xtime_sec=x13, xtime_nsec=x14, nsec_to_sec=x9
|
||||
|
||||
/* Check the sequence counter. */
|
||||
seqcnt_read w9
|
||||
seqcnt_check w9, 3b
|
||||
add_ts sec=x10, nsec=x11, ts_sec=x3, ts_nsec=x4, nsec_to_sec=x9
|
||||
clock_gettime_return, shift=1
|
||||
|
||||
cmp w0, #CLOCK_MONOTONIC_COARSE
|
||||
b.ne 6f
|
||||
4:
|
||||
/* Add on wtm timespec. */
|
||||
add x10, x10, x13
|
||||
ALIGN
|
||||
monotonic_raw:
|
||||
seqcnt_acquire
|
||||
syscall_check fail=syscall
|
||||
ldr x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
|
||||
/* w11 = cs_raw_mult, w12 = cs_shift */
|
||||
ldp w12, w11, [vdso_data, #VDSO_CS_SHIFT]
|
||||
ldp x13, x14, [vdso_data, #VDSO_RAW_TIME_SEC]
|
||||
seqcnt_check fail=monotonic_raw
|
||||
|
||||
/* All computations are done with left-shifted nsecs. */
|
||||
lsl x14, x14, x12
|
||||
add x11, x11, x14
|
||||
get_nsec_per_sec res=x9
|
||||
lsl x9, x9, x12
|
||||
|
||||
/* Normalise the new timespec. */
|
||||
mov x15, #NSEC_PER_SEC_LO16
|
||||
movk x15, #NSEC_PER_SEC_HI16, lsl #16
|
||||
lsl x15, x15, x12
|
||||
cmp x11, x15
|
||||
b.lt 5f
|
||||
sub x11, x11, x15
|
||||
add x10, x10, #1
|
||||
5:
|
||||
cmp x11, #0
|
||||
b.ge 6f
|
||||
add x11, x11, x15
|
||||
sub x10, x10, #1
|
||||
get_clock_shifted_nsec res=x15, cycle_last=x10, mult=x11
|
||||
get_ts_clock_raw res_sec=x10, res_nsec=x11, \
|
||||
clock_nsec=x15, nsec_to_sec=x9
|
||||
|
||||
6: /* Store to the user timespec. */
|
||||
lsr x11, x11, x12
|
||||
stp x10, x11, [x1, #TSPEC_TV_SEC]
|
||||
mov x0, xzr
|
||||
ret
|
||||
7:
|
||||
mov x30, x2
|
||||
8: /* Syscall fallback. */
|
||||
add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
|
||||
clock_gettime_return, shift=1
|
||||
|
||||
ALIGN
|
||||
realtime_coarse:
|
||||
seqcnt_acquire
|
||||
ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
|
||||
seqcnt_check fail=realtime_coarse
|
||||
clock_gettime_return
|
||||
|
||||
ALIGN
|
||||
monotonic_coarse:
|
||||
seqcnt_acquire
|
||||
ldp x10, x11, [vdso_data, #VDSO_XTIME_CRS_SEC]
|
||||
ldp x13, x14, [vdso_data, #VDSO_WTM_CLK_SEC]
|
||||
seqcnt_check fail=monotonic_coarse
|
||||
|
||||
/* Computations are done in (non-shifted) nsecs. */
|
||||
get_nsec_per_sec res=x9
|
||||
add_ts sec=x10, nsec=x11, ts_sec=x13, ts_nsec=x14, nsec_to_sec=x9
|
||||
clock_gettime_return
|
||||
|
||||
ALIGN
|
||||
syscall: /* Syscall fallback. */
|
||||
mov x8, #__NR_clock_gettime
|
||||
svc #0
|
||||
ret
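On arm64 the syscall number travels in x8 while x0/x1 keep the original arguments, so this fallback really is a plain clock_gettime(2) call. From userspace the equivalent would be (a sketch of the contract, not kernel code):

	#include <sys/syscall.h>
	#include <time.h>
	#include <unistd.h>

	/* What the vDSO fallback amounts to: x8 = __NR_clock_gettime, svc #0,
	 * with the clock id still in x0 and the timespec pointer in x1. */
	static int clock_gettime_fallback(clockid_t id, struct timespec *ts)
	{
		return syscall(__NR_clock_gettime, id, ts);
	}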
@@ -176,6 +299,7 @@ ENTRY(__kernel_clock_getres)
	.cfi_startproc
	cmp	w0, #CLOCK_REALTIME
	ccmp	w0, #CLOCK_MONOTONIC, #0x4, ne
	ccmp	w0, #CLOCK_MONOTONIC_RAW, #0x4, ne
	b.ne	1f

	ldr	x2, 5f
@@ -203,46 +327,3 @@ ENTRY(__kernel_clock_getres)
	.quad	CLOCK_COARSE_RES
	.cfi_endproc
	ENDPROC(__kernel_clock_getres)

	/*
	 * Read the current time from the architected counter.
	 * Expects vdso_data to be initialised.
	 * Clobbers the temporary registers (x9 - x15).
	 * Returns:
	 * - w9 = vDSO sequence counter
	 * - (x10, x11) = (ts->tv_sec, shifted ts->tv_nsec)
	 * - w12 = cs_shift
	 */
	ENTRY(__do_get_tspec)
	.cfi_startproc

	/* Read from the vDSO data page. */
	ldr	x10, [vdso_data, #VDSO_CS_CYCLE_LAST]
	ldp	x13, x14, [vdso_data, #VDSO_XTIME_CLK_SEC]
	ldp	w11, w12, [vdso_data, #VDSO_CS_MULT]
	seqcnt_read w9

	/* Read the virtual counter. */
	isb
	mrs	x15, cntvct_el0

	/* Calculate cycle delta and convert to ns. */
	sub	x10, x15, x10
	/* We can only guarantee 56 bits of precision. */
	movn	x15, #0xff00, lsl #48
	and	x10, x15, x10
	mul	x10, x10, x11

	/* Use the kernel time to calculate the new timespec. */
	mov	x11, #NSEC_PER_SEC_LO16
	movk	x11, #NSEC_PER_SEC_HI16, lsl #16
	lsl	x11, x11, x12
	add	x15, x10, x14
	udiv	x14, x15, x11
	add	x10, x13, x14
	mul	x13, x14, x11
	sub	x11, x15, x13

	ret
	.cfi_endproc
	ENDPROC(__do_get_tspec)
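Taken together, __do_get_tspec is a handful of integer operations. A C model of the computation under the register contract documented above (a sketch for readability; names are descriptive, not the kernel's):

	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ull

	struct tspec { uint64_t sec; uint64_t nsec_shifted; };

	/* C model of __do_get_tspec: inputs are the vDSO data-page fields plus
	 * the current counter value (mrs cntvct_el0 after an isb in the asm). */
	static struct tspec do_get_tspec(uint64_t cycle_now, uint64_t cycle_last,
					 uint32_t mult, uint32_t shift,
					 uint64_t xtime_sec, uint64_t xtime_nsec)
	{
		/* Only 56 bits of the cycle delta are guaranteed meaningful:
		 * movn x15, #0xff00, lsl #48 builds the 0x00ffffffffffffff mask. */
		uint64_t delta = (cycle_now - cycle_last) & ((1ull << 56) - 1);
		uint64_t nsec  = delta * mult + xtime_nsec;	/* shifted ns */
		uint64_t nsps  = NSEC_PER_SEC << shift;		/* shifted ns/sec */
		struct tspec ts;

		ts.sec          = xtime_sec + nsec / nsps;
		ts.nsec_shifted = nsec % nsps;	/* caller applies >> shift */
		return ts;
	}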

@@ -118,9 +118,11 @@ SECTIONS
		__exception_text_end = .;
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		ENTRY_TEXT
		TEXT_TEXT
		SCHED_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		HYPERVISOR_TEXT
		IDMAP_TEXT
		HIBERNATE_TEXT
@@ -131,12 +133,13 @@ SECTIONS
	}

	. = ALIGN(SEGMENT_ALIGN);
	RO_DATA(PAGE_SIZE)		/* everything from this point to */
	EXCEPTION_TABLE(8)		/* _etext will be marked RO NX */
	_etext = .;			/* End of text section */

	RO_DATA(PAGE_SIZE)		/* everything from this point to */
	EXCEPTION_TABLE(8)		/* __init_begin will be marked RO NX */
	NOTES

	. = ALIGN(SEGMENT_ALIGN);
	_etext = .;			/* End of text and rodata section */
	__init_begin = .;

	INIT_TEXT_SECTION(8)

@@ -106,7 +106,7 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
	run->exit_reason = KVM_EXIT_DEBUG;
	run->debug.arch.hsr = hsr;

	switch (hsr >> ESR_ELx_EC_SHIFT) {
	switch (ESR_ELx_EC(hsr)) {
	case ESR_ELx_EC_WATCHPT_LOW:
		run->debug.arch.far = vcpu->arch.fault.far_el2;
		/* fall through */
@@ -149,7 +149,7 @@ static exit_handle_fn arm_exit_handlers[] = {
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT;
	u8 hsr_ec = ESR_ELx_EC(hsr);

	if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
	    !arm_exit_handlers[hsr_ec]) {

@@ -17,6 +17,10 @@ obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o

# KVM code is run at a different exception code with a different map, so
# compiler instrumentation that inserts callbacks or checks into the code may
# cause crashes. Just disable it.
GCOV_PROFILE	:= n
KASAN_SANITIZE	:= n
UBSAN_SANITIZE	:= n
KCOV_INSTRUMENT	:= n

@@ -198,7 +198,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = esr >> ESR_ELx_EC_SHIFT;
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

@@ -66,7 +66,7 @@
	.endm

end	.req	x5
ENTRY(__copy_from_user)
ENTRY(__arch_copy_from_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
	    CONFIG_ARM64_PAN)
	add	end, x0, x2
@@ -75,7 +75,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
	    CONFIG_ARM64_PAN)
	mov	x0, #0				// Nothing to copy
	ret
ENDPROC(__copy_from_user)
ENDPROC(__arch_copy_from_user)

	.section .fixup,"ax"
	.align	2

@@ -65,7 +65,7 @@
	.endm

end	.req	x5
ENTRY(__copy_to_user)
ENTRY(__arch_copy_to_user)
ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(0)), ARM64_ALT_PAN_NOT_UAO, \
	    CONFIG_ARM64_PAN)
	add	end, x0, x2
@@ -74,7 +74,7 @@ ALTERNATIVE("nop", __stringify(SET_PSTATE_PAN(1)), ARM64_ALT_PAN_NOT_UAO, \
	    CONFIG_ARM64_PAN)
	mov	x0, #0
	ret
ENDPROC(__copy_to_user)
ENDPROC(__arch_copy_to_user)

	.section .fixup,"ax"
	.align	2

@@ -52,7 +52,7 @@ ENTRY(__flush_cache_user_range)
	sub	x3, x2, #1
	bic	x4, x0, x3
1:
USER(9f, dc	cvau, x4 )			// clean D line to PoU
user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE
	add	x4, x4, x2
	cmp	x4, x1
	b.lo	1b

@@ -19,6 +19,7 @@

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
@@ -29,6 +30,8 @@

#include <asm/cacheflush.h>

static int swiotlb __read_mostly;

static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
@@ -341,6 +344,13 @@ static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
	return ret;
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
@@ -354,7 +364,7 @@ static struct dma_map_ops swiotlb_dma_ops = {
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

@@ -513,6 +523,9 @@ EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);
@@ -848,15 +861,16 @@ static int __iommu_attach_notifier(struct notifier_block *nb,
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_ADD_DEVICE)
	if (action != BUS_NOTIFY_BIND_DRIVER)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (do_iommu_attach(master->dev, master->ops,
				master->dma_base, master->size)) {
		if (data == master->dev && do_iommu_attach(master->dev,
				master->ops, master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
			break;
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
@@ -870,17 +884,8 @@ static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)

	if (!nb)
		return -ENOMEM;
	/*
	 * The device must be attached to a domain before the driver probe
	 * routine gets a chance to start allocating DMA buffers. However,
	 * the IOMMU driver also needs a chance to configure the iommu_group
	 * via its add_device callback first, so we need to make the attach
	 * happen between those two points. Since the IOMMU core uses a bus
	 * notifier with default priority for add_device, do the same but
	 * with a lower priority to ensure the appropriate ordering.
	 */

	nb->notifier_call = __iommu_attach_notifier;
	nb->priority = -100;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
@@ -904,10 +909,6 @@ static int __init __iommu_dma_init(void)
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif

	/* handle devices queued before this arch_initcall */
	if (!ret)
		__iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
	return ret;
}
arch_initcall(__iommu_dma_init);

@@ -27,11 +27,7 @@
#include <asm/memory.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>

struct addr_marker {
	unsigned long start_address;
	const char *name;
};
#include <asm/ptdump.h>

static const struct addr_marker address_markers[] = {
#ifdef CONFIG_KASAN
@@ -290,7 +286,8 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
	}
}

static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long start)
static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
		     unsigned long start)
{
	pgd_t *pgd = pgd_offset(mm, 0UL);
	unsigned i;
@@ -309,12 +306,13 @@ static void walk_pgd(struct pg_state *st, struct mm_struct *mm, unsigned long st

static int ptdump_show(struct seq_file *m, void *v)
{
	struct ptdump_info *info = m->private;
	struct pg_state st = {
		.seq = m,
		.marker = address_markers,
		.marker = info->markers,
	};

	walk_pgd(&st, &init_mm, VA_START);
	walk_pgd(&st, info->mm, info->base_addr);

	note_page(&st, 0, 0, 0);
	return 0;
@@ -322,7 +320,7 @@ static int ptdump_show(struct seq_file *m, void *v)

static int ptdump_open(struct inode *inode, struct file *file)
{
	return single_open(file, ptdump_show, NULL);
	return single_open(file, ptdump_show, inode->i_private);
}

static const struct file_operations ptdump_fops = {
@@ -332,7 +330,7 @@ static const struct file_operations ptdump_fops = {
	.release = single_release,
};

static int ptdump_init(void)
int ptdump_register(struct ptdump_info *info, const char *name)
{
	struct dentry *pe;
	unsigned i, j;
@@ -342,8 +340,18 @@ static int ptdump_init(void)
		for (j = 0; j < pg_level[i].num; j++)
			pg_level[i].mask |= pg_level[i].bits[j].mask;

	pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
				 &ptdump_fops);
	pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
	return pe ? 0 : -ENOMEM;
}

static struct ptdump_info kernel_ptdump_info = {
	.mm		= &init_mm,
	.markers	= address_markers,
	.base_addr	= VA_START,
};

static int ptdump_init(void)
{
	return ptdump_register(&kernel_ptdump_info, "kernel_page_tables");
}
device_initcall(ptdump_init);
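After this refactor, ptdump is a small registration API: any page-table walker with its own mm, markers and base address gets a debugfs file. A hypothetical second user might look like this (all names below are invented for illustration; the terminator convention is assumed from address_markers):

	extern struct mm_struct some_mm;	/* whichever tables you want dumped */

	static struct addr_marker some_markers[] = {
		{ 0,	"Mapping start" },
		{ -1UL,	NULL },			/* end marker; convention assumed */
	};

	static struct ptdump_info some_ptdump_info = {
		.mm		= &some_mm,
		.markers	= some_markers,
		.base_addr	= 0,
	};

	static int __init some_ptdump_init(void)
	{
		return ptdump_register(&some_ptdump_info, "some_page_tables");
	}
	device_initcall(some_ptdump_init);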

@@ -41,6 +41,28 @@

static const char *fault_name(unsigned int esr);

#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, esr))
			ret = 1;
		preempt_enable();
	}

	return ret;
}
#else
static inline int notify_page_fault(struct pt_regs *regs, unsigned int esr)
{
	return 0;
}
#endif

/*
 * Dump out the page tables associated with 'addr' in mm 'mm'.
 */
@@ -202,8 +224,6 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
#define VM_FAULT_BADMAP		0x010000
#define VM_FAULT_BADACCESS	0x020000

#define ESR_LNX_EXEC		(1 << 24)

static int __do_page_fault(struct mm_struct *mm, unsigned long addr,
			   unsigned int mm_flags, unsigned long vm_flags,
			   struct task_struct *tsk)
@@ -242,14 +262,19 @@ out:
	return fault;
}

static inline int permission_fault(unsigned int esr)
static inline bool is_permission_fault(unsigned int esr)
{
	unsigned int ec = (esr & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT;
	unsigned int ec = ESR_ELx_EC(esr);
	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;

	return (ec == ESR_ELx_EC_DABT_CUR && fsc_type == ESR_ELx_FSC_PERM);
}

static bool is_el0_instruction_abort(unsigned int esr)
{
	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW;
}
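For reference, the ESR_ELx_EC() helper these hunks migrate to presumably wraps exactly the open-coded expression being deleted. A sketch of the expected definition (EC lives in ESR bits [31:26]; inferred from the old code and the architecture, not copied from asm/esr.h):

	/* Sketch of the ESR_ELx_EC() helper the hunks switch to: */
	#define ESR_ELx_EC_SHIFT	26
	#define ESR_ELx_EC_MASK		(0x3FUL << ESR_ELx_EC_SHIFT)
	#define ESR_ELx_EC(esr)		(((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)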

static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
				   struct pt_regs *regs)
{
@@ -259,6 +284,9 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
	unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
	unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

	if (notify_page_fault(regs, esr))
		return 0;

	tsk = current;
	mm = tsk->mm;

@@ -272,14 +300,14 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
	if (user_mode(regs))
		mm_flags |= FAULT_FLAG_USER;

	if (esr & ESR_LNX_EXEC) {
	if (is_el0_instruction_abort(esr)) {
		vm_flags = VM_EXEC;
	} else if ((esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM)) {
		vm_flags = VM_WRITE;
		mm_flags |= FAULT_FLAG_WRITE;
	}

	if (permission_fault(esr) && (addr < USER_DS)) {
	if (is_permission_fault(esr) && (addr < USER_DS)) {
		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
		if (regs->orig_addr_limit == KERNEL_DS)
			die("Accessing user space memory with fs=KERNEL_DS", regs, esr);
@@ -630,6 +658,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,

	return rv;
}
NOKPROBE_SYMBOL(do_debug_exception);

#ifdef CONFIG_ARM64_PAN
void cpu_enable_pan(void *__unused)

@@ -160,12 +160,10 @@ static void __init arm64_memory_present(void)
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;
	int nid = 0;

	for_each_memblock(memory, reg) {
#ifdef CONFIG_NUMA
		nid = reg->nid;
#endif
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
@@ -403,7 +401,8 @@ static void __init free_unused_memmap(void)
 */
void __init mem_init(void)
{
	swiotlb_init(1);
	if (swiotlb_force || max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

@@ -430,9 +429,9 @@ void __init mem_init(void)
	pr_cont(" vmalloc : 0x%16lx - 0x%16lx (%6ld GB)\n",
		MLG(VMALLOC_START, VMALLOC_END));
	pr_cont(" .text : 0x%p" " - 0x%p" " (%6ld KB)\n",
		MLK_ROUNDUP(_text, __start_rodata));
		MLK_ROUNDUP(_text, _etext));
	pr_cont(" .rodata : 0x%p" " - 0x%p" " (%6ld KB)\n",
		MLK_ROUNDUP(__start_rodata, _etext));
		MLK_ROUNDUP(__start_rodata, __init_begin));
	pr_cont(" .init : 0x%p" " - 0x%p" " (%6ld KB)\n",
		MLK_ROUNDUP(__init_begin, __init_end));
	pr_cont(" .data : 0x%p" " - 0x%p" " (%6ld KB)\n",

@@ -77,7 +77,6 @@ static phys_addr_t __init early_pgtable_alloc(void)
	void *ptr;

	phys = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	BUG_ON(!phys);

	/*
	 * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE
@@ -97,24 +96,6 @@ static phys_addr_t __init early_pgtable_alloc(void)
	return phys;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available
		 * permissions will be fixed up later
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}

static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
@@ -122,15 +103,13 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
	BUG_ON(pmd_sect(*pmd));
	if (pmd_none(*pmd)) {
		phys_addr_t pte_phys;
		BUG_ON(!pgtable_alloc);
		pte_phys = pgtable_alloc();
		pte = pte_set_fixmap(pte_phys);
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, pte_phys, PMD_TYPE_TABLE);
		flush_tlb_all();
		pte_clear_fixmap();
	}
	BUG_ON(pmd_bad(*pmd));
@@ -144,41 +123,10 @@ static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
	pte_clear_fixmap();
}

static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}

#ifdef CONFIG_DEBUG_PAGEALLOC
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{

	/*
	 * If debug_page_alloc is enabled we must map the linear map
	 * using pages. However, other mappings created by
	 * create_mapping_noalloc must use sections in some cases. Allow
	 * sections to be used in those cases, where no pgtable_alloc
	 * function is provided.
	 */
	return !pgtable_alloc || !debug_pagealloc_enabled();
}
#else
static bool block_mappings_allowed(phys_addr_t (*pgtable_alloc)(void))
{
	return true;
}
#endif

static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
			   phys_addr_t (*pgtable_alloc)(void),
			   bool allow_block_mappings)
{
	pmd_t *pmd;
	unsigned long next;
@@ -186,20 +134,13 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
	BUG_ON(pud_sect(*pud));
	if (pud_none(*pud)) {
		phys_addr_t pmd_phys;
		BUG_ON(!pgtable_alloc);
		pmd_phys = pgtable_alloc();
		pmd = pmd_set_fixmap(pmd_phys);
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		__pud_populate(pud, pmd_phys, PUD_TYPE_TABLE);
		flush_tlb_all();
		pmd_clear_fixmap();
	}
	BUG_ON(pud_bad(*pud));
@@ -209,7 +150,7 @@ static void alloc_init_pmd(pud_t *pud, unsigned long addr, unsigned long end,
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
		      block_mappings_allowed(pgtable_alloc)) {
		      allow_block_mappings) {
			pmd_t old_pmd =*pmd;
			pmd_set_huge(pmd, phys, prot);
			/*
@@ -248,7 +189,8 @@ static inline bool use_1G_block(unsigned long addr, unsigned long next,

static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   phys_addr_t (*pgtable_alloc)(void))
			   phys_addr_t (*pgtable_alloc)(void),
			   bool allow_block_mappings)
{
	pud_t *pud;
	unsigned long next;
@@ -268,8 +210,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys) &&
		    block_mappings_allowed(pgtable_alloc)) {
		if (use_1G_block(addr, next, phys) && allow_block_mappings) {
			pud_t old_pud = *pud;
			pud_set_huge(pud, phys, prot);

@@ -290,7 +231,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
			}
		} else {
			alloc_init_pmd(pud, addr, next, phys, prot,
				       pgtable_alloc);
				       pgtable_alloc, allow_block_mappings);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
@@ -298,15 +239,14 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	pud_clear_fixmap();
}

/*
 * Create the page directory entries and any necessary page tables for the
 * mapping specified by 'md'.
 */
static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
		     phys_addr_t size, pgprot_t prot,
		     phys_addr_t (*pgtable_alloc)(void))
static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*pgtable_alloc)(void),
				 bool allow_block_mappings)
{
	unsigned long addr, length, end, next;
	pgd_t *pgd = pgd_offset_raw(pgdir, virt);

	/*
	 * If the virtual and physical address don't have the same offset
@@ -322,29 +262,23 @@ static void init_pgd(pgd_t *pgd, phys_addr_t phys, unsigned long virt,
	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc);
		alloc_init_pud(pgd, addr, next, phys, prot, pgtable_alloc,
			       allow_block_mappings);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}

static phys_addr_t late_pgtable_alloc(void)
static phys_addr_t pgd_pgtable_alloc(void)
{
	void *ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);
	if (!ptr || !pgtable_page_ctor(virt_to_page(ptr)))
		BUG();

	/* Ensure the zeroed page is visible to the page table walker */
	dsb(ishst);
	return __pa(ptr);
}

static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys,
				 unsigned long virt, phys_addr_t size,
				 pgprot_t prot,
				 phys_addr_t (*alloc)(void))
{
	init_pgd(pgd_offset_raw(pgdir, virt), phys, virt, size, prot, alloc);
}

/*
 * This function can only be used to modify existing table entries,
 * without allocating new levels of table. Note that this permits the
@@ -358,16 +292,17 @@ static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt,
			&phys, virt);
		return;
	}
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     NULL);
	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, true);
}

void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
			       pgprot_t prot, bool allow_block_mappings)
{
	BUG_ON(mm == &init_mm);

	__create_pgd_mapping(mm->pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
			     pgd_pgtable_alloc, allow_block_mappings);
}

static void create_mapping_late(phys_addr_t phys, unsigned long virt,
@@ -380,51 +315,54 @@ static void create_mapping_late(phys_addr_t phys, unsigned long virt,
	}

	__create_pgd_mapping(init_mm.pgd, phys, virt, size, prot,
			     late_pgtable_alloc);
			     NULL, !debug_pagealloc_enabled());
}

static void __init __map_memblock(pgd_t *pgd, phys_addr_t start, phys_addr_t end)
{
	unsigned long kernel_start = __pa(_text);
	unsigned long kernel_end = __pa(_etext);
	unsigned long kernel_end = __pa(__init_begin);

	/*
	 * Take care not to create a writable alias for the
	 * read-only text and rodata sections of the kernel image.
	 */

	/* No overlap with the kernel text */
	/* No overlap with the kernel text/rodata */
	if (end < kernel_start || start >= kernel_end) {
		__create_pgd_mapping(pgd, start, __phys_to_virt(start),
				     end - start, PAGE_KERNEL,
				     early_pgtable_alloc);
				     early_pgtable_alloc,
				     !debug_pagealloc_enabled());
		return;
	}

	/*
	 * This block overlaps the kernel text mapping.
	 * This block overlaps the kernel text/rodata mappings.
	 * Map the portion(s) which don't overlap.
	 */
	if (start < kernel_start)
		__create_pgd_mapping(pgd, start,
				     __phys_to_virt(start),
				     kernel_start - start, PAGE_KERNEL,
				     early_pgtable_alloc);
				     early_pgtable_alloc,
				     !debug_pagealloc_enabled());
	if (kernel_end < end)
		__create_pgd_mapping(pgd, kernel_end,
				     __phys_to_virt(kernel_end),
				     end - kernel_end, PAGE_KERNEL,
				     early_pgtable_alloc);
				     early_pgtable_alloc,
				     !debug_pagealloc_enabled());

	/*
	 * Map the linear alias of the [_text, _etext) interval as
	 * Map the linear alias of the [_text, __init_begin) interval as
	 * read-only/non-executable. This makes the contents of the
	 * region accessible to subsystems such as hibernate, but
	 * protects it from inadvertent modification or execution.
	 */
	__create_pgd_mapping(pgd, kernel_start, __phys_to_virt(kernel_start),
			     kernel_end - kernel_start, PAGE_KERNEL_RO,
			     early_pgtable_alloc);
			     early_pgtable_alloc, !debug_pagealloc_enabled());
}

static void __init map_mem(pgd_t *pgd)
@@ -449,14 +387,14 @@ void mark_rodata_ro(void)
{
	unsigned long section_size;

	section_size = (unsigned long)__start_rodata - (unsigned long)_text;
	section_size = (unsigned long)_etext - (unsigned long)_text;
	create_mapping_late(__pa(_text), (unsigned long)_text,
			    section_size, PAGE_KERNEL_ROX);
	/*
	 * mark .rodata as read only. Use _etext rather than __end_rodata to
	 * cover NOTES and EXCEPTION_TABLE.
	 * mark .rodata as read only. Use __init_begin rather than __end_rodata
	 * to cover NOTES and EXCEPTION_TABLE.
	 */
	section_size = (unsigned long)_etext - (unsigned long)__start_rodata;
	section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata;
	create_mapping_late(__pa(__start_rodata), (unsigned long)__start_rodata,
			    section_size, PAGE_KERNEL_RO);
}
@@ -481,7 +419,7 @@ static void __init map_kernel_segment(pgd_t *pgd, void *va_start, void *va_end,
	BUG_ON(!PAGE_ALIGNED(size));

	__create_pgd_mapping(pgd, pa_start, (unsigned long)va_start, size, prot,
			     early_pgtable_alloc);
			     early_pgtable_alloc, !debug_pagealloc_enabled());

	vma->addr	= va_start;
	vma->phys_addr	= pa_start;
@@ -499,8 +437,8 @@ static void __init map_kernel(pgd_t *pgd)
{
	static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_init, vmlinux_data;

	map_kernel_segment(pgd, _text, __start_rodata, PAGE_KERNEL_EXEC, &vmlinux_text);
	map_kernel_segment(pgd, __start_rodata, _etext, PAGE_KERNEL, &vmlinux_rodata);
	map_kernel_segment(pgd, _text, _etext, PAGE_KERNEL_EXEC, &vmlinux_text);
	map_kernel_segment(pgd, __start_rodata, __init_begin, PAGE_KERNEL, &vmlinux_rodata);
	map_kernel_segment(pgd, __init_begin, __init_end, PAGE_KERNEL_EXEC,
			   &vmlinux_init);
	map_kernel_segment(pgd, _data, _end, PAGE_KERNEL, &vmlinux_data);

@@ -180,6 +180,8 @@ ENTRY(__cpu_setup)
	msr	cpacr_el1, x0			// Enable FP/ASIMD
	mov	x0, #1 << 12			// Reset mdscr_el1 and disable
	msr	mdscr_el1, x0			// access to the DCC from EL0
	isb					// Unmask debug exceptions now,
	enable_dbg				// since this is per-cpu
	reset_pmuserenr_el0 x0			// Disable PMU access from EL0
	/*
	 * Memory region attributes for LPAE:

@@ -603,7 +603,8 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)

	irq = platform_get_irq(pmu_device, 0);
	if (irq >= 0 && irq_is_percpu(irq)) {
		on_each_cpu(cpu_pmu_disable_percpu_irq, &irq, 1);
		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_disable_percpu_irq, &irq, 1);
		free_percpu_irq(irq, &hw_events->percpu_pmu);
	} else {
		for (i = 0; i < irqs; ++i) {
@@ -645,7 +646,9 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
				irq);
			return err;
		}
		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);

		on_each_cpu_mask(&cpu_pmu->supported_cpus,
				 cpu_pmu_enable_percpu_irq, &irq, 1);
	} else {
		for (i = 0; i < irqs; ++i) {
			int cpu = i;
@@ -961,9 +964,23 @@ static int of_pmu_irq_cfg(struct arm_pmu *pmu)
		i++;
	} while (1);

	/* If we didn't manage to parse anything, claim to support all CPUs */
	if (cpumask_weight(&pmu->supported_cpus) == 0)
		cpumask_setall(&pmu->supported_cpus);
	/* If we didn't manage to parse anything, try the interrupt affinity */
	if (cpumask_weight(&pmu->supported_cpus) == 0) {
		if (!using_spi) {
			/* If using PPIs, check the affinity of the partition */
			int ret, irq;

			irq = platform_get_irq(pdev, 0);
			ret = irq_get_percpu_devid_partition(irq, &pmu->supported_cpus);
			if (ret) {
				kfree(irqs);
				return ret;
			}
		} else {
			/* Otherwise default to all CPUs */
			cpumask_setall(&pmu->supported_cpus);
		}
	}

	/* If we matched up the IRQ affinities, use them to route the SPIs */
	if (using_spi && i == pdev->num_resources)

@@ -39,6 +39,7 @@
#define KEXEC_ARCH_SH      (42 << 16)
#define KEXEC_ARCH_MIPS_LE (10 << 16)
#define KEXEC_ARCH_MIPS    ( 8 << 16)
#define KEXEC_ARCH_AARCH64 (183 << 16)

/* The artificial cap on the number of segments passed to kexec_load. */
#define KEXEC_SEGMENT_MAX 16
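The KEXEC_ARCH_* codes are ELF machine numbers shifted into the high 16 bits, which is why AArch64 lands on 183 (EM_AARCH64). A quick compile-time check of that correspondence (a sketch relying on <elf.h> defining EM_AARCH64):

	#include <elf.h>

	/* kexec arch codes are e_machine << 16; EM_AARCH64 is 183. */
	_Static_assert((EM_AARCH64 << 16) == (183 << 16),
		       "KEXEC_ARCH_AARCH64 matches the ELF machine number");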

@@ -46,6 +46,11 @@ static int handler_pre(struct kprobe *p, struct pt_regs *regs)
		" ex1 = 0x%lx\n",
		p->symbol_name, p->addr, regs->pc, regs->ex1);
#endif
#ifdef CONFIG_ARM64
	pr_info("<%s> pre_handler: p->addr = 0x%p, pc = 0x%lx,"
		" pstate = 0x%lx\n",
		p->symbol_name, p->addr, (long)regs->pc, (long)regs->pstate);
#endif

	/* A dump_stack() here will give a stack backtrace */
	return 0;
@@ -71,6 +76,10 @@ static void handler_post(struct kprobe *p, struct pt_regs *regs,
	printk(KERN_INFO "<%s> post_handler: p->addr = 0x%p, ex1 = 0x%lx\n",
		p->symbol_name, p->addr, regs->ex1);
#endif
#ifdef CONFIG_ARM64
	pr_info("<%s> post_handler: p->addr = 0x%p, pstate = 0x%lx\n",
		p->symbol_name, p->addr, (long)regs->pstate);
#endif
}

/*