x86/platform/intel/quark: Drop IMR lock bit support
Isolated Memory Regions support a lock bit. The lock bit in an IMR prevents
modification of the IMR until the core goes through a warm or cold reset.
The lock bit feature is not useful in the context of the kernel API and is
not really necessary, since modification of IMRs is possible only from
ring zero anyway.

This patch drops support for IMR lock bits; it simplifies the kernel API
and removes an unnecessary and needlessly complex feature.

Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Bryan O'Donoghue <pure.logic@nexus-software.ie>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: andriy.shevchenko@linux.intel.com
Cc: boon.leong.ong@intel.com
Cc: paul.gortmaker@windriver.com
Link: http://lkml.kernel.org/r/1456190999-12685-3-git-send-email-pure.logic@nexus-software.ie
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent fb86780bf7
commit c637fa5294
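For callers, the change is purely the removal of the trailing bool argument;
all in-tree callers passed false, so behaviour is unchanged. A minimal
before/after sketch from a caller's point of view (the base and size values
are illustrative, not taken from the patch):

	int ret;

	/* Before this patch: the final argument requested the IMR lock bit. */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);

	/* After this patch: the lock parameter is gone. */
	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
	if (ret < 0)
		pr_err("failed to add IMR: %d\n", ret);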
--- a/arch/x86/include/asm/imr.h
+++ b/arch/x86/include/asm/imr.h
@@ -53,7 +53,7 @@
 #define IMR_MASK	(IMR_ALIGN - 1)
 
 int imr_add_range(phys_addr_t base, size_t size,
-		  unsigned int rmask, unsigned int wmask, bool lock);
+		  unsigned int rmask, unsigned int wmask);
 
 int imr_remove_range(phys_addr_t base, size_t size);
 
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -134,11 +134,9 @@ static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
  * @idev:	pointer to imr_device structure.
  * @imr_id:	IMR entry to write.
  * @imr:	IMR structure representing address and access masks.
- * @lock:	indicates if the IMR lock bit should be applied.
  * @return:	0 on success or error code passed from mbi_iosf on failure.
  */
-static int imr_write(struct imr_device *idev, u32 imr_id,
-		     struct imr_regs *imr, bool lock)
+static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
 {
 	unsigned long flags;
 	u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
@@ -162,15 +160,6 @@ static int imr_write(struct imr_device *idev, u32 imr_id,
 	if (ret)
 		goto failed;
 
-	/* Lock bit must be set separately to addr_lo address bits. */
-	if (lock) {
-		imr->addr_lo |= IMR_LOCK;
-		ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE,
-				     reg - IMR_NUM_REGS, imr->addr_lo);
-		if (ret)
-			goto failed;
-	}
-
 	local_irq_restore(flags);
 	return 0;
 failed:
@@ -322,11 +311,10 @@ static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
  * @size:	physical size of region in bytes must be aligned to 1KiB.
  * @read_mask:	read access mask.
  * @write_mask:	write access mask.
- * @lock:	indicates whether or not to permanently lock this region.
  * @return:	zero on success or negative value indicating error.
  */
 int imr_add_range(phys_addr_t base, size_t size,
-		  unsigned int rmask, unsigned int wmask, bool lock)
+		  unsigned int rmask, unsigned int wmask)
 {
 	phys_addr_t end;
 	unsigned int i;
@@ -399,7 +387,7 @@ int imr_add_range(phys_addr_t base, size_t size,
 	imr.rmask = rmask;
 	imr.wmask = wmask;
 
-	ret = imr_write(idev, reg, &imr, lock);
+	ret = imr_write(idev, reg, &imr);
 	if (ret < 0) {
 		/*
 		 * In the highly unlikely event iosf_mbi_write failed
@@ -410,7 +398,7 @@ int imr_add_range(phys_addr_t base, size_t size,
 		imr.addr_hi = 0;
 		imr.rmask = IMR_READ_ACCESS_ALL;
 		imr.wmask = IMR_WRITE_ACCESS_ALL;
-		imr_write(idev, reg, &imr, false);
+		imr_write(idev, reg, &imr);
 	}
 failed:
 	mutex_unlock(&idev->lock);
@@ -506,7 +494,7 @@ static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
 	imr.rmask = IMR_READ_ACCESS_ALL;
 	imr.wmask = IMR_WRITE_ACCESS_ALL;
 
-	ret = imr_write(idev, reg, &imr, false);
+	ret = imr_write(idev, reg, &imr);
 
 failed:
 	mutex_unlock(&idev->lock);
@@ -587,7 +575,7 @@ static void __init imr_fixup_memmap(struct imr_device *idev)
 	 * We don't round up @size since it is already PAGE_SIZE aligned.
 	 * See vmlinux.lds.S for details.
 	 */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	if (ret < 0) {
 		pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
 			size / 1024, start, end);
--- a/arch/x86/platform/intel-quark/imr_selftest.c
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -60,30 +60,30 @@ static void __init imr_self_test(void)
 	int ret;
 
 	/* Test zero zero. */
-	ret = imr_add_range(0, 0, 0, 0, false);
+	ret = imr_add_range(0, 0, 0, 0);
 	imr_self_test_result(ret < 0, "zero sized IMR\n");
 
 	/* Test exact overlap. */
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test overlap with base inside of existing. */
 	base += size - IMR_ALIGN;
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test overlap with end inside of existing. */
 	base -= size + IMR_ALIGN * 2;
-	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
 
 	/* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
 	ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
-			    IMR_WRITE_ACCESS_ALL, false);
+			    IMR_WRITE_ACCESS_ALL);
 	imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
 
 	/* Test that a 1 KiB IMR @ zero with CPU only will work. */
-	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU, false);
+	ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU);
 	imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
 	if (ret >= 0) {
 		ret = imr_remove_range(0, IMR_ALIGN);
@@ -92,8 +92,7 @@ static void __init imr_self_test(void)
 
 	/* Test 2 KiB works. */
 	size = IMR_ALIGN * 2;
-	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL,
-			    IMR_WRITE_ACCESS_ALL, false);
+	ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, IMR_WRITE_ACCESS_ALL);
 	imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
 	if (ret >= 0) {
 		ret = imr_remove_range(0, size);
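With the lock bit gone, the public Quark IMR API is just the two functions
declared in imr.h. A minimal usage sketch under the new signatures (the
physical base address here is illustrative, not taken from the patch):

	#include <asm/imr.h>

	static int __init imr_example_init(void)
	{
		int ret;

		/* Carve out one 1 KiB (IMR_ALIGN) region for CPU-only access. */
		ret = imr_add_range(0x1000000, IMR_ALIGN, IMR_CPU, IMR_CPU);
		if (ret < 0)
			return ret;

		/* Tear the region down again (same base and size). */
		return imr_remove_range(0x1000000, IMR_ALIGN);
	}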