Merge tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel into drm-next

Daniel writes:
"Probably the last feature pull for 3.9, there's some fixes outstanding
though that I'd like to sneak in. And maybe 3.8 takes a bit longer ...
Anyway, highlights of this pull:
- Kill the horrible IS_DISPLAYREG hack to handle the mmio offset movements
  on vlv, big thanks to Ville.
- Dynamic power well support for Haswell, shaves away a bit when only
  using the eDP port on pipe A (Paulo). Plus unclaimed register fixes
  uncovered by this.
- Clarifications of the gpu hang/reset state transitions, hopefully fixing
  a few spurious -EIO deaths in userspace.
- Haswell ELD fixes.
- Some more (pp)gtt cleanups from Ben.
- A few smaller things all over.

Plus all the stuff from the previous rather small pull request:
- Broadcast RGB improvements and reduced color range fixes from Ville.
- Ben is on a "kill legacy gtt code for good" spree, first pile of patches
  included.
- No-relocs and bo lut improvements for faster execbuf from Chris.
- Some refactorings from Imre."

* tag 'drm-intel-next-2013-02-01' of git://people.freedesktop.org/~danvet/drm-intel: (101 commits)
  GPU/i915: Fix acpi_bus_get_device() check in drivers/gpu/drm/i915/intel_opregion.c
  drm/i915: Set the SR01 "screen off" bit in i915_redisable_vga() too
  drm/i915: Kill IS_DISPLAYREG()
  drm/i915: Introduce i915_vgacntrl_reg()
  drm/i915: gen6_gmch_remove can be static
  drm/i915: dynamic Haswell display power well support
  drm/i915: check the power down well on assert_pipe()
  drm/i915: don't send DP "idle" pattern before "normal" on HSW PORT_A
  drm/i915: don't run hsw power well code on !hsw
  drm/i915: kill cargo-culted locking from power well code
  drm/i915: Only run idle processing from i915_gem_retire_requests_worker
  drm/i915: Fix CAGF for HSW
  drm/i915: Reclaim GTT space for failed PPGTT
  drm/i915: remove intel_gtt structure
  drm/i915: Add probe and remove to the gtt ops
  drm/i915: extract hw ppgtt setup/cleanup code
  drm/i915: pte_encode is gen6+
  drm/i915: vfuncs for ppgtt
  drm/i915: vfuncs for gtt_clear_range/insert_entries
  drm/i915: Error state should print /sys/kernel/debug
  ...
Dave Airlie 2013-02-08 11:08:10 +10:00
commit cd17ef4114
34 changed files with 2290 additions and 1767 deletions

drivers/char/agp/intel-gtt.c

@ -60,7 +60,6 @@ struct intel_gtt_driver {
};
static struct _intel_private {
struct intel_gtt base;
const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
struct pci_dev *bridge_dev;
@ -75,7 +74,18 @@ static struct _intel_private {
struct resource ifp_resource;
int resource_valid;
struct page *scratch_page;
phys_addr_t scratch_page_dma;
int refcount;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
phys_addr_t gma_bus_addr;
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
* this is not the full gtt. */
unsigned int gtt_mappable_entries;
} intel_private;
#define INTEL_GTT_GEN intel_private.driver->gen
@ -291,15 +301,15 @@ static int intel_gtt_setup_scratch_page(void)
get_page(page);
set_pages_uc(page, 1);
if (intel_private.base.needs_dmar) {
if (intel_private.needs_dmar) {
dma_addr = pci_map_page(intel_private.pcidev, page, 0,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
return -EINVAL;
intel_private.base.scratch_page_dma = dma_addr;
intel_private.scratch_page_dma = dma_addr;
} else
intel_private.base.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page_dma = page_to_phys(page);
intel_private.scratch_page = page;
@ -506,7 +516,7 @@ static unsigned int intel_gtt_total_entries(void)
/* On previous hardware, the GTT size was just what was
* required to map the aperture.
*/
return intel_private.base.gtt_mappable_entries;
return intel_private.gtt_mappable_entries;
}
}
@ -546,7 +556,7 @@ static unsigned int intel_gtt_mappable_entries(void)
static void intel_gtt_teardown_scratch_page(void)
{
set_pages_wb(intel_private.scratch_page, 1);
pci_unmap_page(intel_private.pcidev, intel_private.base.scratch_page_dma,
pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(intel_private.scratch_page);
__free_page(intel_private.scratch_page);
@ -572,8 +582,8 @@ static int intel_gtt_init(void)
if (ret != 0)
return ret;
intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.base.gtt_total_entries = intel_gtt_total_entries();
intel_private.gtt_mappable_entries = intel_gtt_mappable_entries();
intel_private.gtt_total_entries = intel_gtt_total_entries();
/* save the PGETBL reg for resume */
intel_private.PGETBL_save =
@ -585,10 +595,10 @@ static int intel_gtt_init(void)
dev_info(&intel_private.bridge_dev->dev,
"detected gtt size: %dK total, %dK mappable\n",
intel_private.base.gtt_total_entries * 4,
intel_private.base.gtt_mappable_entries * 4);
intel_private.gtt_total_entries * 4,
intel_private.gtt_mappable_entries * 4);
gtt_map_size = intel_private.base.gtt_total_entries * 4;
gtt_map_size = intel_private.gtt_total_entries * 4;
intel_private.gtt = NULL;
if (INTEL_GTT_GEN < 6 && INTEL_GTT_GEN > 2)
@ -605,9 +615,9 @@ static int intel_gtt_init(void)
global_cache_flush(); /* FIXME: ? */
intel_private.base.stolen_size = intel_gtt_stolen_size();
intel_private.stolen_size = intel_gtt_stolen_size();
intel_private.base.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
intel_private.needs_dmar = USE_PCI_DMA_API && INTEL_GTT_GEN > 2;
ret = intel_gtt_setup_scratch_page();
if (ret != 0) {
@ -622,7 +632,7 @@ static int intel_gtt_init(void)
pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
&gma_addr);
intel_private.base.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
return 0;
}
@ -633,8 +643,7 @@ static int intel_fake_agp_fetch_size(void)
unsigned int aper_size;
int i;
aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
/ MB(1);
aper_size = (intel_private.gtt_mappable_entries << PAGE_SHIFT) / MB(1);
for (i = 0; i < num_sizes; i++) {
if (aper_size == intel_fake_agp_sizes[i].size) {
@ -778,7 +787,7 @@ static int intel_fake_agp_configure(void)
return -EIO;
intel_private.clear_fake_agp = true;
agp_bridge->gart_bus_addr = intel_private.base.gma_bus_addr;
agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
return 0;
}
@ -840,12 +849,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
{
int ret = -EINVAL;
if (intel_private.base.do_idle_maps)
return -ENODEV;
if (intel_private.clear_fake_agp) {
int start = intel_private.base.stolen_size / PAGE_SIZE;
int end = intel_private.base.gtt_mappable_entries;
int start = intel_private.stolen_size / PAGE_SIZE;
int end = intel_private.gtt_mappable_entries;
intel_gtt_clear_range(start, end - start);
intel_private.clear_fake_agp = false;
}
@ -856,7 +862,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (mem->page_count == 0)
goto out;
if (pg_start + mem->page_count > intel_private.base.gtt_total_entries)
if (pg_start + mem->page_count > intel_private.gtt_total_entries)
goto out_err;
if (type != mem->type)
@ -868,7 +874,7 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
if (!mem->is_flushed)
global_cache_flush();
if (intel_private.base.needs_dmar) {
if (intel_private.needs_dmar) {
struct sg_table st;
ret = intel_gtt_map_memory(mem->pages, mem->page_count, &st);
@ -894,7 +900,7 @@ void intel_gtt_clear_range(unsigned int first_entry, unsigned int num_entries)
unsigned int i;
for (i = first_entry; i < (first_entry + num_entries); i++) {
intel_private.driver->write_entry(intel_private.base.scratch_page_dma,
intel_private.driver->write_entry(intel_private.scratch_page_dma,
i, 0);
}
readl(intel_private.gtt+i-1);
@ -907,12 +913,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
if (mem->page_count == 0)
return 0;
if (intel_private.base.do_idle_maps)
return -ENODEV;
intel_gtt_clear_range(pg_start, mem->page_count);
if (intel_private.base.needs_dmar) {
if (intel_private.needs_dmar) {
intel_gtt_unmap_memory(mem->sg_list, mem->num_sg);
mem->sg_list = NULL;
mem->num_sg = 0;
@ -1069,24 +1072,6 @@ static void i965_write_entry(dma_addr_t addr,
writel(addr | pte_flags, intel_private.gtt + entry);
}
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static inline int needs_idle_maps(void)
{
#ifdef CONFIG_INTEL_IOMMU
const unsigned short gpu_devid = intel_private.pcidev->device;
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
intel_iommu_gfx_mapped)
return 1;
#endif
return 0;
}
static int i9xx_setup(void)
{
@ -1115,9 +1100,6 @@ static int i9xx_setup(void)
break;
}
if (needs_idle_maps())
intel_private.base.do_idle_maps = 1;
intel_i9xx_setup_flush();
return 0;
@ -1389,9 +1371,10 @@ int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
}
EXPORT_SYMBOL(intel_gmch_probe);
struct intel_gtt *intel_gtt_get(void)
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size)
{
return &intel_private.base;
*gtt_total = intel_private.gtt_total_entries << PAGE_SHIFT;
*stolen_size = intel_private.stolen_size;
}
EXPORT_SYMBOL(intel_gtt_get);
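
With the struct no longer exported, intel-gtt now reports just the two sizes the i915 side needs. A minimal caller sketch, assuming the values land in the new i915_gtt bookkeeping shown further down in this commit (the exact i915 call site may differ):

    size_t gtt_total, stolen;

    /* ask the intel-gtt bridge driver for the global GTT size and the
     * amount of BIOS-stolen memory */
    intel_gtt_get(&gtt_total, &stolen);

    dev_priv->gtt.total = gtt_total;
    dev_priv->gtt.stolen_size = stolen;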

drivers/gpu/drm/drm_edid.c

@ -1483,9 +1483,11 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
#define VIDEO_BLOCK 0x02
#define VENDOR_BLOCK 0x03
#define SPEAKER_BLOCK 0x04
#define VIDEO_CAPABILITY_BLOCK 0x07
#define EDID_BASIC_AUDIO (1 << 6)
#define EDID_CEA_YCRCB444 (1 << 5)
#define EDID_CEA_YCRCB422 (1 << 4)
#define EDID_CEA_VCDB_QS (1 << 6)
/**
* Search EDID for CEA extension block.
@ -1901,6 +1903,37 @@ end:
}
EXPORT_SYMBOL(drm_detect_monitor_audio);
/**
* drm_rgb_quant_range_selectable - is RGB quantization range selectable?
*
* Check whether the monitor reports the RGB quantization range selection
* as supported. The AVI infoframe can then be used to inform the monitor
* which quantization range (full or limited) is used.
*/
bool drm_rgb_quant_range_selectable(struct edid *edid)
{
u8 *edid_ext;
int i, start, end;
edid_ext = drm_find_cea_extension(edid);
if (!edid_ext)
return false;
if (cea_db_offsets(edid_ext, &start, &end))
return false;
for_each_cea_db(edid_ext, i, start, end) {
if (cea_db_tag(&edid_ext[i]) == VIDEO_CAPABILITY_BLOCK &&
cea_db_payload_len(&edid_ext[i]) == 2) {
DRM_DEBUG_KMS("CEA VCDB 0x%02x\n", edid_ext[i + 2]);
return edid_ext[i + 2] & EDID_CEA_VCDB_QS;
}
}
return false;
}
EXPORT_SYMBOL(drm_rgb_quant_range_selectable);
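
The helper only reports a sink capability; an encoder is expected to sample it while parsing the EDID and use the answer when building the AVI infoframe. A hedged sketch of such a caller (the avi_* names are purely illustrative, not an API added by this patch):

    bool selectable = drm_rgb_quant_range_selectable(edid);

    /* only advertise an explicit quantization range when the sink
     * declares QS support in its Video Capability Data Block */
    if (selectable)
            avi_frame_set_quant_range(&frame,
                                      limited_range ? AVI_QUANT_LIMITED
                                                    : AVI_QUANT_FULL);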
/**
* drm_add_display_info - pull display info out if present
* @edid: EDID data

drivers/gpu/drm/i915/Makefile

@ -16,6 +16,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o \
i915_gem_tiling.o \
i915_sysfs.o \
i915_trace_points.o \
i915_ums.o \
intel_display.o \
intel_crt.o \
intel_lvds.o \

drivers/gpu/drm/i915/i915_debugfs.c

@ -258,8 +258,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
seq_printf(m, "%u fault mappable objects, %zu bytes\n",
count, size);
seq_printf(m, "%zu [%zu] gtt total\n",
dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
seq_printf(m, "%zu [%lu] gtt total\n",
dev_priv->gtt.total,
dev_priv->gtt.mappable_end - dev_priv->gtt.start);
mutex_unlock(&dev->struct_mutex);
@ -813,11 +814,11 @@ static int i915_error_state_open(struct inode *inode, struct file *file)
error_priv->dev = dev;
spin_lock_irqsave(&dev_priv->error_lock, flags);
error_priv->error = dev_priv->first_error;
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error_priv->error = dev_priv->gpu_error.first_error;
if (error_priv->error)
kref_get(&error_priv->error->ref);
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
return single_open(file, i915_error_state, error_priv);
}
@ -956,7 +957,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
u32 rpstat;
u32 rpstat, cagf;
u32 rpupei, rpcurup, rpprevup;
u32 rpdownei, rpcurdown, rpprevdown;
int max_freq;
@ -975,6 +976,11 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI);
rpcurdown = I915_READ(GEN6_RP_CUR_DOWN);
rpprevdown = I915_READ(GEN6_RP_PREV_DOWN);
if (IS_HASWELL(dev))
cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
else
cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
cagf *= GT_FREQUENCY_MULTIPLIER;
gen6_gt_force_wake_put(dev_priv);
mutex_unlock(&dev->struct_mutex);
@ -987,8 +993,7 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
gt_perf_status & 0xff);
seq_printf(m, "Render p-state limit: %d\n",
rp_state_limits & 0xff);
seq_printf(m, "CAGF: %dMHz\n", ((rpstat & GEN6_CAGF_MASK) >>
GEN6_CAGF_SHIFT) * GT_FREQUENCY_MULTIPLIER);
seq_printf(m, "CAGF: %dMHz\n", cagf);
seq_printf(m, "RP CUR UP EI: %dus\n", rpupei &
GEN6_CURICONT_MASK);
seq_printf(m, "RP CUR UP: %dus\n", rpcurup &
@ -1674,7 +1679,7 @@ i915_wedged_read(struct file *filp,
len = snprintf(buf, sizeof(buf),
"wedged : %d\n",
atomic_read(&dev_priv->mm.wedged));
atomic_read(&dev_priv->gpu_error.reset_counter));
if (len > sizeof(buf))
len = sizeof(buf);
@ -1729,7 +1734,7 @@ i915_ring_stop_read(struct file *filp,
int len;
len = snprintf(buf, sizeof(buf),
"0x%08x\n", dev_priv->stop_rings);
"0x%08x\n", dev_priv->gpu_error.stop_rings);
if (len > sizeof(buf))
len = sizeof(buf);
@ -1765,7 +1770,7 @@ i915_ring_stop_write(struct file *filp,
if (ret)
return ret;
dev_priv->stop_rings = val;
dev_priv->gpu_error.stop_rings = val;
mutex_unlock(&dev->struct_mutex);
return cnt;
@ -1779,6 +1784,102 @@ static const struct file_operations i915_ring_stop_fops = {
.llseek = default_llseek,
};
#define DROP_UNBOUND 0x1
#define DROP_BOUND 0x2
#define DROP_RETIRE 0x4
#define DROP_ACTIVE 0x8
#define DROP_ALL (DROP_UNBOUND | \
DROP_BOUND | \
DROP_RETIRE | \
DROP_ACTIVE)
static ssize_t
i915_drop_caches_read(struct file *filp,
char __user *ubuf,
size_t max,
loff_t *ppos)
{
char buf[20];
int len;
len = snprintf(buf, sizeof(buf), "0x%08x\n", DROP_ALL);
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}
static ssize_t
i915_drop_caches_write(struct file *filp,
const char __user *ubuf,
size_t cnt,
loff_t *ppos)
{
struct drm_device *dev = filp->private_data;
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj, *next;
char buf[20];
int val = 0, ret;
if (cnt > 0) {
if (cnt > sizeof(buf) - 1)
return -EINVAL;
if (copy_from_user(buf, ubuf, cnt))
return -EFAULT;
buf[cnt] = 0;
val = simple_strtoul(buf, NULL, 0);
}
DRM_DEBUG_DRIVER("Dropping caches: 0x%08x\n", val);
/* No need to check and wait for gpu resets, only libdrm auto-restarts
* on ioctls on -EAGAIN. */
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
if (val & DROP_ACTIVE) {
ret = i915_gpu_idle(dev);
if (ret)
goto unlock;
}
if (val & (DROP_RETIRE | DROP_ACTIVE))
i915_gem_retire_requests(dev);
if (val & DROP_BOUND) {
list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
if (obj->pin_count == 0) {
ret = i915_gem_object_unbind(obj);
if (ret)
goto unlock;
}
}
if (val & DROP_UNBOUND) {
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list, gtt_list)
if (obj->pages_pin_count == 0) {
ret = i915_gem_object_put_pages(obj);
if (ret)
goto unlock;
}
}
unlock:
mutex_unlock(&dev->struct_mutex);
return ret ?: cnt;
}
static const struct file_operations i915_drop_caches_fops = {
.owner = THIS_MODULE,
.open = simple_open,
.read = i915_drop_caches_read,
.write = i915_drop_caches_write,
.llseek = default_llseek,
};
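
The new file gives userspace (and test suites) a way to force GEM to shed caches without waiting for memory pressure. A small userspace sketch, assuming the usual debugfs mount point and card minor 0:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* write DROP_ALL (0xf) to the new debugfs file; returns 0 on success */
    static int i915_drop_all_caches(void)
    {
            int fd = open("/sys/kernel/debug/dri/0/i915_gem_drop_caches",
                          O_WRONLY);

            if (fd < 0)
                    return -1;
            dprintf(fd, "0x%x\n", 0xf);
            close(fd);
            return 0;
    }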
static ssize_t
i915_max_freq_read(struct file *filp,
char __user *ubuf,
@ -2175,6 +2276,12 @@ int i915_debugfs_init(struct drm_minor *minor)
if (ret)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_gem_drop_caches",
&i915_drop_caches_fops);
if (ret)
return ret;
ret = i915_debugfs_create(minor->debugfs_root, minor,
"i915_error_state",
&i915_error_state_fops);
@ -2206,6 +2313,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_cache_sharing_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_drop_caches_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_ring_stop_fops,
1, minor);
drm_debugfs_remove_files((struct drm_info_list *) &i915_error_state_fops,

drivers/gpu/drm/i915/i915_dma.c

@ -992,6 +992,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_PINNED_BATCHES:
value = 1;
break;
case I915_PARAM_HAS_EXEC_NO_RELOC:
value = 1;
break;
case I915_PARAM_HAS_EXEC_HANDLE_LUT:
value = 1;
break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
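
Userspace is expected to probe these before relying on the faster execbuf paths. A hedged libdrm-style probe (the helper name is illustrative):

    #include <stdbool.h>
    #include <xf86drm.h>
    #include <i915_drm.h>

    /* returns true if the kernel reports the given I915_PARAM_* as set */
    static bool i915_has_param(int fd, int param)
    {
            drm_i915_getparam_t gp;
            int value = 0;

            gp.param = param;
            gp.value = &value;
            if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
                    return false;
            return value != 0;
    }

    /* e.g. i915_has_param(fd, I915_PARAM_HAS_EXEC_NO_RELOC) and
     *      i915_has_param(fd, I915_PARAM_HAS_EXEC_HANDLE_LUT) */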
@ -1070,7 +1076,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
ring->status_page.gfx_addr = hws->addr & (0x1ffff<<12);
dev_priv->dri1.gfx_hws_cpu_addr =
ioremap_wc(dev_priv->mm.gtt_base_addr + hws->addr, 4096);
ioremap_wc(dev_priv->gtt.mappable_base + hws->addr, 4096);
if (dev_priv->dri1.gfx_hws_cpu_addr == NULL) {
i915_dma_cleanup(dev);
ring->status_page.gfx_addr = 0;
@ -1420,9 +1426,9 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
if (!ap)
return;
ap->ranges[0].base = dev_priv->mm.gtt->gma_bus_addr;
ap->ranges[0].size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
ap->ranges[0].base = dev_priv->gtt.mappable_base;
ap->ranges[0].size = dev_priv->gtt.mappable_end - dev_priv->gtt.start;
primary =
pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
@ -1536,18 +1542,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
goto put_gmch;
}
aperture_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
dev_priv->mm.gtt_base_addr = dev_priv->mm.gtt->gma_bus_addr;
aperture_size = dev_priv->gtt.mappable_end;
dev_priv->mm.gtt_mapping =
io_mapping_create_wc(dev_priv->mm.gtt_base_addr,
dev_priv->gtt.mappable =
io_mapping_create_wc(dev_priv->gtt.mappable_base,
aperture_size);
if (dev_priv->mm.gtt_mapping == NULL) {
if (dev_priv->gtt.mappable == NULL) {
ret = -EIO;
goto out_rmmap;
}
i915_mtrr_setup(dev_priv, dev_priv->mm.gtt_base_addr,
i915_mtrr_setup(dev_priv, dev_priv->gtt.mappable_base,
aperture_size);
/* The i915 workqueue is primarily used for batched retirement of
@ -1600,7 +1605,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
pci_enable_msi(dev->pdev);
spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
spin_lock_init(&dev_priv->gpu_error.lock);
spin_lock_init(&dev_priv->rps.lock);
mutex_init(&dev_priv->dpio_lock);
@ -1652,15 +1657,15 @@ out_gem_unload:
out_mtrrfree:
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
dev_priv->mm.gtt_base_addr,
dev_priv->gtt.mappable_base,
aperture_size);
dev_priv->mm.gtt_mtrr = -1;
}
io_mapping_free(dev_priv->mm.gtt_mapping);
io_mapping_free(dev_priv->gtt.mappable);
out_rmmap:
pci_iounmap(dev->pdev, dev_priv->regs);
put_gmch:
i915_gem_gtt_fini(dev);
dev_priv->gtt.gtt_remove(dev);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@ -1690,11 +1695,11 @@ int i915_driver_unload(struct drm_device *dev)
/* Cancel the retire work handler, which should be idle now. */
cancel_delayed_work_sync(&dev_priv->mm.retire_work);
io_mapping_free(dev_priv->mm.gtt_mapping);
io_mapping_free(dev_priv->gtt.mappable);
if (dev_priv->mm.gtt_mtrr >= 0) {
mtrr_del(dev_priv->mm.gtt_mtrr,
dev_priv->mm.gtt_base_addr,
dev_priv->mm.gtt->gtt_mappable_entries * PAGE_SIZE);
dev_priv->gtt.mappable_base,
dev_priv->gtt.mappable_end);
dev_priv->mm.gtt_mtrr = -1;
}
@ -1720,8 +1725,8 @@ int i915_driver_unload(struct drm_device *dev)
}
/* Free error state after interrupts are fully disabled. */
del_timer_sync(&dev_priv->hangcheck_timer);
cancel_work_sync(&dev_priv->error_work);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
cancel_work_sync(&dev_priv->gpu_error.work);
i915_destroy_error_state(dev);
if (dev->pdev->msi_enabled)

drivers/gpu/drm/i915/i915_drv.c

@ -276,6 +276,7 @@ static const struct intel_device_info intel_valleyview_m_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
};
static const struct intel_device_info intel_valleyview_d_info = {
@ -285,6 +286,7 @@ static const struct intel_device_info intel_valleyview_d_info = {
.has_bsd_ring = 1,
.has_blt_ring = 1,
.is_valleyview = 1,
.display_mmio_offset = VLV_DISPLAY_BASE,
};
static const struct intel_device_info intel_haswell_d_info = {
@ -468,6 +470,8 @@ static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
intel_set_power_well(dev, true);
drm_kms_helper_poll_disable(dev);
pci_save_state(dev->pdev);
@ -779,9 +783,9 @@ int intel_gpu_reset(struct drm_device *dev)
}
/* Also reset the gpu hangman. */
if (dev_priv->stop_rings) {
if (dev_priv->gpu_error.stop_rings) {
DRM_DEBUG("Simulated gpu hang, resetting stop_rings\n");
dev_priv->stop_rings = 0;
dev_priv->gpu_error.stop_rings = 0;
if (ret == -ENODEV) {
DRM_ERROR("Reset not implemented, but ignoring "
"error for simulated gpu hangs\n");
@ -820,12 +824,12 @@ int i915_reset(struct drm_device *dev)
i915_gem_reset(dev);
ret = -ENODEV;
if (get_seconds() - dev_priv->last_gpu_reset < 5)
if (get_seconds() - dev_priv->gpu_error.last_reset < 5)
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
else
ret = intel_gpu_reset(dev);
dev_priv->last_gpu_reset = get_seconds();
dev_priv->gpu_error.last_reset = get_seconds();
if (ret) {
DRM_ERROR("Failed to reset chip.\n");
mutex_unlock(&dev->struct_mutex);
@ -1115,102 +1119,6 @@ MODULE_LICENSE("GPL and additional rights");
((HAS_FORCE_WAKE((dev_priv)->dev)) && \
((reg) < 0x40000) && \
((reg) != FORCEWAKE))
static bool IS_DISPLAYREG(u32 reg)
{
/*
* This should make it easier to transition modules over to the
* new register block scheme, since we can do it incrementally.
*/
if (reg >= VLV_DISPLAY_BASE)
return false;
if (reg >= RENDER_RING_BASE &&
reg < RENDER_RING_BASE + 0xff)
return false;
if (reg >= GEN6_BSD_RING_BASE &&
reg < GEN6_BSD_RING_BASE + 0xff)
return false;
if (reg >= BLT_RING_BASE &&
reg < BLT_RING_BASE + 0xff)
return false;
if (reg == PGTBL_ER)
return false;
if (reg >= IPEIR_I965 &&
reg < HWSTAM)
return false;
if (reg == MI_MODE)
return false;
if (reg == GFX_MODE_GEN7)
return false;
if (reg == RENDER_HWS_PGA_GEN7 ||
reg == BSD_HWS_PGA_GEN7 ||
reg == BLT_HWS_PGA_GEN7)
return false;
if (reg == GEN6_BSD_SLEEP_PSMI_CONTROL ||
reg == GEN6_BSD_RNCID)
return false;
if (reg == GEN6_BLITTER_ECOSKPD)
return false;
if (reg >= 0x4000c &&
reg <= 0x4002c)
return false;
if (reg >= 0x4f000 &&
reg <= 0x4f08f)
return false;
if (reg >= 0x4f100 &&
reg <= 0x4f11f)
return false;
if (reg >= VLV_MASTER_IER &&
reg <= GEN6_PMIER)
return false;
if (reg >= FENCE_REG_SANDYBRIDGE_0 &&
reg < (FENCE_REG_SANDYBRIDGE_0 + (16*8)))
return false;
if (reg >= VLV_IIR_RW &&
reg <= VLV_ISR)
return false;
if (reg == FORCEWAKE_VLV ||
reg == FORCEWAKE_ACK_VLV)
return false;
if (reg == GEN6_GDRST)
return false;
switch (reg) {
case _3D_CHICKEN3:
case IVB_CHICKEN3:
case GEN7_COMMON_SLICE_CHICKEN1:
case GEN7_L3CNTLREG1:
case GEN7_L3_CHICKEN_MODE_REGISTER:
case GEN7_ROW_CHICKEN2:
case GEN7_L3SQCREG4:
case GEN7_SQ_CHICKEN_MBCUNIT_CONFIG:
case GEN7_HALF_SLICE_CHICKEN1:
case GEN6_MBCTL:
case GEN6_UCGCTL2:
return false;
default:
break;
}
return true;
}
static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
@ -1234,8 +1142,6 @@ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
if (dev_priv->forcewake_count == 0) \
dev_priv->gt.force_wake_put(dev_priv); \
spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
} else if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
val = read##y(dev_priv->regs + reg + 0x180000); \
} else { \
val = read##y(dev_priv->regs + reg); \
} \
@ -1262,11 +1168,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
DRM_ERROR("Unknown unclaimed register before writing to %x\n", reg); \
I915_WRITE_NOTRACE(GEN7_ERR_INT, ERR_INT_MMIO_UNCLAIMED); \
} \
if (IS_VALLEYVIEW(dev_priv->dev) && IS_DISPLAYREG(reg)) { \
write##y(val, dev_priv->regs + reg + 0x180000); \
} else { \
write##y(val, dev_priv->regs + reg); \
} \
if (unlikely(__fifo_ret)) { \
gen6_gt_check_fifodbg(dev_priv); \
} \

drivers/gpu/drm/i915/i915_drv.h

@ -337,6 +337,7 @@ struct drm_i915_gt_funcs {
DEV_INFO_FLAG(has_llc)
struct intel_device_info {
u32 display_mmio_offset;
u8 gen;
u8 is_mobile:1;
u8 is_i85x:1;
@ -364,6 +365,49 @@ struct intel_device_info {
u8 has_llc:1;
};
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
/* The Graphics Translation Table is the way in which GEN hardware translates a
* Graphics Virtual Address into a Physical Address. In addition to the normal
* collateral associated with any va->pa translations GEN hardware also has a
* portion of the GTT which can be mapped by the CPU and remain both coherent
* and correct (in cases like swizzling). That region is referred to as GMADR in
* the spec.
*/
struct i915_gtt {
unsigned long start; /* Start offset of used GTT */
size_t total; /* Total size GTT can map */
size_t stolen_size; /* Total size of stolen memory */
unsigned long mappable_end; /* End offset that we can CPU map */
struct io_mapping *mappable; /* Mapping to our CPU mappable region */
phys_addr_t mappable_base; /* PA of our GMADR */
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
bool do_idle_maps;
dma_addr_t scratch_page_dma;
struct page *scratch_page;
/* global gtt ops */
int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total,
size_t *stolen);
void (*gtt_remove)(struct drm_device *dev);
void (*gtt_clear_range)(struct drm_device *dev,
unsigned int first_entry,
unsigned int num_entries);
void (*gtt_insert_entries)(struct drm_device *dev,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
};
#define gtt_total_entries(gtt) ((gtt).total >> PAGE_SHIFT)
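
These hooks are what let the rest of GEM stay oblivious to whether the intel-gtt bridge (gen2-5) or the native gen6+ MMIO path sits underneath; everything routes through dev_priv->gtt. A hedged sketch of the indirection (not verbatim from the patch):

    static void sketch_clear_gtt_range(struct drm_device *dev,
                                       unsigned int first_entry,
                                       unsigned int num_entries)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;

            /* whichever backend filled in the vfuncs at probe time
             * handles the actual PTE writes */
            dev_priv->gtt.gtt_clear_range(dev, first_entry, num_entries);
    }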
#define I915_PPGTT_PD_ENTRIES 512
#define I915_PPGTT_PT_ENTRIES 1024
struct i915_hw_ppgtt {
@ -373,6 +417,16 @@ struct i915_hw_ppgtt {
uint32_t pd_offset;
dma_addr_t *pt_dma_addr;
dma_addr_t scratch_page_dma_addr;
/* pte functions, mirroring the interface of the global gtt. */
void (*clear_range)(struct i915_hw_ppgtt *ppgtt,
unsigned int first_entry,
unsigned int num_entries);
void (*insert_entries)(struct i915_hw_ppgtt *ppgtt,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level);
void (*cleanup)(struct i915_hw_ppgtt *ppgtt);
};
@ -642,149 +696,7 @@ struct intel_l3_parity {
struct work_struct error_work;
};
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
const struct intel_device_info *info;
int relative_constants_mode;
void __iomem *regs;
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
spinlock_t gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
atomic_t irq_received;
/* protects the irq masks */
spinlock_t irq_lock;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct pm_qos_request pm_qos;
/* DPIO indirect register protection */
struct mutex dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
bool enable_hotplug_processing;
int num_pipe;
int num_pch_pll;
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
unsigned int stop_rings;
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
spinlock_t error_lock;
/* Protected by dev->error_lock. */
struct drm_i915_error_state *first_error;
struct work_struct error_work;
struct completion error_completion;
struct workqueue_struct *wq;
/* Display functions */
struct drm_i915_display_funcs display;
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
unsigned long quirks;
/* Register state */
bool modeset_on_lid;
struct {
/** Bridge to intel-gtt-ko */
struct intel_gtt *gtt;
struct i915_gem_mm {
/** Memory allocator for GTT stolen memory */
struct drm_mm stolen;
/** Memory allocator for GTT */
@ -800,16 +712,8 @@ typedef struct drm_i915_private {
struct list_head unbound_list;
/** Usable portion of the GTT for GEM */
unsigned long gtt_start;
unsigned long gtt_mappable_end;
unsigned long gtt_end;
unsigned long stolen_base; /* limited to low memory (32-bit) */
/** "Graphics Stolen Memory" holds the global PTEs */
void __iomem *gsm;
struct io_mapping *gtt_mapping;
phys_addr_t gtt_base_addr;
int gtt_mtrr;
/** PPGTT used for aliasing the PPGTT with the GTT */
@ -869,15 +773,6 @@ typedef struct drm_i915_private {
*/
int suspended;
/**
* Flag if the hardware appears to be wedged.
*
* This is set when attempts to idle the device timeout.
* It prevents command submission from occurring and makes
* every pending request fail
*/
atomic_t wedged;
/** Bit 6 swizzling required for X tiling */
uint32_t bit_6_swizzle_x;
/** Bit 6 swizzling required for Y tiling */
@ -887,11 +782,194 @@ typedef struct drm_i915_private {
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
/* accounting, useful for userland debugging */
size_t gtt_total;
size_t mappable_gtt_total;
size_t object_memory;
u32 object_count;
} mm;
};
struct i915_gpu_error {
/* For hangcheck timer */
#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
#define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd[I915_NUM_RINGS];
uint32_t prev_instdone[I915_NUM_INSTDONE_REG];
/* For reset and error_state handling. */
spinlock_t lock;
/* Protected by the above dev->gpu_error.lock. */
struct drm_i915_error_state *first_error;
struct work_struct work;
unsigned long last_reset;
/**
* State variable and reset counter controlling the reset flow
*
* Upper bits are for the reset counter. This counter is used by the
* wait_seqno code to notice, free of races, that a reset event happened and
* that it needs to restart the entire ioctl (since most likely the
* seqno it waited for won't ever signal anytime soon).
*
* This is important for lock-free wait paths, where no contended lock
* naturally enforces the correct ordering between the bail-out of the
* waiter and the gpu reset work code.
*
* Lowest bit controls the reset state machine: Set means a reset is in
* progress. This state will (presuming we don't have any bugs) decay
* into either unset (successful reset) or the special WEDGED value (hw
* terminally sour). All waiters on the reset_queue will be woken when
* that happens.
*/
atomic_t reset_counter;
/**
* Special values/flags for reset_counter
*
* Note that the code relies on
* I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG
* being true.
*/
#define I915_RESET_IN_PROGRESS_FLAG 1
#define I915_WEDGED 0xffffffff
/**
* Waitqueue to signal when the reset has completed. Used by clients
* that wait for dev_priv->mm.wedged to settle.
*/
wait_queue_head_t reset_queue;
/* For gpu hang simulation. */
unsigned int stop_rings;
};
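
The contract described above is what the nonblocking wait paths later in this diff follow; the pattern, condensed:

    /* sample the counter while the seqno is still protected by the lock,
     * then drop the lock; __wait_seqno compares it again after waking up
     * and returns -EAGAIN if a reset happened in between */
    reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
    mutex_unlock(&dev->struct_mutex);

    ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);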
typedef struct drm_i915_private {
struct drm_device *dev;
struct kmem_cache *slab;
const struct intel_device_info *info;
int relative_constants_mode;
void __iomem *regs;
struct drm_i915_gt_funcs gt;
/** gt_fifo_count and the subsequent register write are synchronized
* with dev->struct_mutex. */
unsigned gt_fifo_count;
/** forcewake_count is protected by gt_lock */
unsigned forcewake_count;
/** gt_lock is also taken in irq contexts. */
spinlock_t gt_lock;
struct intel_gmbus gmbus[GMBUS_NUM_PORTS];
/** gmbus_mutex protects against concurrent usage of the single hw gmbus
* controller on different i2c buses. */
struct mutex gmbus_mutex;
/**
* Base address of the gmbus and gpio block.
*/
uint32_t gpio_mmio_base;
wait_queue_head_t gmbus_wait_queue;
struct pci_dev *bridge_dev;
struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t last_seqno, next_seqno;
drm_dma_handle_t *status_page_dmah;
struct resource mch_res;
atomic_t irq_received;
/* protects the irq masks */
spinlock_t irq_lock;
/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */
struct pm_qos_request pm_qos;
/* DPIO indirect register protection */
struct mutex dpio_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
u32 pipestat[2];
u32 irq_mask;
u32 gt_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
bool enable_hotplug_processing;
int num_pipe;
int num_pch_pll;
unsigned long cfb_size;
unsigned int cfb_fb;
enum plane cfb_plane;
int cfb_y;
struct intel_fbc_work *fbc_work;
struct intel_opregion opregion;
/* overlay */
struct intel_overlay *overlay;
bool sprite_scaling_enabled;
/* LVDS info */
int backlight_level; /* restore backlight to this value */
bool backlight_enabled;
struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
/* Feature bits from the VBIOS */
unsigned int int_tv_support:1;
unsigned int lvds_dither:1;
unsigned int lvds_vbt:1;
unsigned int int_crt_support:1;
unsigned int lvds_use_ssc:1;
unsigned int display_clock_mode:1;
int lvds_ssc_freq;
unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */
struct {
int rate;
int lanes;
int preemphasis;
int vswing;
bool initialized;
bool support;
int bpp;
struct edp_power_seq pps;
} edp;
bool no_aux_handshake;
int crt_ddc_pin;
struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
int num_fence_regs; /* 8 on pre-965, 16 otherwise */
unsigned int fsb_freq, mem_freq, is_ddr3;
struct workqueue_struct *wq;
/* Display functions */
struct drm_i915_display_funcs display;
/* PCH chipset type */
enum intel_pch pch_type;
unsigned short pch_id;
unsigned long quirks;
/* Register state */
bool modeset_on_lid;
struct i915_gtt gtt;
struct i915_gem_mm mm;
/* Kernel Modesetting */
@ -933,7 +1011,7 @@ typedef struct drm_i915_private {
struct drm_mm_node *compressed_fb;
struct drm_mm_node *compressed_llb;
unsigned long last_gpu_reset;
struct i915_gpu_error gpu_error;
/* list of fbdev register on this device */
struct intel_fbdev *fbdev;
@ -973,12 +1051,6 @@ enum hdmi_force_audio {
HDMI_AUDIO_ON, /* force turn on HDMI audio */
};
enum i915_cache_level {
I915_CACHE_NONE = 0,
I915_CACHE_LLC,
I915_CACHE_LLC_MLC, /* gen6+, in docs at least! */
};
#define I915_GTT_RESERVED ((struct drm_mm_node *)0x1)
struct drm_i915_gem_object_ops {
@ -1446,6 +1518,7 @@ int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
bool nonblocking);
void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
@ -1524,8 +1597,18 @@ i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
int __must_check i915_gem_check_wedge(struct drm_i915_private *dev_priv,
int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible);
static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
{
return unlikely(atomic_read(&error->reset_counter)
& I915_RESET_IN_PROGRESS_FLAG);
}
static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
{
return atomic_read(&error->reset_counter) == I915_WEDGED;
}
void i915_gem_reset(struct drm_device *dev);
void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
@ -1566,9 +1649,10 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
void i915_gem_release(struct drm_device *dev, struct drm_file *file);
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
uint32_t size,
int tiling_mode);
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode);
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
int tiling_mode, bool fenced);
int i915_gem_object_set_cache_level(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level);
@ -1591,7 +1675,6 @@ int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file);
/* i915_gem_gtt.c */
int __must_check i915_gem_init_aliasing_ppgtt(struct drm_device *dev);
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
@ -1609,7 +1692,6 @@ void i915_gem_init_global_gtt(struct drm_device *dev);
void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
unsigned long mappable_end, unsigned long end);
int i915_gem_gtt_init(struct drm_device *dev);
void i915_gem_gtt_fini(struct drm_device *dev);
static inline void i915_gem_chipset_flush(struct drm_device *dev)
{
if (INTEL_INFO(dev)->gen < 6)
@ -1668,9 +1750,9 @@ void i915_debugfs_cleanup(struct drm_minor *minor);
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
/* i915_suspend.c */
extern int i915_save_state(struct drm_device *dev);
extern int i915_restore_state(struct drm_device *dev);
/* i915_ums.c */
void i915_save_display_reg(struct drm_device *dev);
void i915_restore_display_reg(struct drm_device *dev);
/* i915_sysfs.c */
void i915_setup_sysfs(struct drm_device *dev_priv);
@ -1727,6 +1809,7 @@ extern void intel_modeset_cleanup(struct drm_device *dev);
extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
extern void intel_modeset_setup_hw_state(struct drm_device *dev,
bool force_restore);
extern void i915_redisable_vga(struct drm_device *dev);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern void intel_disable_fbc(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@ -1799,5 +1882,19 @@ __i915_write(64, q)
#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
/* "Broadcast RGB" property */
#define INTEL_BROADCAST_RGB_AUTO 0
#define INTEL_BROADCAST_RGB_FULL 1
#define INTEL_BROADCAST_RGB_LIMITED 2
static inline uint32_t i915_vgacntrl_reg(struct drm_device *dev)
{
if (HAS_PCH_SPLIT(dev))
return CPU_VGACNTRL;
else if (IS_VALLEYVIEW(dev))
return VLV_VGACNTRL;
else
return VGACNTRL;
}
#endif
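
A hedged sketch of the intended caller, in the spirit of the i915_redisable_vga() change in this series (the function name here is illustrative):

    static void sketch_force_vga_off(struct drm_device *dev)
    {
            struct drm_i915_private *dev_priv = dev->dev_private;
            u32 vga_reg = i915_vgacntrl_reg(dev);

            /* pick the platform-correct VGA control register and make
             * sure the VGA plane stays disabled */
            if ((I915_READ(vga_reg) & VGA_DISP_DISABLE) == 0)
                    I915_WRITE(vga_reg, VGA_DISP_DISABLE);
    }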

drivers/gpu/drm/i915/i915_gem.c

@ -87,47 +87,43 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
}
static int
i915_gem_wait_for_error(struct drm_device *dev)
i915_gem_wait_for_error(struct i915_gpu_error *error)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct completion *x = &dev_priv->error_completion;
unsigned long flags;
int ret;
if (!atomic_read(&dev_priv->mm.wedged))
#define EXIT_COND (!i915_reset_in_progress(error))
if (EXIT_COND)
return 0;
/* GPU is already declared terminally dead, give up. */
if (i915_terminally_wedged(error))
return -EIO;
/*
* Only wait 10 seconds for the gpu reset to complete to avoid hanging
* userspace. If it takes that long something really bad is going on and
* we should simply try to bail out and fail as gracefully as possible.
*/
ret = wait_for_completion_interruptible_timeout(x, 10*HZ);
ret = wait_event_interruptible_timeout(error->reset_queue,
EXIT_COND,
10*HZ);
if (ret == 0) {
DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
return -EIO;
} else if (ret < 0) {
return ret;
}
#undef EXIT_COND
if (atomic_read(&dev_priv->mm.wedged)) {
/* GPU is hung, bump the completion count to account for
* the token we just consumed so that we never hit zero and
* end up waiting upon a subsequent completion event that
* will never happen.
*/
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
spin_unlock_irqrestore(&x->wait.lock, flags);
}
return 0;
}
int i915_mutex_lock_interruptible(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = i915_gem_wait_for_error(dev);
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
@ -149,6 +145,7 @@ int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_init *args = data;
if (drm_core_check_feature(dev, DRIVER_MODESET))
@ -165,6 +162,7 @@ i915_gem_init_ioctl(struct drm_device *dev, void *data,
mutex_lock(&dev->struct_mutex);
i915_gem_setup_global_gtt(dev, args->gtt_start, args->gtt_end,
args->gtt_end);
dev_priv->gtt.mappable_end = args->gtt_end;
mutex_unlock(&dev->struct_mutex);
return 0;
@ -186,7 +184,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
args->aper_size = dev_priv->mm.gtt_total;
args->aper_size = dev_priv->gtt.total;
args->aper_available_size = args->aper_size - pinned;
return 0;
@ -637,7 +635,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
* source page isn't available. Return the error and we'll
* retry in the slow path.
*/
if (fast_user_write(dev_priv->mm.gtt_mapping, page_base,
if (fast_user_write(dev_priv->gtt.mappable, page_base,
page_offset, user_data, page_length)) {
ret = -EFAULT;
goto out_unpin;
@ -937,26 +935,17 @@ unlock:
}
int
i915_gem_check_wedge(struct drm_i915_private *dev_priv,
i915_gem_check_wedge(struct i915_gpu_error *error,
bool interruptible)
{
if (atomic_read(&dev_priv->mm.wedged)) {
struct completion *x = &dev_priv->error_completion;
bool recovery_complete;
unsigned long flags;
/* Give the error handler a chance to run. */
spin_lock_irqsave(&x->wait.lock, flags);
recovery_complete = x->done > 0;
spin_unlock_irqrestore(&x->wait.lock, flags);
if (i915_reset_in_progress(error)) {
/* Non-interruptible callers can't handle -EAGAIN, hence return
* -EIO unconditionally for these. */
if (!interruptible)
return -EIO;
/* Recovery complete, but still wedged means reset failure. */
if (recovery_complete)
/* Recovery complete, but the reset failed ... */
if (i915_terminally_wedged(error))
return -EIO;
return -EAGAIN;
@ -987,13 +976,22 @@ i915_gem_check_olr(struct intel_ring_buffer *ring, u32 seqno)
* __wait_seqno - wait until execution of seqno has finished
* @ring: the ring expected to report seqno
* @seqno: duh!
* @reset_counter: reset sequence associated with the given seqno
* @interruptible: do an interruptible wait (normally yes)
* @timeout: in - how long to wait (NULL forever); out - how much time remaining
*
* Note: It is of utmost importance that the passed in seqno and reset_counter
* values have been read by the caller in an smp safe manner. Where read-side
* locks are involved, it is sufficient to read the reset_counter before
* unlocking the lock that protects the seqno. For lockless tricks, the
* reset_counter _must_ be read before, and an appropriate smp_rmb must be
* inserted.
*
* Returns 0 if the seqno was found within the allotted time. Else returns the
* errno with remaining time filled in timeout argument.
*/
static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
unsigned reset_counter,
bool interruptible, struct timespec *timeout)
{
drm_i915_private_t *dev_priv = ring->dev->dev_private;
@ -1023,7 +1021,8 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
#define EXIT_COND \
(i915_seqno_passed(ring->get_seqno(ring, false), seqno) || \
atomic_read(&dev_priv->mm.wedged))
i915_reset_in_progress(&dev_priv->gpu_error) || \
reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
do {
if (interruptible)
end = wait_event_interruptible_timeout(ring->irq_queue,
@ -1033,7 +1032,14 @@ static int __wait_seqno(struct intel_ring_buffer *ring, u32 seqno,
end = wait_event_timeout(ring->irq_queue, EXIT_COND,
timeout_jiffies);
ret = i915_gem_check_wedge(dev_priv, interruptible);
/* We need to check whether any gpu reset happened in between
* the caller grabbing the seqno and now ... */
if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
end = -EAGAIN;
/* ... but upgrade the -EAGAIN to an -EIO if the gpu is truly
* gone. */
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
end = ret;
} while (end == 0 && wait_forever);
@ -1079,7 +1085,7 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
BUG_ON(seqno == 0);
ret = i915_gem_check_wedge(dev_priv, interruptible);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
if (ret)
return ret;
@ -1087,7 +1093,9 @@ i915_wait_seqno(struct intel_ring_buffer *ring, uint32_t seqno)
if (ret)
return ret;
return __wait_seqno(ring, seqno, interruptible, NULL);
return __wait_seqno(ring, seqno,
atomic_read(&dev_priv->gpu_error.reset_counter),
interruptible, NULL);
}
/**
@ -1134,6 +1142,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_ring_buffer *ring = obj->ring;
unsigned reset_counter;
u32 seqno;
int ret;
@ -1144,7 +1153,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (seqno == 0)
return 0;
ret = i915_gem_check_wedge(dev_priv, true);
ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
if (ret)
return ret;
@ -1152,8 +1161,9 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
if (ret)
return ret;
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, true, NULL);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
mutex_lock(&dev->struct_mutex);
i915_gem_retire_requests_ring(ring);
@ -1362,7 +1372,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
obj->fault_mappable = true;
pfn = ((dev_priv->mm.gtt_base_addr + obj->gtt_offset) >> PAGE_SHIFT) +
pfn = ((dev_priv->gtt.mappable_base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@ -1377,7 +1387,7 @@ out:
/* If this -EIO is due to a gpu hang, give the reset code a
* chance to clean up the mess. Otherwise return the proper
* SIGBUS. */
if (!atomic_read(&dev_priv->mm.wedged))
if (i915_terminally_wedged(&dev_priv->gpu_error))
return VM_FAULT_SIGBUS;
case -EAGAIN:
/* Give the error handler a chance to run and move the
@ -1435,7 +1445,7 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
obj->fault_mappable = false;
}
static uint32_t
uint32_t
i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
{
uint32_t gtt_size;
@ -1463,16 +1473,15 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
* Return the required GTT alignment for an object, taking into account
* potential fence register mapping.
*/
static uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev,
uint32_t size,
int tiling_mode)
uint32_t
i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
int tiling_mode, bool fenced)
{
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
if (INTEL_INFO(dev)->gen >= 4 ||
if (INTEL_INFO(dev)->gen >= 4 || (!fenced && IS_G33(dev)) ||
tiling_mode == I915_TILING_NONE)
return 4096;
@ -1483,35 +1492,6 @@ i915_gem_get_gtt_alignment(struct drm_device *dev,
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
/**
* i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
* unfenced object
* @dev: the device
* @size: size of the object
* @tiling_mode: tiling mode of the object
*
* Return the required GTT alignment for an object, only taking into account
* unfenced tiled surface requirements.
*/
uint32_t
i915_gem_get_unfenced_gtt_alignment(struct drm_device *dev,
uint32_t size,
int tiling_mode)
{
/*
* Minimum alignment is 4k (GTT page size) for sane hw.
*/
if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
tiling_mode == I915_TILING_NONE)
return 4096;
/* Previous hardware however needs to be aligned to a power-of-two
* tile height. The simplest method for determining this is to reuse
* the power-of-tile object size.
*/
return i915_gem_get_gtt_size(dev, size, tiling_mode);
}
static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
@ -1574,7 +1554,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
}
@ -1692,7 +1672,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
kfree(obj->pages);
}
static int
int
i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
{
const struct drm_i915_gem_object_ops *ops = obj->ops;
@ -1865,6 +1845,11 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
if (obj->pages)
return 0;
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to obtain a purgeable object\n");
return -EINVAL;
}
BUG_ON(obj->pages_pin_count);
ret = ops->get_pages(obj);
@ -1921,9 +1906,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
BUG_ON(!obj->active);
if (obj->pin_count) /* are we a framebuffer? */
intel_mark_fb_idle(obj);
list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
list_del_init(&obj->ring_list);
@ -2075,7 +2057,7 @@ i915_add_request(struct intel_ring_buffer *ring,
if (!dev_priv->mm.suspended) {
if (i915_enable_hangcheck) {
mod_timer(&dev_priv->hangcheck_timer,
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
if (was_empty) {
@ -2340,10 +2322,12 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
int
i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_wait *args = data;
struct drm_i915_gem_object *obj;
struct intel_ring_buffer *ring = NULL;
struct timespec timeout_stack, *timeout = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret = 0;
@ -2384,9 +2368,10 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
}
drm_gem_object_unreference(&obj->base);
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
mutex_unlock(&dev->struct_mutex);
ret = __wait_seqno(ring, seqno, true, timeout);
ret = __wait_seqno(ring, seqno, reset_counter, true, timeout);
if (timeout) {
WARN_ON(!timespec_valid(timeout));
args->timeout_ns = timespec_to_ns(timeout);
@ -2450,15 +2435,15 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
{
u32 old_write_domain, old_read_domains;
/* Act a barrier for all accesses through the GTT */
mb();
/* Force a pagefault for domain tracking on next user access */
i915_gem_release_mmap(obj);
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
return;
/* Wait for any direct GTT access to complete */
mb();
old_read_domains = obj->base.read_domains;
old_write_domain = obj->base.write_domain;
@ -2477,7 +2462,7 @@ int
i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
int ret = 0;
int ret;
if (obj->gtt_space == NULL)
return 0;
@ -2544,52 +2529,38 @@ int i915_gpu_idle(struct drm_device *dev)
return 0;
}
static void sandybridge_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint64_t val;
if (obj) {
u32 size = obj->gtt_space->size;
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + reg * 8, val);
POSTING_READ(FENCE_REG_SANDYBRIDGE_0 + reg * 8);
}
static void i965_write_fence_reg(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int fence_reg;
int fence_pitch_shift;
uint64_t val;
if (INTEL_INFO(dev)->gen >= 6) {
fence_reg = FENCE_REG_SANDYBRIDGE_0;
fence_pitch_shift = SANDYBRIDGE_FENCE_PITCH_SHIFT;
} else {
fence_reg = FENCE_REG_965_0;
fence_pitch_shift = I965_FENCE_PITCH_SHIFT;
}
if (obj) {
u32 size = obj->gtt_space->size;
val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
val |= obj->gtt_offset & 0xfffff000;
val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
val |= (uint64_t)((obj->stride / 128) - 1) << fence_pitch_shift;
if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
} else
val = 0;
I915_WRITE64(FENCE_REG_965_0 + reg * 8, val);
POSTING_READ(FENCE_REG_965_0 + reg * 8);
fence_reg += reg * 8;
I915_WRITE64(fence_reg, val);
POSTING_READ(fence_reg);
}
static void i915_write_fence_reg(struct drm_device *dev, int reg,
@ -2668,18 +2639,37 @@ static void i830_write_fence_reg(struct drm_device *dev, int reg,
POSTING_READ(FENCE_REG_830_0 + reg * 4);
}
inline static bool i915_gem_object_needs_mb(struct drm_i915_gem_object *obj)
{
return obj && obj->base.read_domains & I915_GEM_DOMAIN_GTT;
}
static void i915_gem_write_fence(struct drm_device *dev, int reg,
struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
*/
if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
mb();
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6: sandybridge_write_fence_reg(dev, reg, obj); break;
case 6:
case 5:
case 4: i965_write_fence_reg(dev, reg, obj); break;
case 3: i915_write_fence_reg(dev, reg, obj); break;
case 2: i830_write_fence_reg(dev, reg, obj); break;
default: BUG();
}
/* And similarly be paranoid that no direct access to this region
* is reordered to before the fence is installed.
*/
if (i915_gem_object_needs_mb(obj))
mb();
}
static inline int fence_number(struct drm_i915_private *dev_priv,
@ -2709,7 +2699,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
}
static int
i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
i915_gem_object_wait_fence(struct drm_i915_gem_object *obj)
{
if (obj->last_fenced_seqno) {
int ret = i915_wait_seqno(obj->ring, obj->last_fenced_seqno);
@ -2719,12 +2709,6 @@ i915_gem_object_flush_fence(struct drm_i915_gem_object *obj)
obj->last_fenced_seqno = 0;
}
/* Ensure that all CPU reads are completed before installing a fence
* and all writes before removing the fence.
*/
if (obj->base.read_domains & I915_GEM_DOMAIN_GTT)
mb();
obj->fenced_gpu_access = false;
return 0;
}
@ -2735,7 +2719,7 @@ i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
int ret;
ret = i915_gem_object_flush_fence(obj);
ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
@ -2809,7 +2793,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
* will need to serialise the write to the associated fence register?
*/
if (obj->fence_dirty) {
ret = i915_gem_object_flush_fence(obj);
ret = i915_gem_object_wait_fence(obj);
if (ret)
return ret;
}
@ -2830,7 +2814,7 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
if (reg->obj) {
struct drm_i915_gem_object *old = reg->obj;
ret = i915_gem_object_flush_fence(old);
ret = i915_gem_object_wait_fence(old);
if (ret)
return ret;
@ -2931,21 +2915,16 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
bool mappable, fenceable;
int ret;
if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
}
fence_size = i915_gem_get_gtt_size(dev,
obj->base.size,
obj->tiling_mode);
fence_alignment = i915_gem_get_gtt_alignment(dev,
obj->base.size,
obj->tiling_mode);
obj->tiling_mode, true);
unfenced_alignment =
i915_gem_get_unfenced_gtt_alignment(dev,
i915_gem_get_gtt_alignment(dev,
obj->base.size,
obj->tiling_mode);
obj->tiling_mode, false);
if (alignment == 0)
alignment = map_and_fenceable ? fence_alignment :
@ -2961,7 +2940,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
* before evicting everything in a vain attempt to find space.
*/
if (obj->base.size >
(map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
(map_and_fenceable ? dev_priv->gtt.mappable_end : dev_priv->gtt.total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
@ -2982,7 +2961,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
if (map_and_fenceable)
ret = drm_mm_insert_node_in_range_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level,
0, dev_priv->mm.gtt_mappable_end);
0, dev_priv->gtt.mappable_end);
else
ret = drm_mm_insert_node_generic(&dev_priv->mm.gtt_space, node,
size, alignment, obj->cache_level);
@ -3022,7 +3001,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
(node->start & (fence_alignment - 1)) == 0;
mappable =
obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end;
obj->map_and_fenceable = mappable && fenceable;
@ -3130,6 +3109,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
i915_gem_object_flush_cpu_write_domain(obj);
/* Serialise direct access to this object with the barriers for
* coherent writes from the GPU, by effectively invalidating the
* GTT domain upon first access.
*/
if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
mb();
old_write_domain = obj->base.write_domain;
old_read_domains = obj->base.read_domains;
@ -3436,11 +3422,17 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
struct drm_i915_gem_request *request;
struct intel_ring_buffer *ring = NULL;
unsigned reset_counter;
u32 seqno = 0;
int ret;
if (atomic_read(&dev_priv->mm.wedged))
return -EIO;
ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
if (ret)
return ret;
ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
if (ret)
return ret;
spin_lock(&file_priv->mm.lock);
list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
@ -3450,12 +3442,13 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
ring = request->ring;
seqno = request->seqno;
}
reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
spin_unlock(&file_priv->mm.lock);
if (seqno == 0)
return 0;
ret = __wait_seqno(ring, seqno, true, NULL);
ret = __wait_seqno(ring, seqno, reset_counter, true, NULL);
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
@ -3853,7 +3846,7 @@ i915_gem_idle(struct drm_device *dev)
* And not confound mm.suspended!
*/
dev_priv->mm.suspended = 1;
del_timer_sync(&dev_priv->hangcheck_timer);
del_timer_sync(&dev_priv->gpu_error.hangcheck_timer);
i915_kernel_lost_context(dev);
i915_gem_cleanup_ringbuffer(dev);
@ -3953,8 +3946,6 @@ i915_gem_init_hw(struct drm_device *dev)
i915_gem_init_swizzling(dev);
dev_priv->next_seqno = dev_priv->last_seqno = (u32)~0 - 0x1000;
ret = intel_init_render_ring_buffer(dev);
if (ret)
return ret;
@ -3971,6 +3962,10 @@ i915_gem_init_hw(struct drm_device *dev)
goto cleanup_bsd_ring;
}
ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
if (ret)
return ret;
/*
* XXX: There was some w/a described somewhere suggesting loading
* contexts before PPGTT.
@ -4028,9 +4023,9 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
if (atomic_read(&dev_priv->mm.wedged)) {
if (i915_reset_in_progress(&dev_priv->gpu_error)) {
DRM_ERROR("Reenabling wedged hardware, good luck\n");
atomic_set(&dev_priv->mm.wedged, 0);
atomic_set(&dev_priv->gpu_error.reset_counter, 0);
}
mutex_lock(&dev->struct_mutex);
@ -4114,7 +4109,7 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
init_completion(&dev_priv->error_completion);
init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {


@ -80,7 +80,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
if (mappable)
drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space,
min_size, alignment, cache_level,
0, dev_priv->mm.gtt_mappable_end);
0, dev_priv->gtt.mappable_end);
else
drm_mm_init_scan(&dev_priv->mm.gtt_space,
min_size, alignment, cache_level);


@ -34,61 +34,133 @@
#include <linux/dma_remapping.h>
struct eb_objects {
struct list_head objects;
int and;
union {
struct drm_i915_gem_object *lut[0];
struct hlist_head buckets[0];
};
};
static struct eb_objects *
eb_create(int size)
eb_create(struct drm_i915_gem_execbuffer2 *args)
{
struct eb_objects *eb;
struct eb_objects *eb = NULL;
if (args->flags & I915_EXEC_HANDLE_LUT) {
int size = args->buffer_count;
size *= sizeof(struct drm_i915_gem_object *);
size += sizeof(struct eb_objects);
eb = kmalloc(size, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
}
if (eb == NULL) {
int size = args->buffer_count;
int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
BUILD_BUG_ON(!is_power_of_2(PAGE_SIZE / sizeof(struct hlist_head)));
while (count > size)
while (count > 2*size)
count >>= 1;
eb = kzalloc(count*sizeof(struct hlist_head) +
sizeof(struct eb_objects),
GFP_KERNEL);
GFP_TEMPORARY);
if (eb == NULL)
return eb;
eb->and = count - 1;
} else
eb->and = -args->buffer_count;
INIT_LIST_HEAD(&eb->objects);
return eb;
}
static void
eb_reset(struct eb_objects *eb)
{
if (eb->and >= 0)
memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
}
static void
eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
static int
eb_lookup_objects(struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
const struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file)
{
int i;
spin_lock(&file->table_lock);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(idr_find(&file->object_idr, exec[i].handle));
if (obj == NULL) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
return -ENOENT;
}
if (!list_empty(&obj->exec_list)) {
spin_unlock(&file->table_lock);
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
return -EINVAL;
}
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->exec_list, &eb->objects);
obj->exec_entry = &exec[i];
if (eb->and < 0) {
eb->lut[i] = obj;
} else {
uint32_t handle = args->flags & I915_EXEC_HANDLE_LUT ? i : exec[i].handle;
obj->exec_handle = handle;
hlist_add_head(&obj->exec_node,
&eb->buckets[obj->exec_handle & eb->and]);
&eb->buckets[handle & eb->and]);
}
}
spin_unlock(&file->table_lock);
return 0;
}
static struct drm_i915_gem_object *
eb_get_object(struct eb_objects *eb, unsigned long handle)
{
if (eb->and < 0) {
if (handle >= -eb->and)
return NULL;
return eb->lut[handle];
} else {
struct hlist_head *head;
struct hlist_node *node;
struct drm_i915_gem_object *obj;
head = &eb->buckets[handle & eb->and];
hlist_for_each(node, head) {
struct drm_i915_gem_object *obj;
obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
if (obj->exec_handle == handle)
return obj;
}
return NULL;
}
}
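
eb_create() above now has two modes: with I915_EXEC_HANDLE_LUT userspace guarantees the handle equals the buffer index, so a flat lookup table of buffer_count pointers suffices and eb->and stores -buffer_count; otherwise a power-of-two hash table is sized from the page size and eb->and holds the bucket mask, which is what eb_get_object() switches on. A small standalone sketch of the bucket sizing (4K pages and 8-byte hlist heads are example assumptions):

#include <stdio.h>

/* Standalone sketch of how eb_create() sizes the handle->object map in
 * the hashed case: start with half a page worth of hlist heads and
 * shrink while there are more than twice as many buckets as buffers. */
static int pick_bucket_mask(int buffer_count, int page_size, int head_size)
{
        int count = page_size / head_size / 2;

        while (count > 2 * buffer_count)
                count >>= 1;
        return count - 1;       /* stored in eb->and */
}

int main(void)
{
        printf("16 buffers  -> mask %d\n", pick_bucket_mask(16, 4096, 8));
        printf("300 buffers -> mask %d\n", pick_bucket_mask(300, 4096, 8));
        return 0;
}
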
static void
eb_destroy(struct eb_objects *eb)
{
while (!list_empty(&eb->objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
kfree(eb);
}
@ -209,7 +281,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
/* Map the page containing the relocation we're going to perform. */
reloc->offset += obj->gtt_offset;
reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
reloc_page = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc->offset & PAGE_MASK);
reloc_entry = (uint32_t __iomem *)
(reloc_page + (reloc->offset & ~PAGE_MASK));
@ -288,8 +360,7 @@ i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
static int
i915_gem_execbuffer_relocate(struct drm_device *dev,
struct eb_objects *eb,
struct list_head *objects)
struct eb_objects *eb)
{
struct drm_i915_gem_object *obj;
int ret = 0;
@ -302,7 +373,7 @@ i915_gem_execbuffer_relocate(struct drm_device *dev,
* lockdep complains vehemently.
*/
pagefault_disable();
list_for_each_entry(obj, objects, exec_list) {
list_for_each_entry(obj, &eb->objects, exec_list) {
ret = i915_gem_execbuffer_relocate_object(obj, eb);
if (ret)
break;
@ -324,7 +395,8 @@ need_reloc_mappable(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
struct intel_ring_buffer *ring)
struct intel_ring_buffer *ring,
bool *need_reloc)
{
struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
struct drm_i915_gem_exec_object2 *entry = obj->exec_entry;
@ -365,7 +437,20 @@ i915_gem_execbuffer_reserve_object(struct drm_i915_gem_object *obj,
obj->has_aliasing_ppgtt_mapping = 1;
}
if (entry->offset != obj->gtt_offset) {
entry->offset = obj->gtt_offset;
*need_reloc = true;
}
if (entry->flags & EXEC_OBJECT_WRITE) {
obj->base.pending_read_domains = I915_GEM_DOMAIN_RENDER;
obj->base.pending_write_domain = I915_GEM_DOMAIN_RENDER;
}
if (entry->flags & EXEC_OBJECT_NEEDS_GTT &&
!obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(obj, obj->cache_level);
return 0;
}
@ -391,7 +476,8 @@ i915_gem_execbuffer_unreserve_object(struct drm_i915_gem_object *obj)
static int
i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
struct drm_file *file,
struct list_head *objects)
struct list_head *objects,
bool *need_relocs)
{
struct drm_i915_gem_object *obj;
struct list_head ordered_objects;
@ -419,7 +505,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
else
list_move_tail(&obj->exec_list, &ordered_objects);
obj->base.pending_read_domains = 0;
obj->base.pending_read_domains = I915_GEM_GPU_DOMAINS & ~I915_GEM_DOMAIN_COMMAND;
obj->base.pending_write_domain = 0;
obj->pending_fenced_gpu_access = false;
}
@ -459,7 +545,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
(need_mappable && !obj->map_and_fenceable))
ret = i915_gem_object_unbind(obj);
else
ret = i915_gem_execbuffer_reserve_object(obj, ring);
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
@ -469,7 +555,7 @@ i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
if (obj->gtt_space)
continue;
ret = i915_gem_execbuffer_reserve_object(obj, ring);
ret = i915_gem_execbuffer_reserve_object(obj, ring, need_relocs);
if (ret)
goto err;
}
@ -489,21 +575,22 @@ err: /* Decrement pin count for bound objects */
static int
i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
struct drm_i915_gem_execbuffer2 *args,
struct drm_file *file,
struct intel_ring_buffer *ring,
struct list_head *objects,
struct eb_objects *eb,
struct drm_i915_gem_exec_object2 *exec,
int count)
struct drm_i915_gem_exec_object2 *exec)
{
struct drm_i915_gem_relocation_entry *reloc;
struct drm_i915_gem_object *obj;
bool need_relocs;
int *reloc_offset;
int i, total, ret;
int count = args->buffer_count;
/* We may process another execbuffer during the unlock... */
while (!list_empty(objects)) {
obj = list_first_entry(objects,
while (!list_empty(&eb->objects)) {
obj = list_first_entry(&eb->objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
@ -550,27 +637,16 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
/* reacquire the objects */
eb_reset(eb);
for (i = 0; i < count; i++) {
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
goto err;
}
list_add_tail(&obj->exec_list, objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
ret = i915_gem_execbuffer_reserve(ring, file, objects);
ret = eb_lookup_objects(eb, exec, args, file);
if (ret)
goto err;
list_for_each_entry(obj, objects, exec_list) {
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
list_for_each_entry(obj, &eb->objects, exec_list) {
int offset = obj->exec_entry - exec;
ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
reloc + reloc_offset[offset]);
@ -624,6 +700,9 @@ i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
static bool
i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
{
if (exec->flags & __I915_EXEC_UNKNOWN_FLAGS)
return false;
return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
}
@ -637,6 +716,9 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
int length; /* limited by fault_in_pages_readable() */
if (exec[i].flags & __EXEC_OBJECT_UNKNOWN_FLAGS)
return -EINVAL;
/* First check for malicious input causing overflow */
if (exec[i].relocation_count >
INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
@ -644,9 +726,6 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
length = exec[i].relocation_count *
sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
/* we may also need to update the presumed offsets */
if (!access_ok(VERIFY_WRITE, ptr, length))
return -EFAULT;
@ -668,8 +747,10 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
u32 old_read = obj->base.read_domains;
u32 old_write = obj->base.write_domain;
obj->base.read_domains = obj->base.pending_read_domains;
obj->base.write_domain = obj->base.pending_write_domain;
if (obj->base.write_domain == 0)
obj->base.pending_read_domains |= obj->base.read_domains;
obj->base.read_domains = obj->base.pending_read_domains;
obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
i915_gem_object_move_to_active(obj, ring);
@ -728,21 +809,18 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
struct drm_i915_gem_exec_object2 *exec)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head objects;
struct eb_objects *eb;
struct drm_i915_gem_object *batch_obj;
struct drm_clip_rect *cliprects = NULL;
struct intel_ring_buffer *ring;
u32 ctx_id = i915_execbuffer2_get_context_id(*args);
u32 exec_start, exec_len;
u32 mask;
u32 flags;
u32 mask, flags;
int ret, mode, i;
bool need_relocs;
if (!i915_gem_check_execbuffer(args)) {
DRM_DEBUG("execbuf with invalid offset/length\n");
if (!i915_gem_check_execbuffer(args))
return -EINVAL;
}
ret = validate_exec_list(exec, args->buffer_count);
if (ret)
@ -863,7 +941,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto pre_mutex_err;
}
eb = eb_create(args->buffer_count);
eb = eb_create(args);
if (eb == NULL) {
mutex_unlock(&dev->struct_mutex);
ret = -ENOMEM;
@ -871,51 +949,28 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Look up object handles */
INIT_LIST_HEAD(&objects);
for (i = 0; i < args->buffer_count; i++) {
struct drm_i915_gem_object *obj;
obj = to_intel_bo(drm_gem_object_lookup(dev, file,
exec[i].handle));
if (&obj->base == NULL) {
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
/* prevent error path from reading uninitialized data */
ret = -ENOENT;
ret = eb_lookup_objects(eb, exec, args, file);
if (ret)
goto err;
}
if (!list_empty(&obj->exec_list)) {
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
goto err;
}
list_add_tail(&obj->exec_list, &objects);
obj->exec_handle = exec[i].handle;
obj->exec_entry = &exec[i];
eb_add_object(eb, obj);
}
/* take note of the batch buffer before we might reorder the lists */
batch_obj = list_entry(objects.prev,
batch_obj = list_entry(eb->objects.prev,
struct drm_i915_gem_object,
exec_list);
/* Move the objects en-masse into the GTT, evicting if necessary. */
ret = i915_gem_execbuffer_reserve(ring, file, &objects);
need_relocs = (args->flags & I915_EXEC_NO_RELOC) == 0;
ret = i915_gem_execbuffer_reserve(ring, file, &eb->objects, &need_relocs);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
ret = i915_gem_execbuffer_relocate(dev, eb, &objects);
if (need_relocs)
ret = i915_gem_execbuffer_relocate(dev, eb);
if (ret) {
if (ret == -EFAULT) {
ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
&objects, eb,
exec,
args->buffer_count);
ret = i915_gem_execbuffer_relocate_slow(dev, args, file, ring,
eb, exec);
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
}
if (ret)
@ -937,7 +992,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
if (flags & I915_DISPATCH_SECURE && !batch_obj->has_global_gtt_mapping)
i915_gem_gtt_bind_object(batch_obj, batch_obj->cache_level);
ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
ret = i915_gem_execbuffer_move_to_gpu(ring, &eb->objects);
if (ret)
goto err;
@ -991,20 +1046,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
i915_gem_execbuffer_move_to_active(&objects, ring);
i915_gem_execbuffer_move_to_active(&eb->objects, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring);
err:
eb_destroy(eb);
while (!list_empty(&objects)) {
struct drm_i915_gem_object *obj;
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
exec_list);
list_del_init(&obj->exec_list);
drm_gem_object_unreference(&obj->base);
}
mutex_unlock(&dev->struct_mutex);
@ -1113,7 +1159,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
}
exec2_list = kmalloc(sizeof(*exec2_list)*args->buffer_count,
GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (exec2_list == NULL)
exec2_list = drm_malloc_ab(sizeof(*exec2_list),
args->buffer_count);


@ -44,7 +44,7 @@ typedef uint32_t gtt_pte_t;
#define GEN6_PTE_CACHE_LLC_MLC (3 << 1)
#define GEN6_PTE_ADDR_ENCODE(addr) GEN6_GTT_ADDR_ENCODE(addr)
static inline gtt_pte_t pte_encode(struct drm_device *dev,
static inline gtt_pte_t gen6_pte_encode(struct drm_device *dev,
dma_addr_t addr,
enum i915_cache_level level)
{
@ -77,7 +77,7 @@ static inline gtt_pte_t pte_encode(struct drm_device *dev,
}
/* PPGTT support for Sandybdrige/Gen6 and later */
static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
static void gen6_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_entry,
unsigned num_entries)
{
@ -87,7 +87,8 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
unsigned first_pte = first_entry % I915_PPGTT_PT_ENTRIES;
unsigned last_pte, i;
scratch_pte = pte_encode(ppgtt->dev, ppgtt->scratch_page_dma_addr,
scratch_pte = gen6_pte_encode(ppgtt->dev,
ppgtt->scratch_page_dma_addr,
I915_CACHE_LLC);
while (num_entries) {
@ -108,114 +109,8 @@ static void i915_ppgtt_clear_range(struct i915_hw_ppgtt *ppgtt,
}
}
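
gen6_ppgtt_clear_range() (whose loop body is elided by the hunk above) walks the range one page table at a time: it clamps the last PTE to the table boundary, writes the scratch PTE into each slot, then moves on to the next page directory entry. A toy standalone walk of the same bookkeeping, assuming 1024 PTEs per page table (I915_PPGTT_PT_ENTRIES):

#include <stdio.h>

#define PT_ENTRIES 1024 /* I915_PPGTT_PT_ENTRIES, assumed */

/* Counts which PTEs each iteration of the clear loop would touch;
 * the real function writes the scratch PTE into each slot instead. */
int main(void)
{
        unsigned first_entry = 1000, num_entries = 3000;
        unsigned act_pd = first_entry / PT_ENTRIES;
        unsigned first_pte = first_entry % PT_ENTRIES;

        while (num_entries) {
                unsigned last_pte = first_pte + num_entries;

                if (last_pte > PT_ENTRIES)
                        last_pte = PT_ENTRIES;
                printf("pd %u: clear ptes [%u, %u)\n",
                       act_pd, first_pte, last_pte);
                num_entries -= last_pte - first_pte;
                first_pte = 0;
                act_pd++;
        }
        return 0;
}
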
int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
unsigned first_pd_entry_in_global_pt;
int i;
int ret = -ENOMEM;
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
first_pd_entry_in_global_pt = dev_priv->mm.gtt->gtt_total_entries - I915_PPGTT_PD_ENTRIES;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return ret;
ppgtt->dev = dev;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
goto err_ppgtt;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
if (!ppgtt->pt_pages[i])
goto err_pt_alloc;
}
if (dev_priv->mm.gtt->needs_dmar) {
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t)
*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i],
0, 4096,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev,
pt_addr)) {
ret = -EIO;
goto err_pd_pin;
}
ppgtt->pt_dma_addr[i] = pt_addr;
}
}
ppgtt->scratch_page_dma_addr = dev_priv->mm.gtt->scratch_page_dma;
i915_ppgtt_clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
dev_priv->mm.aliasing_ppgtt = ppgtt;
return 0;
err_pd_pin:
if (ppgtt->pt_dma_addr) {
for (i--; i >= 0; i--)
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
err_pt_alloc:
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
if (ppgtt->pt_pages[i])
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
err_ppgtt:
kfree(ppgtt);
return ret;
}
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
int i;
if (!ppgtt)
return;
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
kfree(ppgtt);
}
static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
const struct sg_table *pages,
static void gen6_ppgtt_insert_entries(struct i915_hw_ppgtt *ppgtt,
struct sg_table *pages,
unsigned first_entry,
enum i915_cache_level cache_level)
{
@ -237,7 +132,7 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
for (j = first_pte; j < I915_PPGTT_PT_ENTRIES; j++) {
page_addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
pt_vaddr[j] = pte_encode(ppgtt->dev, page_addr,
pt_vaddr[j] = gen6_pte_encode(ppgtt->dev, page_addr,
cache_level);
/* grab the next page */
@ -258,12 +153,135 @@ static void i915_ppgtt_insert_sg_entries(struct i915_hw_ppgtt *ppgtt,
}
}
static void gen6_ppgtt_cleanup(struct i915_hw_ppgtt *ppgtt)
{
int i;
if (ppgtt->pt_dma_addr) {
for (i = 0; i < ppgtt->num_pd_entries; i++)
pci_unmap_page(ppgtt->dev->pdev,
ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++)
__free_page(ppgtt->pt_pages[i]);
kfree(ppgtt->pt_pages);
kfree(ppgtt);
}
static int gen6_ppgtt_init(struct i915_hw_ppgtt *ppgtt)
{
struct drm_device *dev = ppgtt->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned first_pd_entry_in_global_pt;
int i;
int ret = -ENOMEM;
/* ppgtt PDEs reside in the global gtt pagetable, which has 512*1024
* entries. For aliasing ppgtt support we just steal them at the end for
* now. */
first_pd_entry_in_global_pt =
gtt_total_entries(dev_priv->gtt) - I915_PPGTT_PD_ENTRIES;
ppgtt->num_pd_entries = I915_PPGTT_PD_ENTRIES;
ppgtt->clear_range = gen6_ppgtt_clear_range;
ppgtt->insert_entries = gen6_ppgtt_insert_entries;
ppgtt->cleanup = gen6_ppgtt_cleanup;
ppgtt->pt_pages = kzalloc(sizeof(struct page *)*ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_pages)
return -ENOMEM;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
ppgtt->pt_pages[i] = alloc_page(GFP_KERNEL);
if (!ppgtt->pt_pages[i])
goto err_pt_alloc;
}
ppgtt->pt_dma_addr = kzalloc(sizeof(dma_addr_t) *ppgtt->num_pd_entries,
GFP_KERNEL);
if (!ppgtt->pt_dma_addr)
goto err_pt_alloc;
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
pt_addr = pci_map_page(dev->pdev, ppgtt->pt_pages[i], 0, 4096,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(dev->pdev, pt_addr)) {
ret = -EIO;
goto err_pd_pin;
}
ppgtt->pt_dma_addr[i] = pt_addr;
}
ppgtt->scratch_page_dma_addr = dev_priv->gtt.scratch_page_dma;
ppgtt->clear_range(ppgtt, 0,
ppgtt->num_pd_entries*I915_PPGTT_PT_ENTRIES);
ppgtt->pd_offset = (first_pd_entry_in_global_pt)*sizeof(gtt_pte_t);
return 0;
err_pd_pin:
if (ppgtt->pt_dma_addr) {
for (i--; i >= 0; i--)
pci_unmap_page(dev->pdev, ppgtt->pt_dma_addr[i],
4096, PCI_DMA_BIDIRECTIONAL);
}
err_pt_alloc:
kfree(ppgtt->pt_dma_addr);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
if (ppgtt->pt_pages[i])
__free_page(ppgtt->pt_pages[i]);
}
kfree(ppgtt->pt_pages);
return ret;
}
static int i915_gem_init_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt;
int ret;
ppgtt = kzalloc(sizeof(*ppgtt), GFP_KERNEL);
if (!ppgtt)
return -ENOMEM;
ppgtt->dev = dev;
ret = gen6_ppgtt_init(ppgtt);
if (ret)
kfree(ppgtt);
else
dev_priv->mm.aliasing_ppgtt = ppgtt;
return ret;
}
void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
if (!ppgtt)
return;
ppgtt->cleanup(ppgtt);
}
void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
i915_ppgtt_insert_sg_entries(ppgtt,
obj->pages,
ppgtt->insert_entries(ppgtt, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
cache_level);
}
@ -271,7 +289,7 @@ void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
struct drm_i915_gem_object *obj)
{
i915_ppgtt_clear_range(ppgtt,
ppgtt->clear_range(ppgtt,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
}
@ -290,15 +308,11 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
return;
pd_addr = (gtt_pte_t __iomem*)dev_priv->mm.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
pd_addr = (gtt_pte_t __iomem*)dev_priv->gtt.gsm + ppgtt->pd_offset/sizeof(gtt_pte_t);
for (i = 0; i < ppgtt->num_pd_entries; i++) {
dma_addr_t pt_addr;
if (dev_priv->mm.gtt->needs_dmar)
pt_addr = ppgtt->pt_dma_addr[i];
else
pt_addr = page_to_phys(ppgtt->pt_pages[i]);
pd_entry = GEN6_PDE_ADDR_ENCODE(pt_addr);
pd_entry |= GEN6_PDE_VALID;
@ -338,11 +352,27 @@ void i915_gem_init_ppgtt(struct drm_device *dev)
}
}
extern int intel_iommu_gfx_mapped;
/* Certain Gen5 chipsets require idling the GPU before
* unmapping anything from the GTT when VT-d is enabled.
*/
static inline bool needs_idle_maps(struct drm_device *dev)
{
#ifdef CONFIG_INTEL_IOMMU
/* Query intel_iommu to see if we need the workaround. Presumably that
* was loaded first.
*/
if (IS_GEN5(dev) && IS_MOBILE(dev) && intel_iommu_gfx_mapped)
return true;
#endif
return false;
}
static bool do_idling(struct drm_i915_private *dev_priv)
{
bool ret = dev_priv->mm.interruptible;
if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
if (unlikely(dev_priv->gtt.do_idle_maps)) {
dev_priv->mm.interruptible = false;
if (i915_gpu_idle(dev_priv->dev)) {
DRM_ERROR("Couldn't idle GPU\n");
@ -356,45 +386,18 @@ static bool do_idling(struct drm_i915_private *dev_priv)
static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
{
if (unlikely(dev_priv->mm.gtt->do_idle_maps))
if (unlikely(dev_priv->gtt.do_idle_maps))
dev_priv->mm.interruptible = interruptible;
}
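
do_idling()/undo_idling() bracket GTT unmaps on the Ironlake + VT-d configurations flagged by needs_idle_maps(): the caller saves the interruptible flag, forces the GPU idle, performs the unmap, then restores the flag. A userspace mock of the bracket pattern (struct priv is a stand-in, not the driver's dev_priv):

#include <stdbool.h>
#include <stdio.h>

struct priv {
        bool interruptible;
        bool do_idle_maps;
};

static bool do_idling(struct priv *p)
{
        bool ret = p->interruptible;

        if (p->do_idle_maps) {
                p->interruptible = false;
                printf("idling GPU before GTT unmap\n");
        }
        return ret;
}

static void undo_idling(struct priv *p, bool interruptible)
{
        if (p->do_idle_maps)
                p->interruptible = interruptible;
}

int main(void)
{
        struct priv p = { .interruptible = true, .do_idle_maps = true };
        bool was = do_idling(&p);

        /* ... unmap pages from the GTT here ... */
        undo_idling(&p, was);
        printf("interruptible restored: %d\n", p.interruptible);
        return 0;
}
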
static void i915_ggtt_clear_range(struct drm_device *dev,
unsigned first_entry,
unsigned num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->mm.gsm + first_entry;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
int i;
if (INTEL_INFO(dev)->gen < 6) {
intel_gtt_clear_range(first_entry, num_entries);
return;
}
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = pte_encode(dev, dev_priv->mm.gtt->scratch_page_dma, I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
/* First fill our portion of the GTT with scratch pages */
i915_ggtt_clear_range(dev, dev_priv->mm.gtt_start / PAGE_SIZE,
(dev_priv->mm.gtt_end - dev_priv->mm.gtt_start) / PAGE_SIZE);
dev_priv->gtt.gtt_clear_range(dev, dev_priv->gtt.start / PAGE_SIZE,
dev_priv->gtt.total / PAGE_SIZE);
list_for_each_entry(obj, &dev_priv->mm.bound_list, gtt_list) {
i915_gem_clflush_object(obj);
@ -423,17 +426,15 @@ int i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj)
* within the global GTT as well as accessible by the GPU through the GMADR
* mapped BAR (dev_priv->mm.gtt->gtt).
*/
static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
static void gen6_ggtt_insert_entries(struct drm_device *dev,
struct sg_table *st,
unsigned int first_entry,
enum i915_cache_level level)
{
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct sg_table *st = obj->pages;
struct scatterlist *sg = st->sgl;
const int first_entry = obj->gtt_space->start >> PAGE_SHIFT;
const int max_entries = dev_priv->mm.gtt->gtt_total_entries - first_entry;
gtt_pte_t __iomem *gtt_entries =
(gtt_pte_t __iomem *)dev_priv->mm.gsm + first_entry;
(gtt_pte_t __iomem *)dev_priv->gtt.gsm + first_entry;
int unused, i = 0;
unsigned int len, m = 0;
dma_addr_t addr;
@ -442,14 +443,12 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
len = sg_dma_len(sg) >> PAGE_SHIFT;
for (m = 0; m < len; m++) {
addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
iowrite32(pte_encode(dev, addr, level), &gtt_entries[i]);
iowrite32(gen6_pte_encode(dev, addr, level),
&gtt_entries[i]);
i++;
}
}
BUG_ON(i > max_entries);
BUG_ON(i != obj->base.size / PAGE_SIZE);
/* XXX: This serves as a posting read to make sure that the PTE has
* actually been updated. There is some concern that even though
* registers and PTEs are within the same BAR that they are potentially
@ -457,7 +456,8 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
* hardware should work, we must keep this posting read for paranoia.
*/
if (i != 0)
WARN_ON(readl(&gtt_entries[i-1]) != pte_encode(dev, addr, level));
WARN_ON(readl(&gtt_entries[i-1])
!= gen6_pte_encode(dev, addr, level));
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
@ -467,26 +467,68 @@ static void gen6_ggtt_bind_object(struct drm_i915_gem_object *obj,
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
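
gen6_ggtt_insert_entries() streams one PTE per page into the io-mapped gsm and then reads the last entry back, both to post the writes before the GFX_FLSH_CNTL_GEN6 flush and to catch a mismatch early. A userspace sketch of that write-then-read-back pattern (the PTE bit layout here is made up; gen6_pte_encode() is the real encoder):

#include <stdint.h>
#include <stdio.h>

/* gtt[] stands in for the io-mapped gsm; encode() stands in for
 * gen6_pte_encode() with invented cache/valid bits. */
static uint32_t encode(uint64_t addr, int cached)
{
        return (uint32_t)(addr & ~0xfffull) | (cached ? 0x6 : 0x2) | 1;
}

int main(void)
{
        uint32_t gtt[8];
        uint64_t base = 0x100000;
        int i;

        for (i = 0; i < 8; i++)
                gtt[i] = encode(base + ((uint64_t)i << 12), 1);

        /* "posting read" of the last written entry */
        if (gtt[7] != encode(base + (7ull << 12), 1))
                printf("PTE readback mismatch!\n");
        else
                printf("last PTE = 0x%08x\n", gtt[7]);
        return 0;
}
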
static void gen6_ggtt_clear_range(struct drm_device *dev,
unsigned int first_entry,
unsigned int num_entries)
{
struct drm_i915_private *dev_priv = dev->dev_private;
gtt_pte_t scratch_pte;
gtt_pte_t __iomem *gtt_base = (gtt_pte_t __iomem *) dev_priv->gtt.gsm + first_entry;
const int max_entries = gtt_total_entries(dev_priv->gtt) - first_entry;
int i;
if (WARN(num_entries > max_entries,
"First entry = %d; Num entries = %d (max=%d)\n",
first_entry, num_entries, max_entries))
num_entries = max_entries;
scratch_pte = gen6_pte_encode(dev, dev_priv->gtt.scratch_page_dma,
I915_CACHE_LLC);
for (i = 0; i < num_entries; i++)
iowrite32(scratch_pte, &gtt_base[i]);
readl(gtt_base);
}
static void i915_ggtt_insert_entries(struct drm_device *dev,
struct sg_table *st,
unsigned int pg_start,
enum i915_cache_level cache_level)
{
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(st, pg_start, flags);
}
static void i915_ggtt_clear_range(struct drm_device *dev,
unsigned int first_entry,
unsigned int num_entries)
{
intel_gtt_clear_range(first_entry, num_entries);
}
void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
enum i915_cache_level cache_level)
{
struct drm_device *dev = obj->base.dev;
if (INTEL_INFO(dev)->gen < 6) {
unsigned int flags = (cache_level == I915_CACHE_NONE) ?
AGP_USER_MEMORY : AGP_USER_CACHED_MEMORY;
intel_gtt_insert_sg_entries(obj->pages,
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->gtt.gtt_insert_entries(dev, obj->pages,
obj->gtt_space->start >> PAGE_SHIFT,
flags);
} else {
gen6_ggtt_bind_object(obj, cache_level);
}
cache_level);
obj->has_global_gtt_mapping = 1;
}
void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
{
i915_ggtt_clear_range(obj->base.dev,
struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
dev_priv->gtt.gtt_clear_range(obj->base.dev,
obj->gtt_space->start >> PAGE_SHIFT,
obj->base.size >> PAGE_SHIFT);
@ -525,17 +567,27 @@ static void i915_gtt_color_adjust(struct drm_mm_node *node,
*end -= 4096;
}
}
void i915_gem_setup_global_gtt(struct drm_device *dev,
unsigned long start,
unsigned long mappable_end,
unsigned long end)
{
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently prefetches
* past the end of the object, and we've seen multiple hangs with the
* GPU head pointer stuck in a batchbuffer bound at the last page of the
* aperture. One page should be enough to keep any prefetching inside
* of the aperture.
*/
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_mm_node *entry;
struct drm_i915_gem_object *obj;
unsigned long hole_start, hole_end;
BUG_ON(mappable_end > end);
/* Subtract the guard page ... */
drm_mm_init(&dev_priv->mm.gtt_space, start, end - start - PAGE_SIZE);
if (!HAS_LLC(dev))
@ -554,24 +606,20 @@ void i915_gem_setup_global_gtt(struct drm_device *dev,
obj->has_global_gtt_mapping = 1;
}
dev_priv->mm.gtt_start = start;
dev_priv->mm.gtt_mappable_end = mappable_end;
dev_priv->mm.gtt_end = end;
dev_priv->mm.gtt_total = end - start;
dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
dev_priv->gtt.start = start;
dev_priv->gtt.total = end - start;
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &dev_priv->mm.gtt_space,
hole_start, hole_end) {
DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
hole_start, hole_end);
i915_ggtt_clear_range(dev,
hole_start / PAGE_SIZE,
dev_priv->gtt.gtt_clear_range(dev, hole_start / PAGE_SIZE,
(hole_end-hole_start) / PAGE_SIZE);
}
/* And finally clear the reserved guard page */
i915_ggtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
dev_priv->gtt.gtt_clear_range(dev, end / PAGE_SIZE - 1, 1);
}
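
i915_gem_setup_global_gtt() hands GEM the range [start, end) but keeps the final page bound to the scratch page, since the hardware has been seen prefetching past the last object and hanging with the head pointer stuck in the last page of the aperture. A small arithmetic sketch of the resulting layout (the sizes below are hypothetical):

#include <stdio.h>

#define PAGE_SIZE 4096ul

int main(void)
{
        unsigned long start = 0, mappable_end = 256ul << 20, end = 2048ul << 20;
        unsigned long managed = end - start - PAGE_SIZE;   /* guard page removed */

        printf("allocator range: [%lu, %lu) (%lu MiB usable)\n",
               start, start + managed, managed >> 20);
        printf("guard page at GTT entry %lu\n", end / PAGE_SIZE - 1);
        printf("mappable portion ends at %lu MiB\n", mappable_end >> 20);
        return 0;
}
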
static bool
@ -593,12 +641,12 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long gtt_size, mappable_size;
int ret;
gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
gtt_size = dev_priv->gtt.total;
mappable_size = dev_priv->gtt.mappable_end;
if (intel_enable_ppgtt(dev) && HAS_ALIASING_PPGTT(dev)) {
int ret;
/* PPGTT pdes are stolen from global gtt ptes, so shrink the
* aperture accordingly when using aliasing ppgtt. */
gtt_size -= I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
@ -606,23 +654,14 @@ void i915_gem_init_global_gtt(struct drm_device *dev)
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
ret = i915_gem_init_aliasing_ppgtt(dev);
if (ret) {
mutex_unlock(&dev->struct_mutex);
if (!ret)
return;
DRM_ERROR("Aliased PPGTT setup failed %d\n", ret);
drm_mm_takedown(&dev_priv->mm.gtt_space);
gtt_size += I915_PPGTT_PD_ENTRIES*PAGE_SIZE;
}
} else {
/* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch
* page. There are a number of places where the hardware
* apparently prefetches past the end of the object, and we've
* seen multiple hangs with the GPU head pointer stuck in a
* batchbuffer bound at the last page of the aperture. One page
* should be enough to keep any prefetching inside of the
* aperture.
*/
i915_gem_setup_global_gtt(dev, 0, mappable_size, gtt_size);
}
}
static int setup_scratch_page(struct drm_device *dev)
@ -645,8 +684,8 @@ static int setup_scratch_page(struct drm_device *dev)
#else
dma_addr = page_to_phys(page);
#endif
dev_priv->mm.gtt->scratch_page = page;
dev_priv->mm.gtt->scratch_page_dma = dma_addr;
dev_priv->gtt.scratch_page = page;
dev_priv->gtt.scratch_page_dma = dma_addr;
return 0;
}
@ -654,11 +693,11 @@ static int setup_scratch_page(struct drm_device *dev)
static void teardown_scratch_page(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
set_pages_wb(dev_priv->mm.gtt->scratch_page, 1);
pci_unmap_page(dev->pdev, dev_priv->mm.gtt->scratch_page_dma,
set_pages_wb(dev_priv->gtt.scratch_page, 1);
pci_unmap_page(dev->pdev, dev_priv->gtt.scratch_page_dma,
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
put_page(dev_priv->mm.gtt->scratch_page);
__free_page(dev_priv->mm.gtt->scratch_page);
put_page(dev_priv->gtt.scratch_page);
__free_page(dev_priv->gtt.scratch_page);
}
static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@ -668,14 +707,14 @@ static inline unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
return snb_gmch_ctl << 20;
}
static inline unsigned int gen6_get_stolen_size(u16 snb_gmch_ctl)
static inline size_t gen6_get_stolen_size(u16 snb_gmch_ctl)
{
snb_gmch_ctl >>= SNB_GMCH_GMS_SHIFT;
snb_gmch_ctl &= SNB_GMCH_GMS_MASK;
return snb_gmch_ctl << 25; /* 32 MB units */
}
static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
static inline size_t gen7_get_stolen_size(u16 snb_gmch_ctl)
{
static const int stolen_decoder[] = {
0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352};
@ -684,103 +723,122 @@ static inline unsigned int gen7_get_stolen_size(u16 snb_gmch_ctl)
return stolen_decoder[snb_gmch_ctl] << 20;
}
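
The helpers above decode the SNB_GMCH_CTRL config word: the GGMS field gives the GTT size in 1MB units, while stolen memory is either a 32MB-unit field (gen6) or an index into the decoder table (gen7). A userspace sketch of the decoding; the field shifts and masks are assumptions mirroring the SNB_GMCH_* defines, which this diff does not show:

#include <stdio.h>

static unsigned int gen6_total_gtt_size(unsigned short ctl)
{
        return ((ctl >> 8) & 3) << 20;          /* GGMS field, 1MB units (assumed) */
}

static unsigned long gen6_stolen_size(unsigned short ctl)
{
        return (unsigned long)((ctl >> 3) & 0x1f) << 25;  /* 32MB units (assumed) */
}

static unsigned long gen7_stolen_size(unsigned short ctl)
{
        /* table copied from the hunk above */
        static const int stolen_decoder[] = {
                0, 0, 0, 0, 0, 32, 48, 64, 128, 256, 96, 160, 224, 352 };
        unsigned int idx = (ctl >> 3) & 0x1f;

        if (idx >= sizeof(stolen_decoder) / sizeof(stolen_decoder[0]))
                return 0;
        return (unsigned long)stolen_decoder[idx] << 20;
}

int main(void)
{
        unsigned short ctl = (2 << 8) | (9 << 3);       /* hypothetical value */

        printf("gtt size    = %u MiB\n", gen6_total_gtt_size(ctl) >> 20);
        printf("gen6 stolen = %lu MiB\n", gen6_stolen_size(ctl) >> 20);
        printf("gen7 stolen = %lu MiB\n", gen7_stolen_size(ctl) >> 20);
        return 0;
}
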
int i915_gem_gtt_init(struct drm_device *dev)
static int gen6_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen)
{
struct drm_i915_private *dev_priv = dev->dev_private;
phys_addr_t gtt_bus_addr;
unsigned int gtt_size;
u16 snb_gmch_ctl;
int ret;
/* On modern platforms we need not worry ourself with the legacy
* hostbridge query stuff. Skip it entirely
/* 64/512MB is the current min/max we actually know of, but this is just
* a coarse sanity check.
*/
if (INTEL_INFO(dev)->gen < 6) {
ret = intel_gmch_probe(dev_priv->bridge_dev, dev->pdev, NULL);
if ((dev_priv->gtt.mappable_end < (64<<20) ||
(dev_priv->gtt.mappable_end > (512<<20)))) {
DRM_ERROR("Unknown GMADR size (%lx)\n",
dev_priv->gtt.mappable_end);
return -ENXIO;
}
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
gtt_size = gen6_get_total_gtt_size(snb_gmch_ctl);
if (IS_GEN7(dev))
*stolen = gen7_get_stolen_size(snb_gmch_ctl);
else
*stolen = gen6_get_stolen_size(snb_gmch_ctl);
*gtt_total = (gtt_size / sizeof(gtt_pte_t)) << PAGE_SHIFT;
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->gtt.gsm = ioremap_wc(gtt_bus_addr, gtt_size);
if (!dev_priv->gtt.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
return -ENOMEM;
}
ret = setup_scratch_page(dev);
if (ret)
DRM_ERROR("Scratch setup failed\n");
dev_priv->gtt.gtt_clear_range = gen6_ggtt_clear_range;
dev_priv->gtt.gtt_insert_entries = gen6_ggtt_insert_entries;
return ret;
}
static void gen6_gmch_remove(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
iounmap(dev_priv->gtt.gsm);
teardown_scratch_page(dev_priv->dev);
}
static int i915_gmch_probe(struct drm_device *dev,
size_t *gtt_total,
size_t *stolen)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
ret = intel_gmch_probe(dev_priv->bridge_dev, dev_priv->dev->pdev, NULL);
if (!ret) {
DRM_ERROR("failed to set up gmch\n");
return -EIO;
}
dev_priv->mm.gtt = intel_gtt_get();
if (!dev_priv->mm.gtt) {
DRM_ERROR("Failed to initialize GTT\n");
intel_gmch_remove();
return -ENODEV;
}
return 0;
}
intel_gtt_get(gtt_total, stolen);
dev_priv->mm.gtt = kzalloc(sizeof(*dev_priv->mm.gtt), GFP_KERNEL);
if (!dev_priv->mm.gtt)
return -ENOMEM;
if (!pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(40)))
pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(40));
#ifdef CONFIG_INTEL_IOMMU
dev_priv->mm.gtt->needs_dmar = 1;
#endif
/* For GEN6+ the PTEs for the ggtt live at 2MB + BAR0 */
gtt_bus_addr = pci_resource_start(dev->pdev, 0) + (2<<20);
dev_priv->mm.gtt->gma_bus_addr = pci_resource_start(dev->pdev, 2);
/* i9xx_setup */
pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &snb_gmch_ctl);
dev_priv->mm.gtt->gtt_total_entries =
gen6_get_total_gtt_size(snb_gmch_ctl) / sizeof(gtt_pte_t);
if (INTEL_INFO(dev)->gen < 7)
dev_priv->mm.gtt->stolen_size = gen6_get_stolen_size(snb_gmch_ctl);
else
dev_priv->mm.gtt->stolen_size = gen7_get_stolen_size(snb_gmch_ctl);
dev_priv->mm.gtt->gtt_mappable_entries = pci_resource_len(dev->pdev, 2) >> PAGE_SHIFT;
/* 64/512MB is the current min/max we actually know of, but this is just a
* coarse sanity check.
*/
if ((dev_priv->mm.gtt->gtt_mappable_entries >> 8) < 64 ||
dev_priv->mm.gtt->gtt_mappable_entries > dev_priv->mm.gtt->gtt_total_entries) {
DRM_ERROR("Unknown GMADR entries (%d)\n",
dev_priv->mm.gtt->gtt_mappable_entries);
ret = -ENXIO;
goto err_out;
}
ret = setup_scratch_page(dev);
if (ret) {
DRM_ERROR("Scratch setup failed\n");
goto err_out;
}
dev_priv->mm.gsm = ioremap_wc(gtt_bus_addr,
dev_priv->mm.gtt->gtt_total_entries * sizeof(gtt_pte_t));
if (!dev_priv->mm.gsm) {
DRM_ERROR("Failed to map the gtt page table\n");
teardown_scratch_page(dev);
ret = -ENOMEM;
goto err_out;
}
/* GMADR is the PCI aperture used by SW to access tiled GFX surfaces in a linear fashion. */
DRM_INFO("Memory usable by graphics device = %dM\n", dev_priv->mm.gtt->gtt_total_entries >> 8);
DRM_DEBUG_DRIVER("GMADR size = %dM\n", dev_priv->mm.gtt->gtt_mappable_entries >> 8);
DRM_DEBUG_DRIVER("GTT stolen size = %dM\n", dev_priv->mm.gtt->stolen_size >> 20);
dev_priv->gtt.do_idle_maps = needs_idle_maps(dev_priv->dev);
dev_priv->gtt.gtt_clear_range = i915_ggtt_clear_range;
dev_priv->gtt.gtt_insert_entries = i915_ggtt_insert_entries;
return 0;
err_out:
kfree(dev_priv->mm.gtt);
if (INTEL_INFO(dev)->gen < 6)
intel_gmch_remove();
return ret;
}
void i915_gem_gtt_fini(struct drm_device *dev)
static void i915_gmch_remove(struct drm_device *dev)
{
intel_gmch_remove();
}
int i915_gem_gtt_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
iounmap(dev_priv->mm.gsm);
teardown_scratch_page(dev);
if (INTEL_INFO(dev)->gen < 6)
intel_gmch_remove();
kfree(dev_priv->mm.gtt);
struct i915_gtt *gtt = &dev_priv->gtt;
unsigned long gtt_size;
int ret;
gtt->mappable_base = pci_resource_start(dev->pdev, 2);
gtt->mappable_end = pci_resource_len(dev->pdev, 2);
if (INTEL_INFO(dev)->gen <= 5) {
dev_priv->gtt.gtt_probe = i915_gmch_probe;
dev_priv->gtt.gtt_remove = i915_gmch_remove;
} else {
dev_priv->gtt.gtt_probe = gen6_gmch_probe;
dev_priv->gtt.gtt_remove = gen6_gmch_remove;
}
ret = dev_priv->gtt.gtt_probe(dev, &dev_priv->gtt.total,
&dev_priv->gtt.stolen_size);
if (ret)
return ret;
gtt_size = (dev_priv->gtt.total >> PAGE_SHIFT) * sizeof(gtt_pte_t);
/* GMADR is the PCI mmio aperture into the global GTT. */
DRM_INFO("Memory usable by graphics device = %zdM\n",
dev_priv->gtt.total >> 20);
DRM_DEBUG_DRIVER("GMADR size = %ldM\n",
dev_priv->gtt.mappable_end >> 20);
DRM_DEBUG_DRIVER("GTT stolen size = %zdM\n",
dev_priv->gtt.stolen_size >> 20);
return 0;
}
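
The rewritten i915_gem_gtt_init() only picks a probe/remove pair (i915_gmch_probe for gen5 and earlier via the intel-gtt bridge code, gen6_gmch_probe otherwise) and afterwards works purely through struct i915_gtt's function pointers. A standalone mock of that dispatch shape (everything below is illustrative, not the driver's types):

#include <stddef.h>
#include <stdio.h>

struct gtt_ops {
        int (*probe)(size_t *gtt_total, size_t *stolen);
        void (*remove)(void);
};

static int gen6_probe(size_t *gtt_total, size_t *stolen)
{
        *gtt_total = 2048ul << 20;      /* hypothetical 2 GiB GTT */
        *stolen = 64ul << 20;
        return 0;
}

static void gen6_remove(void) { }

int main(void)
{
        struct gtt_ops ops = { .probe = gen6_probe, .remove = gen6_remove };
        size_t total, stolen;

        if (ops.probe(&total, &stolen) == 0)
                printf("GTT %zu MiB, stolen %zu MiB\n",
                       total >> 20, stolen >> 20);
        ops.remove();
        return 0;
}
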


@ -187,11 +187,11 @@ int i915_gem_init_stolen(struct drm_device *dev)
if (dev_priv->mm.stolen_base == 0)
return 0;
DRM_DEBUG_KMS("found %d bytes of stolen memory at %08lx\n",
dev_priv->mm.gtt->stolen_size, dev_priv->mm.stolen_base);
DRM_DEBUG_KMS("found %zd bytes of stolen memory at %08lx\n",
dev_priv->gtt.stolen_size, dev_priv->mm.stolen_base);
/* Basic memrange allocator for stolen space */
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->mm.gtt->stolen_size);
drm_mm_init(&dev_priv->mm.stolen, 0, dev_priv->gtt.stolen_size);
return 0;
}
@ -205,7 +205,7 @@ i915_pages_create_for_stolen(struct drm_device *dev,
struct scatterlist *sg;
DRM_DEBUG_DRIVER("offset=0x%x, size=%d\n", offset, size);
BUG_ON(offset > dev_priv->mm.gtt->stolen_size - size);
BUG_ON(offset > dev_priv->gtt.stolen_size - size);
/* We hide that we have no struct page backing our stolen object
* by wrapping the contiguous physical allocation with a fake


@ -272,18 +272,7 @@ i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
return false;
}
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
if (INTEL_INFO(obj->base.dev)->gen == 3)
size = 1024*1024;
else
size = 512*1024;
while (size < obj->base.size)
size <<= 1;
size = i915_gem_get_gtt_size(obj->base.dev, obj->base.size, tiling_mode);
if (obj->gtt_space->size != size)
return false;
@ -368,15 +357,15 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
obj->map_and_fenceable =
obj->gtt_space == NULL ||
(obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
(obj->gtt_offset + obj->base.size <= dev_priv->gtt.mappable_end &&
i915_gem_object_fence_ok(obj, args->tiling_mode));
/* Rebind if we need a change of alignment */
if (!obj->map_and_fenceable) {
u32 unfenced_alignment =
i915_gem_get_unfenced_gtt_alignment(dev,
obj->base.size,
args->tiling_mode);
i915_gem_get_gtt_alignment(dev, obj->base.size,
args->tiling_mode,
false);
if (obj->gtt_offset & (unfenced_alignment - 1))
ret = i915_gem_object_unbind(obj);
}


@ -356,8 +356,8 @@ static void notify_ring(struct drm_device *dev,
wake_up_all(&ring->irq_queue);
if (i915_enable_hangcheck) {
dev_priv->hangcheck_count = 0;
mod_timer(&dev_priv->hangcheck_timer,
dev_priv->gpu_error.hangcheck_count = 0;
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
}
@ -862,23 +862,60 @@ done:
*/
static void i915_error_work_func(struct work_struct *work)
{
drm_i915_private_t *dev_priv = container_of(work, drm_i915_private_t,
error_work);
struct i915_gpu_error *error = container_of(work, struct i915_gpu_error,
work);
drm_i915_private_t *dev_priv = container_of(error, drm_i915_private_t,
gpu_error);
struct drm_device *dev = dev_priv->dev;
struct intel_ring_buffer *ring;
char *error_event[] = { "ERROR=1", NULL };
char *reset_event[] = { "RESET=1", NULL };
char *reset_done_event[] = { "ERROR=0", NULL };
int i, ret;
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
if (atomic_read(&dev_priv->mm.wedged)) {
/*
* Note that there's only one work item which does gpu resets, so we
* need not worry about concurrent gpu resets potentially incrementing
* error->reset_counter twice. We only need to take care of another
* racing irq/hangcheck declaring the gpu dead for a second time. A
* quick check for that is good enough: schedule_work ensures the
* correct ordering between hang detection and this work item, and since
* the reset in-progress bit is only ever set by code outside of this
* work we don't need to worry about any other races.
*/
if (i915_reset_in_progress(error) && !i915_terminally_wedged(error)) {
DRM_DEBUG_DRIVER("resetting chip\n");
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
if (!i915_reset(dev)) {
atomic_set(&dev_priv->mm.wedged, 0);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE,
reset_event);
ret = i915_reset(dev);
if (ret == 0) {
/*
* After all the gem state is reset, increment the reset
* counter and wake up everyone waiting for the reset to
* complete.
*
* Since unlock operations are a one-sided barrier only,
* we need to insert a barrier here to order any seqno
* updates before the counter increment.
*/
smp_mb__before_atomic_inc();
atomic_inc(&dev_priv->gpu_error.reset_counter);
kobject_uevent_env(&dev->primary->kdev.kobj,
KOBJ_CHANGE, reset_done_event);
} else {
atomic_set(&error->reset_counter, I915_WEDGED);
}
complete_all(&dev_priv->error_completion);
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
wake_up_all(&dev_priv->gpu_error.reset_queue);
}
}
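
The reset handling above encodes everything in gpu_error.reset_counter: the hang handler sets the in-progress flag bit, the work item increments the counter once the reset succeeds (so waiters that sampled an older value back off), and a terminal failure parks the counter at I915_WEDGED. A toy model of that state machine; the flag and wedged values are assumptions (bit 0 and all-ones) matching how the driver appears to use them:

#include <stdio.h>

#define RESET_IN_PROGRESS_FLAG  1u              /* assumed */
#define WEDGED                  0xffffffffu     /* assumed */

static int reset_in_progress(unsigned int counter)
{
        return counter & RESET_IN_PROGRESS_FLAG;
}

static int terminally_wedged(unsigned int counter)
{
        return counter == WEDGED;
}

int main(void)
{
        unsigned int counter = 0;

        counter |= RESET_IN_PROGRESS_FLAG;      /* hang detected */
        printf("in progress: %d\n", reset_in_progress(counter));

        counter++;                              /* reset succeeded */
        printf("in progress: %d, wedged: %d\n",
               reset_in_progress(counter), terminally_wedged(counter));
        return 0;
}
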
@ -939,7 +976,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
goto unwind;
local_irq_save(flags);
if (reloc_offset < dev_priv->mm.gtt_mappable_end &&
if (reloc_offset < dev_priv->gtt.mappable_end &&
src->has_global_gtt_mapping) {
void __iomem *s;
@ -948,7 +985,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
* captures what the GPU read.
*/
s = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
s = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
reloc_offset);
memcpy_fromio(d, s, PAGE_SIZE);
io_mapping_unmap_atomic(s);
@ -1255,9 +1292,9 @@ static void i915_capture_error_state(struct drm_device *dev)
unsigned long flags;
int i, pipe;
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
return;
@ -1268,7 +1305,8 @@ static void i915_capture_error_state(struct drm_device *dev)
return;
}
DRM_INFO("capturing error event; look for more information in /debug/dri/%d/i915_error_state\n",
DRM_INFO("capturing error event; look for more information in"
"/sys/kernel/debug/dri/%d/i915_error_state\n",
dev->primary->index);
kref_init(&error->ref);
@ -1341,12 +1379,12 @@ static void i915_capture_error_state(struct drm_device *dev)
error->overlay = intel_overlay_capture_error_state(dev);
error->display = intel_display_capture_error_state(dev);
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
dev_priv->first_error = error;
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
if (dev_priv->gpu_error.first_error == NULL) {
dev_priv->gpu_error.first_error = error;
error = NULL;
}
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
i915_error_state_free(&error->ref);
@ -1358,10 +1396,10 @@ void i915_destroy_error_state(struct drm_device *dev)
struct drm_i915_error_state *error;
unsigned long flags;
spin_lock_irqsave(&dev_priv->error_lock, flags);
error = dev_priv->first_error;
dev_priv->first_error = NULL;
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
spin_lock_irqsave(&dev_priv->gpu_error.lock, flags);
error = dev_priv->gpu_error.first_error;
dev_priv->gpu_error.first_error = NULL;
spin_unlock_irqrestore(&dev_priv->gpu_error.lock, flags);
if (error)
kref_put(&error->ref, i915_error_state_free);
@ -1482,17 +1520,18 @@ void i915_handle_error(struct drm_device *dev, bool wedged)
i915_report_and_clear_eir(dev);
if (wedged) {
INIT_COMPLETION(dev_priv->error_completion);
atomic_set(&dev_priv->mm.wedged, 1);
atomic_set_mask(I915_RESET_IN_PROGRESS_FLAG,
&dev_priv->gpu_error.reset_counter);
/*
* Wakeup waiting processes so they don't hang
* Wakeup waiting processes so that the reset work item
* doesn't deadlock trying to grab various locks.
*/
for_each_ring(ring, dev_priv, i)
wake_up_all(&ring->irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
queue_work(dev_priv->wq, &dev_priv->gpu_error.work);
}
static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
@ -1723,7 +1762,7 @@ static bool i915_hangcheck_hung(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
if (dev_priv->hangcheck_count++ > 1) {
if (dev_priv->gpu_error.hangcheck_count++ > 1) {
bool hung = true;
DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
@ -1782,25 +1821,29 @@ void i915_hangcheck_elapsed(unsigned long data)
goto repeat;
}
dev_priv->hangcheck_count = 0;
dev_priv->gpu_error.hangcheck_count = 0;
return;
}
i915_get_extra_instdone(dev, instdone);
if (memcmp(dev_priv->last_acthd, acthd, sizeof(acthd)) == 0 &&
memcmp(dev_priv->prev_instdone, instdone, sizeof(instdone)) == 0) {
if (memcmp(dev_priv->gpu_error.last_acthd, acthd,
sizeof(acthd)) == 0 &&
memcmp(dev_priv->gpu_error.prev_instdone, instdone,
sizeof(instdone)) == 0) {
if (i915_hangcheck_hung(dev))
return;
} else {
dev_priv->hangcheck_count = 0;
dev_priv->gpu_error.hangcheck_count = 0;
memcpy(dev_priv->last_acthd, acthd, sizeof(acthd));
memcpy(dev_priv->prev_instdone, instdone, sizeof(instdone));
memcpy(dev_priv->gpu_error.last_acthd, acthd,
sizeof(acthd));
memcpy(dev_priv->gpu_error.prev_instdone, instdone,
sizeof(instdone));
}
repeat:
/* Reset timer in case the chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
mod_timer(&dev_priv->gpu_error.hangcheck_timer,
round_jiffies_up(jiffies + DRM_I915_HANGCHECK_JIFFIES));
}
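
The hangcheck path now keeps its state in gpu_error: each timer tick snapshots ACTHD and the extra INSTDONE registers, and only when the snapshot matches the previous one more than once in a row is the GPU declared hung; any progress resets the count. A toy model of that heuristic with two dummy state words:

#include <stdio.h>
#include <string.h>

struct hangcheck {
        int count;
        unsigned last[2];
};

/* Returns 1 once the same state has been seen often enough in a row. */
static int sample(struct hangcheck *hc, const unsigned now[2])
{
        if (memcmp(hc->last, now, sizeof(hc->last)) == 0) {
                if (hc->count++ > 1)
                        return 1;       /* hung */
        } else {
                hc->count = 0;
                memcpy(hc->last, now, sizeof(hc->last));
        }
        return 0;
}

int main(void)
{
        struct hangcheck hc = { 0, { 0, 0 } };
        const unsigned stuck[2] = { 0x1234, 0x5678 };
        int i;

        for (i = 0; i < 4; i++)
                if (sample(&hc, stuck))
                        printf("hang declared after sample %d\n", i + 1);
        return 0;
}
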
@ -1892,6 +1935,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
DE_AUX_CHANNEL_A;
u32 render_irqs;
u32 hotplug_mask;
u32 pch_irq_mask;
dev_priv->irq_mask = ~display_mask;
@ -1935,10 +1979,10 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
SDE_AUX_MASK);
}
dev_priv->pch_irq_mask = ~hotplug_mask;
pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
I915_WRITE(SDEIMR, pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
@ -1966,6 +2010,7 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
DE_AUX_CHANNEL_A_IVB;
u32 render_irqs;
u32 hotplug_mask;
u32 pch_irq_mask;
dev_priv->irq_mask = ~display_mask;
@ -1995,10 +2040,10 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
SDE_PORTD_HOTPLUG_CPT |
SDE_GMBUS_CPT |
SDE_AUX_MASK_CPT);
dev_priv->pch_irq_mask = ~hotplug_mask;
pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
I915_WRITE(SDEIMR, pch_irq_mask);
I915_WRITE(SDEIER, hotplug_mask);
POSTING_READ(SDEIER);
@ -2767,11 +2812,12 @@ void intel_irq_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
INIT_WORK(&dev_priv->error_work, i915_error_work_func);
INIT_WORK(&dev_priv->gpu_error.work, i915_error_work_func);
INIT_WORK(&dev_priv->rps.work, gen6_pm_rps_work);
INIT_WORK(&dev_priv->l3_parity.error_work, ivybridge_parity_work);
setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
setup_timer(&dev_priv->gpu_error.hangcheck_timer,
i915_hangcheck_elapsed,
(unsigned long) dev);
pm_qos_add_request(&dev_priv->pm_qos, PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);


@ -141,9 +141,15 @@
#define VGA_MSR_MEM_EN (1<<1)
#define VGA_MSR_CGA_MODE (1<<0)
#define VGA_SR_INDEX 0x3c4
/*
* SR01 is the only VGA register touched on non-UMS setups.
* VLV doesn't do UMS, so the sequencer index/data registers
* are the only VGA registers which need to include
* display_mmio_offset.
*/
#define VGA_SR_INDEX (dev_priv->info->display_mmio_offset + 0x3c4)
#define SR01 1
#define VGA_SR_DATA 0x3c5
#define VGA_SR_DATA (dev_priv->info->display_mmio_offset + 0x3c5)
#define VGA_AR_INDEX 0x3c0
#define VGA_AR_VID_EN (1<<5)
@ -336,17 +342,19 @@
* 0x801c/3c: core clock bits
* 0x8048/68: low pass filter coefficients
* 0x8100: fast clock controls
*
* DPIO is VLV only.
*/
#define DPIO_PKT 0x2100
#define DPIO_PKT (VLV_DISPLAY_BASE + 0x2100)
#define DPIO_RID (0<<24)
#define DPIO_OP_WRITE (1<<16)
#define DPIO_OP_READ (0<<16)
#define DPIO_PORTID (0x12<<8)
#define DPIO_BYTE (0xf<<4)
#define DPIO_BUSY (1<<0) /* status only */
#define DPIO_DATA 0x2104
#define DPIO_REG 0x2108
#define DPIO_CTL 0x2110
#define DPIO_DATA (VLV_DISPLAY_BASE + 0x2104)
#define DPIO_REG (VLV_DISPLAY_BASE + 0x2108)
#define DPIO_CTL (VLV_DISPLAY_BASE + 0x2110)
#define DPIO_MODSEL1 (1<<3) /* if ref clk b == 27 */
#define DPIO_MODSEL0 (1<<2) /* if ref clk a == 27 */
#define DPIO_SFR_BYPASS (1<<1)
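As a hedged sketch of the sideband protocol these packet registers implement (reg here is a hypothetical DPIO offset such as one of the 0x80xx values listed above, and wait_for() is assumed to be the driver's usual timeout-poll helper):

	u32 val = 0;

	/* program the target DPIO register, then issue a read transaction */
	I915_WRITE(DPIO_REG, reg);
	I915_WRITE(DPIO_PKT, DPIO_RID | DPIO_OP_READ | DPIO_PORTID | DPIO_BYTE);

	/* DPIO_BUSY clears once the sideband transaction has completed */
	if (wait_for((I915_READ(DPIO_PKT) & DPIO_BUSY) == 0, 100) == 0)
		val = I915_READ(DPIO_DATA);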
@ -554,13 +562,13 @@
#define IIR 0x020a4
#define IMR 0x020a8
#define ISR 0x020ac
#define VLV_GUNIT_CLOCK_GATE 0x182060
#define VLV_GUNIT_CLOCK_GATE (VLV_DISPLAY_BASE + 0x2060)
#define GCFG_DIS (1<<8)
#define VLV_IIR_RW 0x182084
#define VLV_IER 0x1820a0
#define VLV_IIR 0x1820a4
#define VLV_IMR 0x1820a8
#define VLV_ISR 0x1820ac
#define VLV_IIR_RW (VLV_DISPLAY_BASE + 0x2084)
#define VLV_IER (VLV_DISPLAY_BASE + 0x20a0)
#define VLV_IIR (VLV_DISPLAY_BASE + 0x20a4)
#define VLV_IMR (VLV_DISPLAY_BASE + 0x20a8)
#define VLV_ISR (VLV_DISPLAY_BASE + 0x20ac)
#define I915_PIPE_CONTROL_NOTIFY_INTERRUPT (1<<18)
#define I915_DISPLAY_PORT_INTERRUPT (1<<17)
#define I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT (1<<15)
@ -733,6 +741,7 @@
#define GEN7_FF_TS_SCHED_HS0 (0x3<<16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1<<16)
#define GEN7_FF_TS_SCHED_HW (0x0<<16) /* Default */
#define GEN7_FF_VS_REF_CNT_FFME (1 << 15)
#define GEN7_FF_VS_SCHED_HS1 (0x5<<12)
#define GEN7_FF_VS_SCHED_HS0 (0x3<<12)
#define GEN7_FF_VS_SCHED_LOAD_BALANCE (0x1<<12) /* Default */
@ -919,8 +928,8 @@
#define VGA1_PD_P1_DIV_2 (1 << 13)
#define VGA1_PD_P1_SHIFT 8
#define VGA1_PD_P1_MASK (0x1f << 8)
#define _DPLL_A 0x06014
#define _DPLL_B 0x06018
#define _DPLL_A (dev_priv->info->display_mmio_offset + 0x6014)
#define _DPLL_B (dev_priv->info->display_mmio_offset + 0x6018)
#define DPLL(pipe) _PIPE(pipe, _DPLL_A, _DPLL_B)
#define DPLL_VCO_ENABLE (1 << 31)
#define DPLL_DVO_HIGH_SPEED (1 << 30)
@ -979,7 +988,7 @@
#define SDVO_MULTIPLIER_MASK 0x000000ff
#define SDVO_MULTIPLIER_SHIFT_HIRES 4
#define SDVO_MULTIPLIER_SHIFT_VGA 0
#define _DPLL_A_MD 0x0601c /* 965+ only */
#define _DPLL_A_MD (dev_priv->info->display_mmio_offset + 0x601c) /* 965+ only */
/*
* UDI pixel divider, controlling how many pixels are stuffed into a packet.
*
@ -1016,7 +1025,7 @@
*/
#define DPLL_MD_VGA_UDI_MULTIPLIER_MASK 0x0000003f
#define DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT 0
#define _DPLL_B_MD 0x06020 /* 965+ only */
#define _DPLL_B_MD (dev_priv->info->display_mmio_offset + 0x6020) /* 965+ only */
#define DPLL_MD(pipe) _PIPE(pipe, _DPLL_A_MD, _DPLL_B_MD)
#define _FPA0 0x06040
@ -1159,15 +1168,15 @@
#define RAMCLK_GATE_D 0x6210 /* CRL only */
#define DEUC 0x6214 /* CRL only */
#define FW_BLC_SELF_VLV 0x6500
#define FW_BLC_SELF_VLV (VLV_DISPLAY_BASE + 0x6500)
#define FW_CSPWRDWNEN (1<<15)
/*
* Palette regs
*/
#define _PALETTE_A 0x0a000
#define _PALETTE_B 0x0a800
#define _PALETTE_A (dev_priv->info->display_mmio_offset + 0xa000)
#define _PALETTE_B (dev_priv->info->display_mmio_offset + 0xa800)
#define PALETTE(pipe) _PIPE(pipe, _PALETTE_A, _PALETTE_B)
/* MCH MMIO space */
@ -1532,26 +1541,26 @@
*/
/* Pipe A timing regs */
#define _HTOTAL_A 0x60000
#define _HBLANK_A 0x60004
#define _HSYNC_A 0x60008
#define _VTOTAL_A 0x6000c
#define _VBLANK_A 0x60010
#define _VSYNC_A 0x60014
#define _PIPEASRC 0x6001c
#define _BCLRPAT_A 0x60020
#define _VSYNCSHIFT_A 0x60028
#define _HTOTAL_A (dev_priv->info->display_mmio_offset + 0x60000)
#define _HBLANK_A (dev_priv->info->display_mmio_offset + 0x60004)
#define _HSYNC_A (dev_priv->info->display_mmio_offset + 0x60008)
#define _VTOTAL_A (dev_priv->info->display_mmio_offset + 0x6000c)
#define _VBLANK_A (dev_priv->info->display_mmio_offset + 0x60010)
#define _VSYNC_A (dev_priv->info->display_mmio_offset + 0x60014)
#define _PIPEASRC (dev_priv->info->display_mmio_offset + 0x6001c)
#define _BCLRPAT_A (dev_priv->info->display_mmio_offset + 0x60020)
#define _VSYNCSHIFT_A (dev_priv->info->display_mmio_offset + 0x60028)
/* Pipe B timing regs */
#define _HTOTAL_B 0x61000
#define _HBLANK_B 0x61004
#define _HSYNC_B 0x61008
#define _VTOTAL_B 0x6100c
#define _VBLANK_B 0x61010
#define _VSYNC_B 0x61014
#define _PIPEBSRC 0x6101c
#define _BCLRPAT_B 0x61020
#define _VSYNCSHIFT_B 0x61028
#define _HTOTAL_B (dev_priv->info->display_mmio_offset + 0x61000)
#define _HBLANK_B (dev_priv->info->display_mmio_offset + 0x61004)
#define _HSYNC_B (dev_priv->info->display_mmio_offset + 0x61008)
#define _VTOTAL_B (dev_priv->info->display_mmio_offset + 0x6100c)
#define _VBLANK_B (dev_priv->info->display_mmio_offset + 0x61010)
#define _VSYNC_B (dev_priv->info->display_mmio_offset + 0x61014)
#define _PIPEBSRC (dev_priv->info->display_mmio_offset + 0x6101c)
#define _BCLRPAT_B (dev_priv->info->display_mmio_offset + 0x61020)
#define _VSYNCSHIFT_B (dev_priv->info->display_mmio_offset + 0x61028)
#define HTOTAL(trans) _TRANSCODER(trans, _HTOTAL_A, _HTOTAL_B)
@ -1612,7 +1621,7 @@
/* Hotplug control (945+ only) */
#define PORT_HOTPLUG_EN 0x61110
#define PORT_HOTPLUG_EN (dev_priv->info->display_mmio_offset + 0x61110)
#define HDMIB_HOTPLUG_INT_EN (1 << 29)
#define DPB_HOTPLUG_INT_EN (1 << 29)
#define HDMIC_HOTPLUG_INT_EN (1 << 28)
@ -1639,7 +1648,7 @@
#define CRT_HOTPLUG_DETECT_VOLTAGE_325MV (0 << 2)
#define CRT_HOTPLUG_DETECT_VOLTAGE_475MV (1 << 2)
#define PORT_HOTPLUG_STAT 0x61114
#define PORT_HOTPLUG_STAT (dev_priv->info->display_mmio_offset + 0x61114)
/* HDMI/DP bits are gen4+ */
#define DPB_HOTPLUG_LIVE_STATUS (1 << 29)
#define DPC_HOTPLUG_LIVE_STATUS (1 << 28)
@ -1858,7 +1867,7 @@
#define PP_DIVISOR 0x61210
/* Panel fitting */
#define PFIT_CONTROL 0x61230
#define PFIT_CONTROL (dev_priv->info->display_mmio_offset + 0x61230)
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
@ -1876,7 +1885,7 @@
#define PFIT_SCALING_PROGRAMMED (1 << 26)
#define PFIT_SCALING_PILLAR (2 << 26)
#define PFIT_SCALING_LETTER (3 << 26)
#define PFIT_PGM_RATIOS 0x61234
#define PFIT_PGM_RATIOS (dev_priv->info->display_mmio_offset + 0x61234)
/* Pre-965 */
#define PFIT_VERT_SCALE_SHIFT 20
#define PFIT_VERT_SCALE_MASK 0xfff00000
@ -1888,7 +1897,7 @@
#define PFIT_HORIZ_SCALE_SHIFT_965 0
#define PFIT_HORIZ_SCALE_MASK_965 0x00001fff
#define PFIT_AUTO_RATIOS 0x61238
#define PFIT_AUTO_RATIOS (dev_priv->info->display_mmio_offset + 0x61238)
/* Backlight control */
#define BLC_PWM_CTL2 0x61250 /* 965+ only */
@ -2618,10 +2627,10 @@
/* Display & cursor control */
/* Pipe A */
#define _PIPEADSL 0x70000
#define _PIPEADSL (dev_priv->info->display_mmio_offset + 0x70000)
#define DSL_LINEMASK_GEN2 0x00000fff
#define DSL_LINEMASK_GEN3 0x00001fff
#define _PIPEACONF 0x70008
#define _PIPEACONF (dev_priv->info->display_mmio_offset + 0x70008)
#define PIPECONF_ENABLE (1<<31)
#define PIPECONF_DISABLE 0
#define PIPECONF_DOUBLE_WIDE (1<<30)
@ -2650,6 +2659,7 @@
#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */
#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */
#define PIPECONF_CXSR_DOWNCLOCK (1<<16)
#define PIPECONF_COLOR_RANGE_SELECT (1 << 13)
#define PIPECONF_BPC_MASK (0x7 << 5)
#define PIPECONF_8BPC (0<<5)
#define PIPECONF_10BPC (1<<5)
@ -2661,7 +2671,7 @@
#define PIPECONF_DITHER_TYPE_ST1 (1<<2)
#define PIPECONF_DITHER_TYPE_ST2 (2<<2)
#define PIPECONF_DITHER_TYPE_TEMP (3<<2)
#define _PIPEASTAT 0x70024
#define _PIPEASTAT (dev_priv->info->display_mmio_offset + 0x70024)
#define PIPE_FIFO_UNDERRUN_STATUS (1UL<<31)
#define SPRITE1_FLIPDONE_INT_EN_VLV (1UL<<30)
#define PIPE_CRC_ERROR_ENABLE (1UL<<29)
@ -2672,7 +2682,7 @@
#define PIPE_VSYNC_INTERRUPT_ENABLE (1UL<<25)
#define PIPE_DISPLAY_LINE_COMPARE_ENABLE (1UL<<24)
#define PIPE_DPST_EVENT_ENABLE (1UL<<23)
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<26)
#define SPRITE0_FLIP_DONE_INT_EN_VLV (1UL<<22)
#define PIPE_LEGACY_BLC_EVENT_ENABLE (1UL<<22)
#define PIPE_ODD_FIELD_INTERRUPT_ENABLE (1UL<<21)
#define PIPE_EVEN_FIELD_INTERRUPT_ENABLE (1UL<<20)
@ -2682,7 +2692,7 @@
#define PIPEA_HBLANK_INT_EN_VLV (1UL<<16)
#define PIPE_OVERLAY_UPDATED_ENABLE (1UL<<16)
#define SPRITE1_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<15)
#define SPRITE0_FLIPDONE_INT_STATUS_VLV (1UL<<14)
#define PIPE_CRC_ERROR_INTERRUPT_STATUS (1UL<<13)
#define PIPE_CRC_DONE_INTERRUPT_STATUS (1UL<<12)
#define PIPE_GMBUS_INTERRUPT_STATUS (1UL<<11)
@ -2706,7 +2716,7 @@
#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, _PIPEAFRAMEPIXEL, _PIPEBFRAMEPIXEL)
#define PIPESTAT(pipe) _PIPE(pipe, _PIPEASTAT, _PIPEBSTAT)
#define VLV_DPFLIPSTAT 0x70028
#define VLV_DPFLIPSTAT (VLV_DISPLAY_BASE + 0x70028)
#define PIPEB_LINE_COMPARE_INT_EN (1<<29)
#define PIPEB_HLINE_INT_EN (1<<28)
#define PIPEB_VBLANK_INT_EN (1<<27)
@ -2720,7 +2730,7 @@
#define SPRITEA_FLIPDONE_INT_EN (1<<17)
#define PLANEA_FLIPDONE_INT_EN (1<<16)
#define DPINVGTT 0x7002c /* VLV only */
#define DPINVGTT (VLV_DISPLAY_BASE + 0x7002c) /* VLV only */
#define CURSORB_INVALID_GTT_INT_EN (1<<23)
#define CURSORA_INVALID_GTT_INT_EN (1<<22)
#define SPRITED_INVALID_GTT_INT_EN (1<<21)
@ -2748,7 +2758,7 @@
#define DSPARB_BEND_SHIFT 9 /* on 855 */
#define DSPARB_AEND_SHIFT 0
#define DSPFW1 0x70034
#define DSPFW1 (dev_priv->info->display_mmio_offset + 0x70034)
#define DSPFW_SR_SHIFT 23
#define DSPFW_SR_MASK (0x1ff<<23)
#define DSPFW_CURSORB_SHIFT 16
@ -2756,11 +2766,11 @@
#define DSPFW_PLANEB_SHIFT 8
#define DSPFW_PLANEB_MASK (0x7f<<8)
#define DSPFW_PLANEA_MASK (0x7f)
#define DSPFW2 0x70038
#define DSPFW2 (dev_priv->info->display_mmio_offset + 0x70038)
#define DSPFW_CURSORA_MASK 0x00003f00
#define DSPFW_CURSORA_SHIFT 8
#define DSPFW_PLANEC_MASK (0x7f)
#define DSPFW3 0x7003c
#define DSPFW3 (dev_priv->info->display_mmio_offset + 0x7003c)
#define DSPFW_HPLL_SR_EN (1<<31)
#define DSPFW_CURSOR_SR_SHIFT 24
#define PINEVIEW_SELF_REFRESH_EN (1<<30)
@ -2772,13 +2782,13 @@
/* drain latency register values */
#define DRAIN_LATENCY_PRECISION_32 32
#define DRAIN_LATENCY_PRECISION_16 16
#define VLV_DDL1 0x70050
#define VLV_DDL1 (VLV_DISPLAY_BASE + 0x70050)
#define DDL_CURSORA_PRECISION_32 (1<<31)
#define DDL_CURSORA_PRECISION_16 (0<<31)
#define DDL_CURSORA_SHIFT 24
#define DDL_PLANEA_PRECISION_32 (1<<7)
#define DDL_PLANEA_PRECISION_16 (0<<7)
#define VLV_DDL2 0x70054
#define VLV_DDL2 (VLV_DISPLAY_BASE + 0x70054)
#define DDL_CURSORB_PRECISION_32 (1<<31)
#define DDL_CURSORB_PRECISION_16 (0<<31)
#define DDL_CURSORB_SHIFT 24
@ -2922,10 +2932,10 @@
* } while (high1 != high2);
* frame = (high1 << 8) | low1;
*/
#define _PIPEAFRAMEHIGH 0x70040
#define _PIPEAFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x70040)
#define PIPE_FRAME_HIGH_MASK 0x0000ffff
#define PIPE_FRAME_HIGH_SHIFT 0
#define _PIPEAFRAMEPIXEL 0x70044
#define _PIPEAFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x70044)
#define PIPE_FRAME_LOW_MASK 0xff000000
#define PIPE_FRAME_LOW_SHIFT 24
#define PIPE_PIXEL_MASK 0x00ffffff
@ -2936,7 +2946,7 @@
#define PIPE_FRMCOUNT_GM45(pipe) _PIPE(pipe, _PIPEA_FRMCOUNT_GM45, _PIPEB_FRMCOUNT_GM45)
/* Cursor A & B regs */
#define _CURACNTR 0x70080
#define _CURACNTR (dev_priv->info->display_mmio_offset + 0x70080)
/* Old style CUR*CNTR flags (desktop 8xx) */
#define CURSOR_ENABLE 0x80000000
#define CURSOR_GAMMA_ENABLE 0x40000000
@ -2957,16 +2967,16 @@
#define MCURSOR_PIPE_A 0x00
#define MCURSOR_PIPE_B (1 << 28)
#define MCURSOR_GAMMA_ENABLE (1 << 26)
#define _CURABASE 0x70084
#define _CURAPOS 0x70088
#define _CURABASE (dev_priv->info->display_mmio_offset + 0x70084)
#define _CURAPOS (dev_priv->info->display_mmio_offset + 0x70088)
#define CURSOR_POS_MASK 0x007FF
#define CURSOR_POS_SIGN 0x8000
#define CURSOR_X_SHIFT 0
#define CURSOR_Y_SHIFT 16
#define CURSIZE 0x700a0
#define _CURBCNTR 0x700c0
#define _CURBBASE 0x700c4
#define _CURBPOS 0x700c8
#define _CURBCNTR (dev_priv->info->display_mmio_offset + 0x700c0)
#define _CURBBASE (dev_priv->info->display_mmio_offset + 0x700c4)
#define _CURBPOS (dev_priv->info->display_mmio_offset + 0x700c8)
#define _CURBCNTR_IVB 0x71080
#define _CURBBASE_IVB 0x71084
@ -2981,7 +2991,7 @@
#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
/* Display A control */
#define _DSPACNTR 0x70180
#define _DSPACNTR (dev_priv->info->display_mmio_offset + 0x70180)
#define DISPLAY_PLANE_ENABLE (1<<31)
#define DISPLAY_PLANE_DISABLE 0
#define DISPPLANE_GAMMA_ENABLE (1<<30)
@ -3014,14 +3024,14 @@
#define DISPPLANE_STEREO_POLARITY_SECOND (1<<18)
#define DISPPLANE_TRICKLE_FEED_DISABLE (1<<14) /* Ironlake */
#define DISPPLANE_TILED (1<<10)
#define _DSPAADDR 0x70184
#define _DSPASTRIDE 0x70188
#define _DSPAPOS 0x7018C /* reserved */
#define _DSPASIZE 0x70190
#define _DSPASURF 0x7019C /* 965+ only */
#define _DSPATILEOFF 0x701A4 /* 965+ only */
#define _DSPAOFFSET 0x701A4 /* HSW */
#define _DSPASURFLIVE 0x701AC
#define _DSPAADDR (dev_priv->info->display_mmio_offset + 0x70184)
#define _DSPASTRIDE (dev_priv->info->display_mmio_offset + 0x70188)
#define _DSPAPOS (dev_priv->info->display_mmio_offset + 0x7018C) /* reserved */
#define _DSPASIZE (dev_priv->info->display_mmio_offset + 0x70190)
#define _DSPASURF (dev_priv->info->display_mmio_offset + 0x7019C) /* 965+ only */
#define _DSPATILEOFF (dev_priv->info->display_mmio_offset + 0x701A4) /* 965+ only */
#define _DSPAOFFSET (dev_priv->info->display_mmio_offset + 0x701A4) /* HSW */
#define _DSPASURFLIVE (dev_priv->info->display_mmio_offset + 0x701AC)
#define DSPCNTR(plane) _PIPE(plane, _DSPACNTR, _DSPBCNTR)
#define DSPADDR(plane) _PIPE(plane, _DSPAADDR, _DSPBADDR)
@ -3042,44 +3052,44 @@
(I915_WRITE((reg), (gfx_addr) | I915_LO_DISPBASE(I915_READ(reg))))
/* VBIOS flags */
#define SWF00 0x71410
#define SWF01 0x71414
#define SWF02 0x71418
#define SWF03 0x7141c
#define SWF04 0x71420
#define SWF05 0x71424
#define SWF06 0x71428
#define SWF10 0x70410
#define SWF11 0x70414
#define SWF14 0x71420
#define SWF30 0x72414
#define SWF31 0x72418
#define SWF32 0x7241c
#define SWF00 (dev_priv->info->display_mmio_offset + 0x71410)
#define SWF01 (dev_priv->info->display_mmio_offset + 0x71414)
#define SWF02 (dev_priv->info->display_mmio_offset + 0x71418)
#define SWF03 (dev_priv->info->display_mmio_offset + 0x7141c)
#define SWF04 (dev_priv->info->display_mmio_offset + 0x71420)
#define SWF05 (dev_priv->info->display_mmio_offset + 0x71424)
#define SWF06 (dev_priv->info->display_mmio_offset + 0x71428)
#define SWF10 (dev_priv->info->display_mmio_offset + 0x70410)
#define SWF11 (dev_priv->info->display_mmio_offset + 0x70414)
#define SWF14 (dev_priv->info->display_mmio_offset + 0x71420)
#define SWF30 (dev_priv->info->display_mmio_offset + 0x72414)
#define SWF31 (dev_priv->info->display_mmio_offset + 0x72418)
#define SWF32 (dev_priv->info->display_mmio_offset + 0x7241c)
/* Pipe B */
#define _PIPEBDSL 0x71000
#define _PIPEBCONF 0x71008
#define _PIPEBSTAT 0x71024
#define _PIPEBFRAMEHIGH 0x71040
#define _PIPEBFRAMEPIXEL 0x71044
#define _PIPEBDSL (dev_priv->info->display_mmio_offset + 0x71000)
#define _PIPEBCONF (dev_priv->info->display_mmio_offset + 0x71008)
#define _PIPEBSTAT (dev_priv->info->display_mmio_offset + 0x71024)
#define _PIPEBFRAMEHIGH (dev_priv->info->display_mmio_offset + 0x71040)
#define _PIPEBFRAMEPIXEL (dev_priv->info->display_mmio_offset + 0x71044)
#define _PIPEB_FRMCOUNT_GM45 0x71040
#define _PIPEB_FLIPCOUNT_GM45 0x71044
/* Display B control */
#define _DSPBCNTR 0x71180
#define _DSPBCNTR (dev_priv->info->display_mmio_offset + 0x71180)
#define DISPPLANE_ALPHA_TRANS_ENABLE (1<<15)
#define DISPPLANE_ALPHA_TRANS_DISABLE 0
#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0
#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1)
#define _DSPBADDR 0x71184
#define _DSPBSTRIDE 0x71188
#define _DSPBPOS 0x7118C
#define _DSPBSIZE 0x71190
#define _DSPBSURF 0x7119C
#define _DSPBTILEOFF 0x711A4
#define _DSPBOFFSET 0x711A4
#define _DSPBSURFLIVE 0x711AC
#define _DSPBADDR (dev_priv->info->display_mmio_offset + 0x71184)
#define _DSPBSTRIDE (dev_priv->info->display_mmio_offset + 0x71188)
#define _DSPBPOS (dev_priv->info->display_mmio_offset + 0x7118C)
#define _DSPBSIZE (dev_priv->info->display_mmio_offset + 0x71190)
#define _DSPBSURF (dev_priv->info->display_mmio_offset + 0x7119C)
#define _DSPBTILEOFF (dev_priv->info->display_mmio_offset + 0x711A4)
#define _DSPBOFFSET (dev_priv->info->display_mmio_offset + 0x711A4)
#define _DSPBSURFLIVE (dev_priv->info->display_mmio_offset + 0x711AC)
/* Sprite A control */
#define _DVSACNTR 0x72180
@ -3228,6 +3238,8 @@
# define VGA_2X_MODE (1 << 30)
# define VGA_PIPE_B_SELECT (1 << 29)
#define VLV_VGACNTRL (VLV_DISPLAY_BASE + 0x71400)
/* Ironlake */
#define CPU_VGACNTRL 0x41000
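The three VGA control variants (VLV_VGACNTRL above, CPU_VGACNTRL on PCH platforms, plain VGACNTRL elsewhere) are what an i915_vgacntrl_reg()-style helper, as used by the save/restore code further down, has to choose between; a sketch under that assumption (the exact platform checks in the driver may differ):

static inline u32 i915_vgacntrl_reg(struct drm_device *dev)
{
	if (IS_VALLEYVIEW(dev))
		return VLV_VGACNTRL;
	else if (HAS_PCH_SPLIT(dev))
		return CPU_VGACNTRL;	/* VGA control lives in the CPU display block on ILK+ */
	else
		return VGACNTRL;
}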
@ -3268,41 +3280,41 @@
#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
#define _PIPEA_DATA_M1 0x60030
#define _PIPEA_DATA_M1 (dev_priv->info->display_mmio_offset + 0x60030)
#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
#define TU_SIZE_MASK 0x7e000000
#define PIPE_DATA_M1_OFFSET 0
#define _PIPEA_DATA_N1 0x60034
#define _PIPEA_DATA_N1 (dev_priv->info->display_mmio_offset + 0x60034)
#define PIPE_DATA_N1_OFFSET 0
#define _PIPEA_DATA_M2 0x60038
#define _PIPEA_DATA_M2 (dev_priv->info->display_mmio_offset + 0x60038)
#define PIPE_DATA_M2_OFFSET 0
#define _PIPEA_DATA_N2 0x6003c
#define _PIPEA_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6003c)
#define PIPE_DATA_N2_OFFSET 0
#define _PIPEA_LINK_M1 0x60040
#define _PIPEA_LINK_M1 (dev_priv->info->display_mmio_offset + 0x60040)
#define PIPE_LINK_M1_OFFSET 0
#define _PIPEA_LINK_N1 0x60044
#define _PIPEA_LINK_N1 (dev_priv->info->display_mmio_offset + 0x60044)
#define PIPE_LINK_N1_OFFSET 0
#define _PIPEA_LINK_M2 0x60048
#define _PIPEA_LINK_M2 (dev_priv->info->display_mmio_offset + 0x60048)
#define PIPE_LINK_M2_OFFSET 0
#define _PIPEA_LINK_N2 0x6004c
#define _PIPEA_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6004c)
#define PIPE_LINK_N2_OFFSET 0
/* PIPEB timing regs share the PIPEA layout, starting at 0x61000 */
#define _PIPEB_DATA_M1 0x61030
#define _PIPEB_DATA_N1 0x61034
#define _PIPEB_DATA_M1 (dev_priv->info->display_mmio_offset + 0x61030)
#define _PIPEB_DATA_N1 (dev_priv->info->display_mmio_offset + 0x61034)
#define _PIPEB_DATA_M2 0x61038
#define _PIPEB_DATA_N2 0x6103c
#define _PIPEB_DATA_M2 (dev_priv->info->display_mmio_offset + 0x61038)
#define _PIPEB_DATA_N2 (dev_priv->info->display_mmio_offset + 0x6103c)
#define _PIPEB_LINK_M1 0x61040
#define _PIPEB_LINK_N1 0x61044
#define _PIPEB_LINK_M1 (dev_priv->info->display_mmio_offset + 0x61040)
#define _PIPEB_LINK_N1 (dev_priv->info->display_mmio_offset + 0x61044)
#define _PIPEB_LINK_M2 0x61048
#define _PIPEB_LINK_N2 0x6104c
#define _PIPEB_LINK_M2 (dev_priv->info->display_mmio_offset + 0x61048)
#define _PIPEB_LINK_N2 (dev_priv->info->display_mmio_offset + 0x6104c)
#define PIPE_DATA_M1(tran) _TRANSCODER(tran, _PIPEA_DATA_M1, _PIPEB_DATA_M1)
#define PIPE_DATA_N1(tran) _TRANSCODER(tran, _PIPEA_DATA_N1, _PIPEB_DATA_N1)
@ -3699,13 +3711,13 @@
#define TVIDEO_DIP_DATA(pipe) _PIPE(pipe, _VIDEO_DIP_DATA_A, _VIDEO_DIP_DATA_B)
#define TVIDEO_DIP_GCP(pipe) _PIPE(pipe, _VIDEO_DIP_GCP_A, _VIDEO_DIP_GCP_B)
#define VLV_VIDEO_DIP_CTL_A 0x60200
#define VLV_VIDEO_DIP_DATA_A 0x60208
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A 0x60210
#define VLV_VIDEO_DIP_CTL_A (VLV_DISPLAY_BASE + 0x60200)
#define VLV_VIDEO_DIP_DATA_A (VLV_DISPLAY_BASE + 0x60208)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_A (VLV_DISPLAY_BASE + 0x60210)
#define VLV_VIDEO_DIP_CTL_B 0x61170
#define VLV_VIDEO_DIP_DATA_B 0x61174
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B 0x61178
#define VLV_VIDEO_DIP_CTL_B (VLV_DISPLAY_BASE + 0x61170)
#define VLV_VIDEO_DIP_DATA_B (VLV_DISPLAY_BASE + 0x61174)
#define VLV_VIDEO_DIP_GDCP_PAYLOAD_B (VLV_DISPLAY_BASE + 0x61178)
#define VLV_TVIDEO_DIP_CTL(pipe) \
_PIPE(pipe, VLV_VIDEO_DIP_CTL_A, VLV_VIDEO_DIP_CTL_B)
@ -3995,17 +4007,17 @@
#define LVDS_DETECTED (1 << 1)
/* vlv has 2 sets of panel control regs. */
#define PIPEA_PP_STATUS 0x61200
#define PIPEA_PP_CONTROL 0x61204
#define PIPEA_PP_ON_DELAYS 0x61208
#define PIPEA_PP_OFF_DELAYS 0x6120c
#define PIPEA_PP_DIVISOR 0x61210
#define PIPEA_PP_STATUS (VLV_DISPLAY_BASE + 0x61200)
#define PIPEA_PP_CONTROL (VLV_DISPLAY_BASE + 0x61204)
#define PIPEA_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61208)
#define PIPEA_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6120c)
#define PIPEA_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61210)
#define PIPEB_PP_STATUS 0x61300
#define PIPEB_PP_CONTROL 0x61304
#define PIPEB_PP_ON_DELAYS 0x61308
#define PIPEB_PP_OFF_DELAYS 0x6130c
#define PIPEB_PP_DIVISOR 0x61310
#define PIPEB_PP_STATUS (VLV_DISPLAY_BASE + 0x61300)
#define PIPEB_PP_CONTROL (VLV_DISPLAY_BASE + 0x61304)
#define PIPEB_PP_ON_DELAYS (VLV_DISPLAY_BASE + 0x61308)
#define PIPEB_PP_OFF_DELAYS (VLV_DISPLAY_BASE + 0x6130c)
#define PIPEB_PP_DIVISOR (VLV_DISPLAY_BASE + 0x61310)
#define PCH_PP_STATUS 0xc7200
#define PCH_PP_CONTROL 0xc7204
@ -4186,7 +4198,9 @@
#define GEN6_RP_INTERRUPT_LIMITS 0xA014
#define GEN6_RPSTAT1 0xA01C
#define GEN6_CAGF_SHIFT 8
#define HSW_CAGF_SHIFT 7
#define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT)
#define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT)
#define GEN6_RP_CONTROL 0xA024
#define GEN6_RP_MEDIA_TURBO (1<<11)
#define GEN6_RP_MEDIA_MODE_MASK (3<<9)
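The HSW_CAGF_* variants exist because Haswell reports the current actual GPU frequency one bit lower in GEN6_RPSTAT1 than Gen6/7; a hedged sketch of decoding it (GT_FREQUENCY_MULTIPLIER, the 50 MHz unit used elsewhere in the driver, is assumed here):

	u32 rpstat = I915_READ(GEN6_RPSTAT1);
	u32 cagf;

	if (IS_HASWELL(dev))
		cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
	else
		cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;

	/* CAGF counts in 50 MHz units */
	cagf *= GT_FREQUENCY_MULTIPLIER;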
@ -4297,7 +4311,7 @@
#define GEN7_ROW_CHICKEN2_GT2 0xf4f4
#define DOP_CLOCK_GATING_DISABLE (1<<0)
#define G4X_AUD_VID_DID 0x62020
#define G4X_AUD_VID_DID (dev_priv->info->display_mmio_offset + 0x62020)
#define INTEL_AUDIO_DEVCL 0x808629FB
#define INTEL_AUDIO_DEVBLC 0x80862801
#define INTEL_AUDIO_DEVCTG 0x80862802
@ -4413,10 +4427,10 @@
#define AUDIO_CP_READY_C (1<<9)
/* HSW Power Wells */
#define HSW_PWR_WELL_CTL1 0x45400 /* BIOS */
#define HSW_PWR_WELL_CTL2 0x45404 /* Driver */
#define HSW_PWR_WELL_CTL3 0x45408 /* KVMR */
#define HSW_PWR_WELL_CTL4 0x4540C /* Debug */
#define HSW_PWR_WELL_BIOS 0x45400 /* CTL1 */
#define HSW_PWR_WELL_DRIVER 0x45404 /* CTL2 */
#define HSW_PWR_WELL_KVMR 0x45408 /* CTL3 */
#define HSW_PWR_WELL_DEBUG 0x4540C /* CTL4 */
#define HSW_PWR_WELL_ENABLE (1<<31)
#define HSW_PWR_WELL_STATE (1<<30)
#define HSW_PWR_WELL_CTL5 0x45410
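With the control registers renamed by owner, enabling the driver-owned power well reduces to setting the request bit and waiting for the state bit to follow; a minimal sketch (wait_for() is the usual timeout-poll helper, and the 20 ms budget is an assumption):

	/* request the power well and wait for the hardware to acknowledge it */
	I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
	if (wait_for(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE, 20))
		DRM_ERROR("Timeout enabling power well\n");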

View File

@ -29,67 +29,6 @@
#include "intel_drv.h"
#include "i915_reg.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
/* On IVB, 3rd pipe shares PLL with another one */
if (pipe > 1)
return false;
if (HAS_PCH_SPLIT(dev))
dpll_reg = _PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->regfile.save_palette_a;
else
array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->regfile.save_palette_a;
else
array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -130,6 +69,12 @@ static void i915_save_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
/* VGA state */
dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
dev_priv->regfile.saveVGACNTRL = I915_READ(i915_vgacntrl_reg(dev));
/* VGA color palette registers */
dev_priv->regfile.saveDACMASK = I915_READ8(VGA_DACMASK);
@ -188,6 +133,15 @@ static void i915_restore_vga(struct drm_device *dev)
int i;
u16 cr_index, cr_data, st01;
/* VGA state */
I915_WRITE(i915_vgacntrl_reg(dev), dev_priv->regfile.saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
/* MSR bits */
I915_WRITE8(VGA_MSR_WRITE, dev_priv->regfile.saveMSR);
if (dev_priv->regfile.saveMSR & VGA_MSR_CGA_MODE) {
@ -235,396 +189,18 @@ static void i915_restore_vga(struct drm_device *dev)
I915_WRITE8(VGA_DACMASK, dev_priv->regfile.saveDACMASK);
}
static void i915_save_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
/* Cursor state */
dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
}
dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
}
dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
/* CRT state */
if (HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
else
dev_priv->regfile.saveADPA = I915_READ(ADPA);
return;
}
static void i915_restore_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
break;
}
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = _PCH_DPLL_A;
dpll_b_reg = _PCH_DPLL_B;
fpa0_reg = _PCH_FPA0;
fpb0_reg = _PCH_FPB0;
fpa1_reg = _PCH_FPA1;
fpb1_reg = _PCH_FPB1;
} else {
dpll_a_reg = _DPLL_A;
dpll_b_reg = _DPLL_B;
fpa0_reg = _FPA0;
fpb0_reg = _FPB0;
fpa1_reg = _FPA1;
fpb1_reg = _FPB1;
}
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
}
I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */
I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
}
I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
else
I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
return;
}
static void i915_save_display(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration control */
if (INTEL_INFO(dev)->gen <= 4)
dev_priv->regfile.saveDSPARB = I915_READ(DSPARB);
/* This is only meaningful in non-KMS mode */
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_display_reg(dev);
/* LVDS state */
if (HAS_PCH_SPLIT(dev)) {
@ -658,24 +234,6 @@ static void i915_save_display(struct drm_device *dev)
dev_priv->regfile.savePP_DIVISOR = I915_READ(PP_DIVISOR);
}
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
dev_priv->regfile.saveDP_B = I915_READ(DP_B);
dev_priv->regfile.saveDP_C = I915_READ(DP_C);
dev_priv->regfile.saveDP_D = I915_READ(DP_D);
dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
}
/* FIXME: save TV & SDVO state */
}
/* Only save FBC state on platforms that support FBC */
if (I915_HAS_FBC(dev)) {
if (HAS_PCH_SPLIT(dev)) {
@ -690,15 +248,7 @@ static void i915_save_display(struct drm_device *dev)
}
}
/* VGA state */
dev_priv->regfile.saveVGA0 = I915_READ(VGA0);
dev_priv->regfile.saveVGA1 = I915_READ(VGA1);
dev_priv->regfile.saveVGA_PD = I915_READ(VGA_PD);
if (HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveVGACNTRL = I915_READ(CPU_VGACNTRL);
else
dev_priv->regfile.saveVGACNTRL = I915_READ(VGACNTRL);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_save_vga(dev);
}
@ -707,25 +257,11 @@ static void i915_restore_display(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
/* Display arbitration */
if (INTEL_INFO(dev)->gen <= 4)
I915_WRITE(DSPARB, dev_priv->regfile.saveDSPARB);
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
}
}
/* This is only meaningful in non-KMS mode */
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_display_reg(dev);
/* LVDS state */
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
@ -763,16 +299,6 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
}
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
}
/* FIXME: restore TV & SDVO state */
}
/* Only restore FBC info on platforms that support FBC */
intel_disable_fbc(dev);
if (I915_HAS_FBC(dev)) {
@ -787,19 +313,11 @@ static void i915_restore_display(struct drm_device *dev)
I915_WRITE(FBC_CONTROL, dev_priv->regfile.saveFBC_CONTROL);
}
}
/* VGA state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(CPU_VGACNTRL, dev_priv->regfile.saveVGACNTRL);
else
I915_WRITE(VGACNTRL, dev_priv->regfile.saveVGACNTRL);
I915_WRITE(VGA0, dev_priv->regfile.saveVGA0);
I915_WRITE(VGA1, dev_priv->regfile.saveVGA1);
I915_WRITE(VGA_PD, dev_priv->regfile.saveVGA_PD);
POSTING_READ(VGA_PD);
udelay(150);
if (!drm_core_check_feature(dev, DRIVER_MODESET))
i915_restore_vga(dev);
else
i915_redisable_vga(dev);
}
int i915_save_state(struct drm_device *dev)

View File

@ -0,0 +1,503 @@
/*
*
* Copyright 2008 (c) Intel Corporation
* Jesse Barnes <jbarnes@virtuousgeek.org>
* Copyright 2013 (c) Intel Corporation
* Daniel Vetter <daniel.vetter@ffwll.ch>
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
* IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "intel_drv.h"
#include "i915_reg.h"
static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 dpll_reg;
/* On IVB, 3rd pipe shares PLL with another one */
if (pipe > 1)
return false;
if (HAS_PCH_SPLIT(dev))
dpll_reg = _PCH_DPLL(pipe);
else
dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
return (I915_READ(dpll_reg) & DPLL_VCO_ENABLE);
}
static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->regfile.save_palette_a;
else
array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
array[i] = I915_READ(reg + (i << 2));
}
static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long reg = (pipe == PIPE_A ? _PALETTE_A : _PALETTE_B);
u32 *array;
int i;
if (!i915_pipe_enabled(dev, pipe))
return;
if (HAS_PCH_SPLIT(dev))
reg = (pipe == PIPE_A) ? _LGC_PALETTE_A : _LGC_PALETTE_B;
if (pipe == PIPE_A)
array = dev_priv->regfile.save_palette_a;
else
array = dev_priv->regfile.save_palette_b;
for (i = 0; i < 256; i++)
I915_WRITE(reg + (i << 2), array[i]);
}
void i915_save_display_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int i;
/* Cursor state */
dev_priv->regfile.saveCURACNTR = I915_READ(_CURACNTR);
dev_priv->regfile.saveCURAPOS = I915_READ(_CURAPOS);
dev_priv->regfile.saveCURABASE = I915_READ(_CURABASE);
dev_priv->regfile.saveCURBCNTR = I915_READ(_CURBCNTR);
dev_priv->regfile.saveCURBPOS = I915_READ(_CURBPOS);
dev_priv->regfile.saveCURBBASE = I915_READ(_CURBBASE);
if (IS_GEN2(dev))
dev_priv->regfile.saveCURSIZE = I915_READ(CURSIZE);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->regfile.saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
}
/* Pipe & plane A info */
dev_priv->regfile.savePIPEACONF = I915_READ(_PIPEACONF);
dev_priv->regfile.savePIPEASRC = I915_READ(_PIPEASRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveFPA0 = I915_READ(_PCH_FPA0);
dev_priv->regfile.saveFPA1 = I915_READ(_PCH_FPA1);
dev_priv->regfile.saveDPLL_A = I915_READ(_PCH_DPLL_A);
} else {
dev_priv->regfile.saveFPA0 = I915_READ(_FPA0);
dev_priv->regfile.saveFPA1 = I915_READ(_FPA1);
dev_priv->regfile.saveDPLL_A = I915_READ(_DPLL_A);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveDPLL_A_MD = I915_READ(_DPLL_A_MD);
dev_priv->regfile.saveHTOTAL_A = I915_READ(_HTOTAL_A);
dev_priv->regfile.saveHBLANK_A = I915_READ(_HBLANK_A);
dev_priv->regfile.saveHSYNC_A = I915_READ(_HSYNC_A);
dev_priv->regfile.saveVTOTAL_A = I915_READ(_VTOTAL_A);
dev_priv->regfile.saveVBLANK_A = I915_READ(_VBLANK_A);
dev_priv->regfile.saveVSYNC_A = I915_READ(_VSYNC_A);
if (!HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveBCLRPAT_A = I915_READ(_BCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePIPEA_DATA_M1 = I915_READ(_PIPEA_DATA_M1);
dev_priv->regfile.savePIPEA_DATA_N1 = I915_READ(_PIPEA_DATA_N1);
dev_priv->regfile.savePIPEA_LINK_M1 = I915_READ(_PIPEA_LINK_M1);
dev_priv->regfile.savePIPEA_LINK_N1 = I915_READ(_PIPEA_LINK_N1);
dev_priv->regfile.saveFDI_TXA_CTL = I915_READ(_FDI_TXA_CTL);
dev_priv->regfile.saveFDI_RXA_CTL = I915_READ(_FDI_RXA_CTL);
dev_priv->regfile.savePFA_CTL_1 = I915_READ(_PFA_CTL_1);
dev_priv->regfile.savePFA_WIN_SZ = I915_READ(_PFA_WIN_SZ);
dev_priv->regfile.savePFA_WIN_POS = I915_READ(_PFA_WIN_POS);
dev_priv->regfile.saveTRANSACONF = I915_READ(_TRANSACONF);
dev_priv->regfile.saveTRANS_HTOTAL_A = I915_READ(_TRANS_HTOTAL_A);
dev_priv->regfile.saveTRANS_HBLANK_A = I915_READ(_TRANS_HBLANK_A);
dev_priv->regfile.saveTRANS_HSYNC_A = I915_READ(_TRANS_HSYNC_A);
dev_priv->regfile.saveTRANS_VTOTAL_A = I915_READ(_TRANS_VTOTAL_A);
dev_priv->regfile.saveTRANS_VBLANK_A = I915_READ(_TRANS_VBLANK_A);
dev_priv->regfile.saveTRANS_VSYNC_A = I915_READ(_TRANS_VSYNC_A);
}
dev_priv->regfile.saveDSPACNTR = I915_READ(_DSPACNTR);
dev_priv->regfile.saveDSPASTRIDE = I915_READ(_DSPASTRIDE);
dev_priv->regfile.saveDSPASIZE = I915_READ(_DSPASIZE);
dev_priv->regfile.saveDSPAPOS = I915_READ(_DSPAPOS);
dev_priv->regfile.saveDSPAADDR = I915_READ(_DSPAADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->regfile.saveDSPASURF = I915_READ(_DSPASURF);
dev_priv->regfile.saveDSPATILEOFF = I915_READ(_DSPATILEOFF);
}
i915_save_palette(dev, PIPE_A);
dev_priv->regfile.savePIPEASTAT = I915_READ(_PIPEASTAT);
/* Pipe & plane B info */
dev_priv->regfile.savePIPEBCONF = I915_READ(_PIPEBCONF);
dev_priv->regfile.savePIPEBSRC = I915_READ(_PIPEBSRC);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.saveFPB0 = I915_READ(_PCH_FPB0);
dev_priv->regfile.saveFPB1 = I915_READ(_PCH_FPB1);
dev_priv->regfile.saveDPLL_B = I915_READ(_PCH_DPLL_B);
} else {
dev_priv->regfile.saveFPB0 = I915_READ(_FPB0);
dev_priv->regfile.saveFPB1 = I915_READ(_FPB1);
dev_priv->regfile.saveDPLL_B = I915_READ(_DPLL_B);
}
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveDPLL_B_MD = I915_READ(_DPLL_B_MD);
dev_priv->regfile.saveHTOTAL_B = I915_READ(_HTOTAL_B);
dev_priv->regfile.saveHBLANK_B = I915_READ(_HBLANK_B);
dev_priv->regfile.saveHSYNC_B = I915_READ(_HSYNC_B);
dev_priv->regfile.saveVTOTAL_B = I915_READ(_VTOTAL_B);
dev_priv->regfile.saveVBLANK_B = I915_READ(_VBLANK_B);
dev_priv->regfile.saveVSYNC_B = I915_READ(_VSYNC_B);
if (!HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveBCLRPAT_B = I915_READ(_BCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
dev_priv->regfile.savePIPEB_DATA_M1 = I915_READ(_PIPEB_DATA_M1);
dev_priv->regfile.savePIPEB_DATA_N1 = I915_READ(_PIPEB_DATA_N1);
dev_priv->regfile.savePIPEB_LINK_M1 = I915_READ(_PIPEB_LINK_M1);
dev_priv->regfile.savePIPEB_LINK_N1 = I915_READ(_PIPEB_LINK_N1);
dev_priv->regfile.saveFDI_TXB_CTL = I915_READ(_FDI_TXB_CTL);
dev_priv->regfile.saveFDI_RXB_CTL = I915_READ(_FDI_RXB_CTL);
dev_priv->regfile.savePFB_CTL_1 = I915_READ(_PFB_CTL_1);
dev_priv->regfile.savePFB_WIN_SZ = I915_READ(_PFB_WIN_SZ);
dev_priv->regfile.savePFB_WIN_POS = I915_READ(_PFB_WIN_POS);
dev_priv->regfile.saveTRANSBCONF = I915_READ(_TRANSBCONF);
dev_priv->regfile.saveTRANS_HTOTAL_B = I915_READ(_TRANS_HTOTAL_B);
dev_priv->regfile.saveTRANS_HBLANK_B = I915_READ(_TRANS_HBLANK_B);
dev_priv->regfile.saveTRANS_HSYNC_B = I915_READ(_TRANS_HSYNC_B);
dev_priv->regfile.saveTRANS_VTOTAL_B = I915_READ(_TRANS_VTOTAL_B);
dev_priv->regfile.saveTRANS_VBLANK_B = I915_READ(_TRANS_VBLANK_B);
dev_priv->regfile.saveTRANS_VSYNC_B = I915_READ(_TRANS_VSYNC_B);
}
dev_priv->regfile.saveDSPBCNTR = I915_READ(_DSPBCNTR);
dev_priv->regfile.saveDSPBSTRIDE = I915_READ(_DSPBSTRIDE);
dev_priv->regfile.saveDSPBSIZE = I915_READ(_DSPBSIZE);
dev_priv->regfile.saveDSPBPOS = I915_READ(_DSPBPOS);
dev_priv->regfile.saveDSPBADDR = I915_READ(_DSPBADDR);
if (INTEL_INFO(dev)->gen >= 4) {
dev_priv->regfile.saveDSPBSURF = I915_READ(_DSPBSURF);
dev_priv->regfile.saveDSPBTILEOFF = I915_READ(_DSPBTILEOFF);
}
i915_save_palette(dev, PIPE_B);
dev_priv->regfile.savePIPEBSTAT = I915_READ(_PIPEBSTAT);
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
break;
case 3:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
dev_priv->regfile.saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
case 2:
for (i = 0; i < 8; i++)
dev_priv->regfile.saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
break;
}
/* CRT state */
if (HAS_PCH_SPLIT(dev))
dev_priv->regfile.saveADPA = I915_READ(PCH_ADPA);
else
dev_priv->regfile.saveADPA = I915_READ(ADPA);
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
dev_priv->regfile.saveDP_B = I915_READ(DP_B);
dev_priv->regfile.saveDP_C = I915_READ(DP_C);
dev_priv->regfile.saveDP_D = I915_READ(DP_D);
dev_priv->regfile.savePIPEA_GMCH_DATA_M = I915_READ(_PIPEA_GMCH_DATA_M);
dev_priv->regfile.savePIPEB_GMCH_DATA_M = I915_READ(_PIPEB_GMCH_DATA_M);
dev_priv->regfile.savePIPEA_GMCH_DATA_N = I915_READ(_PIPEA_GMCH_DATA_N);
dev_priv->regfile.savePIPEB_GMCH_DATA_N = I915_READ(_PIPEB_GMCH_DATA_N);
dev_priv->regfile.savePIPEA_DP_LINK_M = I915_READ(_PIPEA_DP_LINK_M);
dev_priv->regfile.savePIPEB_DP_LINK_M = I915_READ(_PIPEB_DP_LINK_M);
dev_priv->regfile.savePIPEA_DP_LINK_N = I915_READ(_PIPEA_DP_LINK_N);
dev_priv->regfile.savePIPEB_DP_LINK_N = I915_READ(_PIPEB_DP_LINK_N);
}
/* FIXME: save TV & SDVO state */
return;
}
void i915_restore_display_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
int i;
/* Display port ratios (must be done before clock is set) */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(_PIPEA_GMCH_DATA_M, dev_priv->regfile.savePIPEA_GMCH_DATA_M);
I915_WRITE(_PIPEB_GMCH_DATA_M, dev_priv->regfile.savePIPEB_GMCH_DATA_M);
I915_WRITE(_PIPEA_GMCH_DATA_N, dev_priv->regfile.savePIPEA_GMCH_DATA_N);
I915_WRITE(_PIPEB_GMCH_DATA_N, dev_priv->regfile.savePIPEB_GMCH_DATA_N);
I915_WRITE(_PIPEA_DP_LINK_M, dev_priv->regfile.savePIPEA_DP_LINK_M);
I915_WRITE(_PIPEB_DP_LINK_M, dev_priv->regfile.savePIPEB_DP_LINK_M);
I915_WRITE(_PIPEA_DP_LINK_N, dev_priv->regfile.savePIPEA_DP_LINK_N);
I915_WRITE(_PIPEB_DP_LINK_N, dev_priv->regfile.savePIPEB_DP_LINK_N);
}
/* Fences */
switch (INTEL_INFO(dev)->gen) {
case 7:
case 6:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 5:
case 4:
for (i = 0; i < 16; i++)
I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->regfile.saveFENCE[i]);
break;
case 3:
case 2:
if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->regfile.saveFENCE[i+8]);
for (i = 0; i < 8; i++)
I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->regfile.saveFENCE[i]);
break;
}
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = _PCH_DPLL_A;
dpll_b_reg = _PCH_DPLL_B;
fpa0_reg = _PCH_FPA0;
fpb0_reg = _PCH_FPB0;
fpa1_reg = _PCH_FPA1;
fpb1_reg = _PCH_FPB1;
} else {
dpll_a_reg = _DPLL_A;
dpll_b_reg = _DPLL_B;
fpa0_reg = _FPA0;
fpb0_reg = _FPB0;
fpa1_reg = _FPA1;
fpb1_reg = _FPB1;
}
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(PCH_DREF_CONTROL, dev_priv->regfile.savePCH_DREF_CONTROL);
I915_WRITE(DISP_ARB_CTL, dev_priv->regfile.saveDISP_ARB_CTL);
}
/* Pipe & plane A info */
/* Prime the clock */
if (dev_priv->regfile.saveDPLL_A & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_a_reg);
udelay(150);
}
I915_WRITE(fpa0_reg, dev_priv->regfile.saveFPA0);
I915_WRITE(fpa1_reg, dev_priv->regfile.saveFPA1);
/* Actually enable it */
I915_WRITE(dpll_a_reg, dev_priv->regfile.saveDPLL_A);
POSTING_READ(dpll_a_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_A_MD, dev_priv->regfile.saveDPLL_A_MD);
POSTING_READ(_DPLL_A_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_A, dev_priv->regfile.saveHTOTAL_A);
I915_WRITE(_HBLANK_A, dev_priv->regfile.saveHBLANK_A);
I915_WRITE(_HSYNC_A, dev_priv->regfile.saveHSYNC_A);
I915_WRITE(_VTOTAL_A, dev_priv->regfile.saveVTOTAL_A);
I915_WRITE(_VBLANK_A, dev_priv->regfile.saveVBLANK_A);
I915_WRITE(_VSYNC_A, dev_priv->regfile.saveVSYNC_A);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(_BCLRPAT_A, dev_priv->regfile.saveBCLRPAT_A);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(_PIPEA_DATA_M1, dev_priv->regfile.savePIPEA_DATA_M1);
I915_WRITE(_PIPEA_DATA_N1, dev_priv->regfile.savePIPEA_DATA_N1);
I915_WRITE(_PIPEA_LINK_M1, dev_priv->regfile.savePIPEA_LINK_M1);
I915_WRITE(_PIPEA_LINK_N1, dev_priv->regfile.savePIPEA_LINK_N1);
I915_WRITE(_FDI_RXA_CTL, dev_priv->regfile.saveFDI_RXA_CTL);
I915_WRITE(_FDI_TXA_CTL, dev_priv->regfile.saveFDI_TXA_CTL);
I915_WRITE(_PFA_CTL_1, dev_priv->regfile.savePFA_CTL_1);
I915_WRITE(_PFA_WIN_SZ, dev_priv->regfile.savePFA_WIN_SZ);
I915_WRITE(_PFA_WIN_POS, dev_priv->regfile.savePFA_WIN_POS);
I915_WRITE(_TRANSACONF, dev_priv->regfile.saveTRANSACONF);
I915_WRITE(_TRANS_HTOTAL_A, dev_priv->regfile.saveTRANS_HTOTAL_A);
I915_WRITE(_TRANS_HBLANK_A, dev_priv->regfile.saveTRANS_HBLANK_A);
I915_WRITE(_TRANS_HSYNC_A, dev_priv->regfile.saveTRANS_HSYNC_A);
I915_WRITE(_TRANS_VTOTAL_A, dev_priv->regfile.saveTRANS_VTOTAL_A);
I915_WRITE(_TRANS_VBLANK_A, dev_priv->regfile.saveTRANS_VBLANK_A);
I915_WRITE(_TRANS_VSYNC_A, dev_priv->regfile.saveTRANS_VSYNC_A);
}
/* Restore plane info */
I915_WRITE(_DSPASIZE, dev_priv->regfile.saveDSPASIZE);
I915_WRITE(_DSPAPOS, dev_priv->regfile.saveDSPAPOS);
I915_WRITE(_PIPEASRC, dev_priv->regfile.savePIPEASRC);
I915_WRITE(_DSPAADDR, dev_priv->regfile.saveDSPAADDR);
I915_WRITE(_DSPASTRIDE, dev_priv->regfile.saveDSPASTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(_DSPASURF, dev_priv->regfile.saveDSPASURF);
I915_WRITE(_DSPATILEOFF, dev_priv->regfile.saveDSPATILEOFF);
}
I915_WRITE(_PIPEACONF, dev_priv->regfile.savePIPEACONF);
i915_restore_palette(dev, PIPE_A);
/* Enable the plane */
I915_WRITE(_DSPACNTR, dev_priv->regfile.saveDSPACNTR);
I915_WRITE(_DSPAADDR, I915_READ(_DSPAADDR));
/* Pipe & plane B info */
if (dev_priv->regfile.saveDPLL_B & DPLL_VCO_ENABLE) {
I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B &
~DPLL_VCO_ENABLE);
POSTING_READ(dpll_b_reg);
udelay(150);
}
I915_WRITE(fpb0_reg, dev_priv->regfile.saveFPB0);
I915_WRITE(fpb1_reg, dev_priv->regfile.saveFPB1);
/* Actually enable it */
I915_WRITE(dpll_b_reg, dev_priv->regfile.saveDPLL_B);
POSTING_READ(dpll_b_reg);
udelay(150);
if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
I915_WRITE(_DPLL_B_MD, dev_priv->regfile.saveDPLL_B_MD);
POSTING_READ(_DPLL_B_MD);
}
udelay(150);
/* Restore mode */
I915_WRITE(_HTOTAL_B, dev_priv->regfile.saveHTOTAL_B);
I915_WRITE(_HBLANK_B, dev_priv->regfile.saveHBLANK_B);
I915_WRITE(_HSYNC_B, dev_priv->regfile.saveHSYNC_B);
I915_WRITE(_VTOTAL_B, dev_priv->regfile.saveVTOTAL_B);
I915_WRITE(_VBLANK_B, dev_priv->regfile.saveVBLANK_B);
I915_WRITE(_VSYNC_B, dev_priv->regfile.saveVSYNC_B);
if (!HAS_PCH_SPLIT(dev))
I915_WRITE(_BCLRPAT_B, dev_priv->regfile.saveBCLRPAT_B);
if (HAS_PCH_SPLIT(dev)) {
I915_WRITE(_PIPEB_DATA_M1, dev_priv->regfile.savePIPEB_DATA_M1);
I915_WRITE(_PIPEB_DATA_N1, dev_priv->regfile.savePIPEB_DATA_N1);
I915_WRITE(_PIPEB_LINK_M1, dev_priv->regfile.savePIPEB_LINK_M1);
I915_WRITE(_PIPEB_LINK_N1, dev_priv->regfile.savePIPEB_LINK_N1);
I915_WRITE(_FDI_RXB_CTL, dev_priv->regfile.saveFDI_RXB_CTL);
I915_WRITE(_FDI_TXB_CTL, dev_priv->regfile.saveFDI_TXB_CTL);
I915_WRITE(_PFB_CTL_1, dev_priv->regfile.savePFB_CTL_1);
I915_WRITE(_PFB_WIN_SZ, dev_priv->regfile.savePFB_WIN_SZ);
I915_WRITE(_PFB_WIN_POS, dev_priv->regfile.savePFB_WIN_POS);
I915_WRITE(_TRANSBCONF, dev_priv->regfile.saveTRANSBCONF);
I915_WRITE(_TRANS_HTOTAL_B, dev_priv->regfile.saveTRANS_HTOTAL_B);
I915_WRITE(_TRANS_HBLANK_B, dev_priv->regfile.saveTRANS_HBLANK_B);
I915_WRITE(_TRANS_HSYNC_B, dev_priv->regfile.saveTRANS_HSYNC_B);
I915_WRITE(_TRANS_VTOTAL_B, dev_priv->regfile.saveTRANS_VTOTAL_B);
I915_WRITE(_TRANS_VBLANK_B, dev_priv->regfile.saveTRANS_VBLANK_B);
I915_WRITE(_TRANS_VSYNC_B, dev_priv->regfile.saveTRANS_VSYNC_B);
}
/* Restore plane info */
I915_WRITE(_DSPBSIZE, dev_priv->regfile.saveDSPBSIZE);
I915_WRITE(_DSPBPOS, dev_priv->regfile.saveDSPBPOS);
I915_WRITE(_PIPEBSRC, dev_priv->regfile.savePIPEBSRC);
I915_WRITE(_DSPBADDR, dev_priv->regfile.saveDSPBADDR);
I915_WRITE(_DSPBSTRIDE, dev_priv->regfile.saveDSPBSTRIDE);
if (INTEL_INFO(dev)->gen >= 4) {
I915_WRITE(_DSPBSURF, dev_priv->regfile.saveDSPBSURF);
I915_WRITE(_DSPBTILEOFF, dev_priv->regfile.saveDSPBTILEOFF);
}
I915_WRITE(_PIPEBCONF, dev_priv->regfile.savePIPEBCONF);
i915_restore_palette(dev, PIPE_B);
/* Enable the plane */
I915_WRITE(_DSPBCNTR, dev_priv->regfile.saveDSPBCNTR);
I915_WRITE(_DSPBADDR, I915_READ(_DSPBADDR));
/* Cursor state */
I915_WRITE(_CURAPOS, dev_priv->regfile.saveCURAPOS);
I915_WRITE(_CURACNTR, dev_priv->regfile.saveCURACNTR);
I915_WRITE(_CURABASE, dev_priv->regfile.saveCURABASE);
I915_WRITE(_CURBPOS, dev_priv->regfile.saveCURBPOS);
I915_WRITE(_CURBCNTR, dev_priv->regfile.saveCURBCNTR);
I915_WRITE(_CURBBASE, dev_priv->regfile.saveCURBBASE);
if (IS_GEN2(dev))
I915_WRITE(CURSIZE, dev_priv->regfile.saveCURSIZE);
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->regfile.saveADPA);
else
I915_WRITE(ADPA, dev_priv->regfile.saveADPA);
/* Display Port state */
if (SUPPORTS_INTEGRATED_DP(dev)) {
I915_WRITE(DP_B, dev_priv->regfile.saveDP_B);
I915_WRITE(DP_C, dev_priv->regfile.saveDP_C);
I915_WRITE(DP_D, dev_priv->regfile.saveDP_D);
}
/* FIXME: restore TV & SDVO state */
return;
}

View File

@@ -267,27 +267,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = 0;
save_adpa = adpa = I915_READ(PCH_ADPA);
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
I915_WRITE(PCH_ADPA, adpa);
I915_WRITE(crt->adpa_reg, adpa);
if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000))
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
I915_WRITE(PCH_ADPA, save_adpa);
POSTING_READ(PCH_ADPA);
I915_WRITE(crt->adpa_reg, save_adpa);
POSTING_READ(crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
@@ -300,26 +300,27 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
u32 adpa;
bool ret;
u32 save_adpa;
save_adpa = adpa = I915_READ(ADPA);
save_adpa = adpa = I915_READ(crt->adpa_reg);
DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
I915_WRITE(ADPA, adpa);
I915_WRITE(crt->adpa_reg, adpa);
if (wait_for((I915_READ(ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
if (wait_for((I915_READ(crt->adpa_reg) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
1000)) {
DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
I915_WRITE(ADPA, save_adpa);
I915_WRITE(crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(ADPA);
adpa = I915_READ(crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
@@ -665,11 +666,11 @@ static void intel_crt_reset(struct drm_connector *connector)
if (HAS_PCH_SPLIT(dev)) {
u32 adpa;
adpa = I915_READ(PCH_ADPA);
adpa = I915_READ(crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
I915_WRITE(PCH_ADPA, adpa);
POSTING_READ(PCH_ADPA);
I915_WRITE(crt->adpa_reg, adpa);
POSTING_READ(crt->adpa_reg);
DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = 1;

View File

@@ -677,6 +677,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
DRM_DEBUG_KMS("Preparing DDI mode for Haswell on port %c, pipe %c\n",
port_name(port), pipe_name(pipe));
intel_crtc->eld_vld = false;
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -987,7 +988,13 @@ void intel_ddi_enable_pipe_func(struct drm_crtc *crtc)
if (cpu_transcoder == TRANSCODER_EDP) {
switch (pipe) {
case PIPE_A:
/* Can only use the always-on power well for eDP when
* not using the panel fitter, and when not using motion
* blur mitigation (which we don't support). */
if (dev_priv->pch_pf_size)
temp |= TRANS_DDI_EDP_INPUT_A_ONOFF;
else
temp |= TRANS_DDI_EDP_INPUT_A_ON;
break;
case PIPE_B:
temp |= TRANS_DDI_EDP_INPUT_B_ONOFF;
@@ -1287,10 +1294,14 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
static void intel_enable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
enum port port = intel_ddi_get_encoder_port(intel_encoder);
int type = intel_encoder->type;
uint32_t tmp;
if (type == INTEL_OUTPUT_HDMI) {
/* In HDMI/DVI mode, the port width, and swing/emphasis values
@@ -1303,18 +1314,34 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
ironlake_edp_backlight_on(intel_dp);
}
if (intel_crtc->eld_vld) {
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp |= ((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
}
static void intel_disable_ddi(struct intel_encoder *intel_encoder)
{
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_crtc *crtc = encoder->crtc;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int pipe = intel_crtc->pipe;
int type = intel_encoder->type;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t tmp;
if (type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
ironlake_edp_backlight_off(intel_dp);
}
tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~((AUDIO_OUTPUT_ENABLE_A | AUDIO_ELD_VALID_A) << (pipe * 4));
I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
}
int intel_ddi_get_cdclk_freq(struct drm_i915_private *dev_priv)

View File

@@ -1214,9 +1214,15 @@ void assert_pipe(struct drm_i915_private *dev_priv,
if (pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE)
state = true;
if (IS_HASWELL(dev_priv->dev) && cpu_transcoder != TRANSCODER_EDP &&
!(I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_ENABLE)) {
cur_state = false;
} else {
reg = PIPECONF(cpu_transcoder);
val = I915_READ(reg);
cur_state = !!(val & PIPECONF_ENABLE);
}
WARN(cur_state != state,
"pipe %c assertion failure (expected %s, current %s)\n",
pipe_name(pipe), state_string(state), state_string(cur_state));
@@ -2220,8 +2226,10 @@ intel_finish_fb(struct drm_framebuffer *old_fb)
bool was_interruptible = dev_priv->mm.interruptible;
int ret;
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
wait_event(dev_priv->pending_flip_queue,
atomic_read(&dev_priv->mm.wedged) ||
i915_reset_in_progress(&dev_priv->gpu_error) ||
atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
@@ -2869,7 +2877,7 @@ static bool intel_crtc_has_pending_flip(struct drm_crtc *crtc)
unsigned long flags;
bool pending;
if (atomic_read(&dev_priv->mm.wedged))
if (i915_reset_in_progress(&dev_priv->gpu_error))
return false;
spin_lock_irqsave(&dev->event_lock, flags);
@@ -2887,6 +2895,8 @@ static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
if (crtc->fb == NULL)
return;
WARN_ON(waitqueue_active(&dev_priv->pending_flip_queue));
wait_event(dev_priv->pending_flip_queue,
!intel_crtc_has_pending_flip(crtc));
@@ -3717,10 +3727,12 @@ static void intel_crtc_disable(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
struct drm_connector *connector;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
/* crtc should still be enabled when we disable it. */
WARN_ON(!crtc->enabled);
intel_crtc->eld_vld = false;
dev_priv->display.crtc_disable(crtc);
intel_crtc_update_sarea(crtc, false);
dev_priv->display.off(crtc);
@@ -4867,6 +4879,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
if (!has_vga)
return;
mutex_lock(&dev_priv->dpio_lock);
/* XXX: Rip out SDV support once Haswell ships for real. */
if (IS_HASWELL(dev) && (dev->pci_device & 0xFF00) == 0x0C00)
is_sdv = true;
@@ -5009,6 +5023,8 @@ static void lpt_init_pch_refclk(struct drm_device *dev)
tmp = intel_sbi_read(dev_priv, SBI_DBUFF0, SBI_ICLK);
tmp |= SBI_DBUFF0_ENABLE;
intel_sbi_write(dev_priv, SBI_DBUFF0, tmp, SBI_ICLK);
mutex_unlock(&dev_priv->dpio_lock);
}
/*
@@ -5092,6 +5108,11 @@ static void ironlake_set_pipeconf(struct drm_crtc *crtc,
else
val |= PIPECONF_PROGRESSIVE;
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
val |= PIPECONF_COLOR_RANGE_SELECT;
else
val &= ~PIPECONF_COLOR_RANGE_SELECT;
I915_WRITE(PIPECONF(pipe), val);
POSTING_READ(PIPECONF(pipe));
}
@@ -5586,6 +5607,35 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
return fdi_config_ok ? ret : -EINVAL;
}
static void haswell_modeset_global_resources(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool enable = false;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head) {
if (crtc->pipe != PIPE_A && crtc->base.enabled)
enable = true;
/* XXX: Should check for edp transcoder here, but thanks to init
* sequence that's not yet available. Just in case desktop eDP
* on PORT D is possible on haswell, too. */
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list,
base.head) {
if (encoder->type != INTEL_OUTPUT_EDP &&
encoder->connectors_active)
enable = true;
}
/* Even the eDP panel fitter is outside the always-on well. */
if (dev_priv->pch_pf_size)
enable = true;
intel_set_power_well(dev, enable);
}
static int haswell_crtc_mode_set(struct drm_crtc *crtc,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode,
@@ -5618,11 +5668,6 @@ static int haswell_crtc_mode_set(struct drm_crtc *crtc,
num_connectors++;
}
if (is_cpu_edp)
intel_crtc->cpu_transcoder = TRANSCODER_EDP;
else
intel_crtc->cpu_transcoder = pipe;
/* We are not sure yet this won't happen. */
WARN(!HAS_PCH_LPT(dev), "Unexpected PCH type %d\n",
INTEL_PCH_TYPE(dev));
@@ -5687,6 +5732,11 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
int pipe = intel_crtc->pipe;
int ret;
if (IS_HASWELL(dev) && intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))
intel_crtc->cpu_transcoder = TRANSCODER_EDP;
else
intel_crtc->cpu_transcoder = pipe;
drm_vblank_pre_modeset(dev, pipe);
ret = dev_priv->display.crtc_mode_set(crtc, mode, adjusted_mode,
@@ -5783,6 +5833,7 @@ static void haswell_write_eld(struct drm_connector *connector,
struct drm_i915_private *dev_priv = connector->dev->dev_private;
uint8_t *eld = connector->eld;
struct drm_device *dev = crtc->dev;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
uint32_t eldv;
uint32_t i;
int len;
@@ -5824,6 +5875,7 @@ static void haswell_write_eld(struct drm_connector *connector,
DRM_DEBUG_DRIVER("ELD on pipe %c\n", pipe_name(pipe));
eldv = AUDIO_ELD_VALID_A << (pipe * 4);
intel_crtc->eld_vld = true;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
DRM_DEBUG_DRIVER("ELD: DisplayPort detected\n");
@@ -6717,6 +6769,17 @@ void intel_mark_busy(struct drm_device *dev)
void intel_mark_idle(struct drm_device *dev)
{
struct drm_crtc *crtc;
if (!i915_powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
intel_decrease_pllclock(crtc);
}
}
void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
@@ -6736,23 +6799,6 @@ void intel_mark_fb_busy(struct drm_i915_gem_object *obj)
}
}
void intel_mark_fb_idle(struct drm_i915_gem_object *obj)
{
struct drm_device *dev = obj->base.dev;
struct drm_crtc *crtc;
if (!i915_powersave)
return;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
if (!crtc->fb)
continue;
if (to_intel_framebuffer(crtc->fb)->obj == obj)
intel_decrease_pllclock(crtc);
}
}
static void intel_crtc_destroy(struct drm_crtc *crtc)
{
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -6833,7 +6879,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
obj = work->old_fb_obj;
wake_up(&dev_priv->pending_flip_queue);
wake_up_all(&dev_priv->pending_flip_queue);
queue_work(dev_priv->wq, &work->work);
@@ -8219,23 +8265,18 @@ static void intel_setup_outputs(struct drm_device *dev)
if (I915_READ(PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev)) {
int found;
/* Check for built-in panel first. Shares lanes with HDMI on SDVOC */
if (I915_READ(DP_C) & DP_DETECTED)
intel_dp_init(dev, DP_C, PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
if (I915_READ(SDVOB) & PORT_DETECTED) {
/* SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev, SDVOB, true);
if (!found)
intel_hdmi_init(dev, SDVOB, PORT_B);
if (!found && (I915_READ(DP_B) & DP_DETECTED))
intel_dp_init(dev, DP_B, PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + SDVOB) & PORT_DETECTED) {
intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOB, PORT_B);
if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED)
intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
}
if (I915_READ(SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, SDVOC, PORT_C);
if (I915_READ(VLV_DISPLAY_BASE + SDVOC) & PORT_DETECTED)
intel_hdmi_init(dev, VLV_DISPLAY_BASE + SDVOC, PORT_C);
} else if (SUPPORTS_DIGITAL_OUTPUTS(dev)) {
bool found = false;
@@ -8495,6 +8536,8 @@ static void intel_init_display(struct drm_device *dev)
} else if (IS_HASWELL(dev)) {
dev_priv->display.fdi_link_train = hsw_fdi_link_train;
dev_priv->display.write_eld = haswell_write_eld;
dev_priv->display.modeset_global_resources =
haswell_modeset_global_resources;
}
} else if (IS_G4X(dev)) {
dev_priv->display.write_eld = g4x_write_eld;
@@ -8617,6 +8660,15 @@ static struct intel_quirk intel_quirks[] = {
/* Acer Aspire 5734Z must invert backlight brightness */
{ 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
/* Acer/eMachines G725 */
{ 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
/* Acer/eMachines e725 */
{ 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
/* Acer/Packard Bell NCL20 */
{ 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
};
static void intel_init_quirks(struct drm_device *dev)
@@ -8645,12 +8697,7 @@ static void i915_disable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u8 sr1;
u32 vga_reg;
if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
u32 vga_reg = i915_vgacntrl_reg(dev);
vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO);
outb(SR01, VGA_SR_INDEX);
@@ -8665,10 +8712,7 @@ static void i915_disable_vga(struct drm_device *dev)
void intel_modeset_init_hw(struct drm_device *dev)
{
/* We attempt to init the necessary power wells early in the initialization
* time, so the subsystems that expect power to be enabled can work.
*/
intel_init_power_wells(dev);
intel_init_power_well(dev);
intel_prepare_ddi(dev);
@@ -8710,7 +8754,7 @@ void intel_modeset_init(struct drm_device *dev)
dev->mode_config.max_width = 8192;
dev->mode_config.max_height = 8192;
}
dev->mode_config.fb_base = dev_priv->mm.gtt_base_addr;
dev->mode_config.fb_base = dev_priv->gtt.mappable_base;
DRM_DEBUG_KMS("%d display pipe%s available.\n",
dev_priv->num_pipe, dev_priv->num_pipe > 1 ? "s" : "");
@@ -8912,20 +8956,14 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
* the crtc fixup. */
}
static void i915_redisable_vga(struct drm_device *dev)
void i915_redisable_vga(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 vga_reg;
if (HAS_PCH_SPLIT(dev))
vga_reg = CPU_VGACNTRL;
else
vga_reg = VGACNTRL;
u32 vga_reg = i915_vgacntrl_reg(dev);
if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
i915_disable_vga(dev);
}
}

View File

@@ -763,6 +763,22 @@ intel_dp_mode_fixup(struct drm_encoder *encoder,
return false;
bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ? 18 : 24;
if (intel_dp->color_range_auto) {
/*
* See:
* CEA-861-E - 5.1 Default Encoding Parameters
* VESA DisplayPort Ver.1.2a - 5.1.1.1 Video Colorimetry
*/
if (bpp != 18 && drm_mode_cea_vic(adjusted_mode) > 1)
intel_dp->color_range = DP_COLOR_RANGE_16_235;
else
intel_dp->color_range = 0;
}
if (intel_dp->color_range)
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp);
for (clock = 0; clock <= max_clock; clock++) {
@@ -967,6 +983,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
if (!HAS_PCH_SPLIT(dev))
intel_dp->DP |= intel_dp->color_range;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1770,6 +1787,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
case DP_TRAINING_PATTERN_DISABLE:
if (port != PORT_A) {
temp |= DP_TP_CTL_LINK_TRAIN_IDLE;
I915_WRITE(DP_TP_CTL(port), temp);
@@ -1778,6 +1797,8 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
DRM_ERROR("Timed out waiting for DP idle patterns\n");
temp &= ~DP_TP_CTL_LINK_TRAIN_MASK;
}
temp |= DP_TP_CTL_LINK_TRAIN_NORMAL;
break;
@@ -2276,16 +2297,17 @@ g4x_dp_detect(struct intel_dp *intel_dp)
{
struct drm_device *dev = intel_dp_to_dev(intel_dp);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = dp_to_dig_port(intel_dp);
uint32_t bit;
switch (intel_dp->output_reg) {
case DP_B:
switch (intel_dig_port->port) {
case PORT_B:
bit = DPB_HOTPLUG_LIVE_STATUS;
break;
case DP_C:
case PORT_C:
bit = DPC_HOTPLUG_LIVE_STATUS;
break;
case DP_D:
case PORT_D:
bit = DPD_HOTPLUG_LIVE_STATUS;
break;
default:
@@ -2459,10 +2481,21 @@ intel_dp_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_dp->color_range)
return 0;
intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_dp->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_dp->color_range_auto = false;
intel_dp->color_range = 0;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_dp->color_range_auto = false;
intel_dp->color_range = DP_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
goto done;
}
@@ -2603,6 +2636,7 @@ intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connect
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_dp->color_range_auto = true;
if (is_edp(intel_dp)) {
drm_mode_create_scaling_mode_property(connector->dev);

View File

@@ -109,6 +109,11 @@
* timings in the mode to prevent the crtc fixup from overwriting them.
* Currently only lvds needs that. */
#define INTEL_MODE_CRTC_TIMINGS_SET (0x20)
/*
* Set when limited 16-235 (as opposed to full 0-255) RGB color range is
* to be used.
*/
#define INTEL_MODE_LIMITED_COLOR_RANGE (0x40)
static inline void
intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
@@ -206,6 +211,7 @@ struct intel_crtc {
* some outputs connected to this crtc.
*/
bool active;
bool eld_vld;
bool primary_disabled; /* is the crtc obscured by a plane? */
bool lowfreq_avail;
struct intel_overlay *overlay;
@@ -284,6 +290,9 @@ struct cxsr_latency {
#define DIP_LEN_AVI 13
#define DIP_AVI_PR_1 0
#define DIP_AVI_PR_2 1
#define DIP_AVI_RGB_QUANT_RANGE_DEFAULT (0 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_LIMITED (1 << 2)
#define DIP_AVI_RGB_QUANT_RANGE_FULL (2 << 2)
#define DIP_TYPE_SPD 0x83
#define DIP_VERSION_SPD 0x1
@@ -338,9 +347,11 @@ struct intel_hdmi {
u32 sdvox_reg;
int ddc_bus;
uint32_t color_range;
bool color_range_auto;
bool has_hdmi_sink;
bool has_audio;
enum hdmi_force_audio force_audio;
bool rgb_quant_range_selectable;
void (*write_infoframe)(struct drm_encoder *encoder,
struct dip_infoframe *frame);
void (*set_infoframes)(struct drm_encoder *encoder,
@@ -357,6 +368,7 @@ struct intel_dp {
bool has_audio;
enum hdmi_force_audio force_audio;
uint32_t color_range;
bool color_range_auto;
uint8_t link_bw;
uint8_t lane_count;
uint8_t dpcd[DP_RECEIVER_CAP_SIZE];
@@ -440,9 +452,8 @@ extern bool intel_sdvo_init(struct drm_device *dev, uint32_t sdvo_reg,
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev);
extern void intel_mark_idle(struct drm_device *dev);
extern void intel_mark_fb_busy(struct drm_i915_gem_object *obj);
extern void intel_mark_fb_idle(struct drm_i915_gem_object *obj);
extern void intel_mark_idle(struct drm_device *dev);
extern bool intel_lvds_init(struct drm_device *dev);
extern bool intel_is_dual_link_lvds(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int output_reg,
@@ -655,7 +666,8 @@ extern void intel_update_fbc(struct drm_device *dev);
extern void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
extern void intel_gpu_ips_teardown(void);
extern void intel_init_power_wells(struct drm_device *dev);
extern void intel_init_power_well(struct drm_device *dev);
extern void intel_set_power_well(struct drm_device *dev, bool enable);
extern void intel_enable_gt_powersave(struct drm_device *dev);
extern void intel_disable_gt_powersave(struct drm_device *dev);
extern void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv);

View File

@@ -135,14 +135,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
goto out_unpin;
}
info->apertures->ranges[0].base = dev->mode_config.fb_base;
info->apertures->ranges[0].size =
dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
info->apertures->ranges[0].size = dev_priv->gtt.mappable_end;
info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
info->screen_base =
ioremap_wc(dev_priv->mm.gtt_base_addr + obj->gtt_offset,
ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
size);
if (!info->screen_base) {
ret = -ENOSPC;
@@ -306,6 +305,7 @@ void intel_fb_restore_mode(struct drm_device *dev)
/* Be sure to shut off any planes that may be active */
list_for_each_entry(plane, &config->plane_list, head)
if (plane->enabled)
plane->funcs->disable_plane(plane);
drm_modeset_unlock_all(dev);

View File

@@ -331,6 +331,7 @@ static void intel_set_infoframe(struct drm_encoder *encoder,
static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
.ver = DIP_VERSION_AVI,
@@ -340,6 +341,13 @@ static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
avi_if.body.avi.YQ_CN_PR |= DIP_AVI_PR_2;
if (intel_hdmi->rgb_quant_range_selectable) {
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
}
avi_if.body.avi.VIC = drm_mode_cea_vic(adjusted_mode);
intel_set_infoframe(encoder, &avi_if);
@@ -364,7 +372,8 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode)
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = VIDEO_DIP_CTL;
u32 val = I915_READ(reg);
u32 port;
@@ -391,11 +400,11 @@ static void g4x_set_infoframes(struct drm_encoder *encoder,
return;
}
switch (intel_hdmi->sdvox_reg) {
case SDVOB:
switch (intel_dig_port->port) {
case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
case SDVOC:
case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
default:
@@ -428,7 +437,8 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
{
struct drm_i915_private *dev_priv = encoder->dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
u32 reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
u32 val = I915_READ(reg);
u32 port;
@@ -447,14 +457,14 @@ static void ibx_set_infoframes(struct drm_encoder *encoder,
return;
}
switch (intel_hdmi->sdvox_reg) {
case HDMIB:
switch (intel_dig_port->port) {
case PORT_B:
port = VIDEO_DIP_PORT_B;
break;
case HDMIC:
case PORT_C:
port = VIDEO_DIP_PORT_C;
break;
case HDMID:
case PORT_D:
port = VIDEO_DIP_PORT_D;
break;
default:
@@ -766,6 +776,20 @@ bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
if (intel_hdmi->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_hdmi->has_hdmi_sink &&
drm_mode_cea_vic(adjusted_mode) > 1)
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
else
intel_hdmi->color_range = 0;
}
if (intel_hdmi->color_range)
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
return true;
}
@@ -773,13 +797,14 @@ static bool g4x_hdmi_connected(struct intel_hdmi *intel_hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(intel_hdmi);
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
uint32_t bit;
switch (intel_hdmi->sdvox_reg) {
case SDVOB:
switch (intel_dig_port->port) {
case PORT_B:
bit = HDMIB_HOTPLUG_LIVE_STATUS;
break;
case SDVOC:
case PORT_C:
bit = HDMIC_HOTPLUG_LIVE_STATUS;
break;
default:
@@ -811,6 +836,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink = false;
intel_hdmi->has_audio = false;
intel_hdmi->rgb_quant_range_selectable = false;
edid = drm_get_edid(connector,
intel_gmbus_get_adapter(dev_priv,
intel_hdmi->ddc_bus));
@@ -822,6 +848,8 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
intel_hdmi->has_hdmi_sink =
drm_detect_hdmi_monitor(edid);
intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
intel_hdmi->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
}
kfree(edid);
}
@@ -907,10 +935,21 @@ intel_hdmi_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_hdmi->color_range)
return 0;
intel_hdmi->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_hdmi->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_hdmi->color_range_auto = false;
intel_hdmi->color_range = 0;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_hdmi->color_range_auto = false;
intel_hdmi->color_range = SDVO_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
goto done;
}
@@ -959,6 +998,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
{
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
intel_hdmi->color_range_auto = true;
}
void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,

View File

@@ -515,6 +515,8 @@ int intel_setup_gmbus(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev))
dev_priv->gpio_mmio_base = PCH_GPIOA - GPIOA;
else if (IS_VALLEYVIEW(dev))
dev_priv->gpio_mmio_base = VLV_DISPLAY_BASE;
else
dev_priv->gpio_mmio_base = 0;

View File

@@ -100,8 +100,9 @@ intel_attach_force_audio_property(struct drm_connector *connector)
}
static const struct drm_prop_enum_list broadcast_rgb_names[] = {
{ 0, "Full" },
{ 1, "Limited 16:235" },
{ INTEL_BROADCAST_RGB_AUTO, "Automatic" },
{ INTEL_BROADCAST_RGB_FULL, "Full" },
{ INTEL_BROADCAST_RGB_LIMITED, "Limited 16:235" },
};
void
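With this change the "Broadcast RGB" connector property exposes three values (Automatic, Full, Limited 16:235) instead of two, and the AUTO case lets the driver pick the range per CEA-861-E. Purely as an illustrative userspace sketch (not part of this series), a libdrm client could select the new Automatic value roughly as follows; the file descriptor, connector id and error handling are assumptions:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

/* Hypothetical helper: set the "Broadcast RGB" enum property on a connector.
 * `value` is the enum index; with this series 0 maps to "Automatic", but a
 * robust client should look the index up by name in prop->enums. */
static int set_broadcast_rgb(int fd, uint32_t connector_id, uint64_t value)
{
	drmModeConnector *conn = drmModeGetConnector(fd, connector_id);
	int i, ret = -1;

	if (!conn)
		return -1;

	for (i = 0; i < conn->count_props; i++) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, conn->props[i]);

		if (!prop)
			continue;
		if (strcmp(prop->name, "Broadcast RGB") == 0)
			ret = drmModeConnectorSetProperty(fd, connector_id,
							  prop->prop_id, value);
		drmModeFreeProperty(prop);
	}

	drmModeFreeConnector(conn);
	return ret;
}

Setting connector properties requires DRM master; the enum indices are driver-defined, which is why matching on the enum name rather than hard-coding the index is the safer design.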

View File

@@ -347,7 +347,7 @@ static void intel_didl_outputs(struct drm_device *dev)
int i = 0;
handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev);
if (!handle || ACPI_FAILURE(acpi_bus_get_device(handle, &acpi_dev)))
if (!handle || acpi_bus_get_device(handle, &acpi_dev))
return;
if (acpi_is_video_device(acpi_dev))

View File

@@ -195,7 +195,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_wc(dev_priv->mm.gtt_mapping,
regs = io_mapping_map_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset);
return regs;
@@ -1434,7 +1434,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
regs = (struct overlay_registers __iomem *)
overlay->reg_bo->phys_obj->handle->vaddr;
else
regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
regs = io_mapping_map_atomic_wc(dev_priv->gtt.mappable,
overlay->reg_bo->gtt_offset);
return regs;

View File

@@ -3687,6 +3687,10 @@ static void gen7_setup_fixed_func_scheduler(struct drm_i915_private *dev_priv)
reg |= GEN7_FF_VS_SCHED_HW;
reg |= GEN7_FF_DS_SCHED_HW;
/* WaVSRefCountFullforceMissDisable */
if (IS_HASWELL(dev_priv->dev))
reg &= ~GEN7_FF_VS_REF_CNT_FFME;
I915_WRITE(GEN7_FF_THREAD_MODE, reg);
}
@@ -4050,35 +4054,57 @@ void intel_init_clock_gating(struct drm_device *dev)
dev_priv->display.init_clock_gating(dev);
}
/* Starting with Haswell, we have different power wells for
* different parts of the GPU. This attempts to enable them all.
*/
void intel_init_power_wells(struct drm_device *dev)
void intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long power_wells[] = {
HSW_PWR_WELL_CTL1,
HSW_PWR_WELL_CTL2,
HSW_PWR_WELL_CTL4
};
int i;
bool is_enabled, enable_requested;
uint32_t tmp;
if (!IS_HASWELL(dev))
return;
mutex_lock(&dev->struct_mutex);
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE;
enable_requested = tmp & HSW_PWR_WELL_ENABLE;
for (i = 0; i < ARRAY_SIZE(power_wells); i++) {
int well = I915_READ(power_wells[i]);
if (enable) {
if (!enable_requested)
I915_WRITE(HSW_PWR_WELL_DRIVER, HSW_PWR_WELL_ENABLE);
if ((well & HSW_PWR_WELL_STATE) == 0) {
I915_WRITE(power_wells[i], well & HSW_PWR_WELL_ENABLE);
if (wait_for((I915_READ(power_wells[i]) & HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Error enabling power well %lx\n", power_wells[i]);
if (!is_enabled) {
DRM_DEBUG_KMS("Enabling power well\n");
if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) &
HSW_PWR_WELL_STATE), 20))
DRM_ERROR("Timeout enabling power well\n");
}
} else {
if (enable_requested) {
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
DRM_DEBUG_KMS("Requesting to disable the power well\n");
}
}
}
mutex_unlock(&dev->struct_mutex);
/*
* Starting with Haswell, we have a "Power Down Well" that can be turned off
* when not needed anymore. We have 4 registers that can request the power well
* to be enabled, and it will only be disabled if none of the registers is
* requesting it to be enabled.
*/
void intel_init_power_well(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_HASWELL(dev))
return;
/* For now, we need the power well to be always enabled. */
intel_set_power_well(dev, true);
/* We're taking over the BIOS, so clear any requests made by it since
* the driver is in charge now. */
if (I915_READ(HSW_PWR_WELL_BIOS) & HSW_PWR_WELL_ENABLE)
I915_WRITE(HSW_PWR_WELL_BIOS, 0);
}
/* Set up chip specific power management-related functions */

View File

@@ -1203,7 +1203,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
goto err_unpin;
ring->virtual_start =
ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
ring->size);
if (ring->virtual_start == NULL) {
DRM_ERROR("Failed to map ringbuffer.\n");
@@ -1223,8 +1223,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
if (IS_I830(ring->dev) || IS_845G(ring->dev))
ring->effective_size -= 128;
intel_ring_init_seqno(ring, dev_priv->last_seqno);
return 0;
err_unmap:
@@ -1371,7 +1369,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
msleep(1);
ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
} while (!time_after(jiffies, end));
@@ -1460,7 +1459,8 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
drm_i915_private_t *dev_priv = ring->dev->dev_private;
int ret;
ret = i915_gem_check_wedge(dev_priv, dev_priv->mm.interruptible);
ret = i915_gem_check_wedge(&dev_priv->gpu_error,
dev_priv->mm.interruptible);
if (ret)
return ret;
@@ -1491,7 +1491,7 @@ void intel_ring_advance(struct intel_ring_buffer *ring)
struct drm_i915_private *dev_priv = ring->dev->dev_private;
ring->tail &= ring->size - 1;
if (dev_priv->stop_rings & intel_ring_flag(ring))
if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
return;
ring->write_tail(ring, ring->tail);
}

View File

@@ -103,6 +103,7 @@ struct intel_sdvo {
* It is only valid when using TMDS encoding and 8 bit per color mode.
*/
uint32_t color_range;
bool color_range_auto;
/**
* This is set if we're going to treat the device as TV-out.
@@ -125,6 +126,7 @@ struct intel_sdvo {
bool is_hdmi;
bool has_hdmi_monitor;
bool has_hdmi_audio;
bool rgb_quant_range_selectable;
/**
* This is set if we detect output of sdvo device as LVDS and
@@ -946,7 +948,8 @@ static bool intel_sdvo_write_infoframe(struct intel_sdvo *intel_sdvo,
&tx_rate, 1);
}
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo,
const struct drm_display_mode *adjusted_mode)
{
struct dip_infoframe avi_if = {
.type = DIP_TYPE_AVI,
@@ -955,6 +958,13 @@ static bool intel_sdvo_set_avi_infoframe(struct intel_sdvo *intel_sdvo)
};
uint8_t sdvo_data[4 + sizeof(avi_if.body.avi)];
if (intel_sdvo->rgb_quant_range_selectable) {
if (adjusted_mode->private_flags & INTEL_MODE_LIMITED_COLOR_RANGE)
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_LIMITED;
else
avi_if.body.avi.ITC_EC_Q_SC |= DIP_AVI_RGB_QUANT_RANGE_FULL;
}
intel_dip_infoframe_csum(&avi_if);
/* sdvo spec says that the ecc is handled by the hw, and it looks like
@@ -1064,6 +1074,18 @@ static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
multiplier = intel_sdvo_get_pixel_multiplier(adjusted_mode);
intel_mode_set_pixel_multiplier(adjusted_mode, multiplier);
if (intel_sdvo->color_range_auto) {
/* See CEA-861-E - 5.1 Default Encoding Parameters */
if (intel_sdvo->has_hdmi_monitor &&
drm_mode_cea_vic(adjusted_mode) > 1)
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
else
intel_sdvo->color_range = 0;
}
if (intel_sdvo->color_range)
adjusted_mode->private_flags |= INTEL_MODE_LIMITED_COLOR_RANGE;
return true;
}
@@ -1121,7 +1143,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_HDMI);
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
intel_sdvo_set_avi_infoframe(intel_sdvo);
intel_sdvo_set_avi_infoframe(intel_sdvo, adjusted_mode);
} else
intel_sdvo_set_encode(intel_sdvo, SDVO_ENCODE_DVI);
@@ -1153,7 +1175,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* The real mode polarity is set by the SDVO commands, using
* struct intel_sdvo_dtd. */
sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
if (intel_sdvo->is_hdmi)
if (!HAS_PCH_SPLIT(dev) && intel_sdvo->is_hdmi)
sdvox |= intel_sdvo->color_range;
if (INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_BORDER_ENABLE;
@@ -1513,6 +1535,8 @@ intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
if (intel_sdvo->is_hdmi) {
intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
intel_sdvo->rgb_quant_range_selectable =
drm_rgb_quant_range_selectable(edid);
}
} else
status = connector_status_disconnected;
@@ -1564,6 +1588,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
intel_sdvo->has_hdmi_monitor = false;
intel_sdvo->has_hdmi_audio = false;
intel_sdvo->rgb_quant_range_selectable = false;
if ((intel_sdvo_connector->output_flag & response) == 0)
ret = connector_status_disconnected;
@@ -1897,10 +1922,21 @@ intel_sdvo_set_property(struct drm_connector *connector,
}
if (property == dev_priv->broadcast_rgb_property) {
if (val == !!intel_sdvo->color_range)
return 0;
intel_sdvo->color_range = val ? SDVO_COLOR_RANGE_16_235 : 0;
switch (val) {
case INTEL_BROADCAST_RGB_AUTO:
intel_sdvo->color_range_auto = true;
break;
case INTEL_BROADCAST_RGB_FULL:
intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = 0;
break;
case INTEL_BROADCAST_RGB_LIMITED:
intel_sdvo->color_range_auto = false;
intel_sdvo->color_range = SDVO_COLOR_RANGE_16_235;
break;
default:
return -EINVAL;
}
goto done;
}
@@ -2197,13 +2233,16 @@ intel_sdvo_connector_init(struct intel_sdvo_connector *connector,
}
static void
intel_sdvo_add_hdmi_properties(struct intel_sdvo_connector *connector)
intel_sdvo_add_hdmi_properties(struct intel_sdvo *intel_sdvo,
struct intel_sdvo_connector *connector)
{
struct drm_device *dev = connector->base.base.dev;
intel_attach_force_audio_property(&connector->base.base);
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev))
if (INTEL_INFO(dev)->gen >= 4 && IS_MOBILE(dev)) {
intel_attach_broadcast_rgb_property(&connector->base.base);
intel_sdvo->color_range_auto = true;
}
}
static bool
@@ -2251,7 +2290,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
intel_sdvo_add_hdmi_properties(intel_sdvo, intel_sdvo_connector);
return true;
}

View File

@@ -1063,6 +1063,7 @@ extern u8 *drm_find_cea_extension(struct edid *edid);
extern u8 drm_match_cea_mode(struct drm_display_mode *to_match);
extern bool drm_detect_hdmi_monitor(struct edid *edid);
extern bool drm_detect_monitor_audio(struct edid *edid);
extern bool drm_rgb_quant_range_selectable(struct edid *edid);
extern int drm_mode_page_flip_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
extern struct drm_display_mode *drm_cvt_mode(struct drm_device *dev,

View File

@@ -3,24 +3,7 @@
#ifndef _DRM_INTEL_GTT_H
#define _DRM_INTEL_GTT_H
struct intel_gtt {
/* Size of memory reserved for graphics by the BIOS */
unsigned int stolen_size;
/* Total number of gtt entries. */
unsigned int gtt_total_entries;
/* Part of the gtt that is mappable by the cpu, for those chips where
* this is not the full gtt. */
unsigned int gtt_mappable_entries;
/* Whether i915 needs to use the dmar apis or not. */
unsigned int needs_dmar : 1;
/* Whether we idle the gpu before mapping/unmapping */
unsigned int do_idle_maps : 1;
/* Share the scratch page dma with ppgtts. */
dma_addr_t scratch_page_dma;
struct page *scratch_page;
/* needed for ioremap in drm/i915 */
phys_addr_t gma_bus_addr;
} *intel_gtt_get(void);
void intel_gtt_get(size_t *gtt_total, size_t *stolen_size);
int intel_gmch_probe(struct pci_dev *bridge_pdev, struct pci_dev *gpu_pdev,
struct agp_bridge_data *bridge);

View File

@@ -308,6 +308,8 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_RSVD_FOR_FUTURE_USE 22
#define I915_PARAM_HAS_SECURE_BATCHES 23
#define I915_PARAM_HAS_PINNED_BATCHES 24
#define I915_PARAM_HAS_EXEC_NO_RELOC 25
#define I915_PARAM_HAS_EXEC_HANDLE_LUT 26
typedef struct drm_i915_getparam {
int param;
@@ -628,7 +630,11 @@ struct drm_i915_gem_exec_object2 {
__u64 offset;
#define EXEC_OBJECT_NEEDS_FENCE (1<<0)
#define EXEC_OBJECT_NEEDS_GTT (1<<1)
#define EXEC_OBJECT_WRITE (1<<2)
#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_WRITE<<1)
__u64 flags;
__u64 rsvd1;
__u64 rsvd2;
};
@@ -687,6 +693,20 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_IS_PINNED (1<<10)
/** Provide a hint to the kernel that the command stream and auxiliary
* state buffers already holds the correct presumed addresses and so the
* relocation process may be skipped if no buffers need to be moved in
* preparation for the execbuffer.
*/
#define I915_EXEC_NO_RELOC (1<<11)
/** Use the reloc.handle as an index into the exec object array rather
* than as the per-file handle.
*/
#define I915_EXEC_HANDLE_LUT (1<<12)
#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_HANDLE_LUT<<1)
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
(eb2).rsvd1 = context & I915_EXEC_CONTEXT_ID_MASK
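The new execbuffer flags pair with the GETPARAM entries added above: userspace first asks whether the kernel supports them, then sets I915_EXEC_NO_RELOC (together with per-object EXEC_OBJECT_WRITE hints) and/or I915_EXEC_HANDLE_LUT on the execbuffer2 call. Below is a rough, hypothetical userspace sketch of that flow, not taken from this series; the buffer handles, batch length and error handling are placeholders:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/i915_drm.h>

/* Hypothetical: submit a render batch, hinting that relocations can be
 * skipped when the kernel supports I915_EXEC_NO_RELOC. */
static int submit_batch(int fd, uint32_t target_handle, uint32_t batch_handle,
			uint32_t batch_len)
{
	struct drm_i915_gem_exec_object2 objs[2];
	struct drm_i915_gem_execbuffer2 execbuf;
	int has_no_reloc = 0;
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_HAS_EXEC_NO_RELOC,
		.value = &has_no_reloc,
	};

	/* Probe for the new capability; older kernels leave it at 0. */
	drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp);

	memset(objs, 0, sizeof(objs));
	objs[0].handle = target_handle;
	objs[0].flags = EXEC_OBJECT_WRITE;	/* GPU writes to this buffer */
	/* With NO_RELOC, objs[].offset should carry the presumed GPU address
	 * the batch was built against; 0 here is just a placeholder. */
	objs[1].handle = batch_handle;		/* batch buffer goes last */

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = 2;
	execbuf.batch_len = batch_len;
	execbuf.flags = I915_EXEC_RENDER;
	if (has_no_reloc)
		execbuf.flags |= I915_EXEC_NO_RELOC;

	return drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
}

I915_EXEC_HANDLE_LUT would be requested the same way (probing I915_PARAM_HAS_EXEC_HANDLE_LUT), with relocation target handles then interpreted as indices into the exec object array instead of per-file handles, which is what makes the faster execbuf lookup path possible.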