Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "This contains one i915 patch twice, as I merged it locally for
  testing, then pulled some stuff in on top, and then Jani sent it to
  me; I didn't think it was worth redoing all the merges of what I had
  tested.

  Summary:

   - amdgpu/radeon fixes for some more power management and VM races

   - two i915 fixes: one for a recent regression, another power
     management fix for Skylake

   - two tegra dma mask fixes for a regression

   - one ast fix for a typo I made transcribing the userspace driver,
     which I'd like to get into stable so I don't forget about it"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  gpu: host1x: Set DMA ops on device creation
  gpu: host1x: Set DMA mask
  drm/amdgpu: return from atombios_dp_get_dpcd only when error
  drm/amdgpu/cz: remove commented out call to enable vce pg
  drm/amdgpu/powerplay/cz: enable/disable vce dpm independent of vce pg
  drm/amdgpu/cz: enable/disable vce dpm even if vce pg is disabled
  drm/amdgpu/gfx8: specify which engine to wait before vm flush
  drm/amdgpu: apply gfx_v8 fixes to gfx_v7 as well
  drm/amd/powerplay: send event to notify powerplay all modules are initialized.
  drm/amd/powerplay: export AMD_PP_EVENT_COMPLETE_INIT task to amdgpu.
  drm/radeon/pm: update current crtc info after setting the powerstate
  drm/amdgpu/pm: update current crtc info after setting the powerstate
  drm/i915: Balance assert_rpm_wakelock_held() for !IS_ENABLED(CONFIG_PM)
  drm/i915/skl: Fix power domain suspend sequence
  drm/ast: Fix incorrect register check for DRAM width
  drm/i915: Balance assert_rpm_wakelock_held() for !IS_ENABLED(CONFIG_PM)
commit 638c201e3f
@@ -77,7 +77,7 @@ void amdgpu_connector_hotplug(struct drm_connector *connector)
 	} else if (amdgpu_atombios_dp_needs_link_train(amdgpu_connector)) {
 		/* Don't try to start link training before we
 		 * have the dpcd */
-		if (!amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
+		if (amdgpu_atombios_dp_get_dpcd(amdgpu_connector))
 			return;
 
 		/* set it to OFF so that drm_helper_connector_dpms()
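
Note on the dpcd hunk: amdgpu_atombios_dp_get_dpcd() follows the kernel's
0-on-success convention, so the old "if (!...)" bailed out exactly when the
read succeeded and link training never ran. A minimal userspace sketch of the
two checks (the stub below is hypothetical, not the driver function):

#include <stdio.h>

/* Hypothetical stand-in for amdgpu_atombios_dp_get_dpcd():
 * 0 on success, negative errno on failure (kernel convention). */
static int get_dpcd(int fail)
{
	return fail ? -5 /* -EIO */ : 0;
}

int main(void)
{
	/* Old check: bails out when the read *succeeds*, so the link
	 * training that follows never runs on the good path. */
	if (!get_dpcd(0))
		printf("old check: returned early on success (bug)\n");

	/* New check: return only when the read actually failed. */
	if (get_dpcd(1))
		printf("new check: returned early on failure (intended)\n");
	return 0;
}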
@@ -649,9 +649,6 @@ force:
 	/* update display watermarks based on new power state */
 	amdgpu_display_bandwidth_update(adev);
 
-	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
-	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
 	/* wait for the rings to drain */
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -670,6 +667,9 @@ force:
 	/* update displays */
 	amdgpu_dpm_display_configuration_changed(adev);
 
+	adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+	adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
 	if (adev->pm.funcs->force_performance_level) {
 		if (adev->pm.dpm.thermal_active) {
 			enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
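
The two amdgpu_pm.c hunks above move code rather than change it: the DPM
backends compare the cached (current) CRTC info against the new one while the
power state is being set, so refreshing the cache before the rings drain and
the display-config notification fires defeats that comparison. A compressed
sketch of the ordering, with illustrative names standing in for the driver's
fence waits and SMU notification:

struct dpm_state {
	unsigned int current_active_crtcs;
	unsigned int new_active_crtcs;
};

static void wait_for_rings_to_drain(void) { /* fence waits elided */ }
static void display_configuration_changed(void) { /* elided */ }

static void set_power_state(struct dpm_state *s)
{
	wait_for_rings_to_drain();	/* old vs. new info still distinct */
	display_configuration_changed();
	s->current_active_crtcs = s->new_active_crtcs;	/* publish last */
}

int main(void)
{
	struct dpm_state s = { .current_active_crtcs = 1, .new_active_crtcs = 3 };

	set_power_state(&s);
	return s.current_active_crtcs == 3 ? 0 : 1;
}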
@@ -143,8 +143,10 @@ static int amdgpu_pp_late_init(void *handle)
 					adev->powerplay.pp_handle);
 
 #ifdef CONFIG_DRM_AMD_POWERPLAY
-	if (adev->pp_enabled)
+	if (adev->pp_enabled) {
 		amdgpu_pm_sysfs_init(adev);
+		amdgpu_dpm_dispatch_task(adev, AMD_PP_EVENT_COMPLETE_INIT, NULL, NULL);
+	}
 #endif
 	return ret;
 }
@@ -2202,8 +2202,7 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 						    AMD_PG_STATE_GATE);
 
-			/* TODO: to figure out why vce can't be poweroff. */
-			/* cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF); */
+			cz_enable_vce_dpm(adev, false);
+			cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerOFF);
 			pi->vce_power_gated = true;
 		} else {
 			cz_send_msg_to_smc(adev, PPSMC_MSG_VCEPowerON);
@@ -2226,10 +2225,8 @@ static void cz_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
 		}
 	} else { /*pi->caps_vce_pg*/
 		cz_update_vce_dpm(adev);
-		cz_enable_vce_dpm(adev, true);
+		cz_enable_vce_dpm(adev, !gate);
 	}
-
-	return;
 }
 
 const struct amd_ip_funcs cz_dpm_ip_funcs = {
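
The two cz hunks reduce to one idea: VCE DPM must track the gate request even
when VCE power gating itself is unavailable, hence passing !gate instead of a
hard-coded true. A toy model with illustrative stand-ins for
cz_dpm_powergate_vce()/cz_enable_vce_dpm():

#include <stdbool.h>
#include <stdio.h>

static void enable_vce_dpm(bool enable)
{
	printf("VCE DPM %s\n", enable ? "on" : "off");
}

static void powergate_vce(bool gate)
{
	/* The old code passed a hard-coded true here, so a gate
	 * request still left VCE DPM running. */
	enable_vce_dpm(!gate);
}

int main(void)
{
	powergate_vce(true);	/* gating   -> DPM off */
	powergate_vce(false);	/* ungating -> DPM on  */
	return 0;
}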
@@ -3628,6 +3628,19 @@ static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 					unsigned vm_id, uint64_t pd_addr)
 {
 	int usepfp = (ring->type == AMDGPU_RING_TYPE_GFX);
+	uint32_t seq = ring->fence_drv.sync_seq;
+	uint64_t addr = ring->fence_drv.gpu_addr;
+
+	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
+	amdgpu_ring_write(ring, addr & 0xfffffffc);
+	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
+	amdgpu_ring_write(ring, seq);
+	amdgpu_ring_write(ring, 0xffffffff);
+	amdgpu_ring_write(ring, 4); /* poll interval */
+
 	if (usepfp) {
 		/* synce CE with ME to prevent CE fetch CEIB before context switch done */
 		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
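
Both vm-flush hunks (gfx_v7 above, gfx_v8 below) revolve around WAIT_REG_MEM:
the packet polls a memory word until it compares equal to a reference value,
and its ENGINE field selects which micro-engine (ME or PFP) stalls on the
poll; waiting on the wrong engine lets the flush overtake in-flight fences.
A sketch of how the control word is composed — the shift values follow the
CIK-era headers and are quoted from memory, so treat them as an assumption:

#include <stdio.h>

#define WAIT_REG_MEM_FUNCTION(x)	((x) << 0)	/* 3 = equal */
#define WAIT_REG_MEM_MEM_SPACE(x)	((x) << 4)	/* 1 = memory */
#define WAIT_REG_MEM_ENGINE(x)		((x) << 8)	/* 0 = ME, 1 = PFP */

int main(void)
{
	int usepfp = 1;	/* GFX ring: make the PFP do the wait */
	unsigned int ctrl = WAIT_REG_MEM_MEM_SPACE(1) |
			    WAIT_REG_MEM_FUNCTION(3) |
			    WAIT_REG_MEM_ENGINE(usepfp);

	printf("WAIT_REG_MEM control word: 0x%03x\n", ctrl);
	return 0;
}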
@@ -4809,7 +4809,8 @@ static void gfx_v8_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
 
 	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
 	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
-				 WAIT_REG_MEM_FUNCTION(3))); /* equal */
+				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
+				 WAIT_REG_MEM_ENGINE(usepfp))); /* pfp or me */
 	amdgpu_ring_write(ring, addr & 0xfffffffc);
 	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
 	amdgpu_ring_write(ring, seq);
@@ -402,8 +402,11 @@ int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_event event_id, void *input,
 		data.requested_ui_label = power_state_convert(ps);
 		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
 		break;
 	}
 	break;
+	case AMD_PP_EVENT_COMPLETE_INIT:
+		ret = pem_handle_event(pp_handle->eventmgr, event_id, &data);
+		break;
 	default:
 		break;
 	}
@@ -165,6 +165,7 @@ const struct action_chain resume_action_chain = {
 };
 
 static const pem_event_action *complete_init_event[] = {
+	unblock_adjust_power_state_tasks,
 	adjust_power_state_tasks,
 	enable_gfx_clock_gating_tasks,
 	enable_gfx_voltage_island_power_gating_tasks,
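
The powerplay hunks fit together as follows: amdgpu's late init dispatches
AMD_PP_EVENT_COMPLETE_INIT, the task dispatcher gains a case for it, and the
event manager maps it to an action chain that unblocks and re-runs the
power-state adjustment. A minimal model of that event-to-chain dispatch
(names are illustrative, not the powerplay symbols):

#include <stdio.h>

typedef int (*pem_action)(void);

static int unblock_adjust_power_state(void) { puts("unblock"); return 0; }
static int adjust_power_state(void)         { puts("adjust");  return 0; }

/* NULL-terminated chain of actions run for one event id. */
static const pem_action complete_init_chain[] = {
	unblock_adjust_power_state,
	adjust_power_state,
	NULL,
};

static int pem_handle_chain(const pem_action *chain)
{
	for (; *chain; chain++) {
		int ret = (*chain)();

		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	return pem_handle_chain(complete_init_chain);
}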
@@ -226,7 +226,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
 		}
 	} else {
 		cz_dpm_update_vce_dpm(hwmgr);
-		cz_enable_disable_vce_dpm(hwmgr, true);
+		cz_enable_disable_vce_dpm(hwmgr, !bgate);
 		return 0;
 	}
 
@@ -227,7 +227,7 @@ static int ast_get_dram_info(struct drm_device *dev)
 	} while (ast_read32(ast, 0x10000) != 0x01);
 	data = ast_read32(ast, 0x10004);
 
-	if (data & 0x400)
+	if (data & 0x40)
 		ast->dram_bus_width = 16;
 	else
 		ast->dram_bus_width = 32;
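
The ast fix is a one-nibble typo with a real effect: testing bit 10 (0x400)
instead of bit 6 (0x40) classifies the DRAM bus width from the wrong status
bit. A worked example with a hypothetical readback whose bit 6 is set:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t data = 0x00000040;	/* hypothetical readback: bit 6 set */

	printf("data & 0x400 -> %d-bit bus (old, wrong bit)\n",
	       (data & 0x400) ? 16 : 32);
	printf("data & 0x40  -> %d-bit bus (fixed)\n",
	       (data & 0x40) ? 16 : 32);
	return 0;
}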
@@ -2303,15 +2303,15 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv, bool resume)
  */
 void intel_power_domains_suspend(struct drm_i915_private *dev_priv)
 {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+		skl_display_core_uninit(dev_priv);
+
 	/*
 	 * Even if power well support was disabled we still want to disable
 	 * power wells while we are system suspended.
 	 */
 	if (!i915.disable_power_well)
 		intel_display_power_put(dev_priv, POWER_DOMAIN_INIT);
-
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		skl_display_core_uninit(dev_priv);
 }
 
 /**
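
The suspend-sequence hunk only reorders: skl_display_core_uninit() moves
ahead of the POWER_DOMAIN_INIT release because, as I read the fix's intent,
the display-core teardown still needs those power wells referenced. Shape of
the fix with stubs, not the i915 functions:

static void skl_display_core_uninit_stub(void)
{
	/* display-core teardown; assumes init power wells are still up */
}

static void power_domain_init_put_stub(void)
{
	/* drops the POWER_DOMAIN_INIT reference; wells may power down */
}

static void power_domains_suspend_sketch(void)
{
	skl_display_core_uninit_stub();	/* first: wells still referenced */
	power_domain_init_put_stub();	/* then release them */
}

int main(void)
{
	power_domains_suspend_sketch();
	return 0;
}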
@@ -2349,22 +2349,20 @@ bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct device *device = &dev->pdev->dev;
-	int ret;
-
-	if (!IS_ENABLED(CONFIG_PM))
-		return true;
 
-	ret = pm_runtime_get_if_in_use(device);
+	if (IS_ENABLED(CONFIG_PM)) {
+		int ret = pm_runtime_get_if_in_use(device);
 
-	/*
-	 * In cases runtime PM is disabled by the RPM core and we get an
-	 * -EINVAL return value we are not supposed to call this function,
-	 * since the power state is undefined. This applies atm to the
-	 * late/early system suspend/resume handlers.
-	 */
-	WARN_ON_ONCE(ret < 0);
-	if (ret <= 0)
-		return false;
+		/*
+		 * In cases runtime PM is disabled by the RPM core and we get
+		 * an -EINVAL return value we are not supposed to call this
+		 * function, since the power state is undefined. This applies
+		 * atm to the late/early system suspend/resume handlers.
+		 */
+		WARN_ON_ONCE(ret < 0);
+		if (ret <= 0)
+			return false;
+	}
 
 	atomic_inc(&dev_priv->pm.wakeref_count);
 	assert_rpm_wakelock_held(dev_priv);
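
The balancing bug in the hunk above: with CONFIG_PM disabled, the old code
returned true before atomic_inc(&wakeref_count), while the matching put still
decremented, so assert_rpm_wakelock_held() fired. A toy model of the balanced
flow, with a plain int standing in for the kernel atomics:

#include <stdbool.h>
#include <stdio.h>

static int wakeref_count;
static const bool config_pm = false;	/* the !IS_ENABLED(CONFIG_PM) build */

static bool rpm_get_if_in_use(void)
{
	if (config_pm) {
		/* pm_runtime_get_if_in_use() handling elided; may
		 * return false without touching the count. */
	}
	wakeref_count++;	/* now reached in both configurations */
	return true;
}

static void rpm_put(void)
{
	wakeref_count--;	/* unconditional, as in the driver */
}

int main(void)
{
	if (rpm_get_if_in_use())
		rpm_put();
	printf("wakeref_count = %d (0 means balanced)\n", wakeref_count);
	return 0;
}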
@@ -1080,10 +1080,6 @@ force:
 	/* update display watermarks based on new power state */
 	radeon_bandwidth_update(rdev);
 
-	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
-	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
-	rdev->pm.dpm.single_display = single_display;
-
 	/* wait for the rings to drain */
 	for (i = 0; i < RADEON_NUM_RINGS; i++) {
 		struct radeon_ring *ring = &rdev->ring[i];
@@ -1102,6 +1098,10 @@ force:
 	/* update displays */
 	radeon_dpm_display_configuration_changed(rdev);
 
+	rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
+	rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
+	rdev->pm.dpm.single_display = single_display;
+
 	if (rdev->asic->dpm.force_performance_level) {
 		if (rdev->pm.dpm.thermal_active) {
 			enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
@@ -18,6 +18,7 @@
 #include <linux/host1x.h>
 #include <linux/of.h>
 #include <linux/slab.h>
+#include <linux/of_device.h>
 
 #include "bus.h"
 #include "dev.h"
@@ -394,6 +395,7 @@ static int host1x_device_add(struct host1x *host1x,
 	device->dev.coherent_dma_mask = host1x->dev->coherent_dma_mask;
 	device->dev.dma_mask = &device->dev.coherent_dma_mask;
 	dev_set_name(&device->dev, "%s", driver->driver.name);
+	of_dma_configure(&device->dev, host1x->dev->of_node);
 	device->dev.release = host1x_device_release;
 	device->dev.bus = &host1x_bus_type;
 	device->dev.parent = host1x->dev;
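
Context for the bus.c hunks: the host1x child is a virtual device with no
firmware node of its own, so its DMA setup (mask, coherency, DMA/IOMMU ops)
must be derived from the parent that actually sits on the bus, which is what
the of_dma_configure() call establishes at device creation. A userspace
sketch of that inheritance (struct and helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

struct dev_sketch {
	uint64_t coherent_dma_mask;
	uint64_t *dma_mask;
};

static void device_add_sketch(struct dev_sketch *child,
			      struct dev_sketch *parent)
{
	child->coherent_dma_mask = parent->coherent_dma_mask;
	child->dma_mask = &child->coherent_dma_mask;	/* as in the diff */
	/* of_dma_configure() additionally derives dma-ranges and the
	 * DMA/IOMMU ops from the parent's device-tree node. */
}

int main(void)
{
	struct dev_sketch parent = { .coherent_dma_mask = 0x3ffffffffULL };
	struct dev_sketch child = { 0 };

	device_add_sketch(&child, &parent);
	printf("child coherent mask: 0x%llx\n",
	       (unsigned long long)child.coherent_dma_mask);
	return 0;
}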
@@ -23,6 +23,7 @@
 #include <linux/of_device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/dma-mapping.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/host1x.h>
@@ -68,6 +69,7 @@ static const struct host1x_info host1x01_info = {
 	.nb_bases = 8,
 	.init = host1x01_init,
 	.sync_offset = 0x3000,
+	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct host1x_info host1x02_info = {
@@ -77,6 +79,7 @@ static const struct host1x_info host1x02_info = {
 	.nb_bases = 12,
 	.init = host1x02_init,
 	.sync_offset = 0x3000,
+	.dma_mask = DMA_BIT_MASK(32),
 };
 
 static const struct host1x_info host1x04_info = {
@@ -86,6 +89,7 @@ static const struct host1x_info host1x04_info = {
 	.nb_bases = 64,
 	.init = host1x04_init,
 	.sync_offset = 0x2100,
+	.dma_mask = DMA_BIT_MASK(34),
 };
 
 static const struct host1x_info host1x05_info = {
@@ -95,6 +99,7 @@ static const struct host1x_info host1x05_info = {
 	.nb_bases = 64,
 	.init = host1x05_init,
 	.sync_offset = 0x2100,
+	.dma_mask = DMA_BIT_MASK(34),
 };
 
 static struct of_device_id host1x_of_match[] = {
@@ -148,6 +153,8 @@ static int host1x_probe(struct platform_device *pdev)
 	if (IS_ERR(host->regs))
 		return PTR_ERR(host->regs);
 
+	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);
+
 	if (host->info->init) {
 		err = host->info->init(host);
 		if (err)
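
The probe hunk ties the dev.c series together: the matched host1x_info now
carries a per-SoC DMA mask, and probe applies it with
dma_set_mask_and_coherent(). The mask widths come straight from the diff
(32-bit for host1x01/02, 34-bit for host1x04/05); the macro below mirrors the
kernel's DMA_BIT_MASK, and the struct name is an illustrative stand-in:

#include <stdint.h>
#include <stdio.h>

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

struct host1x_info_sketch {
	uint64_t dma_mask;	/* mask of addressable memory */
};

static const struct host1x_info_sketch host1x01 = { .dma_mask = DMA_BIT_MASK(32) };
static const struct host1x_info_sketch host1x04 = { .dma_mask = DMA_BIT_MASK(34) };

int main(void)
{
	printf("host1x01 mask: 0x%llx\n", (unsigned long long)host1x01.dma_mask);
	printf("host1x04 mask: 0x%llx\n", (unsigned long long)host1x04.dma_mask);
	return 0;
}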
@@ -96,6 +96,7 @@ struct host1x_info {
 	int	nb_mlocks;	/* host1x: number of mlocks */
 	int	(*init)(struct host1x *); /* initialize per SoC ops */
 	int	sync_offset;
+	u64	dma_mask;	/* mask of addressable memory */
 };
 
 struct host1x {