Merge branch 'linux-4.11' of git://github.com/skeggsb/linux into drm-next
- Rework of the secure boot code, in preparation for GP10x secure boot.
- Improvements to channel recovery
- Initial power budget code
- Some preparation for an upcoming MMU rework (probably 4.12)
- Misc other fixes.

* 'linux-4.11' of git://github.com/skeggsb/linux: (88 commits)
  drm/nouveau/tmr: provide backtrace when a timeout is hit
  drm/nouveau/pci/g92: Fix rearm
  drm/nouveau/drm/therm/fan: add a fallback if no fan control is specified in the vbios
  drm/nouveau/hwmon: expose power_max and power_crit
  drm/nouveau/iccsense: Parse max and crit power level
  drm/nouveau/bios/power_budget: Add basic power budget parsing
  drm/nouveau/fifo/gk104-: preempt recovery
  drm/nouveau/fifo/gk104-: trigger mmu fault before attempting engine recovery
  drm/nouveau/fifo/gk104-: ACK SCHED_ERROR before attempting CTXSW_TIMEOUT recovery
  drm/nouveau/fifo/gk104-: directly use new recovery code for ctxsw timeout
  drm/nouveau/fifo/gk104-: directly use new recovery code for mmu faults
  drm/nouveau/fifo/gk104-: reset all engines a killed channel is still active on
  drm/nouveau/fifo/gk104-: refactor recovery code
  drm/nouveau/fifo/gk104-: better detection of chid when parsing engine status
  drm/nouveau/fifo/gk104-: separate out engine status parsing
  drm/nouveau/fifo: add an api for initiating channel recovery
  drm/nouveau/top: add function to translate subdev index to mmu fault id
  drm/nouveau/gr/gf100-: implement chsw_load() method
  drm/nouveau/gr: implement chsw_load() method
  drm/nouveau/core: add engine method to assist in determining chsw direction
  ...
commit 8fd4a62d87
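Nearly every hunk below is the same mechanical change: per-device state moves from drm->device to drm->client.device, so the nvif_device now hangs off the DRM driver's own client rather than sitting directly in struct nouveau_drm. A minimal sketch of the pattern; the struct layout here is assumed from the access paths in the hunks, not copied from the real headers:

    /* Sketch only: field layout inferred from the diff below. */
    struct nouveau_cli {
        struct nvif_device device;   /* per-client view of the GPU */
    };

    struct nouveau_drm {
        struct nouveau_cli client;   /* the DRM driver's own client */
    };

    /* before: struct nvif_object *device = &drm->device.object;        */
    /* after:  struct nvif_object *device = &drm->client.device.object; */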
@@ -198,7 +198,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
                int *burst, int *lwm)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     struct nv_fifo_info fifo_data;
     struct nv_sim_state sim_data;
     int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
@@ -227,7 +227,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
         sim_data.mem_page_miss = ((cfg1 >> 4) & 0xf) + ((cfg1 >> 31) & 0x1);
     }
 
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT)
         nv04_calc_arb(&fifo_data, &sim_data);
     else
         nv10_calc_arb(&fifo_data, &sim_data);
@@ -254,7 +254,7 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
 
-    if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN)
+    if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN)
         nv04_update_arb(dev, vclk, bpp, burst, lwm);
     else if ((dev->pdev->device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
              (dev->pdev->device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
@@ -113,8 +113,8 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
 {
     struct drm_device *dev = crtc->dev;
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_bios *bios = nvxx_bios(&drm->device);
-    struct nvkm_clk *clk = nvxx_clk(&drm->device);
+    struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
+    struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
     struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
     struct nv04_mode_state *state = &nv04_display(dev)->mode_reg;
     struct nv04_crtc_reg *regp = &state->crtc_reg[nv_crtc->index];
@@ -138,7 +138,7 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
      * has yet been observed in allowing the use a single stage pll on all
      * nv43 however.  the behaviour of single stage use is untested on nv40
      */
-    if (drm->device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
+    if (drm->client.device.info.chipset > 0x40 && dot_clock <= (pll_lim.vco1.max_freq / 2))
         memset(&pll_lim.vco2, 0, sizeof(pll_lim.vco2));
 
 
@@ -148,10 +148,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
     state->pllsel &= PLLSEL_VPLL1_MASK | PLLSEL_VPLL2_MASK | PLLSEL_TV_MASK;
 
     /* The blob uses this always, so let's do the same */
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_USE_VPLL2_TRUE;
     /* again nv40 and some nv43 act more like nv3x as described above */
-    if (drm->device.info.chipset < 0x41)
+    if (drm->client.device.info.chipset < 0x41)
         state->pllsel |= NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_MPLL |
                          NV_PRAMDAC_PLL_COEFF_SELECT_SOURCE_PROG_NVPLL;
     state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
@@ -270,7 +270,7 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
         horizEnd = horizTotal - 2;
         horizBlankEnd = horizTotal + 4;
 #if 0
-        if (dev->overlayAdaptor && drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+        if (dev->overlayAdaptor && drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
             /* This reportedly works around some video overlay bandwidth problems */
             horizTotal += 2;
 #endif
@@ -505,7 +505,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
     regp->cursor_cfg = NV_PCRTC_CURSOR_CONFIG_CUR_LINES_64 |
                        NV_PCRTC_CURSOR_CONFIG_CUR_PIXELS_64 |
                        NV_PCRTC_CURSOR_CONFIG_ADDRESS_SPACE_PNVM;
-    if (drm->device.info.chipset >= 0x11)
+    if (drm->client.device.info.chipset >= 0x11)
         regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_CUR_BPP_32;
     if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
         regp->cursor_cfg |= NV_PCRTC_CURSOR_CONFIG_DOUBLE_SCAN_ENABLE;
@@ -546,26 +546,26 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
      * 1 << 30 on 0x60.830), for no apparent reason */
     regp->CRTC[NV_CIO_CRE_59] = off_chip_digital;
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
         regp->CRTC[0x9f] = off_chip_digital ? 0x11 : 0x1;
 
     regp->crtc_830 = mode->crtc_vdisplay - 3;
     regp->crtc_834 = mode->crtc_vdisplay - 1;
 
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         /* This is what the blob does */
         regp->crtc_850 = NVReadCRTC(dev, 0, NV_PCRTC_850);
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
         regp->gpio_ext = NVReadCRTC(dev, 0, NV_PCRTC_GPIO_EXT);
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
         regp->crtc_cfg = NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC;
     else
         regp->crtc_cfg = NV04_PCRTC_CONFIG_START_ADDRESS_HSYNC;
 
     /* Some misc regs */
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
         regp->CRTC[NV_CIO_CRE_85] = 0xFF;
         regp->CRTC[NV_CIO_CRE_86] = 0x1;
     }
@@ -577,7 +577,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
 
     /* Generic PRAMDAC regs */
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
         /* Only bit that bios and blob set. */
         regp->nv10_cursync = (1 << 25);
 
@@ -586,7 +586,7 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode)
                             NV_PRAMDAC_GENERAL_CONTROL_PIXMIX_ON;
     if (fb->format->depth == 16)
         regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_ALT_MODE_SEL;
-    if (drm->device.info.chipset >= 0x11)
+    if (drm->client.device.info.chipset >= 0x11)
         regp->ramdac_gen_ctrl |= NV_PRAMDAC_GENERAL_CONTROL_PIPE_LONG;
 
     regp->ramdac_630 = 0; /* turn off green mode (tv test pattern?) */
@@ -649,7 +649,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
 
     nv_crtc_mode_set_vga(crtc, adjusted_mode);
     /* calculated in nv04_dfp_prepare, nv40 needs it written before calculating PLLs */
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, nv04_display(dev)->mode_reg.sel_clk);
     nv_crtc_mode_set_regs(crtc, adjusted_mode);
     nv_crtc_calc_state_ext(crtc, mode, adjusted_mode->clock);
@@ -710,7 +710,7 @@ static void nv_crtc_prepare(struct drm_crtc *crtc)
 
     /* Some more preparation. */
     NVWriteCRTC(dev, nv_crtc->index, NV_PCRTC_CONFIG, NV_PCRTC_CONFIG_START_ADDRESS_NON_VGA);
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
         uint32_t reg900 = NVReadRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900);
         NVWriteRAMDAC(dev, nv_crtc->index, NV_PRAMDAC_900, reg900 & ~0x10000);
     }
@@ -886,7 +886,7 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc,
     crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FF_INDEX);
     crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_FFLWM__INDEX);
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN) {
         regp->CRTC[NV_CIO_CRE_47] = arb_lwm >> 8;
         crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_47);
     }
@@ -967,7 +967,7 @@ static void nv11_cursor_upload(struct drm_device *dev, struct nouveau_bo *src,
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
 
-    if (drm->device.info.chipset == 0x11) {
+    if (drm->client.device.info.chipset == 0x11) {
         pixel = ((pixel & 0x000000ff) << 24) |
                 ((pixel & 0x0000ff00) << 8) |
                 ((pixel & 0x00ff0000) >> 8) |
@@ -1008,7 +1008,7 @@ nv04_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
     if (ret)
         goto out;
 
-    if (drm->device.info.chipset >= 0x11)
+    if (drm->client.device.info.chipset >= 0x11)
         nv11_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
     else
         nv04_cursor_upload(dev, cursor, nv_crtc->cursor.nvbo);
@@ -1124,8 +1124,9 @@ nv04_crtc_create(struct drm_device *dev, int crtc_num)
     drm_crtc_helper_add(&nv_crtc->base, &nv04_crtc_helper_funcs);
     drm_mode_crtc_set_gamma_size(&nv_crtc->base, 256);
 
-    ret = nouveau_bo_new(dev, 64*64*4, 0x100, TTM_PL_FLAG_VRAM,
-                         0, 0x0000, NULL, NULL, &nv_crtc->cursor.nvbo);
+    ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64*64*4, 0x100,
+                         TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL,
+                         &nv_crtc->cursor.nvbo);
     if (!ret) {
         ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM, false);
         if (!ret) {
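The last hunk above is the one spot in this file where the change is more than a rename: nouveau_bo_new() now takes the allocating client instead of the drm_device, so the cursor buffer is owned by a client from the moment it is created. A hedged sketch of an adapted call site, with argument meanings inferred from the hunk itself:

    struct nouveau_bo *nvbo = NULL;
    int ret;

    /* 64x64 ARGB cursor: 64*64*4 bytes, 0x100 alignment, placed in VRAM;
     * the client argument replaces the old drm_device argument. */
    ret = nouveau_bo_new(&nouveau_drm(dev)->client, 64 * 64 * 4, 0x100,
                         TTM_PL_FLAG_VRAM, 0, 0x0000, NULL, NULL, &nvbo);
    if (ret)
        return ret;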
@@ -55,7 +55,7 @@ nv04_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
     crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
     crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
     crtc_wr_cio_state(crtc, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         nv_fix_nv40_hw_cursor(dev, nv_crtc->index);
 }
 
@@ -66,7 +66,7 @@ int nv04_dac_output_offset(struct drm_encoder *encoder)
 static int sample_load_twice(struct drm_device *dev, bool sense[2])
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvif_object *device = &drm->device.object;
+    struct nvif_object *device = &drm->client.device.object;
     int i;
 
     for (i = 0; i < 2; i++) {
@@ -80,19 +80,19 @@ static int sample_load_twice(struct drm_device *dev, bool sense[2])
          * use a 10ms timeout (guards against crtc being inactive, in
          * which case blank state would never change)
          */
-        if (nvif_msec(&drm->device, 10,
+        if (nvif_msec(&drm->client.device, 10,
             if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
                 break;
         ) < 0)
             return -EBUSY;
 
-        if (nvif_msec(&drm->device, 10,
+        if (nvif_msec(&drm->client.device, 10,
             if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
                 break;
         ) < 0)
             return -EBUSY;
 
-        if (nvif_msec(&drm->device, 10,
+        if (nvif_msec(&drm->client.device, 10,
             if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
                 break;
         ) < 0)
@@ -133,7 +133,7 @@ static enum drm_connector_status nv04_dac_detect(struct drm_encoder *encoder,
                                                  struct drm_connector *connector)
 {
     struct drm_device *dev = encoder->dev;
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     struct nouveau_drm *drm = nouveau_drm(dev);
     uint8_t saved_seq1, saved_pi, saved_rpc1, saved_cr_mode;
     uint8_t saved_palette0[3], saved_palette_mask;
@@ -236,8 +236,8 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 {
     struct drm_device *dev = encoder->dev;
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
-    struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
+    struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
     struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
     uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
     uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
@@ -288,7 +288,7 @@ uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
     /* nv driver and nv31 use 0xfffffeee, nv34 and 6600 use 0xfffffece */
     routput = (saved_routput & 0xfffffece) | head << 8;
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE) {
         if (dcb->type == DCB_OUTPUT_TV)
             routput |= 0x1a << 16;
         else
@@ -403,7 +403,7 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
     }
 
     /* This could use refinement for flatpanels, but it should work this way */
-    if (drm->device.info.chipset < 0x44)
+    if (drm->client.device.info.chipset < 0x44)
         NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
     else
         NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
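The nvif_msec() calls in sample_load_twice() look odd because the macro takes a statement block as its last argument: the block is re-evaluated until it executes break or the timeout (in milliseconds) expires, and the expression yields a negative value on timeout. A sketch of the idiom, assuming only the behaviour visible in the hunk above:

    /* Poll until INP0 bit 0 clears, waiting at most 10ms; the body is
     * pasted into the macro's polling loop and `break` ends the wait. */
    if (nvif_msec(&drm->client.device, 10,
        if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 1))
            break;
    ) < 0)
        return -EBUSY;  /* timed out: blank state never changed */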
@@ -281,7 +281,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
                              struct drm_display_mode *adjusted_mode)
 {
     struct drm_device *dev = encoder->dev;
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     struct nouveau_drm *drm = nouveau_drm(dev);
     struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
     struct nv04_crtc_reg *regp = &nv04_display(dev)->mode_reg.crtc_reg[nv_crtc->index];
@@ -417,7 +417,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
     if ((nv_connector->dithering_mode == DITHERING_MODE_ON) ||
         (nv_connector->dithering_mode == DITHERING_MODE_AUTO &&
          fb->format->depth > connector->display_info.bpc * 3)) {
-        if (drm->device.info.chipset == 0x11)
+        if (drm->client.device.info.chipset == 0x11)
             regp->dither = savep->dither | 0x00010000;
         else {
             int i;
@@ -428,7 +428,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
             }
         }
     } else {
-        if (drm->device.info.chipset != 0x11) {
+        if (drm->client.device.info.chipset != 0x11) {
             /* reset them */
             int i;
             for (i = 0; i < 3; i++) {
@@ -464,7 +464,7 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
         NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
 
     /* This could use refinement for flatpanels, but it should work this way */
-    if (drm->device.info.chipset < 0x44)
+    if (drm->client.device.info.chipset < 0x44)
         NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000);
     else
         NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000);
@@ -486,7 +486,7 @@ static void nv04_dfp_update_backlight(struct drm_encoder *encoder, int mode)
 {
 #ifdef __powerpc__
     struct drm_device *dev = encoder->dev;
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
 
     /* BIOS scripts usually take care of the backlight, thanks
      * Apple for your consistency.
@@ -624,7 +624,7 @@ static void nv04_tmds_slave_init(struct drm_encoder *encoder)
     struct drm_device *dev = encoder->dev;
     struct dcb_output *dcb = nouveau_encoder(encoder)->dcb;
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+    struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
     struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, NVKM_I2C_BUS_PRI);
     struct nvkm_i2c_bus_probe info[] = {
         {
@@ -35,7 +35,7 @@ int
 nv04_display_create(struct drm_device *dev)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+    struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
     struct dcb_table *dcb = &drm->vbios.dcb;
     struct drm_connector *connector, *ct;
     struct drm_encoder *encoder;
@@ -48,7 +48,7 @@ nv04_display_create(struct drm_device *dev)
     if (!disp)
         return -ENOMEM;
 
-    nvif_object_map(&drm->device.object);
+    nvif_object_map(&drm->client.device.object);
 
     nouveau_display(dev)->priv = disp;
     nouveau_display(dev)->dtor = nv04_display_destroy;
@@ -139,7 +139,7 @@ nv04_display_destroy(struct drm_device *dev)
     nouveau_display(dev)->priv = NULL;
     kfree(disp);
 
-    nvif_object_unmap(&drm->device.object);
+    nvif_object_unmap(&drm->client.device.object);
 }
 
 int
@@ -129,7 +129,7 @@ nv_two_heads(struct drm_device *dev)
     struct nouveau_drm *drm = nouveau_drm(dev);
     const int impl = dev->pdev->device & 0x0ff0;
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS && impl != 0x0100 &&
         impl != 0x0150 && impl != 0x01a0 && impl != 0x0200)
         return true;
 
@@ -148,7 +148,7 @@ nv_two_reg_pll(struct drm_device *dev)
     struct nouveau_drm *drm = nouveau_drm(dev);
     const int impl = dev->pdev->device & 0x0ff0;
 
-    if (impl == 0x0310 || impl == 0x0340 || drm->device.info.family >= NV_DEVICE_INFO_V0_CURIE)
+    if (impl == 0x0310 || impl == 0x0340 || drm->client.device.info.family >= NV_DEVICE_INFO_V0_CURIE)
         return true;
     return false;
 }
@@ -170,7 +170,7 @@ nouveau_bios_run_init_table(struct drm_device *dev, u16 table,
                            struct dcb_output *outp, int crtc)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_bios *bios = nvxx_bios(&drm->device);
+    struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
     struct nvbios_init init = {
         .subdev = &bios->subdev,
         .bios = bios,
@@ -89,7 +89,7 @@ NVSetOwner(struct drm_device *dev, int owner)
     if (owner == 1)
         owner *= 3;
 
-    if (drm->device.info.chipset == 0x11) {
+    if (drm->client.device.info.chipset == 0x11) {
         /* This might seem stupid, but the blob does it and
          * omitting it often locks the system up.
          */
@@ -100,7 +100,7 @@ NVSetOwner(struct drm_device *dev, int owner)
     /* CR44 is always changed on CRTC0 */
     NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);
 
-    if (drm->device.info.chipset == 0x11) { /* set me harder */
+    if (drm->client.device.info.chipset == 0x11) { /* set me harder */
         NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
         NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
     }
@@ -149,7 +149,7 @@ nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
         pllvals->NM1 = pll1 & 0xffff;
         if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
             pllvals->NM2 = pll2 & 0xffff;
-        else if (drm->device.info.chipset == 0x30 || drm->device.info.chipset == 0x35) {
+        else if (drm->client.device.info.chipset == 0x30 || drm->client.device.info.chipset == 0x35) {
            pllvals->M1 &= 0xf; /* only 4 bits */
            if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
                pllvals->M2 = (pll1 >> 4) & 0x7;
@@ -165,8 +165,8 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
                       struct nvkm_pll_vals *pllvals)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvif_object *device = &drm->device.object;
-    struct nvkm_bios *bios = nvxx_bios(&drm->device);
+    struct nvif_object *device = &drm->client.device.object;
+    struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
     uint32_t reg1, pll1, pll2 = 0;
     struct nvbios_pll pll_lim;
     int ret;
@@ -184,7 +184,7 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
         pll2 = nvif_rd32(device, reg2);
     }
 
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
         uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
 
         /* check whether vpll has been forced into single stage mode */
@@ -252,7 +252,7 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
      */
 
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvif_device *device = &drm->device;
+    struct nvif_device *device = &drm->client.device;
     struct nvkm_clk *clk = nvxx_clk(device);
     struct nvkm_bios *bios = nvxx_bios(device);
     struct nvbios_pll pll_lim;
@@ -391,21 +391,21 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
     struct nv04_crtc_reg *regp = &state->crtc_reg[head];
     int i;
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
         regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
 
     nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
     state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
     if (nv_two_heads(dev))
         state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
-    if (drm->device.info.chipset == 0x11)
+    if (drm->client.device.info.chipset == 0x11)
         regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);
 
     regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);
 
     if (nv_gf4_disp_arch(dev))
         regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
-    if (drm->device.info.chipset >= 0x30)
+    if (drm->client.device.info.chipset >= 0x30)
         regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);
 
     regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
@@ -447,7 +447,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
     if (nv_gf4_disp_arch(dev))
         regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);
 
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
         regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
         regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
         regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);
@@ -463,26 +463,26 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
                     struct nv04_mode_state *state)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_clk *clk = nvxx_clk(&drm->device);
+    struct nvkm_clk *clk = nvxx_clk(&drm->client.device);
     struct nv04_crtc_reg *regp = &state->crtc_reg[head];
     uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
     int i;
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS)
         NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);
 
     clk->pll_prog(clk, pllreg, &regp->pllvals);
     NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
     if (nv_two_heads(dev))
         NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
-    if (drm->device.info.chipset == 0x11)
+    if (drm->client.device.info.chipset == 0x11)
         NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);
 
     NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);
 
     if (nv_gf4_disp_arch(dev))
         NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
-    if (drm->device.info.chipset >= 0x30)
+    if (drm->client.device.info.chipset >= 0x30)
         NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);
 
     NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
@@ -519,7 +519,7 @@ nv_load_state_ramdac(struct drm_device *dev, int head,
     if (nv_gf4_disp_arch(dev))
         NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);
 
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
         NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
         NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
         NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);
@@ -600,10 +600,10 @@ nv_save_state_ext(struct drm_device *dev, int head,
     rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
     rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
         rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
         rd_cio_state(dev, head, regp, 0x9f);
 
     rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
@@ -612,14 +612,14 @@ nv_save_state_ext(struct drm_device *dev, int head,
     rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
     rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
         regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
         regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);
 
-        if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
            regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);
 
-        if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+        if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
            regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);
 
        if (nv_two_heads(dev))
@@ -631,7 +631,7 @@ nv_save_state_ext(struct drm_device *dev, int head,
 
     rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
     rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
         rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
         rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
         rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -660,12 +660,12 @@ nv_load_state_ext(struct drm_device *dev, int head,
                  struct nv04_mode_state *state)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvif_object *device = &drm->device.object;
+    struct nvif_object *device = &drm->client.device.object;
     struct nv04_crtc_reg *regp = &state->crtc_reg[head];
     uint32_t reg900;
     int i;
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
         if (nv_two_heads(dev))
            /* setting ENGINE_CTRL (EC) *must* come before
             * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
@@ -677,20 +677,20 @@ nv_load_state_ext(struct drm_device *dev, int head,
         nvif_wr32(device, NV_PVIDEO_INTR_EN, 0);
         nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
         nvif_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
-        nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->device.info.ram_size - 1);
-        nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->device.info.ram_size - 1);
-        nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->device.info.ram_size - 1);
-        nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->device.info.ram_size - 1);
+        nvif_wr32(device, NV_PVIDEO_LIMIT(0), drm->client.device.info.ram_size - 1);
+        nvif_wr32(device, NV_PVIDEO_LIMIT(1), drm->client.device.info.ram_size - 1);
+        nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), drm->client.device.info.ram_size - 1);
+        nvif_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), drm->client.device.info.ram_size - 1);
         nvif_wr32(device, NV_PBUS_POWERCTRL_2, 0);
 
         NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
         NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
         NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);
 
-        if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+        if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
            NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);
 
-        if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE) {
+        if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE) {
            NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);
 
            reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
@@ -713,23 +713,23 @@ nv_load_state_ext(struct drm_device *dev, int head,
     wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
     wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KELVIN)
         wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE)
         wr_cio_state(dev, head, regp, 0x9f);
 
     wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
     wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
     wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
     wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         nv_fix_nv40_hw_cursor(dev, head);
     wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);
 
     wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
     wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
         wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
         wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
         wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
@@ -737,14 +737,14 @@ nv_load_state_ext(struct drm_device *dev, int head,
     }
     /* NV11 and NV20 stop at 0x52. */
     if (nv_gf4_disp_arch(dev)) {
-        if (drm->device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
+        if (drm->client.device.info.family < NV_DEVICE_INFO_V0_KELVIN) {
            /* Not waiting for vertical retrace before modifying
              CRE_53/CRE_54 causes lockups. */
-           nvif_msec(&drm->device, 650,
+           nvif_msec(&drm->client.device, 650,
                if ( (nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
                    break;
           );
-           nvif_msec(&drm->device, 650,
+           nvif_msec(&drm->client.device, 650,
                if (!(nvif_rd32(device, NV_PRMCIO_INP0__COLOR) & 8))
                    break;
          );
@@ -770,7 +770,7 @@ static void
 nv_save_state_palette(struct drm_device *dev, int head,
                      struct nv04_mode_state *state)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     int head_offset = head * NV_PRMDIO_SIZE, i;
 
     nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -789,7 +789,7 @@ void
 nouveau_hw_load_state_palette(struct drm_device *dev, int head,
                              struct nv04_mode_state *state)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     int head_offset = head * NV_PRMDIO_SIZE, i;
 
     nvif_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
@@ -809,7 +809,7 @@ void nouveau_hw_save_state(struct drm_device *dev, int head,
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
 
-    if (drm->device.info.chipset == 0x11)
+    if (drm->client.device.info.chipset == 0x11)
         /* NB: no attempt is made to restore the bad pll later on */
         nouveau_hw_fix_bad_vpll(dev, head);
     nv_save_state_ramdac(dev, head, state);
@@ -60,7 +60,7 @@ extern void nouveau_calc_arb(struct drm_device *, int vclk, int bpp,
 static inline uint32_t NVReadCRTC(struct drm_device *dev,
                                  int head, uint32_t reg)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     uint32_t val;
     if (head)
         reg += NV_PCRTC0_SIZE;
@@ -71,7 +71,7 @@ static inline uint32_t NVReadCRTC(struct drm_device *dev,
 static inline void NVWriteCRTC(struct drm_device *dev,
                               int head, uint32_t reg, uint32_t val)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     if (head)
         reg += NV_PCRTC0_SIZE;
     nvif_wr32(device, reg, val);
@@ -80,7 +80,7 @@ static inline void NVWriteCRTC(struct drm_device *dev,
 static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
                                    int head, uint32_t reg)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     uint32_t val;
     if (head)
         reg += NV_PRAMDAC0_SIZE;
@@ -91,7 +91,7 @@ static inline uint32_t NVReadRAMDAC(struct drm_device *dev,
 static inline void NVWriteRAMDAC(struct drm_device *dev,
                                 int head, uint32_t reg, uint32_t val)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     if (head)
         reg += NV_PRAMDAC0_SIZE;
     nvif_wr32(device, reg, val);
@@ -120,7 +120,7 @@ static inline void nv_write_tmds(struct drm_device *dev,
 static inline void NVWriteVgaCrtc(struct drm_device *dev,
                                  int head, uint8_t index, uint8_t value)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
     nvif_wr08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE, value);
 }
@@ -128,7 +128,7 @@ static inline void NVWriteVgaCrtc(struct drm_device *dev,
 static inline uint8_t NVReadVgaCrtc(struct drm_device *dev,
                                    int head, uint8_t index)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     uint8_t val;
     nvif_wr08(device, NV_PRMCIO_CRX__COLOR + head * NV_PRMCIO_SIZE, index);
     val = nvif_rd08(device, NV_PRMCIO_CR__COLOR + head * NV_PRMCIO_SIZE);
@@ -165,13 +165,13 @@ static inline uint8_t NVReadVgaCrtc5758(struct drm_device *dev, int head, uint8_
 static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
                                   int head, uint32_t reg)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     struct nouveau_drm *drm = nouveau_drm(dev);
     uint8_t val;
 
     /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
      * NVSetOwner for the relevant head to be programmed */
-    if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         reg += NV_PRMVIO_SIZE;
 
     val = nvif_rd08(device, reg);
@@ -181,12 +181,12 @@ static inline uint8_t NVReadPRMVIO(struct drm_device *dev,
 static inline void NVWritePRMVIO(struct drm_device *dev,
                                 int head, uint32_t reg, uint8_t value)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     struct nouveau_drm *drm = nouveau_drm(dev);
 
     /* Only NV4x have two pvio ranges; other twoHeads cards MUST call
      * NVSetOwner for the relevant head to be programmed */
-    if (head && drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (head && drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         reg += NV_PRMVIO_SIZE;
 
     nvif_wr08(device, reg, value);
@@ -194,14 +194,14 @@ static inline void NVWritePRMVIO(struct drm_device *dev,
 
 static inline void NVSetEnablePalette(struct drm_device *dev, int head, bool enable)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
     nvif_wr08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE, enable ? 0 : 0x20);
 }
 
 static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     nvif_rd08(device, NV_PRMCIO_INP0__COLOR + head * NV_PRMCIO_SIZE);
     return !(nvif_rd08(device, NV_PRMCIO_ARX + head * NV_PRMCIO_SIZE) & 0x20);
 }
@@ -209,7 +209,7 @@ static inline bool NVGetEnablePalette(struct drm_device *dev, int head)
 static inline void NVWriteVgaAttr(struct drm_device *dev,
                                  int head, uint8_t index, uint8_t value)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     if (NVGetEnablePalette(dev, head))
         index &= ~0x20;
     else
@@ -223,7 +223,7 @@ static inline void NVWriteVgaAttr(struct drm_device *dev,
 static inline uint8_t NVReadVgaAttr(struct drm_device *dev,
                                    int head, uint8_t index)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     uint8_t val;
     if (NVGetEnablePalette(dev, head))
         index &= ~0x20;
@@ -259,10 +259,10 @@ static inline void NVVgaProtect(struct drm_device *dev, int head, bool protect)
 static inline bool
 nv_heads_tied(struct drm_device *dev)
 {
-    struct nvif_object *device = &nouveau_drm(dev)->device.object;
+    struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
     struct nouveau_drm *drm = nouveau_drm(dev);
 
-    if (drm->device.info.chipset == 0x11)
+    if (drm->client.device.info.chipset == 0x11)
         return !!(nvif_rd32(device, NV_PBUS_DEBUG_1) & (1 << 28));
 
     return NVReadVgaCrtc(dev, 0, NV_CIO_CRE_44) & 0x4;
@@ -318,7 +318,7 @@ NVLockVgaCrtcs(struct drm_device *dev, bool lock)
     NVWriteVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX,
                   lock ? NV_CIO_SR_LOCK_VALUE : NV_CIO_SR_UNLOCK_RW_VALUE);
     /* NV11 has independently lockable extended crtcs, except when tied */
-    if (drm->device.info.chipset == 0x11 && !nv_heads_tied(dev))
+    if (drm->client.device.info.chipset == 0x11 && !nv_heads_tied(dev))
         NVWriteVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX,
                       lock ? NV_CIO_SR_LOCK_VALUE :
                              NV_CIO_SR_UNLOCK_RW_VALUE);
@@ -335,7 +335,7 @@ static inline int nv_cursor_width(struct drm_device *dev)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
 
-    return drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
+    return drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS ? NV10_CURSOR_SIZE : NV04_CURSOR_SIZE;
 }
 
 static inline void
@@ -357,7 +357,7 @@ nv_set_crtc_base(struct drm_device *dev, int head, uint32_t offset)
 
     NVWriteCRTC(dev, head, NV_PCRTC_START, offset);
 
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT) {
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT) {
         /*
          * Hilarious, the 24th bit doesn't want to stick to
          * PCRTC_START...
@@ -382,7 +382,7 @@ nv_show_cursor(struct drm_device *dev, int head, bool show)
         *curctl1 &= ~MASK(NV_CIO_CRE_HCUR_ADDR1_ENABLE);
     NVWriteVgaCrtc(dev, head, NV_CIO_CRE_HCUR_ADDR1_INDEX, *curctl1);
 
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         nv_fix_nv40_hw_cursor(dev, head);
 }
 
@@ -398,7 +398,7 @@ nv_pitch_align(struct drm_device *dev, uint32_t width, int bpp)
         bpp = 8;
 
     /* Alignment requirements taken from the Haiku driver */
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_TNT)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_TNT)
         mask = 128 / bpp - 1;
     else
         mask = 512 / bpp - 1;
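Every helper in this header follows one shape: resolve the nvif_object behind the DRM client's device, offset the register when programming the second head (the hardware mirrors the CRTC/RAMDAC register blocks per head), then issue a plain MMIO access. A distilled, hypothetical version of that pattern; nv_head_read() is not in the tree, but the nvif calls are the ones used above:

    static inline uint32_t
    nv_head_read(struct drm_device *dev, int head, uint32_t reg,
                 uint32_t head_stride)
    {
        /* the resolution step every helper above performs */
        struct nvif_object *device = &nouveau_drm(dev)->client.device.object;
        if (head)
            reg += head_stride;  /* e.g. NV_PCRTC0_SIZE or NV_PRAMDAC0_SIZE */
        return nvif_rd32(device, reg);
    }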
@@ -97,7 +97,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                  uint32_t src_w, uint32_t src_h)
 {
     struct nouveau_drm *drm = nouveau_drm(plane->dev);
-    struct nvif_object *dev = &drm->device.object;
+    struct nvif_object *dev = &drm->client.device.object;
     struct nouveau_plane *nv_plane =
         container_of(plane, struct nouveau_plane, base);
     struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -119,7 +119,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
     if (format > 0xffff)
         return -ERANGE;
 
-    if (drm->device.info.chipset >= 0x30) {
+    if (drm->client.device.info.chipset >= 0x30) {
         if (crtc_w < (src_w >> 1) || crtc_h < (src_h >> 1))
             return -ERANGE;
     } else {
@@ -174,7 +174,7 @@ nv10_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 static int
 nv10_disable_plane(struct drm_plane *plane)
 {
-    struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+    struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
     struct nouveau_plane *nv_plane =
         container_of(plane, struct nouveau_plane, base);
 
@@ -198,7 +198,7 @@ nv_destroy_plane(struct drm_plane *plane)
 static void
 nv10_set_params(struct nouveau_plane *plane)
 {
-    struct nvif_object *dev = &nouveau_drm(plane->base.dev)->device.object;
+    struct nvif_object *dev = &nouveau_drm(plane->base.dev)->client.device.object;
     u32 luma = (plane->brightness - 512) << 16 | plane->contrast;
     u32 chroma = ((sin_mul(plane->hue, plane->saturation) & 0xffff) << 16) |
         (cos_mul(plane->hue, plane->saturation) & 0xffff);
@@ -268,7 +268,7 @@ nv10_overlay_init(struct drm_device *device)
     if (!plane)
         return;
 
-    switch (drm->device.info.chipset) {
+    switch (drm->client.device.info.chipset) {
     case 0x10:
     case 0x11:
     case 0x15:
@@ -347,7 +347,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
                  uint32_t src_x, uint32_t src_y,
                  uint32_t src_w, uint32_t src_h)
 {
-    struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+    struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
     struct nouveau_plane *nv_plane =
         container_of(plane, struct nouveau_plane, base);
     struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb);
@@ -427,7 +427,7 @@ nv04_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
 static int
 nv04_disable_plane(struct drm_plane *plane)
 {
-    struct nvif_object *dev = &nouveau_drm(plane->dev)->device.object;
+    struct nvif_object *dev = &nouveau_drm(plane->dev)->client.device.object;
     struct nouveau_plane *nv_plane =
         container_of(plane, struct nouveau_plane, base);
 
@@ -495,7 +495,7 @@ err:
 void
 nouveau_overlay_init(struct drm_device *device)
 {
-    struct nvif_device *dev = &nouveau_drm(device)->device;
+    struct nvif_device *dev = &nouveau_drm(device)->client.device;
     if (dev->info.chipset < 0x10)
         nv04_overlay_init(device);
     else if (dev->info.chipset <= 0x40)
@@ -54,7 +54,7 @@ static struct nvkm_i2c_bus_probe nv04_tv_encoder_info[] = {
 int nv04_tv_identify(struct drm_device *dev, int i2c_index)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+    struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
     struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, i2c_index);
     if (bus) {
         return nvkm_i2c_bus_probe(bus, "TV encoder",
@@ -206,7 +206,7 @@ nv04_tv_create(struct drm_connector *connector, struct dcb_output *entry)
     struct drm_encoder *encoder;
     struct drm_device *dev = connector->dev;
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+    struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
     struct nvkm_i2c_bus *bus = nvkm_i2c_bus_find(i2c, entry->i2c_index);
     int type, ret;
 
@@ -46,7 +46,7 @@ static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 {
     struct drm_device *dev = encoder->dev;
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+    struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
     uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
     uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
         fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
@@ -130,7 +130,7 @@ static bool
 get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask)
 {
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_device *device = nvxx_device(&drm->device);
+    struct nvkm_device *device = nvxx_device(&drm->client.device);
 
     if (device->quirk && device->quirk->tv_pin_mask) {
         *pin_mask = device->quirk->tv_pin_mask;
@@ -154,8 +154,8 @@ nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
         return connector_status_disconnected;
 
     if (reliable) {
-        if (drm->device.info.chipset == 0x42 ||
-            drm->device.info.chipset == 0x43)
+        if (drm->client.device.info.chipset == 0x42 ||
+            drm->client.device.info.chipset == 0x43)
             tv_enc->pin_mask =
                 nv42_tv_sample_load(encoder) >> 28 & 0xe;
         else
@@ -362,7 +362,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
 {
     struct drm_device *dev = encoder->dev;
     struct nouveau_drm *drm = nouveau_drm(dev);
-    struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+    struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
     struct nv17_tv_state *regs = &to_tv_enc(encoder)->state;
     struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder);
 
@@ -435,7 +435,7 @@ static void nv17_tv_prepare(struct drm_encoder *encoder)
     /* Set the DACCLK register */
     dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1;
 
-    if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE)
+    if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE)
         dacclk |= 0x1a << 16;
 
     if (tv_norm->kind == CTV_ENC_MODE) {
@@ -492,7 +492,7 @@ static void nv17_tv_mode_set(struct drm_encoder *encoder,
         tv_regs->ptv_614 = 0x13;
     }
 
-    if (drm->device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
+    if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_RANKINE) {
         tv_regs->ptv_500 = 0xe8e0;
         tv_regs->ptv_504 = 0x1710;
         tv_regs->ptv_604 = 0x0;
@@ -587,7 +587,7 @@ static void nv17_tv_commit(struct drm_encoder *encoder)
     nv17_tv_state_load(dev, &to_tv_enc(encoder)->state);
 
     /* This could use refinement for flatpanels, but it should work */
-    if (drm->device.info.chipset < 0x44)
+    if (drm->client.device.info.chipset < 0x44)
         NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL +
                       nv04_dac_output_offset(encoder),
                       0xf0000000);
@@ -130,13 +130,13 @@ void nv17_ctv_update_rescaler(struct drm_encoder *encoder);
 static inline void nv_write_ptv(struct drm_device *dev, uint32_t reg,
                                uint32_t val)
 {
-    struct nvif_device *device = &nouveau_drm(dev)->device;
+    struct nvif_device *device = &nouveau_drm(dev)->client.device;
     nvif_wr32(&device->object, reg, val);
 }
 
 static inline uint32_t nv_read_ptv(struct drm_device *dev, uint32_t reg)
 {
-    struct nvif_device *device = &nouveau_drm(dev)->device;
+    struct nvif_device *device = &nouveau_drm(dev)->client.device;
     return nvif_rd32(&device->object, reg);
 }
 
@@ -10,5 +10,5 @@ struct g82_channel_dma_v0 {
     __u64 offset;
 };
 
-#define G82_CHANNEL_DMA_V0_NTFY_UEVENT 0x00
+#define NV826E_V0_NTFY_NON_STALL_INTERRUPT 0x00
 #endif

@@ -11,5 +11,5 @@ struct g82_channel_gpfifo_v0 {
     __u64 vm;
 };
 
-#define G82_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00
+#define NV826F_V0_NTFY_NON_STALL_INTERRUPT 0x00
 #endif

@@ -10,5 +10,6 @@ struct fermi_channel_gpfifo_v0 {
     __u64 vm;
 };
 
-#define FERMI_CHANNEL_GPFIFO_V0_NTFY_UEVENT 0x00
+#define NV906F_V0_NTFY_NON_STALL_INTERRUPT 0x00
+#define NV906F_V0_NTFY_KILLED 0x01
 #endif

@@ -25,5 +25,6 @@ struct kepler_channel_gpfifo_a_v0 {
     __u64 vm;
 };
 
-#define NVA06F_V0_NTFY_UEVENT 0x00
+#define NVA06F_V0_NTFY_NON_STALL_INTERRUPT 0x00
+#define NVA06F_V0_NTFY_KILLED 0x01
 #endif
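These four header hunks rename the channel notifier index from *_NTFY_UEVENT to *_NTFY_NON_STALL_INTERRUPT, and the Fermi/Kepler classes gain a second KILLED notifier that the new channel-recovery code can use to tell userspace its channel was killed. Out-of-tree users of the old names could bridge the rename with a shim like this (purely illustrative, not part of the patch):

    /* illustrative compatibility shim for code not yet converted */
    #ifndef G82_CHANNEL_DMA_V0_NTFY_UEVENT
    #define G82_CHANNEL_DMA_V0_NTFY_UEVENT NV826E_V0_NTFY_NON_STALL_INTERRUPT
    #endif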
@@ -2,23 +2,31 @@
 #define __NVIF_CLASS_H__
 
 /* these class numbers are made up by us, and not nvidia-assigned */
-#define NVIF_CLASS_CONTROL /* if0001.h */ -1
-#define NVIF_CLASS_PERFMON /* if0002.h */ -2
-#define NVIF_CLASS_PERFDOM /* if0003.h */ -3
-#define NVIF_CLASS_SW_NV04 /* if0004.h */ -4
-#define NVIF_CLASS_SW_NV10 /* if0005.h */ -5
-#define NVIF_CLASS_SW_NV50 /* if0005.h */ -6
-#define NVIF_CLASS_SW_GF100 /* if0005.h */ -7
+#define NVIF_CLASS_CLIENT /* if0000.h */ -0x00000000
+
+#define NVIF_CLASS_CONTROL /* if0001.h */ -0x00000001
+
+#define NVIF_CLASS_PERFMON /* if0002.h */ -0x00000002
+#define NVIF_CLASS_PERFDOM /* if0003.h */ -0x00000003
+
+#define NVIF_CLASS_SW_NV04 /* if0004.h */ -0x00000004
+#define NVIF_CLASS_SW_NV10 /* if0005.h */ -0x00000005
+#define NVIF_CLASS_SW_NV50 /* if0005.h */ -0x00000006
+#define NVIF_CLASS_SW_GF100 /* if0005.h */ -0x00000007
 
 /* the below match nvidia-assigned (either in hw, or sw) class numbers */
 #define NV_NULL_CLASS 0x00000030
 
 #define NV_DEVICE /* cl0080.h */ 0x00000080
 
 #define NV_DMA_FROM_MEMORY /* cl0002.h */ 0x00000002
 #define NV_DMA_TO_MEMORY /* cl0002.h */ 0x00000003
 #define NV_DMA_IN_MEMORY /* cl0002.h */ 0x0000003d
 
 #define NV50_TWOD 0x0000502d
 #define FERMI_TWOD_A 0x0000902d
 
 #define NV50_MEMORY_TO_MEMORY_FORMAT 0x00005039
 #define FERMI_MEMORY_TO_MEMORY_FORMAT_A 0x00009039
 
 #define KEPLER_INLINE_TO_MEMORY_A 0x0000a040
@@ -99,6 +107,12 @@
 #define GF110_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000907e
 #define GK104_DISP_OVERLAY_CONTROL_DMA /* cl507e.h */ 0x0000917e
 
+#define NV50_TESLA 0x00005097
+#define G82_TESLA 0x00008297
+#define GT200_TESLA 0x00008397
+#define GT214_TESLA 0x00008597
+#define GT21A_TESLA 0x00008697
+
 #define FERMI_A /* cl9097.h */ 0x00009097
 #define FERMI_B /* cl9097.h */ 0x00009197
 #define FERMI_C /* cl9097.h */ 0x00009297
@@ -140,6 +154,8 @@
 
 #define FERMI_DECOMPRESS 0x000090b8
 
+#define NV50_COMPUTE 0x000050c0
+#define GT214_COMPUTE 0x000085c0
 #define FERMI_COMPUTE_A 0x000090c0
 #define FERMI_COMPUTE_B 0x000091c0
 #define KEPLER_COMPUTE_A 0x0000a0c0
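The first hunk widens the driver-internal class numbers to full 32-bit negative constants and adds NVIF_CLASS_CLIENT, making clients themselves instantiable objects; the later hunks add pre-Fermi TESLA and COMPUTE class IDs. The sign is the convention that matters here: classes invented by the driver are negative, NVIDIA-assigned ones are positive. A hypothetical helper making that explicit (not in the header):

    /* hypothetical: distinguish made-up classes from nvidia-assigned ones */
    static inline bool
    nvif_class_is_internal(s32 oclass)
    {
        return oclass < 0;
    }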
@@ -11,8 +11,7 @@ struct nvif_client {
     bool super;
 };
 
-int nvif_client_init(const char *drv, const char *name, u64 device,
-                     const char *cfg, const char *dbg,
+int nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
                      struct nvif_client *);
 void nvif_client_fini(struct nvif_client *);
 int nvif_client_ioctl(struct nvif_client *, void *, u32);
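nvif_client_init() drops the driver/config/debug string triple and instead takes a parent client, so new clients are created as children of an existing one; the drv/cfg/dbg plumbing moves to nvif_driver_init() in the next hunk. A hedged sketch of an adapted caller, with argument meanings inferred only from the two declarations in this diff:

    struct nvif_client cli;
    int ret;

    /* `master` is assumed to be an already-initialised nvif_client */
    ret = nvif_client_init(&master, "my-client", device_id, &cli);
    if (ret)
        return ret;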
@@ -1,5 +1,7 @@
 #ifndef __NVIF_DRIVER_H__
 #define __NVIF_DRIVER_H__
+#include <nvif/os.h>
+struct nvif_client;
 
 struct nvif_driver {
     const char *name;
@@ -14,9 +16,11 @@ struct nvif_driver {
     bool keep;
 };
 
+int nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
+                     const char *name, u64 device, struct nvif_client *);
+
 extern const struct nvif_driver nvif_driver_nvkm;
 extern const struct nvif_driver nvif_driver_drm;
 extern const struct nvif_driver nvif_driver_lib;
 extern const struct nvif_driver nvif_driver_null;
 
 #endif
@@ -1,9 +1,16 @@
 #ifndef __NVIF_IF0000_H__
 #define __NVIF_IF0000_H__
 
-#define NV_CLIENT_DEVLIST 0x00
+struct nvif_client_v0 {
+    __u8 version;
+    __u8 pad01[7];
+    __u64 device;
+    char name[32];
+};
 
-struct nv_client_devlist_v0 {
+#define NVIF_CLIENT_V0_DEVLIST 0x00
+
+struct nvif_client_devlist_v0 {
     __u8 version;
     __u8 count;
     __u8 pad02[6];
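With clients instantiable through the object interface, if0000.h now carries the nvif_client_v0 argument structure used when creating one, and the devlist query moves into the same namespace. A short example of filling in the args; field meanings are taken from the struct itself, and device_id is a placeholder:

    struct nvif_client_v0 args = {};

    args.device = device_id;  /* which device the client should target */
    snprintf(args.name, sizeof(args.name), "%s", "my-client");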
@ -1,5 +1,6 @@
#ifndef __NVKM_CLIENT_H__
#define __NVKM_CLIENT_H__
#define nvkm_client(p) container_of((p), struct nvkm_client, object)
#include <core/object.h>

struct nvkm_client {
@ -8,9 +9,8 @@ struct nvkm_client {
u64 device;
u32 debug;

struct nvkm_client_notify *notify[16];
struct nvkm_client_notify *notify[32];
struct rb_root objroot;
struct rb_root dmaroot;

bool super;
void *data;
@ -19,15 +19,11 @@ struct nvkm_client {
struct nvkm_vm *vm;
};

bool nvkm_client_insert(struct nvkm_client *, struct nvkm_object *);
void nvkm_client_remove(struct nvkm_client *, struct nvkm_object *);
struct nvkm_object *nvkm_client_search(struct nvkm_client *, u64 object);

int nvkm_client_new(const char *name, u64 device, const char *cfg,
const char *dbg, struct nvkm_client **);
void nvkm_client_del(struct nvkm_client **);
int nvkm_client_init(struct nvkm_client *);
int nvkm_client_fini(struct nvkm_client *, bool suspend);
const char *dbg,
int (*)(const void *, u32, const void *, u32),
struct nvkm_client **);
struct nvkm_client *nvkm_client_search(struct nvkm_client *, u64 handle);

int nvkm_client_notify_new(struct nvkm_object *, struct nvkm_event *,
void *data, u32 size);
@ -37,8 +33,8 @@ int nvkm_client_notify_put(struct nvkm_client *, int index);

/* logging for client-facing objects */
#define nvif_printk(o,l,p,f,a...) do { \
struct nvkm_object *_object = (o); \
struct nvkm_client *_client = _object->client; \
const struct nvkm_object *_object = (o); \
const struct nvkm_client *_client = _object->client; \
if (_client->debug >= NV_DBG_##l) \
printk(KERN_##p "nouveau: %s:%08x:%08x: "f, _client->name, \
_object->handle, _object->oclass, ##a); \
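
For context, nvkm_client_new() now takes a function pointer through which event
notifications are routed back to the client's owner; a minimal sketch of a
caller (the handler name and its use of NVIF_NOTIFY_DROP are illustrative
assumptions, not code from this commit):

static int
example_client_ntfy(const void *header, u32 length, const void *data, u32 size)
{
	/* Inspect the event and tell nvif whether to keep the notify armed. */
	return NVIF_NOTIFY_DROP;
}

static int
example_client_create(struct nvkm_client **pclient)
{
	/* Name, device, config and debug strings as before; the new
	 * callback receives notifications destined for this client. */
	return nvkm_client_new("example", 0, NULL, NULL,
			       example_client_ntfy, pclient);
}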
@ -262,7 +262,7 @@ extern const struct nvkm_sclass nvkm_udevice_sclass;

/* device logging */
#define nvdev_printk_(d,l,p,f,a...) do { \
struct nvkm_device *_device = (d); \
const struct nvkm_device *_device = (d); \
if (_device->debug >= (l)) \
dev_##p(_device->dev, f, ##a); \
} while(0)
@ -20,6 +20,7 @@ struct nvkm_engine_func {
int (*fini)(struct nvkm_engine *, bool suspend);
void (*intr)(struct nvkm_engine *);
void (*tile)(struct nvkm_engine *, int region, struct nvkm_fb_tile *);
bool (*chsw_load)(struct nvkm_engine *);

struct {
int (*sclass)(struct nvkm_oclass *, int index,
@ -44,4 +45,5 @@ int nvkm_engine_new_(const struct nvkm_engine_func *, struct nvkm_device *,
struct nvkm_engine *nvkm_engine_ref(struct nvkm_engine *);
void nvkm_engine_unref(struct nvkm_engine **);
void nvkm_engine_tile(struct nvkm_engine *, int region);
bool nvkm_engine_chsw_load(struct nvkm_engine *);
#endif
@ -6,9 +6,10 @@ struct nvkm_vma;
struct nvkm_vm;

enum nvkm_memory_target {
NVKM_MEM_TARGET_INST,
NVKM_MEM_TARGET_VRAM,
NVKM_MEM_TARGET_HOST,
NVKM_MEM_TARGET_INST, /* instance memory */
NVKM_MEM_TARGET_VRAM, /* video memory */
NVKM_MEM_TARGET_HOST, /* coherent system memory */
NVKM_MEM_TARGET_NCOH, /* non-coherent system memory */
};

struct nvkm_memory {
@ -5,7 +5,7 @@
struct nvkm_mm_node {
struct list_head nl_entry;
struct list_head fl_entry;
struct list_head rl_entry;
struct nvkm_mm_node *next;

#define NVKM_MM_HEAP_ANY 0x00
u8 heap;
@ -38,4 +38,10 @@ int nvkm_mm_tail(struct nvkm_mm *, u8 heap, u8 type, u32 size_max,
u32 size_min, u32 align, struct nvkm_mm_node **);
void nvkm_mm_free(struct nvkm_mm *, struct nvkm_mm_node **);
void nvkm_mm_dump(struct nvkm_mm *, const char *);

static inline bool
nvkm_mm_contiguous(struct nvkm_mm_node *node)
{
return !node->next;
}
#endif
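
The new next pointer replaces the old rl_entry list for chaining the pieces of
a multi-part allocation, which is exactly what nvkm_mm_contiguous() tests; a
minimal caller sketch (the helper name is illustrative, and error handling and
freeing via nvkm_mm_free() are omitted):

static bool
example_alloc_contig(struct nvkm_mm *mm, u8 type, u32 size, u32 align,
		     struct nvkm_mm_node **pnode)
{
	/* nvkm_mm_head() may satisfy the request with several ranges
	 * chained via ->next; a single node means one contiguous span. */
	if (nvkm_mm_head(mm, NVKM_MM_HEAP_ANY, type, size, size, align, pnode))
		return false;
	return nvkm_mm_contiguous(*pnode);
}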
@ -62,6 +62,11 @@ int nvkm_object_wr32(struct nvkm_object *, u64 addr, u32 data);
int nvkm_object_bind(struct nvkm_object *, struct nvkm_gpuobj *, int align,
struct nvkm_gpuobj **);

bool nvkm_object_insert(struct nvkm_object *);
void nvkm_object_remove(struct nvkm_object *);
struct nvkm_object *nvkm_object_search(struct nvkm_client *, u64 object,
const struct nvkm_object_func *);

struct nvkm_sclass {
int minver;
int maxver;
@ -32,7 +32,7 @@ void nvkm_subdev_intr(struct nvkm_subdev *);

/* subdev logging */
#define nvkm_printk_(s,l,p,f,a...) do { \
struct nvkm_subdev *_subdev = (s); \
const struct nvkm_subdev *_subdev = (s); \
if (_subdev->debug >= (l)) { \
dev_##p(_subdev->device->dev, "%s: "f, \
nvkm_subdev_name[_subdev->index], ##a); \
@ -12,9 +12,6 @@ struct nvkm_dmaobj {
u32 access;
u64 start;
u64 limit;

struct rb_node rb;
u64 handle; /*XXX HANDLE MERGE */
};

struct nvkm_dma {
@ -22,8 +19,7 @@ struct nvkm_dma {
struct nvkm_engine engine;
};

struct nvkm_dmaobj *
nvkm_dma_search(struct nvkm_dma *, struct nvkm_client *, u64 object);
struct nvkm_dmaobj *nvkm_dmaobj_search(struct nvkm_client *, u64 object);

int nv04_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
int nv50_dma_new(struct nvkm_device *, int, struct nvkm_dma **);
@ -4,13 +4,26 @@
#include <core/engine.h>
struct nvkm_fifo_chan;

enum nvkm_falcon_dmaidx {
FALCON_DMAIDX_UCODE = 0,
FALCON_DMAIDX_VIRT = 1,
FALCON_DMAIDX_PHYS_VID = 2,
FALCON_DMAIDX_PHYS_SYS_COH = 3,
FALCON_DMAIDX_PHYS_SYS_NCOH = 4,
};

struct nvkm_falcon {
const struct nvkm_falcon_func *func;
struct nvkm_engine engine;

const struct nvkm_subdev *owner;
const char *name;
u32 addr;
u8 version;
u8 secret;

struct mutex mutex;
const struct nvkm_subdev *user;

u8 version;
u8 secret;
bool debug;

struct nvkm_memory *core;
bool external;
@ -19,15 +32,25 @@ struct nvkm_falcon {
u32 limit;
u32 *data;
u32 size;
u8 ports;
} code;

struct {
u32 limit;
u32 *data;
u32 size;
u8 ports;
} data;

struct nvkm_engine engine;
};

int nvkm_falcon_v1_new(struct nvkm_subdev *owner, const char *name, u32 addr,
struct nvkm_falcon **);
void nvkm_falcon_del(struct nvkm_falcon **);
int nvkm_falcon_get(struct nvkm_falcon *, const struct nvkm_subdev *);
void nvkm_falcon_put(struct nvkm_falcon *, const struct nvkm_subdev *);

int nvkm_falcon_new_(const struct nvkm_falcon_func *, struct nvkm_device *,
int index, bool enable, u32 addr, struct nvkm_engine **);

@ -42,6 +65,51 @@ struct nvkm_falcon_func {
} data;
void (*init)(struct nvkm_falcon *);
void (*intr)(struct nvkm_falcon *, struct nvkm_fifo_chan *);
void (*load_imem)(struct nvkm_falcon *, void *, u32, u32, u16, u8, bool);
void (*load_dmem)(struct nvkm_falcon *, void *, u32, u32, u8);
void (*read_dmem)(struct nvkm_falcon *, u32, u32, u8, void *);
void (*bind_context)(struct nvkm_falcon *, struct nvkm_gpuobj *);
int (*wait_for_halt)(struct nvkm_falcon *, u32);
int (*clear_interrupt)(struct nvkm_falcon *, u32);
void (*set_start_addr)(struct nvkm_falcon *, u32 start_addr);
void (*start)(struct nvkm_falcon *);
int (*enable)(struct nvkm_falcon *falcon);
void (*disable)(struct nvkm_falcon *falcon);

struct nvkm_sclass sclass[];
};

static inline u32
nvkm_falcon_rd32(struct nvkm_falcon *falcon, u32 addr)
{
return nvkm_rd32(falcon->owner->device, falcon->addr + addr);
}

static inline void
nvkm_falcon_wr32(struct nvkm_falcon *falcon, u32 addr, u32 data)
{
nvkm_wr32(falcon->owner->device, falcon->addr + addr, data);
}

static inline u32
nvkm_falcon_mask(struct nvkm_falcon *falcon, u32 addr, u32 mask, u32 val)
{
struct nvkm_device *device = falcon->owner->device;

return nvkm_mask(device, falcon->addr + addr, mask, val);
}

void nvkm_falcon_load_imem(struct nvkm_falcon *, void *, u32, u32, u16, u8,
bool);
void nvkm_falcon_load_dmem(struct nvkm_falcon *, void *, u32, u32, u8);
void nvkm_falcon_read_dmem(struct nvkm_falcon *, u32, u32, u8, void *);
void nvkm_falcon_bind_context(struct nvkm_falcon *, struct nvkm_gpuobj *);
void nvkm_falcon_set_start_addr(struct nvkm_falcon *, u32);
void nvkm_falcon_start(struct nvkm_falcon *);
int nvkm_falcon_wait_for_halt(struct nvkm_falcon *, u32);
int nvkm_falcon_clear_interrupt(struct nvkm_falcon *, u32);
int nvkm_falcon_enable(struct nvkm_falcon *);
void nvkm_falcon_disable(struct nvkm_falcon *);
int nvkm_falcon_reset(struct nvkm_falcon *);

#endif
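
Read together, the declarations above imply the intended usage of the shared
falcon library: acquire the engine, upload code, start it, and wait for
completion. A hedged sketch (offsets, tags and the 100ms timeout are
illustrative assumptions, not values from this commit):

static int
example_falcon_run(struct nvkm_falcon *falcon, const struct nvkm_subdev *user,
		   void *ucode, u32 size)
{
	int ret;

	/* Take exclusive ownership on behalf of 'user'. */
	ret = nvkm_falcon_get(falcon, user);
	if (ret)
		return ret;

	/* Upload at IMEM offset 0, tag 0, port 0, non-secure. */
	nvkm_falcon_load_imem(falcon, ucode, 0, size, 0, 0, false);
	nvkm_falcon_set_start_addr(falcon, 0);
	nvkm_falcon_start(falcon);

	/* Wait for the ucode to halt, then release the engine. */
	ret = nvkm_falcon_wait_for_halt(falcon, 100);
	nvkm_falcon_put(falcon, user);
	return ret;
}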
@ -40,6 +40,7 @@ struct nvkm_fifo {

struct nvkm_event uevent; /* async user trigger */
struct nvkm_event cevent; /* channel creation event */
struct nvkm_event kevent; /* channel killed */
};

void nvkm_fifo_pause(struct nvkm_fifo *, unsigned long *);
@ -0,0 +1,26 @@
#ifndef __NVBIOS_POWER_BUDGET_H__
#define __NVBIOS_POWER_BUDGET_H__

#include <nvkm/subdev/bios.h>

struct nvbios_power_budget_entry {
u32 min_w;
u32 avg_w;
u32 max_w;
};

struct nvbios_power_budget {
u32 offset;
u8 ver;
u8 hlen;
u8 elen;
u8 ecount;
u8 cap_entry;
};

int nvbios_power_budget_header(struct nvkm_bios *,
struct nvbios_power_budget *);
int nvbios_power_budget_entry(struct nvkm_bios *, struct nvbios_power_budget *,
u8 idx, struct nvbios_power_budget_entry *);

#endif
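
The expected consumer pattern for the new parser is to read the header, then
fetch the entry named by cap_entry; reading cap_entry as "the entry that caps
board power" is an inference from the field names rather than something this
header spells out. A minimal sketch:

static int
example_power_cap(struct nvkm_bios *bios, u32 *max_w)
{
	struct nvbios_power_budget budget;
	struct nvbios_power_budget_entry entry;
	int ret;

	ret = nvbios_power_budget_header(bios, &budget);
	if (ret)
		return ret;

	/* Look up the capping entry and report its maximum wattage. */
	ret = nvbios_power_budget_entry(bios, &budget, budget.cap_entry,
					&entry);
	if (ret)
		return ret;

	*max_w = entry.max_w;
	return 0;
}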
@ -29,7 +29,7 @@ struct nvkm_mem {
u8 page_shift;

struct nvkm_mm_node *tag;
struct list_head regions;
struct nvkm_mm_node *mem;
dma_addr_t *pages;
u32 memtype;
u64 offset;
@ -8,6 +8,9 @@ struct nvkm_iccsense {
bool data_valid;
struct list_head sensors;
struct list_head rails;

u32 power_w_max;
u32 power_w_crit;
};

int gf100_iccsense_new(struct nvkm_device *, int index, struct nvkm_iccsense **);
@ -9,6 +9,7 @@ struct nvkm_mc {

void nvkm_mc_enable(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_disable(struct nvkm_device *, enum nvkm_devidx);
bool nvkm_mc_enabled(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_reset(struct nvkm_device *, enum nvkm_devidx);
void nvkm_mc_intr(struct nvkm_device *, bool *handled);
void nvkm_mc_intr_unarm(struct nvkm_device *);
@ -43,6 +43,7 @@ int nv40_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int nv46_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int nv4c_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int g84_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int g92_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int g94_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf100_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
int gf106_pci_new(struct nvkm_device *, int, struct nvkm_pci **);
@ -1,10 +1,12 @@
#ifndef __NVKM_PMU_H__
#define __NVKM_PMU_H__
#include <core/subdev.h>
#include <engine/falcon.h>

struct nvkm_pmu {
const struct nvkm_pmu_func *func;
struct nvkm_subdev subdev;
struct nvkm_falcon *falcon;

struct {
u32 base;
@ -35,6 +37,7 @@ int gk110_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk208_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gk20a_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm107_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gm20b_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp100_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
int gp102_pmu_new(struct nvkm_device *, int, struct nvkm_pmu **);
@ -26,7 +26,7 @@
#include <core/subdev.h>

enum nvkm_secboot_falcon {
NVKM_SECBOOT_FALCON_PMU = 0,
NVKM_SECBOOT_FALCON_RESERVED = 1,
NVKM_SECBOOT_FALCON_FECS = 2,
NVKM_SECBOOT_FALCON_GPCCS = 3,
@ -35,22 +35,23 @@ enum nvkm_secboot_falcon {
};

/**
* @base: base IO address of the falcon performing secure boot
* @irq_mask: IRQ mask of the falcon performing secure boot
* @enable_mask: enable mask of the falcon performing secure boot
* @wpr_set: whether the WPR region is currently set
*/
struct nvkm_secboot {
const struct nvkm_secboot_func *func;
struct nvkm_acr *acr;
struct nvkm_subdev subdev;
struct nvkm_falcon *boot_falcon;

enum nvkm_devidx devidx;
u32 base;
u64 wpr_addr;
u32 wpr_size;

bool wpr_set;
};
#define nvkm_secboot(p) container_of((p), struct nvkm_secboot, subdev)

bool nvkm_secboot_is_managed(struct nvkm_secboot *, enum nvkm_secboot_falcon);
int nvkm_secboot_reset(struct nvkm_secboot *, u32 falcon);
int nvkm_secboot_start(struct nvkm_secboot *, u32 falcon);
int nvkm_secboot_reset(struct nvkm_secboot *, enum nvkm_secboot_falcon);

int gm200_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
int gm20b_secboot_new(struct nvkm_device *, int, struct nvkm_secboot **);
@ -48,10 +48,8 @@ void nvkm_timer_alarm_cancel(struct nvkm_timer *, struct nvkm_alarm *);
} while (_taken = nvkm_timer_read(_tmr) - _time0, _taken < _nsecs); \
\
if (_taken >= _nsecs) { \
if (_warn) { \
dev_warn(_device->dev, "timeout at %s:%d/%s()!\n", \
__FILE__, __LINE__, __func__); \
} \
if (_warn) \
dev_WARN(_device->dev, "timeout\n"); \
_taken = -ETIMEDOUT; \
} \
_taken; \
@ -11,6 +11,7 @@ struct nvkm_top {
u32 nvkm_top_reset(struct nvkm_device *, enum nvkm_devidx);
u32 nvkm_top_intr(struct nvkm_device *, u32 intr, u64 *subdevs);
u32 nvkm_top_intr_mask(struct nvkm_device *, enum nvkm_devidx);
int nvkm_top_fault_id(struct nvkm_device *, enum nvkm_devidx);
enum nvkm_devidx nvkm_top_fault(struct nvkm_device *, int fault);
enum nvkm_devidx nvkm_top_engine(struct nvkm_device *, int, int *runl, int *engn);
@ -87,7 +87,7 @@ nouveau_abi16_put(struct nouveau_abi16 *abi16, int ret)
s32
nouveau_abi16_swclass(struct nouveau_drm *drm)
{
switch (drm->device.info.family) {
switch (drm->client.device.info.family) {
case NV_DEVICE_INFO_V0_TNT:
return NVIF_CLASS_SW_NV04;
case NV_DEVICE_INFO_V0_CELSIUS:
@ -175,7 +175,7 @@ nouveau_abi16_ioctl_getparam(ABI16_IOCTL_ARGS)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct nvif_device *device = &drm->client.device;
struct nvkm_gr *gr = nvxx_gr(device);
struct drm_nouveau_getparam *getparam = data;

@ -321,7 +321,7 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
}

/* Named memory object area */
ret = nouveau_gem_new(dev, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
ret = nouveau_gem_new(cli, PAGE_SIZE, 0, NOUVEAU_GEM_DOMAIN_GART,
0, 0, &chan->ntfy);
if (ret == 0)
ret = nouveau_bo_pin(chan->ntfy, TTM_PL_FLAG_TT, false);
@ -65,7 +65,7 @@ static int
nv40_get_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
int val = (nvif_rd32(device, NV40_PMC_BACKLIGHT) &
NV40_PMC_BACKLIGHT_MASK) >> 16;

@ -76,7 +76,7 @@ static int
nv40_set_intensity(struct backlight_device *bd)
{
struct nouveau_drm *drm = bl_get_data(bd);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
int val = bd->props.brightness;
int reg = nvif_rd32(device, NV40_PMC_BACKLIGHT);

@ -96,7 +96,7 @@ static int
nv40_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
struct backlight_properties props;
struct backlight_device *bd;
struct backlight_connector bl_connector;
@ -133,7 +133,7 @@ nv50_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
int or = nv_encoder->or;
u32 div = 1025;
u32 val;
@ -148,7 +148,7 @@ nv50_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
int or = nv_encoder->or;
u32 div = 1025;
u32 val = (bd->props.brightness * div) / 100;
@ -169,7 +169,7 @@ nva3_get_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
int or = nv_encoder->or;
u32 div, val;

@ -187,7 +187,7 @@ nva3_set_intensity(struct backlight_device *bd)
{
struct nouveau_encoder *nv_encoder = bl_get_data(bd);
struct nouveau_drm *drm = nouveau_drm(nv_encoder->base.base.dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
int or = nv_encoder->or;
u32 div, val;

@ -213,7 +213,7 @@ static int
nv50_backlight_init(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
struct nouveau_encoder *nv_encoder;
struct backlight_properties props;
struct backlight_device *bd;
@ -231,9 +231,9 @@ nv50_backlight_init(struct drm_connector *connector)
if (!nvif_rd32(device, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
return 0;

if (drm->device.info.chipset <= 0xa0 ||
drm->device.info.chipset == 0xaa ||
drm->device.info.chipset == 0xac)
if (drm->client.device.info.chipset <= 0xa0 ||
drm->client.device.info.chipset == 0xaa ||
drm->client.device.info.chipset == 0xac)
ops = &nv50_bl_ops;
else
ops = &nva3_bl_ops;
@ -265,7 +265,7 @@ int
nouveau_backlight_init(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->device;
struct nvif_device *device = &drm->client.device;
struct drm_connector *connector;

if (apple_gmux_present()) {
@ -215,7 +215,7 @@ int call_lvds_script(struct drm_device *dev, struct dcb_output *dcbent, int head
*/

struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
struct nvbios *bios = &drm->vbios;
uint8_t lvds_ver = bios->data[bios->fp.lvdsmanufacturerpointer];
uint32_t sel_clk_binding, sel_clk;
@ -319,7 +319,7 @@ static int
get_fp_strap(struct drm_device *dev, struct nvbios *bios)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;

/*
* The fp strap is normally dictated by the "User Strap" in
@ -333,10 +333,10 @@ get_fp_strap(struct drm_device *dev, struct nvbios *bios)
if (bios->major_version < 5 && bios->data[0x48] & 0x4)
return NVReadVgaCrtc5758(dev, 0, 0xf) & 0xf;

if (drm->device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_MAXWELL)
return nvif_rd32(device, 0x001800) & 0x0000000f;
else
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 24) & 0xf;
else
return (nvif_rd32(device, NV_PEXTDEV_BOOT_0) >> 16) & 0xf;
@ -638,7 +638,7 @@ int run_tmds_table(struct drm_device *dev, struct dcb_output *dcbent, int head,
*/

struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
struct nvbios *bios = &drm->vbios;
int cv = bios->chip_version;
uint16_t clktable = 0, scriptptr;
@ -1255,7 +1255,7 @@ olddcb_table(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
u8 *dcb = NULL;

if (drm->device.info.family > NV_DEVICE_INFO_V0_TNT)
if (drm->client.device.info.family > NV_DEVICE_INFO_V0_TNT)
dcb = ROMPTR(dev, drm->vbios.data[0x36]);
if (!dcb) {
NV_WARN(drm, "No DCB data found in VBIOS\n");
@ -1918,7 +1918,7 @@ static int load_nv17_hwsq_ucode_entry(struct drm_device *dev, struct nvbios *bio
*/

struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_object *device = &drm->device.object;
struct nvif_object *device = &drm->client.device.object;
uint8_t bytes_to_write;
uint16_t hwsq_entry_offset;
int i;
@ -2012,7 +2012,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
static bool NVInitVBIOS(struct drm_device *dev)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_bios *bios = nvxx_bios(&drm->device);
struct nvkm_bios *bios = nvxx_bios(&drm->client.device);
struct nvbios *legacy = &drm->vbios;

memset(legacy, 0, sizeof(struct nvbios));
@ -2064,7 +2064,7 @@ nouveau_bios_posted(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
unsigned htotal;

if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
return true;

htotal = NVReadVgaCrtc(dev, 0, 0x06);
@ -48,7 +48,7 @@ nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
{
struct nouveau_drm *drm = nouveau_drm(dev);
int i = reg - drm->tile.reg;
struct nvkm_device *device = nvxx_device(&drm->device);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nvkm_fb *fb = device->fb;
struct nvkm_fb_tile *tile = &fb->tile.region[i];

@ -100,7 +100,7 @@ nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
u32 size, u32 pitch, u32 flags)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvkm_fb *fb = nvxx_fb(&drm->device);
struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
struct nouveau_drm_tile *tile, *found = NULL;
int i;

@ -139,60 +139,62 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
kfree(nvbo);
}

static inline u64
roundup_64(u64 x, u32 y)
{
x += y - 1;
do_div(x, y);
return x * y;
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *align, int *size)
int *align, u64 *size)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvif_device *device = &drm->device;
struct nvif_device *device = &drm->client.device;

if (device->info.family < NV_DEVICE_INFO_V0_TESLA) {
if (nvbo->tile_mode) {
if (device->info.chipset >= 0x40) {
*align = 65536;
*size = roundup(*size, 64 * nvbo->tile_mode);
*size = roundup_64(*size, 64 * nvbo->tile_mode);

} else if (device->info.chipset >= 0x30) {
*align = 32768;
*size = roundup(*size, 64 * nvbo->tile_mode);
*size = roundup_64(*size, 64 * nvbo->tile_mode);

} else if (device->info.chipset >= 0x20) {
*align = 16384;
*size = roundup(*size, 64 * nvbo->tile_mode);
*size = roundup_64(*size, 64 * nvbo->tile_mode);

} else if (device->info.chipset >= 0x10) {
*align = 16384;
*size = roundup(*size, 32 * nvbo->tile_mode);
*size = roundup_64(*size, 32 * nvbo->tile_mode);
}
}
} else {
*size = roundup(*size, (1 << nvbo->page_shift));
*size = roundup_64(*size, (1 << nvbo->page_shift));
*align = max((1 << nvbo->page_shift), *align);
}

*size = roundup(*size, PAGE_SIZE);
*size = roundup_64(*size, PAGE_SIZE);
}

int
nouveau_bo_new(struct drm_device *dev, int size, int align,
nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align,
uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
struct sg_table *sg, struct reservation_object *robj,
struct nouveau_bo **pnvbo)
{
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_drm *drm = nouveau_drm(cli->dev);
struct nouveau_bo *nvbo;
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
int lpg_shift = 12;
int max_size;

if (drm->client.vm)
lpg_shift = drm->client.vm->mmu->lpg_shift;
max_size = INT_MAX & ~((1 << lpg_shift) - 1);

if (size <= 0 || size > max_size) {
NV_WARN(drm, "skipped size %x\n", (u32)size);
if (!size) {
NV_WARN(drm, "skipped size %016llx\n", size);
return -EINVAL;
}
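
The roundup_64() helper added in the hunk above is the 64-bit counterpart of
roundup(): do_div() replaces x with the quotient, so x * y is the smallest
multiple of y at or above the original value. A trivial illustration (the
function name and 4KiB constant are ours, not the driver's):

static u64
example_page_align(u64 size)
{
	/* e.g. 0x12345 -> 0x13000: (0x12345 + 0xfff) / 0x1000 * 0x1000 */
	return roundup_64(size, 0x1000);
}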
@ -208,8 +210,9 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->cli = cli;

if (!nvxx_device(&drm->device)->func->cpu_coherent)
if (!nvxx_device(&drm->client.device)->func->cpu_coherent)
nvbo->force_coherent = flags & TTM_PL_FLAG_UNCACHED;

nvbo->page_shift = 12;
@ -255,10 +258,10 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
u32 vram_pages = drm->device.info.ram_size >> PAGE_SHIFT;
u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
unsigned i, fpfn, lpfn;

if (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@ -316,12 +319,12 @@ nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype, bool contig)
if (ret)
return ret;

if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA &&
memtype == TTM_PL_FLAG_VRAM && contig) {
if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
if (bo->mem.mem_type == TTM_PL_VRAM) {
struct nvkm_mem *mem = bo->mem.mm_node;
if (!list_is_singular(&mem->regions))
if (!nvkm_mm_contiguous(mem->mem))
evict = true;
}
nvbo->tile_flags &= ~NOUVEAU_GEM_TILE_NONCONTIG;
@ -443,7 +446,7 @@ void
nouveau_bo_sync_for_device(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvkm_device *device = nvxx_device(&drm->device);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;

@ -463,7 +466,7 @@ void
nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nvkm_device *device = nvxx_device(&drm->device);
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm;
int i;

@ -579,9 +582,9 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;

if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
/* Some BARs do not support being ioremapped WC */
if (nvxx_bar(&drm->device)->iomap_uncached) {
if (nvxx_bar(&drm->client.device)->iomap_uncached) {
man->available_caching = TTM_PL_FLAG_UNCACHED;
man->default_caching = TTM_PL_FLAG_UNCACHED;
}
@ -594,7 +597,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
}
break;
case TTM_PL_TT:
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
man->func = &nouveau_gart_manager;
else
if (!drm->agp.bridge)
@ -654,20 +657,20 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
struct nvkm_mem *node = old_mem->mm_node;
struct nvkm_mem *mem = old_reg->mm_node;
int ret = RING_SPACE(chan, 10);
if (ret == 0) {
BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
OUT_RING (chan, upper_32_bits(node->vma[0].offset));
OUT_RING (chan, lower_32_bits(node->vma[0].offset));
OUT_RING (chan, upper_32_bits(node->vma[1].offset));
OUT_RING (chan, lower_32_bits(node->vma[1].offset));
OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, PAGE_SIZE);
OUT_RING (chan, new_mem->num_pages);
OUT_RING (chan, new_reg->num_pages);
BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
}
return ret;
@ -686,15 +689,15 @@ nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
struct nvkm_mem *node = old_mem->mm_node;
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
u32 page_count = new_mem->num_pages;
struct nvkm_mem *mem = old_reg->mm_node;
u64 src_offset = mem->vma[0].offset;
u64 dst_offset = mem->vma[1].offset;
u32 page_count = new_reg->num_pages;
int ret;

page_count = new_mem->num_pages;
page_count = new_reg->num_pages;
while (page_count) {
int line_count = (page_count > 8191) ? 8191 : page_count;

@ -724,15 +727,15 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
struct nvkm_mem *node = old_mem->mm_node;
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
u32 page_count = new_mem->num_pages;
struct nvkm_mem *mem = old_reg->mm_node;
u64 src_offset = mem->vma[0].offset;
u64 dst_offset = mem->vma[1].offset;
u32 page_count = new_reg->num_pages;
int ret;

page_count = new_mem->num_pages;
page_count = new_reg->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;

@ -763,15 +766,15 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,

static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
struct nvkm_mem *node = old_mem->mm_node;
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
u32 page_count = new_mem->num_pages;
struct nvkm_mem *mem = old_reg->mm_node;
u64 src_offset = mem->vma[0].offset;
u64 dst_offset = mem->vma[1].offset;
u32 page_count = new_reg->num_pages;
int ret;

page_count = new_mem->num_pages;
page_count = new_reg->num_pages;
while (page_count) {
int line_count = (page_count > 8191) ? 8191 : page_count;

@ -801,35 +804,35 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
struct nvkm_mem *node = old_mem->mm_node;
struct nvkm_mem *mem = old_reg->mm_node;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
OUT_RING (chan, upper_32_bits(node->vma[0].offset));
OUT_RING (chan, lower_32_bits(node->vma[0].offset));
OUT_RING (chan, upper_32_bits(node->vma[1].offset));
OUT_RING (chan, lower_32_bits(node->vma[1].offset));
OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
OUT_RING (chan, 0x00000000 /* COPY */);
OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
}
return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
struct nvkm_mem *node = old_mem->mm_node;
struct nvkm_mem *mem = old_reg->mm_node;
int ret = RING_SPACE(chan, 7);
if (ret == 0) {
BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT);
OUT_RING (chan, upper_32_bits(node->vma[0].offset));
OUT_RING (chan, lower_32_bits(node->vma[0].offset));
OUT_RING (chan, upper_32_bits(node->vma[1].offset));
OUT_RING (chan, lower_32_bits(node->vma[1].offset));
OUT_RING (chan, new_reg->num_pages << PAGE_SHIFT);
OUT_RING (chan, upper_32_bits(mem->vma[0].offset));
OUT_RING (chan, lower_32_bits(mem->vma[0].offset));
OUT_RING (chan, upper_32_bits(mem->vma[1].offset));
OUT_RING (chan, lower_32_bits(mem->vma[1].offset));
OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
}
return ret;
@ -853,14 +856,14 @@ nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
struct nvkm_mem *node = old_mem->mm_node;
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
int src_tiled = !!node->memtype;
int dst_tiled = !!((struct nvkm_mem *)new_mem->mm_node)->memtype;
struct nvkm_mem *mem = old_reg->mm_node;
u64 length = (new_reg->num_pages << PAGE_SHIFT);
u64 src_offset = mem->vma[0].offset;
u64 dst_offset = mem->vma[1].offset;
int src_tiled = !!mem->memtype;
int dst_tiled = !!((struct nvkm_mem *)new_reg->mm_node)->memtype;
int ret;

while (length) {
@ -940,20 +943,20 @@ nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
struct nouveau_channel *chan, struct ttm_mem_reg *reg)
{
if (mem->mem_type == TTM_PL_TT)
if (reg->mem_type == TTM_PL_TT)
return NvDmaTT;
return chan->vram.handle;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *old_reg, struct ttm_mem_reg *new_reg)
{
u32 src_offset = old_mem->start << PAGE_SHIFT;
u32 dst_offset = new_mem->start << PAGE_SHIFT;
u32 page_count = new_mem->num_pages;
u32 src_offset = old_reg->start << PAGE_SHIFT;
u32 dst_offset = new_reg->start << PAGE_SHIFT;
u32 page_count = new_reg->num_pages;
int ret;

ret = RING_SPACE(chan, 3);
@ -961,10 +964,10 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
return ret;

BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_reg));
OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_reg));

page_count = new_mem->num_pages;
page_count = new_reg->num_pages;
while (page_count) {
int line_count = (page_count > 2047) ? 2047 : page_count;

@ -995,33 +998,33 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,

static int
nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
struct ttm_mem_reg *reg)
{
struct nvkm_mem *old_node = bo->mem.mm_node;
struct nvkm_mem *new_node = mem->mm_node;
u64 size = (u64)mem->num_pages << PAGE_SHIFT;
struct nvkm_mem *old_mem = bo->mem.mm_node;
struct nvkm_mem *new_mem = reg->mm_node;
u64 size = (u64)reg->num_pages << PAGE_SHIFT;
int ret;

ret = nvkm_vm_get(drm->client.vm, size, old_node->page_shift,
NV_MEM_ACCESS_RW, &old_node->vma[0]);
ret = nvkm_vm_get(drm->client.vm, size, old_mem->page_shift,
NV_MEM_ACCESS_RW, &old_mem->vma[0]);
if (ret)
return ret;

ret = nvkm_vm_get(drm->client.vm, size, new_node->page_shift,
NV_MEM_ACCESS_RW, &old_node->vma[1]);
ret = nvkm_vm_get(drm->client.vm, size, new_mem->page_shift,
NV_MEM_ACCESS_RW, &old_mem->vma[1]);
if (ret) {
nvkm_vm_put(&old_node->vma[0]);
nvkm_vm_put(&old_mem->vma[0]);
return ret;
}

nvkm_vm_map(&old_node->vma[0], old_node);
nvkm_vm_map(&old_node->vma[1], new_node);
nvkm_vm_map(&old_mem->vma[0], old_mem);
nvkm_vm_map(&old_mem->vma[1], new_mem);
return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_channel *chan = drm->ttm.chan;
@ -1033,8 +1036,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
* old nvkm_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_move_prep(drm, bo, new_mem);
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_move_prep(drm, bo, new_reg);
if (ret)
return ret;
}
@ -1042,14 +1045,14 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
mutex_lock_nested(&cli->mutex, SINGLE_DEPTH_NESTING);
ret = nouveau_fence_sync(nouveau_bo(bo), chan, true, intr);
if (ret == 0) {
ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
ret = drm->ttm.move(chan, bo, &bo->mem, new_reg);
if (ret == 0) {
ret = nouveau_fence_new(chan, false, &fence);
if (ret == 0) {
ret = ttm_bo_move_accel_cleanup(bo,
&fence->base,
evict,
new_mem);
new_reg);
nouveau_fence_unref(&fence);
}
}
@ -1124,7 +1127,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm)

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
struct ttm_place placement_memtype = {
.fpfn = 0,
@ -1132,35 +1135,35 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
};
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
struct ttm_mem_reg tmp_reg;
int ret;

placement.num_placement = placement.num_busy_placement = 1;
placement.placement = placement.busy_placement = &placement_memtype;

tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
tmp_reg = *new_reg;
tmp_reg.mm_node = NULL;
ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
if (ret)
return ret;

ret = ttm_tt_bind(bo->ttm, &tmp_mem);
ret = ttm_tt_bind(bo->ttm, &tmp_reg);
if (ret)
goto out;

ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_reg);
if (ret)
goto out;

ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_mem);
ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, new_reg);
out:
ttm_bo_mem_put(bo, &tmp_mem);
ttm_bo_mem_put(bo, &tmp_reg);
return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
struct ttm_place placement_memtype = {
.fpfn = 0,
@ -1168,34 +1171,34 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
.flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING
};
struct ttm_placement placement;
struct ttm_mem_reg tmp_mem;
struct ttm_mem_reg tmp_reg;
int ret;

placement.num_placement = placement.num_busy_placement = 1;
placement.placement = placement.busy_placement = &placement_memtype;

tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
tmp_reg = *new_reg;
tmp_reg.mm_node = NULL;
ret = ttm_bo_mem_space(bo, &placement, &tmp_reg, intr, no_wait_gpu);
if (ret)
return ret;

ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_mem);
ret = ttm_bo_move_ttm(bo, intr, no_wait_gpu, &tmp_reg);
if (ret)
goto out;

ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_reg);
if (ret)
goto out;

out:
ttm_bo_mem_put(bo, &tmp_mem);
ttm_bo_mem_put(bo, &tmp_reg);
return ret;
}

static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
struct ttm_mem_reg *new_mem)
struct ttm_mem_reg *new_reg)
{
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_vma *vma;
@ -1205,10 +1208,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
return;

list_for_each_entry(vma, &nvbo->vma_list, head) {
if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
(new_mem->mem_type == TTM_PL_VRAM ||
if (new_reg && new_reg->mem_type != TTM_PL_SYSTEM &&
(new_reg->mem_type == TTM_PL_VRAM ||
nvbo->page_shift != vma->vm->mmu->lpg_shift)) {
nvkm_vm_map(vma, new_mem->mm_node);
nvkm_vm_map(vma, new_reg->mm_node);
} else {
WARN_ON(ttm_bo_wait(bo, false, false));
nvkm_vm_unmap(vma);
@ -1217,20 +1220,20 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, bool evict,
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_reg,
struct nouveau_drm_tile **new_tile)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 offset = new_mem->start << PAGE_SHIFT;
u64 offset = new_reg->start << PAGE_SHIFT;

*new_tile = NULL;
if (new_mem->mem_type != TTM_PL_VRAM)
if (new_reg->mem_type != TTM_PL_VRAM)
return 0;

if (drm->device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_CELSIUS) {
*new_tile = nv10_bo_set_tiling(dev, offset, new_reg->size,
nvbo->tile_mode,
nvbo->tile_flags);
}
@ -1253,11 +1256,11 @@ nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
bool no_wait_gpu, struct ttm_mem_reg *new_mem)
bool no_wait_gpu, struct ttm_mem_reg *new_reg)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
struct ttm_mem_reg *old_reg = &bo->mem;
struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;

@ -1268,31 +1271,31 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
if (nvbo->pin_refcnt)
NV_WARN(drm, "Moving pinned object %p!\n", nvbo);

if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile);
if (ret)
return ret;
}

/* Fake bo copy. */
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
if (old_reg->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
BUG_ON(bo->mem.mm_node != NULL);
bo->mem = *new_mem;
new_mem->mm_node = NULL;
bo->mem = *new_reg;
new_reg->mm_node = NULL;
goto out;
}

/* Hardware assisted copy. */
if (drm->ttm.move) {
if (new_mem->mem_type == TTM_PL_SYSTEM)
if (new_reg->mem_type == TTM_PL_SYSTEM)
ret = nouveau_bo_move_flipd(bo, evict, intr,
no_wait_gpu, new_mem);
else if (old_mem->mem_type == TTM_PL_SYSTEM)
no_wait_gpu, new_reg);
else if (old_reg->mem_type == TTM_PL_SYSTEM)
ret = nouveau_bo_move_flips(bo, evict, intr,
no_wait_gpu, new_mem);
no_wait_gpu, new_reg);
else
ret = nouveau_bo_move_m2mf(bo, evict, intr,
no_wait_gpu, new_mem);
no_wait_gpu, new_reg);
if (!ret)
goto out;
}
@ -1300,10 +1303,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
/* Fallback to software copy. */
ret = ttm_bo_wait(bo, intr, no_wait_gpu);
if (ret == 0)
ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_mem);
ret = ttm_bo_move_memcpy(bo, intr, no_wait_gpu, new_reg);

out:
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
@ -1323,54 +1326,54 @@ nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
struct ttm_mem_type_manager *man = &bdev->man[reg->mem_type];
struct nouveau_drm *drm = nouveau_bdev(bdev);
struct nvkm_device *device = nvxx_device(&drm->device);
struct nvkm_mem *node = mem->mm_node;
struct nvkm_device *device = nvxx_device(&drm->client.device);
struct nvkm_mem *mem = reg->mm_node;
int ret;

mem->bus.addr = NULL;
mem->bus.offset = 0;
mem->bus.size = mem->num_pages << PAGE_SHIFT;
mem->bus.base = 0;
mem->bus.is_iomem = false;
reg->bus.addr = NULL;
reg->bus.offset = 0;
reg->bus.size = reg->num_pages << PAGE_SHIFT;
reg->bus.base = 0;
reg->bus.is_iomem = false;
if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
return -EINVAL;
switch (mem->mem_type) {
switch (reg->mem_type) {
case TTM_PL_SYSTEM:
/* System memory */
return 0;
case TTM_PL_TT:
#if IS_ENABLED(CONFIG_AGP)
if (drm->agp.bridge) {
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = drm->agp.base;
mem->bus.is_iomem = !drm->agp.cma;
reg->bus.offset = reg->start << PAGE_SHIFT;
reg->bus.base = drm->agp.base;
reg->bus.is_iomem = !drm->agp.cma;
}
#endif
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA || !node->memtype)
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA || !mem->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
mem->bus.base = device->func->resource_addr(device, 1);
mem->bus.is_iomem = true;
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
struct nvkm_bar *bar = nvxx_bar(&drm->device);
reg->bus.offset = reg->start << PAGE_SHIFT;
reg->bus.base = device->func->resource_addr(device, 1);
reg->bus.is_iomem = true;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
struct nvkm_bar *bar = nvxx_bar(&drm->client.device);
int page_shift = 12;
if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
page_shift = node->page_shift;
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
page_shift = mem->page_shift;

ret = nvkm_bar_umap(bar, node->size << 12, page_shift,
&node->bar_vma);
ret = nvkm_bar_umap(bar, mem->size << 12, page_shift,
&mem->bar_vma);
if (ret)
return ret;

nvkm_vm_map(&node->bar_vma, node);
mem->bus.offset = node->bar_vma.offset;
nvkm_vm_map(&mem->bar_vma, mem);
reg->bus.offset = mem->bar_vma.offset;
}
break;
default:
@ -1380,15 +1383,15 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *reg)
{
struct nvkm_mem *node = mem->mm_node;
struct nvkm_mem *mem = reg->mm_node;

if (!node->bar_vma.node)
if (!mem->bar_vma.node)
return;

nvkm_vm_unmap(&node->bar_vma);
nvkm_vm_put(&node->bar_vma);
nvkm_vm_unmap(&mem->bar_vma);
nvkm_vm_put(&mem->bar_vma);
}

static int
@ -1396,7 +1399,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct nvkm_device *device = nvxx_device(&drm->device);
struct nvkm_device *device = nvxx_device(&drm->client.device);
u32 mappable = device->func->resource_size(device, 1) >> PAGE_SHIFT;
int i, ret;

@ -1404,7 +1407,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA ||
if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA ||
!nouveau_bo_tile_layout(nvbo))
return 0;

@ -1419,7 +1422,7 @@ nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
}

/* make sure bo is in mappable vram */
if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA ||
bo->mem.start + bo->mem.num_pages < mappable)
return 0;

@ -1461,7 +1464,7 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}

drm = nouveau_bdev(ttm->bdev);
device = nvxx_device(&drm->device);
device = nvxx_device(&drm->client.device);
dev = drm->dev;
pdev = device->dev;

@ -1518,7 +1521,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
return;

drm = nouveau_bdev(ttm->bdev);
device = nvxx_device(&drm->device);
device = nvxx_device(&drm->client.device);
dev = drm->dev;
pdev = device->dev;
@ -26,6 +26,8 @@ struct nouveau_bo {
struct list_head vma_list;
unsigned page_shift;

struct nouveau_cli *cli;

u32 tile_mode;
u32 tile_flags;
struct nouveau_drm_tile *tile;
@ -69,7 +71,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
extern struct ttm_bo_driver nouveau_bo_driver;

void nouveau_bo_move_init(struct nouveau_drm *);
int nouveau_bo_new(struct drm_device *, int size, int align, u32 flags,
int nouveau_bo_new(struct nouveau_cli *, u64 size, int align, u32 flags,
u32 tile_mode, u32 tile_flags, struct sg_table *sg,
struct reservation_object *robj,
struct nouveau_bo **);
@ -45,10 +45,20 @@ MODULE_PARM_DESC(vram_pushbuf, "Create DMA push buffers in VRAM");
int nouveau_vram_pushbuf;
module_param_named(vram_pushbuf, nouveau_vram_pushbuf, int, 0400);

static int
nouveau_channel_killed(struct nvif_notify *ntfy)
{
struct nouveau_channel *chan = container_of(ntfy, typeof(*chan), kill);
struct nouveau_cli *cli = (void *)chan->user.client;
NV_PRINTK(warn, cli, "channel %d killed!\n", chan->chid);
atomic_set(&chan->killed, 1);
return NVIF_NOTIFY_DROP;
}

int
nouveau_channel_idle(struct nouveau_channel *chan)
{
if (likely(chan && chan->fence)) {
if (likely(chan && chan->fence && !atomic_read(&chan->killed))) {
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_fence *fence = NULL;
int ret;
@ -78,6 +88,7 @@ nouveau_channel_del(struct nouveau_channel **pchan)
nvif_object_fini(&chan->nvsw);
nvif_object_fini(&chan->gart);
nvif_object_fini(&chan->vram);
nvif_notify_fini(&chan->kill);
nvif_object_fini(&chan->user);
nvif_object_fini(&chan->push.ctxdma);
nouveau_bo_vma_del(chan->push.buffer, &chan->push.vma);
@ -107,13 +118,14 @@ nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,

chan->device = device;
chan->drm = drm;
atomic_set(&chan->killed, 0);

/* allocate memory for dma push buffer */
target = TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
if (nouveau_vram_pushbuf)
target = TTM_PL_FLAG_VRAM;

ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL, NULL,
ret = nouveau_bo_new(cli, size, 0, target, 0, 0, NULL, NULL,
&chan->push.buffer);
if (ret == 0) {
ret = nouveau_bo_pin(chan->push.buffer, target, false);
@ -301,12 +313,26 @@ nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
struct nvif_device *device = chan->device;
struct nouveau_cli *cli = (void *)chan->user.client;
struct nouveau_drm *drm = chan->drm;
struct nvkm_mmu *mmu = nvxx_mmu(device);
struct nv_dma_v0 args = {};
int ret, i;

nvif_object_map(&chan->user);

if (chan->user.oclass >= FERMI_CHANNEL_GPFIFO) {
ret = nvif_notify_init(&chan->user, nouveau_channel_killed,
true, NV906F_V0_NTFY_KILLED,
NULL, 0, 0, &chan->kill);
if (ret == 0)
ret = nvif_notify_get(&chan->kill);
if (ret) {
NV_ERROR(drm, "Failed to request channel kill "
"notification: %d\n", ret);
return ret;
}
}

/* allocate dma objects to cover all allowed vram, and gart */
if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.h b/drivers/gpu/drm/nouveau/nouveau_chan.h
@@ -1,7 +1,7 @@
 #ifndef __NOUVEAU_CHAN_H__
 #define __NOUVEAU_CHAN_H__
-
 #include <nvif/object.h>
+#include <nvif/notify.h>
 struct nvif_device;
 
 struct nouveau_channel {
@@ -38,6 +38,9 @@ struct nouveau_channel {
 	u32 user_put;
 
 	struct nvif_object user;
+
+	struct nvif_notify kill;
+	atomic_t killed;
 };
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -419,7 +419,7 @@ nouveau_connector_ddc_detect(struct drm_connector *connector)
 	struct drm_device *dev = connector->dev;
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	int i, panel = -ENODEV;
@@ -521,7 +521,7 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 		return;
 	nv_connector->detected_encoder = nv_encoder;
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
 		connector->interlace_allowed = true;
 		connector->doublescan_allowed = true;
 	} else
@@ -531,8 +531,8 @@ nouveau_connector_set_encoder(struct drm_connector *connector,
 		connector->interlace_allowed = false;
 	} else {
 		connector->doublescan_allowed = true;
-		if (drm->device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
-		    (drm->device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
+		if (drm->client.device.info.family == NV_DEVICE_INFO_V0_KELVIN ||
+		    (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
 		     (dev->pdev->device & 0x0ff0) != 0x0100 &&
 		     (dev->pdev->device & 0x0ff0) != 0x0150))
 			/* HW is broken */
@@ -984,17 +984,17 @@ get_tmds_link_bandwidth(struct drm_connector *connector, bool hdmi)
 		/* Note: these limits are conservative, some Fermi's
 		 * can do 297 MHz. Unclear how this can be determined.
 		 */
-		if (drm->device.info.family >= NV_DEVICE_INFO_V0_KEPLER)
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_KEPLER)
 			return 297000;
-		if (drm->device.info.family >= NV_DEVICE_INFO_V0_FERMI)
+		if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_FERMI)
 			return 225000;
 	}
 	if (dcb->location != DCB_LOC_ON_CHIP ||
-	    drm->device.info.chipset >= 0x46)
+	    drm->client.device.info.chipset >= 0x46)
 		return 165000;
-	else if (drm->device.info.chipset >= 0x40)
+	else if (drm->client.device.info.chipset >= 0x40)
 		return 155000;
-	else if (drm->device.info.chipset >= 0x18)
+	else if (drm->client.device.info.chipset >= 0x18)
 		return 135000;
 	else
 		return 112000;
@@ -1041,7 +1041,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
 		clock = clock * (connector->display_info.bpc * 3) / 10;
 		break;
 	default:
-		BUG_ON(1);
+		BUG();
 		return MODE_BAD;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
@@ -259,8 +259,9 @@ nouveau_debugfs_init(struct nouveau_drm *drm)
 	if (!drm->debugfs)
 		return -ENOMEM;
 
-	ret = nvif_object_init(&drm->device.object, 0, NVIF_CLASS_CONTROL,
-			       NULL, 0, &drm->debugfs->ctrl);
+	ret = nvif_object_init(&drm->client.device.object, 0,
+			       NVIF_CLASS_CONTROL, NULL, 0,
+			       &drm->debugfs->ctrl);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -495,7 +495,7 @@ int
 nouveau_display_create(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nouveau_display *disp;
 	int ret;
 
@@ -512,15 +512,15 @@ nouveau_display_create(struct drm_device *dev)
 
 	dev->mode_config.min_width = 0;
 	dev->mode_config.min_height = 0;
-	if (drm->device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_CELSIUS) {
 		dev->mode_config.max_width = 2048;
 		dev->mode_config.max_height = 2048;
 	} else
-	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 		dev->mode_config.max_width = 4096;
 		dev->mode_config.max_height = 4096;
 	} else
-	if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI) {
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI) {
 		dev->mode_config.max_width = 8192;
 		dev->mode_config.max_height = 8192;
 	} else {
@@ -531,7 +531,7 @@ nouveau_display_create(struct drm_device *dev)
 	dev->mode_config.preferred_depth = 24;
 	dev->mode_config.prefer_shadow = 1;
 
-	if (drm->device.info.chipset < 0x11)
+	if (drm->client.device.info.chipset < 0x11)
 		dev->mode_config.async_page_flip = false;
 	else
 		dev->mode_config.async_page_flip = true;
@@ -558,7 +558,7 @@ nouveau_display_create(struct drm_device *dev)
 		int i;
 
 		for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
-			ret = nvif_object_init(&drm->device.object, 0,
+			ret = nvif_object_init(&drm->client.device.object, 0,
 					       oclass[i], NULL, 0, &disp->disp);
 		}
 
@@ -1057,6 +1057,7 @@ int
 nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 			    struct drm_mode_create_dumb *args)
 {
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *bo;
 	uint32_t domain;
 	int ret;
@@ -1066,12 +1067,12 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
 	args->size = roundup(args->size, PAGE_SIZE);
 
 	/* Use VRAM if there is any ; otherwise fallback to system memory */
-	if (nouveau_drm(dev)->device.info.ram_size != 0)
+	if (nouveau_drm(dev)->client.device.info.ram_size != 0)
 		domain = NOUVEAU_GEM_DOMAIN_VRAM;
 	else
 		domain = NOUVEAU_GEM_DOMAIN_GART;
 
-	ret = nouveau_gem_new(dev, args->size, 0, domain, 0, 0, &bo);
+	ret = nouveau_gem_new(cli, args->size, 0, domain, 0, 0, &bo);
 	if (ret)
 		return ret;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -37,6 +37,8 @@
 #include <core/pci.h>
 #include <core/tegra.h>
 
+#include <nvif/driver.h>
+
 #include <nvif/class.h>
 #include <nvif/cl0002.h>
 #include <nvif/cla06f.h>
@@ -109,35 +111,53 @@ nouveau_name(struct drm_device *dev)
 	return nouveau_platform_name(dev->platformdev);
 }
 
-static int
-nouveau_cli_create(struct drm_device *dev, const char *sname,
-		   int size, void **pcli)
-{
-	struct nouveau_cli *cli = *pcli = kzalloc(size, GFP_KERNEL);
-	int ret;
-	if (cli) {
-		snprintf(cli->name, sizeof(cli->name), "%s", sname);
-		cli->dev = dev;
-
-		ret = nvif_client_init(NULL, cli->name, nouveau_name(dev),
-				       nouveau_config, nouveau_debug,
-				       &cli->base);
-		if (ret == 0) {
-			mutex_init(&cli->mutex);
-			usif_client_init(cli);
-		}
-		return ret;
-	}
-	return -ENOMEM;
-}
-
 static void
-nouveau_cli_destroy(struct nouveau_cli *cli)
+nouveau_cli_fini(struct nouveau_cli *cli)
 {
 	nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
-	nvif_client_fini(&cli->base);
 	usif_client_fini(cli);
-	kfree(cli);
+	nvif_device_fini(&cli->device);
+	nvif_client_fini(&cli->base);
 }
 
+static int
+nouveau_cli_init(struct nouveau_drm *drm, const char *sname,
+		 struct nouveau_cli *cli)
+{
+	u64 device = nouveau_name(drm->dev);
+	int ret;
+
+	snprintf(cli->name, sizeof(cli->name), "%s", sname);
+	cli->dev = drm->dev;
+	mutex_init(&cli->mutex);
+	usif_client_init(cli);
+
+	if (cli == &drm->client) {
+		ret = nvif_driver_init(NULL, nouveau_config, nouveau_debug,
+				       cli->name, device, &cli->base);
+	} else {
+		ret = nvif_client_init(&drm->client.base, cli->name, device,
+				       &cli->base);
+	}
+	if (ret) {
+		NV_ERROR(drm, "Client allocation failed: %d\n", ret);
+		goto done;
+	}
+
+	ret = nvif_device_init(&cli->base.object, 0, NV_DEVICE,
+			       &(struct nv_device_v0) {
+					.device = ~0,
+			       }, sizeof(struct nv_device_v0),
+			       &cli->device);
+	if (ret) {
+		NV_ERROR(drm, "Device allocation failed: %d\n", ret);
+		goto done;
+	}
+
+done:
+	if (ret)
+		nouveau_cli_fini(cli);
+	return ret;
+}
+
 static void
@@ -161,7 +181,7 @@ nouveau_accel_fini(struct nouveau_drm *drm)
 static void
 nouveau_accel_init(struct nouveau_drm *drm)
 {
-	struct nvif_device *device = &drm->device;
+	struct nvif_device *device = &drm->client.device;
 	struct nvif_sclass *sclass;
 	u32 arg0, arg1;
 	int ret, i, n;
@@ -215,7 +235,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	}
 
 	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
-		ret = nouveau_channel_new(drm, &drm->device,
+		ret = nouveau_channel_new(drm, &drm->client.device,
 					  NVA06F_V0_ENGINE_CE0 |
 					  NVA06F_V0_ENGINE_CE1,
 					  0, &drm->cechan);
@@ -228,7 +248,7 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	if (device->info.chipset >= 0xa3 &&
 	    device->info.chipset != 0xaa &&
 	    device->info.chipset != 0xac) {
-		ret = nouveau_channel_new(drm, &drm->device,
+		ret = nouveau_channel_new(drm, &drm->client.device,
 					  NvDmaFB, NvDmaTT, &drm->cechan);
 		if (ret)
 			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);
@@ -240,7 +260,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
 		arg1 = NvDmaTT;
 	}
 
-	ret = nouveau_channel_new(drm, &drm->device, arg0, arg1, &drm->channel);
+	ret = nouveau_channel_new(drm, &drm->client.device,
+				  arg0, arg1, &drm->channel);
 	if (ret) {
 		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
 		nouveau_accel_fini(drm);
@@ -280,8 +301,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
 	}
 
 	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
-		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
-				      NULL, &drm->notify);
+		ret = nvkm_gpuobj_new(nvxx_device(&drm->client.device), 32, 0,
+				      false, NULL, &drm->notify);
 		if (ret) {
 			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
 			nouveau_accel_fini(drm);
@@ -407,12 +428,17 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 	struct nouveau_drm *drm;
 	int ret;
 
-	ret = nouveau_cli_create(dev, "DRM", sizeof(*drm), (void **)&drm);
+	if (!(drm = kzalloc(sizeof(*drm), GFP_KERNEL)))
+		return -ENOMEM;
+	dev->dev_private = drm;
+	drm->dev = dev;
+
+	ret = nouveau_cli_init(drm, "DRM", &drm->client);
 	if (ret)
 		return ret;
 
-	dev->dev_private = drm;
-	drm->dev = dev;
+	dev->irq_enabled = true;
 
 	nvxx_client(&drm->client.base)->debug =
 		nvkm_dbgopt(nouveau_debug, "DRM");
@@ -421,33 +447,24 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
 
 	nouveau_get_hdmi_dev(drm);
 
-	ret = nvif_device_init(&drm->client.base.object, 0, NV_DEVICE,
-			       &(struct nv_device_v0) {
-					.device = ~0,
-			       }, sizeof(struct nv_device_v0),
-			       &drm->device);
-	if (ret)
-		goto fail_device;
-
-	dev->irq_enabled = true;
-
 	/* workaround an odd issue on nvc1 by disabling the device's
 	 * nosnoop capability. hopefully won't cause issues until a
 	 * better fix is found - assuming there is one...
 	 */
-	if (drm->device.info.chipset == 0xc1)
-		nvif_mask(&drm->device.object, 0x00088080, 0x00000800, 0x00000000);
+	if (drm->client.device.info.chipset == 0xc1)
+		nvif_mask(&drm->client.device.object, 0x00088080, 0x00000800, 0x00000000);
 
 	nouveau_vga_init(drm);
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		if (!nvxx_device(&drm->device)->mmu) {
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		if (!nvxx_device(&drm->client.device)->mmu) {
 			ret = -ENOSYS;
 			goto fail_device;
 		}
 
-		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, NULL, &drm->client.vm);
+		ret = nvkm_vm_new(nvxx_device(&drm->client.device),
+				  0, (1ULL << 40), 0x1000, NULL,
+				  &drm->client.vm);
 		if (ret)
 			goto fail_device;
 
@@ -497,8 +514,8 @@ fail_bios:
 fail_ttm:
 	nouveau_vga_fini(drm);
 fail_device:
-	nvif_device_fini(&drm->device);
-	nouveau_cli_destroy(&drm->client);
+	nouveau_cli_fini(&drm->client);
+	kfree(drm);
 	return ret;
 }
 
@@ -527,10 +544,10 @@ nouveau_drm_unload(struct drm_device *dev)
 	nouveau_ttm_fini(drm);
 	nouveau_vga_fini(drm);
 
-	nvif_device_fini(&drm->device);
 	if (drm->hdmi_device)
 		pci_dev_put(drm->hdmi_device);
-	nouveau_cli_destroy(&drm->client);
+	nouveau_cli_fini(&drm->client);
+	kfree(drm);
 }
 
 void
@@ -560,7 +577,6 @@ static int
 nouveau_do_suspend(struct drm_device *dev, bool runtime)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_cli *cli;
 	int ret;
 
 	nouveau_led_suspend(dev);
@@ -590,7 +606,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
 			goto fail_display;
 	}
 
-	NV_INFO(drm, "suspending client object trees...\n");
+	NV_INFO(drm, "suspending fence...\n");
 	if (drm->fence && nouveau_fence(drm)->suspend) {
 		if (!nouveau_fence(drm)->suspend(drm)) {
 			ret = -ENOMEM;
@@ -598,13 +614,7 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
 		}
 	}
 
-	list_for_each_entry(cli, &drm->clients, head) {
-		ret = nvif_client_suspend(&cli->base);
-		if (ret)
-			goto fail_client;
-	}
-
-	NV_INFO(drm, "suspending kernel object tree...\n");
+	NV_INFO(drm, "suspending object tree...\n");
 	ret = nvif_client_suspend(&drm->client.base);
 	if (ret)
 		goto fail_client;
@@ -612,10 +622,6 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime)
 	return 0;
 
 fail_client:
-	list_for_each_entry_continue_reverse(cli, &drm->clients, head) {
-		nvif_client_resume(&cli->base);
-	}
-
 	if (drm->fence && nouveau_fence(drm)->resume)
 		nouveau_fence(drm)->resume(drm);
 
@@ -631,19 +637,14 @@ static int
 nouveau_do_resume(struct drm_device *dev, bool runtime)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nouveau_cli *cli;
 
-	NV_INFO(drm, "resuming kernel object tree...\n");
+	NV_INFO(drm, "resuming object tree...\n");
 	nvif_client_resume(&drm->client.base);
 
-	NV_INFO(drm, "resuming client object trees...\n");
+	NV_INFO(drm, "resuming fence...\n");
 	if (drm->fence && nouveau_fence(drm)->resume)
 		nouveau_fence(drm)->resume(drm);
 
-	list_for_each_entry(cli, &drm->clients, head) {
-		nvif_client_resume(&cli->base);
-	}
-
 	nouveau_run_vbios_init(dev);
 
 	if (dev->mode_config.num_crtc) {
@@ -758,7 +759,7 @@ nouveau_pmops_runtime_resume(struct device *dev)
 {
 	struct pci_dev *pdev = to_pci_dev(dev);
 	struct drm_device *drm_dev = pci_get_drvdata(pdev);
-	struct nvif_device *device = &nouveau_drm(drm_dev)->device;
+	struct nvif_device *device = &nouveau_drm(drm_dev)->client.device;
 	int ret;
 
 	if (nouveau_runtime_pm == 0)
@@ -841,20 +842,20 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 	get_task_comm(tmpname, current);
 	snprintf(name, sizeof(name), "%s[%d]", tmpname, pid_nr(fpriv->pid));
 
-	ret = nouveau_cli_create(dev, name, sizeof(*cli), (void **)&cli);
+	if (!(cli = kzalloc(sizeof(*cli), GFP_KERNEL)))
+		return ret;
+
+	ret = nouveau_cli_init(drm, name, cli);
 	if (ret)
-		goto out_suspend;
+		goto done;
 
 	cli->base.super = false;
 
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
-		ret = nvkm_vm_new(nvxx_device(&drm->device), 0, (1ULL << 40),
-				  0x1000, NULL, &cli->vm);
-		if (ret) {
-			nouveau_cli_destroy(cli);
-			goto out_suspend;
-		}
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) {
+		ret = nvkm_vm_new(nvxx_device(&drm->client.device), 0,
+				  (1ULL << 40), 0x1000, NULL, &cli->vm);
+		if (ret)
+			goto done;
 
 		nvxx_client(&cli->base)->vm = cli->vm;
 	}
@@ -865,10 +866,14 @@ nouveau_drm_open(struct drm_device *dev, struct drm_file *fpriv)
 	list_add(&cli->head, &drm->clients);
 	mutex_unlock(&drm->client.mutex);
 
-out_suspend:
+done:
+	if (ret && cli) {
+		nouveau_cli_fini(cli);
+		kfree(cli);
+	}
+
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 
 	return ret;
 }
 
@@ -895,7 +900,8 @@ static void
 nouveau_drm_postclose(struct drm_device *dev, struct drm_file *fpriv)
 {
 	struct nouveau_cli *cli = nouveau_cli(fpriv);
-	nouveau_cli_destroy(cli);
+	nouveau_cli_fini(cli);
+	kfree(cli);
 	pm_runtime_mark_last_busy(dev->dev);
 	pm_runtime_put_autosuspend(dev->dev);
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -86,14 +86,17 @@ enum nouveau_drm_handle {
 
 struct nouveau_cli {
 	struct nvif_client base;
+	struct drm_device *dev;
+	struct mutex mutex;
+
+	struct nvif_device device;
+
 	struct nvkm_vm *vm; /*XXX*/
 	struct list_head head;
-	struct mutex mutex;
 	void *abi16;
 	struct list_head objects;
 	struct list_head notifys;
 	char name[32];
-	struct drm_device *dev;
 };
 
 static inline struct nouveau_cli *
@@ -111,7 +114,6 @@ struct nouveau_drm {
 	struct nouveau_cli client;
 	struct drm_device *dev;
 
-	struct nvif_device device;
 	struct list_head clients;
 
 	struct {
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -60,7 +60,7 @@ nouveau_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 {
 	struct nouveau_fbdev *fbcon = info->par;
 	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_device *device = &drm->client.device;
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -92,7 +92,7 @@ nouveau_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *image)
 {
 	struct nouveau_fbdev *fbcon = info->par;
 	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_device *device = &drm->client.device;
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -124,7 +124,7 @@ nouveau_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 {
 	struct nouveau_fbdev *fbcon = info->par;
 	struct nouveau_drm *drm = nouveau_drm(fbcon->helper.dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_device *device = &drm->client.device;
 	int ret;
 
 	if (info->state != FBINFO_STATE_RUNNING)
@@ -266,10 +266,10 @@ nouveau_fbcon_accel_init(struct drm_device *dev)
 	struct fb_info *info = fbcon->helper.fbdev;
 	int ret;
 
-	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
 		ret = nv04_fbcon_accel_init(info);
 	else
-	if (drm->device.info.family < NV_DEVICE_INFO_V0_FERMI)
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_FERMI)
 		ret = nv50_fbcon_accel_init(info);
 	else
 		ret = nvc0_fbcon_accel_init(info);
@@ -324,7 +324,7 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 	    container_of(helper, struct nouveau_fbdev, helper);
 	struct drm_device *dev = fbcon->helper.dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_device *device = &drm->client.device;
 	struct fb_info *info;
 	struct nouveau_framebuffer *fb;
 	struct nouveau_channel *chan;
@@ -341,8 +341,9 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
 	mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
 							  sizes->surface_depth);
 
-	ret = nouveau_gem_new(dev, mode_cmd.pitches[0] * mode_cmd.height,
-			      0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0x0000, &nvbo);
+	ret = nouveau_gem_new(&drm->client, mode_cmd.pitches[0] *
+			      mode_cmd.height, 0, NOUVEAU_GEM_DOMAIN_VRAM,
+			      0, 0x0000, &nvbo);
 	if (ret) {
 		NV_ERROR(drm, "failed to allocate framebuffer\n");
 		goto out;
@@ -515,10 +516,10 @@ nouveau_fbcon_init(struct drm_device *dev)
 	if (ret)
 		goto fini;
 
-	if (drm->device.info.ram_size <= 32 * 1024 * 1024)
+	if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
 		preferred_bpp = 8;
 	else
-	if (drm->device.info.ram_size <= 64 * 1024 * 1024)
+	if (drm->client.device.info.ram_size <= 64 * 1024 * 1024)
 		preferred_bpp = 16;
 	else
 		preferred_bpp = 32;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -190,7 +190,7 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
 		return;
 
 	ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
-			       false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
+			       false, NV826E_V0_NTFY_NON_STALL_INTERRUPT,
 			       &(struct nvif_notify_uevent_req) { },
 			       sizeof(struct nvif_notify_uevent_req),
 			       sizeof(struct nvif_notify_uevent_rep),
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -175,11 +175,11 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 }
 
 int
-nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
+nouveau_gem_new(struct nouveau_cli *cli, u64 size, int align, uint32_t domain,
 		uint32_t tile_mode, uint32_t tile_flags,
 		struct nouveau_bo **pnvbo)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_drm *drm = nouveau_drm(cli->dev);
 	struct nouveau_bo *nvbo;
 	u32 flags = 0;
 	int ret;
@@ -194,7 +194,7 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 	if (domain & NOUVEAU_GEM_DOMAIN_COHERENT)
 		flags |= TTM_PL_FLAG_UNCACHED;
 
-	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
+	ret = nouveau_bo_new(cli, size, align, flags, tile_mode,
 			     tile_flags, NULL, NULL, pnvbo);
 	if (ret)
 		return ret;
@@ -206,12 +206,12 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 	 */
 	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
 			      NOUVEAU_GEM_DOMAIN_GART;
-	if (drm->device.info.family >= NV_DEVICE_INFO_V0_TESLA)
+	if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA)
 		nvbo->valid_domains &= domain;
 
 	/* Initialize the embedded gem-object. We return a single gem-reference
 	 * to the caller, instead of a normal nouveau_bo ttm reference. */
-	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	ret = drm_gem_object_init(drm->dev, &nvbo->gem, nvbo->bo.mem.size);
 	if (ret) {
 		nouveau_bo_ref(NULL, pnvbo);
 		return -ENOMEM;
@@ -257,7 +257,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_cli *cli = nouveau_cli(file_priv);
-	struct nvkm_fb *fb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
@@ -267,7 +267,7 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	ret = nouveau_gem_new(dev, req->info.size, req->align,
+	ret = nouveau_gem_new(cli, req->info.size, req->align,
 			      req->info.domain, req->info.tile_mode,
 			      req->info.tile_flags, &nvbo);
 	if (ret)
@@ -496,7 +496,7 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
 			return ret;
 		}
 
-		if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA) {
+		if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) {
 			if (nvbo->bo.offset == b->presumed.offset &&
 			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
 			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
@@ -767,7 +767,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 				      push[i].length);
 		}
 	} else
-	if (drm->device.info.chipset >= 0x25) {
+	if (drm->client.device.info.chipset >= 0x25) {
 		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
 			NV_PRINTK(err, cli, "cal_space: %d\n", ret);
@@ -840,7 +840,7 @@ out_next:
 		req->suffix0 = 0x00000000;
 		req->suffix1 = 0x00000000;
 	} else
-	if (drm->device.info.chipset >= 0x25) {
+	if (drm->client.device.info.chipset >= 0x25) {
 		req->suffix0 = 0x00020000;
 		req->suffix1 = 0x00000000;
 	} else {
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.h b/drivers/gpu/drm/nouveau/nouveau_gem.h
@@ -16,7 +16,7 @@ nouveau_gem_object(struct drm_gem_object *gem)
 }
 
 /* nouveau_gem.c */
-extern int nouveau_gem_new(struct drm_device *, int size, int align,
+extern int nouveau_gem_new(struct nouveau_cli *, u64 size, int align,
 			   uint32_t domain, uint32_t tile_mode,
 			   uint32_t tile_flags, struct nouveau_bo **);
 extern void nouveau_gem_object_del(struct drm_gem_object *);
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_hwmon.c b/drivers/gpu/drm/nouveau/nouveau_hwmon.c
@@ -43,7 +43,7 @@ nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	int temp = nvkm_therm_temp_get(therm);
 
 	if (temp < 0)
@@ -69,7 +69,7 @@ nouveau_hwmon_temp1_auto_point1_temp(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST) * 1000);
@@ -81,7 +81,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -102,7 +102,7 @@ nouveau_hwmon_temp1_auto_point1_temp_hyst(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			therm->attr_get(therm, NVKM_THERM_ATTR_THRS_FAN_BOOST_HYST) * 1000);
@@ -114,7 +114,7 @@ nouveau_hwmon_set_temp1_auto_point1_temp_hyst(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -134,7 +134,7 @@ nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK) * 1000);
@@ -145,7 +145,7 @@ nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -165,7 +165,7 @@ nouveau_hwmon_max_temp_hyst(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			therm->attr_get(therm, NVKM_THERM_ATTR_THRS_DOWN_CLK_HYST) * 1000);
@@ -176,7 +176,7 @@ nouveau_hwmon_set_max_temp_hyst(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -197,7 +197,7 @@ nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL) * 1000);
@@ -209,7 +209,7 @@ nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -230,7 +230,7 @@ nouveau_hwmon_critical_temp_hyst(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			therm->attr_get(therm, NVKM_THERM_ATTR_THRS_CRITICAL_HYST) * 1000);
@@ -243,7 +243,7 @@ nouveau_hwmon_set_critical_temp_hyst(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -263,7 +263,7 @@ nouveau_hwmon_emergency_temp(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN) * 1000);
@@ -275,7 +275,7 @@ nouveau_hwmon_set_emergency_temp(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -296,7 +296,7 @@ nouveau_hwmon_emergency_temp_hyst(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n",
 			therm->attr_get(therm, NVKM_THERM_ATTR_THRS_SHUTDOWN_HYST) * 1000);
@@ -309,7 +309,7 @@ nouveau_hwmon_set_emergency_temp_hyst(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 
 	if (kstrtol(buf, 10, &value) == -EINVAL)
@@ -349,7 +349,7 @@ nouveau_hwmon_show_fan1_input(struct device *d, struct device_attribute *attr,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 
 	return snprintf(buf, PAGE_SIZE, "%d\n", nvkm_therm_fan_sense(therm));
 }
@@ -362,7 +362,7 @@ nouveau_hwmon_get_pwm1_enable(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	int ret;
 
 	ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MODE);
@@ -378,7 +378,7 @@ nouveau_hwmon_set_pwm1_enable(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 	int ret;
 
@@ -401,7 +401,7 @@ nouveau_hwmon_get_pwm1(struct device *d, struct device_attribute *a, char *buf)
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	int ret;
 
 	ret = therm->fan_get(therm);
@@ -417,7 +417,7 @@ nouveau_hwmon_set_pwm1(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	int ret = -ENODEV;
 	long value;
 
@@ -441,7 +441,7 @@ nouveau_hwmon_get_pwm1_min(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	int ret;
 
 	ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MIN_DUTY);
@@ -457,7 +457,7 @@ nouveau_hwmon_set_pwm1_min(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 	int ret;
 
@@ -481,7 +481,7 @@ nouveau_hwmon_get_pwm1_max(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	int ret;
 
 	ret = therm->attr_get(therm, NVKM_THERM_ATTR_FAN_MAX_DUTY);
@@ -497,7 +497,7 @@ nouveau_hwmon_set_pwm1_max(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
 	long value;
 	int ret;
 
@@ -521,7 +521,7 @@ nouveau_hwmon_get_in0_input(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_volt *volt = nvxx_volt(&drm->device);
+	struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
 	int ret;
 
 	ret = nvkm_volt_get(volt);
@@ -540,7 +540,7 @@ nouveau_hwmon_get_in0_min(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_volt *volt = nvxx_volt(&drm->device);
+	struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
 
 	if (!volt || !volt->min_uv)
 		return -ENODEV;
@@ -557,7 +557,7 @@ nouveau_hwmon_get_in0_max(struct device *d,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_volt *volt = nvxx_volt(&drm->device);
+	struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
 
 	if (!volt || !volt->max_uv)
 		return -ENODEV;
@@ -584,7 +584,7 @@ nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a,
 {
 	struct drm_device *dev = dev_get_drvdata(d);
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
+	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
 	int result = nvkm_iccsense_read_all(iccsense);
 
 	if (result < 0)
@@ -596,6 +596,32 @@ nouveau_hwmon_get_power1_input(struct device *d, struct device_attribute *a,
 static SENSOR_DEVICE_ATTR(power1_input, S_IRUGO,
 			  nouveau_hwmon_get_power1_input, NULL, 0);
 
+static ssize_t
+nouveau_hwmon_get_power1_max(struct device *d, struct device_attribute *a,
+			     char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+	return sprintf(buf, "%i\n", iccsense->power_w_max);
+}
+
+static SENSOR_DEVICE_ATTR(power1_max, S_IRUGO,
+			  nouveau_hwmon_get_power1_max, NULL, 0);
+
+static ssize_t
+nouveau_hwmon_get_power1_crit(struct device *d, struct device_attribute *a,
+			      char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
+	return sprintf(buf, "%i\n", iccsense->power_w_crit);
+}
+
+static SENSOR_DEVICE_ATTR(power1_crit, S_IRUGO,
+			  nouveau_hwmon_get_power1_crit, NULL, 0);
+
 static struct attribute *hwmon_default_attributes[] = {
 	&sensor_dev_attr_name.dev_attr.attr,
 	&sensor_dev_attr_update_rate.dev_attr.attr,
@@ -639,6 +665,12 @@ static struct attribute *hwmon_power_attributes[] = {
 	NULL
 };
 
+static struct attribute *hwmon_power_caps_attributes[] = {
+	&sensor_dev_attr_power1_max.dev_attr.attr,
+	&sensor_dev_attr_power1_crit.dev_attr.attr,
+	NULL
+};
+
 static const struct attribute_group hwmon_default_attrgroup = {
 	.attrs = hwmon_default_attributes,
 };
@@ -657,6 +689,9 @@ static const struct attribute_group hwmon_in0_attrgroup = {
 static const struct attribute_group hwmon_power_attrgroup = {
 	.attrs = hwmon_power_attributes,
 };
+static const struct attribute_group hwmon_power_caps_attrgroup = {
+	.attrs = hwmon_power_caps_attributes,
+};
 #endif
 
 int
@@ -664,9 +699,9 @@ nouveau_hwmon_init(struct drm_device *dev)
 {
 #if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_therm *therm = nvxx_therm(&drm->device);
-	struct nvkm_volt *volt = nvxx_volt(&drm->device);
-	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->device);
+	struct nvkm_therm *therm = nvxx_therm(&drm->client.device);
+	struct nvkm_volt *volt = nvxx_volt(&drm->client.device);
+	struct nvkm_iccsense *iccsense = nvxx_iccsense(&drm->client.device);
 	struct nouveau_hwmon *hwmon;
 	struct device *hwmon_dev;
 	int ret = 0;
@@ -728,8 +763,16 @@ nouveau_hwmon_init(struct drm_device *dev)
 	if (iccsense && iccsense->data_valid && !list_empty(&iccsense->rails)) {
 		ret = sysfs_create_group(&hwmon_dev->kobj,
 					 &hwmon_power_attrgroup);
+
 		if (ret)
 			goto error;
+
+		if (iccsense->power_w_max && iccsense->power_w_crit) {
+			ret = sysfs_create_group(&hwmon_dev->kobj,
+						 &hwmon_power_caps_attrgroup);
+			if (ret)
+				goto error;
+		}
 	}
 
 	hwmon->hwmon = hwmon_dev;
@@ -759,6 +802,7 @@ nouveau_hwmon_fini(struct drm_device *dev)
 		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_fan_rpm_attrgroup);
 		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_in0_attrgroup);
 		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_attrgroup);
+		sysfs_remove_group(&hwmon->hwmon->kobj, &hwmon_power_caps_attrgroup);
 
 		hwmon_device_unregister(hwmon->hwmon);
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_led.c b/drivers/gpu/drm/nouveau/nouveau_led.c
@@ -38,7 +38,7 @@ nouveau_led_get_brightness(struct led_classdev *led)
 {
 	struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
 	struct nouveau_drm *drm = nouveau_drm(drm_dev);
-	struct nvif_object *device = &drm->device.object;
+	struct nvif_object *device = &drm->client.device.object;
 	u32 div, duty;
 
 	div = nvif_rd32(device, 0x61c880) & 0x00ffffff;
@@ -55,7 +55,7 @@ nouveau_led_set_brightness(struct led_classdev *led, enum led_brightness value)
 {
 	struct drm_device *drm_dev = container_of(led, struct nouveau_led, led)->dev;
 	struct nouveau_drm *drm = nouveau_drm(drm_dev);
-	struct nvif_object *device = &drm->device.object;
+	struct nvif_object *device = &drm->client.device.object;
 
 	u32 input_clk = 27e6; /* PDISPLAY.SOR[1].PWM is connected to the crystal */
 	u32 freq = 100; /* this is what nvidia uses and it should be good-enough */
@@ -78,7 +78,7 @@ int
 nouveau_led_init(struct drm_device *dev)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvkm_gpio *gpio = nvxx_gpio(&drm->device);
+	struct nvkm_gpio *gpio = nvxx_gpio(&drm->client.device);
 	struct dcb_gpio_func logo_led;
 	int ret;
 
@@ -102,6 +102,7 @@ nouveau_led_init(struct drm_device *dev)
 	ret = led_classdev_register(dev->dev, &drm->led->led);
 	if (ret) {
 		kfree(drm->led);
+		drm->led = NULL;
 		return ret;
 	}
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_nvif.c b/drivers/gpu/drm/nouveau/nouveau_nvif.c
@@ -60,20 +60,15 @@ nvkm_client_ioctl(void *priv, bool super, void *data, u32 size, void **hack)
 static int
 nvkm_client_resume(void *priv)
 {
-	return nvkm_client_init(priv);
+	struct nvkm_client *client = priv;
+	return nvkm_object_init(&client->object);
 }
 
 static int
 nvkm_client_suspend(void *priv)
 {
-	return nvkm_client_fini(priv, true);
-}
-
-static void
-nvkm_client_driver_fini(void *priv)
-{
 	struct nvkm_client *client = priv;
-	nvkm_client_del(&client);
+	return nvkm_object_fini(&client->object, true);
 }
 
 static int
@@ -108,23 +103,14 @@ static int
 nvkm_client_driver_init(const char *name, u64 device, const char *cfg,
 			const char *dbg, void **ppriv)
 {
-	struct nvkm_client *client;
-	int ret;
-
-	ret = nvkm_client_new(name, device, cfg, dbg, &client);
-	*ppriv = client;
-	if (ret)
-		return ret;
-
-	client->ntfy = nvkm_client_ntfy;
-	return 0;
+	return nvkm_client_new(name, device, cfg, dbg, nvkm_client_ntfy,
+			       (struct nvkm_client **)ppriv);
 }
 
 const struct nvif_driver
 nvif_driver_nvkm = {
 	.name = "nvkm",
 	.init = nvkm_client_driver_init,
 	.fini = nvkm_client_driver_fini,
 	.suspend = nvkm_client_suspend,
 	.resume = nvkm_client_resume,
 	.ioctl = nvkm_client_ioctl,
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c
@@ -60,6 +60,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 							 struct dma_buf_attachment *attach,
 							 struct sg_table *sg)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
 	struct reservation_object *robj = attach->dmabuf->resv;
 	u32 flags = 0;
@@ -68,7 +69,7 @@ struct drm_gem_object *nouveau_gem_prime_import_sg_table(struct drm_device *dev,
 	flags = TTM_PL_FLAG_TT;
 
 	ww_mutex_lock(&robj->lock, NULL);
-	ret = nouveau_bo_new(dev, attach->dmabuf->size, 0, flags, 0, 0,
+	ret = nouveau_bo_new(&drm->client, attach->dmabuf->size, 0, flags, 0, 0,
 			     sg, robj, &nvbo);
 	ww_mutex_unlock(&robj->lock);
 	if (ret)
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -24,10 +24,10 @@ nouveau_sgdma_destroy(struct ttm_tt *ttm)
 }
 
 static int
-nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 
 	if (ttm->sg) {
 		node->sg = ttm->sg;
@@ -36,7 +36,7 @@ nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 		node->sg = NULL;
 		node->pages = nvbe->ttm.dma_address;
 	}
-	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 
 	nvkm_vm_map(&node->vma[0], node);
 	nvbe->node = node;
@@ -58,10 +58,10 @@ static struct ttm_backend_func nv04_sgdma_backend = {
 };
 
 static int
-nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
+nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *reg)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 
 	/* noop: bound in move_notify() */
 	if (ttm->sg) {
@@ -71,7 +71,7 @@ nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
 		node->sg = NULL;
 		node->pages = nvbe->ttm.dma_address;
 	}
-	node->size = (mem->num_pages << PAGE_SHIFT) >> 12;
+	node->size = (reg->num_pages << PAGE_SHIFT) >> 12;
 	return 0;
 }
 
@@ -100,7 +100,7 @@ nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
 	if (!nvbe)
 		return NULL;
 
-	if (drm->device.info.family < NV_DEVICE_INFO_V0_TESLA)
+	if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA)
 		nvbe->ttm.ttm.func = &nv04_sgdma_backend;
 	else
 		nvbe->ttm.ttm.func = &nv50_sgdma_backend;
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -36,7 +36,7 @@ static int
 nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_fb *fb = nvxx_fb(&drm->device);
+	struct nvkm_fb *fb = nvxx_fb(&drm->client.device);
 	man->priv = fb;
 	return 0;
 }
@@ -64,45 +64,45 @@ nvkm_mem_node_cleanup(struct nvkm_mem *node)
 
 static void
 nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
-	nvkm_mem_node_cleanup(mem->mm_node);
-	ram->func->put(ram, (struct nvkm_mem **)&mem->mm_node);
+	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
+	nvkm_mem_node_cleanup(reg->mm_node);
+	ram->func->put(ram, (struct nvkm_mem **)&reg->mm_node);
 }
 
 static int
 nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_ram *ram = nvxx_fb(&drm->device)->ram;
+	struct nvkm_ram *ram = nvxx_fb(&drm->client.device)->ram;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nvkm_mem *node;
 	u32 size_nc = 0;
 	int ret;
 
-	if (drm->device.info.ram_size == 0)
+	if (drm->client.device.info.ram_size == 0)
 		return -ENOMEM;
 
 	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
 		size_nc = 1 << nvbo->page_shift;
 
-	ret = ram->func->get(ram, mem->num_pages << PAGE_SHIFT,
-			     mem->page_alignment << PAGE_SHIFT, size_nc,
+	ret = ram->func->get(ram, reg->num_pages << PAGE_SHIFT,
+			     reg->page_alignment << PAGE_SHIFT, size_nc,
 			     (nvbo->tile_flags >> 8) & 0x3ff, &node);
 	if (ret) {
-		mem->mm_node = NULL;
+		reg->mm_node = NULL;
 		return (ret == -ENOSPC) ? 0 : ret;
 	}
 
 	node->page_shift = nvbo->page_shift;
 
-	mem->mm_node = node;
-	mem->start = node->offset >> PAGE_SHIFT;
+	reg->mm_node = node;
+	reg->start = node->offset >> PAGE_SHIFT;
 	return 0;
 }
 
@@ -127,18 +127,18 @@ nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
 
 static void
 nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
-	nvkm_mem_node_cleanup(mem->mm_node);
-	kfree(mem->mm_node);
-	mem->mm_node = NULL;
+	nvkm_mem_node_cleanup(reg->mm_node);
+	kfree(reg->mm_node);
+	reg->mm_node = NULL;
 }
 
 static int
 nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 const struct ttm_place *place,
-			 struct ttm_mem_reg *mem)
+			 struct ttm_mem_reg *reg)
 {
 	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
@@ -150,7 +150,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 
 	node->page_shift = 12;
 
-	switch (drm->device.info.family) {
+	switch (drm->client.device.info.family) {
 	case NV_DEVICE_INFO_V0_TNT:
 	case NV_DEVICE_INFO_V0_CELSIUS:
 	case NV_DEVICE_INFO_V0_KELVIN:
@@ -158,7 +158,7 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 	case NV_DEVICE_INFO_V0_CURIE:
 		break;
 	case NV_DEVICE_INFO_V0_TESLA:
-		if (drm->device.info.chipset != 0x50)
+		if (drm->client.device.info.chipset != 0x50)
 			node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
 		break;
 	case NV_DEVICE_INFO_V0_FERMI:
@@ -169,12 +169,12 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
 		break;
 	default:
 		NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
-			drm->device.info.family);
+			drm->client.device.info.family);
 		break;
 	}
 
-	mem->mm_node = node;
-	mem->start = 0;
+	reg->mm_node = node;
+	reg->start = 0;
 	return 0;
 }
 
@@ -197,7 +197,7 @@ static int
 nv04_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
 {
 	struct nouveau_drm *drm = nouveau_bdev(man->bdev);
-	struct nvkm_mmu *mmu = nvxx_mmu(&drm->device);
+	struct nvkm_mmu *mmu = nvxx_mmu(&drm->client.device);
 	struct nv04_mmu *priv = (void *)mmu;
 	struct nvkm_vm *vm = NULL;
 	nvkm_vm_ref(priv->vm, &vm, NULL);
@@ -215,20 +215,20 @@ nv04_gart_manager_fini(struct ttm_mem_type_manager *man)
 }
 
 static void
-nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *mem)
+nv04_gart_manager_del(struct ttm_mem_type_manager *man, struct ttm_mem_reg *reg)
 {
-	struct nvkm_mem *node = mem->mm_node;
+	struct nvkm_mem *node = reg->mm_node;
 	if (node->vma[0].node)
 		nvkm_vm_put(&node->vma[0]);
-	kfree(mem->mm_node);
-	mem->mm_node = NULL;
+	kfree(reg->mm_node);
+	reg->mm_node = NULL;
 }
 
 static int
 nv04_gart_manager_new(struct ttm_mem_type_manager *man,
		      struct ttm_buffer_object *bo,
		      const struct ttm_place *place,
-		      struct ttm_mem_reg *mem)
+		      struct ttm_mem_reg *reg)
 {
 	struct nvkm_mem *node;
 	int ret;
@@ -239,15 +239,15 @@ nv04_gart_manager_new(struct ttm_mem_type_manager *man,
 
 	node->page_shift = 12;
 
-	ret = nvkm_vm_get(man->priv, mem->num_pages << 12, node->page_shift,
+	ret = nvkm_vm_get(man->priv, reg->num_pages << 12, node->page_shift,
 			  NV_MEM_ACCESS_RW, &node->vma[0]);
 	if (ret) {
 		kfree(node);
 		return ret;
 	}
 
-	mem->mm_node = node;
-	mem->start = node->vma[0].offset >> PAGE_SHIFT;
+	reg->mm_node = node;
+	reg->start = node->vma[0].offset >> PAGE_SHIFT;
 	return 0;
 }
 
@@ -339,7 +339,7 @@ nouveau_ttm_global_release(struct nouveau_drm *drm)
 int
 nouveau_ttm_init(struct nouveau_drm *drm)
 {
-	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_device *device = nvxx_device(&drm->client.device);
 	struct nvkm_pci *pci = device->pci;
 	struct drm_device *dev = drm->dev;
 	u8 bits;
@@ -352,8 +352,8 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 		drm->agp.cma = pci->agp.cma;
 	}
 
-	bits = nvxx_mmu(&drm->device)->dma_bits;
-	if (nvxx_device(&drm->device)->func->pci) {
+	bits = nvxx_mmu(&drm->client.device)->dma_bits;
+	if (nvxx_device(&drm->client.device)->func->pci) {
 		if (drm->agp.bridge)
 			bits = 32;
 	} else if (device->func->tegra) {
@@ -396,7 +396,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 	}
 
 	/* VRAM init */
-	drm->gem.vram_available = drm->device.info.ram_user;
+	drm->gem.vram_available = drm->client.device.info.ram_user;
 
 	arch_io_reserve_memtype_wc(device->func->resource_addr(device, 1),
				   device->func->resource_size(device, 1));
@@ -413,7 +413,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 
 	/* GART init */
 	if (!drm->agp.bridge) {
-		drm->gem.gart_available = nvxx_mmu(&drm->device)->limit;
+		drm->gem.gart_available = nvxx_mmu(&drm->client.device)->limit;
 	} else {
 		drm->gem.gart_available = drm->agp.size;
 	}
@@ -433,7 +433,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
 void
 nouveau_ttm_fini(struct nouveau_drm *drm)
 {
-	struct nvkm_device *device = nvxx_device(&drm->device);
+	struct nvkm_device *device = nvxx_device(&drm->client.device);
 
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&drm->ttm.bdev, TTM_PL_TT);
 
@@ -103,7 +103,7 @@ usif_notify(const void *header, u32 length, const void *data, u32 size)
 		}
 		break;
 	default:
-		BUG_ON(1);
+		BUG();
 		break;
 	}

@@ -13,13 +13,13 @@ static unsigned int
 nouveau_vga_set_decode(void *priv, bool state)
 {
 	struct nouveau_drm *drm = nouveau_drm(priv);
-	struct nvif_object *device = &drm->device.object;
+	struct nvif_object *device = &drm->client.device.object;

-	if (drm->device.info.family == NV_DEVICE_INFO_V0_CURIE &&
-	    drm->device.info.chipset >= 0x4c)
+	if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CURIE &&
+	    drm->client.device.info.chipset >= 0x4c)
 		nvif_wr32(device, 0x088060, state);
 	else
-	if (drm->device.info.chipset >= 0x40)
+	if (drm->client.device.info.chipset >= 0x40)
 		nvif_wr32(device, 0x088054, state);
 	else
 		nvif_wr32(device, 0x001854, state);

@@ -136,7 +136,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
 	struct drm_device *dev = nfbdev->helper.dev;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_channel *chan = drm->channel;
-	struct nvif_device *device = &drm->device;
+	struct nvif_device *device = &drm->client.device;
 	int surface_fmt, pattern_fmt, rect_fmt;
 	int ret;

@@ -76,9 +76,9 @@ nv17_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-	u32 start = mem->start * PAGE_SIZE;
-	u32 limit = start + mem->size - 1;
+	struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+	u32 start = reg->start * PAGE_SIZE;
+	u32 limit = start + reg->size - 1;
 	int ret = 0;

 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -129,7 +129,7 @@ nv17_fence_create(struct nouveau_drm *drm)
 	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);

-	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);

@@ -447,18 +447,18 @@ nv50_dmac_ctxdma_new(struct nv50_dmac *dmac, struct nouveau_framebuffer *fb)
 	args.base.target = NV_DMA_V0_TARGET_VRAM;
 	args.base.access = NV_DMA_V0_ACCESS_RDWR;
 	args.base.start  = 0;
-	args.base.limit  = drm->device.info.ram_user - 1;
+	args.base.limit  = drm->client.device.info.ram_user - 1;

-	if (drm->device.info.chipset < 0x80) {
+	if (drm->client.device.info.chipset < 0x80) {
 		args.nv50.part = NV50_DMA_V0_PART_256;
 		argc += sizeof(args.nv50);
 	} else
-	if (drm->device.info.chipset < 0xc0) {
+	if (drm->client.device.info.chipset < 0xc0) {
 		args.nv50.part = NV50_DMA_V0_PART_256;
 		args.nv50.kind = kind;
 		argc += sizeof(args.nv50);
 	} else
-	if (drm->device.info.chipset < 0xd0) {
+	if (drm->client.device.info.chipset < 0xd0) {
 		args.gf100.kind = kind;
 		argc += sizeof(args.gf100);
 	} else {
@@ -848,7 +848,7 @@ nv50_wndw_atomic_check_acquire(struct nv50_wndw *wndw,
 	asyw->image.kind = (fb->nvbo->tile_flags & 0x0000ff00) >> 8;
 	if (asyw->image.kind) {
 		asyw->image.layout = 0;
-		if (drm->device.info.chipset >= 0xc0)
+		if (drm->client.device.info.chipset >= 0xc0)
 			asyw->image.block = fb->nvbo->tile_mode >> 4;
 		else
 			asyw->image.block = fb->nvbo->tile_mode;
@@ -1397,7 +1397,7 @@ nv50_base_ntfy_wait_begun(struct nv50_wndw *wndw, struct nv50_wndw_atom *asyw)
 {
 	struct nouveau_drm *drm = nouveau_drm(wndw->plane.dev);
 	struct nv50_disp *disp = nv50_disp(wndw->plane.dev);
-	if (nvif_msec(&drm->device, 2000ULL,
+	if (nvif_msec(&drm->client.device, 2000ULL,
 		u32 data = nouveau_bo_rd32(disp->sync, asyw->ntfy.offset / 4);
 		if ((data & 0xc0000000) == 0x40000000)
 			break;
@@ -1522,7 +1522,7 @@ nv50_base_new(struct nouveau_drm *drm, struct nv50_head *head,
 		return ret;
 	}

-	ret = nv50_base_create(&drm->device, disp->disp, base->id,
+	ret = nv50_base_create(&drm->client.device, disp->disp, base->id,
 			       disp->sync->bo.offset, &base->chan);
 	if (ret)
 		return ret;
@@ -2394,7 +2394,7 @@ static int
 nv50_head_create(struct drm_device *dev, int index)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
-	struct nvif_device *device = &drm->device;
+	struct nvif_device *device = &drm->client.device;
 	struct nv50_disp *disp = nv50_disp(dev);
 	struct nv50_head *head;
 	struct nv50_base *base;
@@ -2428,7 +2428,7 @@ nv50_head_create(struct drm_device *dev, int index)
 	drm_crtc_helper_add(crtc, &nv50_head_help);
 	drm_mode_crtc_set_gamma_size(crtc, 256);

-	ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(&drm->client, 8192, 0x100, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, NULL, &head->base.lut.nvbo);
 	if (!ret) {
 		ret = nouveau_bo_pin(head->base.lut.nvbo, TTM_PL_FLAG_VRAM, true);
@@ -2667,7 +2667,7 @@ static int
 nv50_dac_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nvkm_i2c_bus *bus;
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
@@ -3623,7 +3623,7 @@ nv50_sor_enable(struct drm_encoder *encoder)
 		nv50_audio_enable(encoder, mode);
 		break;
 	default:
-		BUG_ON(1);
+		BUG();
 		break;
 	}

@@ -3657,7 +3657,7 @@ nv50_sor_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct nouveau_connector *nv_connector = nouveau_connector(connector);
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nouveau_encoder *nv_encoder;
 	struct drm_encoder *encoder;
 	int type, ret;
@@ -3796,7 +3796,7 @@ nv50_pior_enable(struct drm_encoder *encoder)
 		proto = 0x0;
 		break;
 	default:
-		BUG_ON(1);
+		BUG();
 		break;
 	}

@@ -3842,7 +3842,7 @@ static int
 nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
 {
 	struct nouveau_drm *drm = nouveau_drm(connector->dev);
-	struct nvkm_i2c *i2c = nvxx_i2c(&drm->device);
+	struct nvkm_i2c *i2c = nvxx_i2c(&drm->client.device);
 	struct nvkm_i2c_bus *bus = NULL;
 	struct nvkm_i2c_aux *aux = NULL;
 	struct i2c_adapter *ddc;
@@ -3915,7 +3915,7 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 interlock)
 	evo_data(push, 0x00000000);
 	nouveau_bo_wr32(disp->sync, 0, 0x00000000);
 	evo_kick(push, core);
-	if (nvif_msec(&drm->device, 2000ULL,
+	if (nvif_msec(&drm->client.device, 2000ULL,
 		if (nouveau_bo_rd32(disp->sync, 0))
 			break;
 		usleep_range(1, 2);
@@ -4427,7 +4427,7 @@ module_param_named(atomic, nouveau_atomic, int, 0400);
 int
 nv50_display_create(struct drm_device *dev)
 {
-	struct nvif_device *device = &nouveau_drm(dev)->device;
+	struct nvif_device *device = &nouveau_drm(dev)->client.device;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct dcb_table *dcb = &drm->vbios.dcb;
 	struct drm_connector *connector, *tmp;
@@ -4451,7 +4451,7 @@ nv50_display_create(struct drm_device *dev)
 	dev->driver->driver_features |= DRIVER_ATOMIC;

 	/* small shared memory area we use for notifiers and semaphores */
-	ret = nouveau_bo_new(dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, NULL, &disp->sync);
 	if (!ret) {
 		ret = nouveau_bo_pin(disp->sync, TTM_PL_FLAG_VRAM, true);

@@ -37,9 +37,9 @@ nv50_fence_context_new(struct nouveau_channel *chan)
 {
 	struct nv10_fence_priv *priv = chan->drm->fence;
 	struct nv10_fence_chan *fctx;
-	struct ttm_mem_reg *mem = &priv->bo->bo.mem;
-	u32 start = mem->start * PAGE_SIZE;
-	u32 limit = start + mem->size - 1;
+	struct ttm_mem_reg *reg = &priv->bo->bo.mem;
+	u32 start = reg->start * PAGE_SIZE;
+	u32 limit = start + reg->size - 1;
 	int ret;

 	fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL);
@@ -82,7 +82,7 @@ nv50_fence_create(struct nouveau_drm *drm)
 	priv->base.context_base = dma_fence_context_alloc(priv->base.contexts);
 	spin_lock_init(&priv->lock);

-	ret = nouveau_bo_new(drm->dev, 4096, 0x1000, TTM_PL_FLAG_VRAM,
+	ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
 			     0, 0x0000, NULL, NULL, &priv->bo);
 	if (!ret) {
 		ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);

@@ -193,7 +193,7 @@ nv84_fence_destroy(struct nouveau_drm *drm)
 int
 nv84_fence_create(struct nouveau_drm *drm)
 {
-	struct nvkm_fifo *fifo = nvxx_fifo(&drm->device);
+	struct nvkm_fifo *fifo = nvxx_fifo(&drm->client.device);
 	struct nv84_fence_priv *priv;
 	u32 domain;
 	int ret;
@@ -213,14 +213,14 @@ nv84_fence_create(struct nouveau_drm *drm)
 	priv->base.uevent = true;

 	/* Use VRAM if there is any ; otherwise fallback to system memory */
-	domain = drm->device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
+	domain = drm->client.device.info.ram_size != 0 ? TTM_PL_FLAG_VRAM :
 			 /*
 			  * fences created in sysmem must be non-cached or we
 			  * will lose CPU/GPU coherency!
 			  */
 			 TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED;
-	ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0, domain, 0,
-			     0, NULL, NULL, &priv->bo);
+	ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
+			     domain, 0, 0, NULL, NULL, &priv->bo);
 	if (ret == 0) {
 		ret = nouveau_bo_pin(priv->bo, domain, false);
 		if (ret == 0) {
@@ -233,7 +233,7 @@ nv84_fence_create(struct nouveau_drm *drm)
 	}

 	if (ret == 0)
-		ret = nouveau_bo_new(drm->dev, 16 * priv->base.contexts, 0,
+		ret = nouveau_bo_new(&drm->client, 16 * priv->base.contexts, 0,
 				     TTM_PL_FLAG_TT | TTM_PL_FLAG_UNCACHED, 0,
 				     0, NULL, NULL, &priv->bo_gart);
 	if (ret == 0) {

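The domain selection above encodes a coherency rule worth calling out: a fence buffer may live in VRAM when the board has any, otherwise it must be uncached system memory so the CPU's and GPU's views of the fence values cannot diverge. A minimal restatement of the rule, with placeholder flag values rather than the real TTM ones:

    /* Sketch of the placement rule from the hunk above; the flag
     * values are placeholders, not TTM's. */
    enum { PL_VRAM = 1, PL_TT = 2, PL_UNCACHED = 4 };

    static unsigned int fence_domain(unsigned long vram_size)
    {
        /* sysmem fences must be non-cached to keep CPU/GPU coherency */
        return vram_size ? PL_VRAM : (PL_TT | PL_UNCACHED);
    }
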
@@ -1,4 +1,5 @@
 nvif-y := nvif/object.o
 nvif-y += nvif/client.o
 nvif-y += nvif/device.o
+nvif-y += nvif/driver.o
 nvif-y += nvif/notify.o

@@ -26,6 +26,9 @@
 #include <nvif/driver.h>
 #include <nvif/ioctl.h>

+#include <nvif/class.h>
+#include <nvif/if0000.h>
+
 int
 nvif_client_ioctl(struct nvif_client *client, void *data, u32 size)
 {
@@ -47,37 +50,29 @@ nvif_client_resume(struct nvif_client *client)
 void
 nvif_client_fini(struct nvif_client *client)
 {
+	nvif_object_fini(&client->object);
 	if (client->driver) {
-		client->driver->fini(client->object.priv);
+		if (client->driver->fini)
+			client->driver->fini(client->object.priv);
 		client->driver = NULL;
-		client->object.client = NULL;
-		nvif_object_fini(&client->object);
 	}
 }

-static const struct nvif_driver *
-nvif_drivers[] = {
-#ifdef __KERNEL__
-	&nvif_driver_nvkm,
-#else
-	&nvif_driver_drm,
-	&nvif_driver_lib,
-	&nvif_driver_null,
-#endif
-	NULL
-};
-
 int
-nvif_client_init(const char *driver, const char *name, u64 device,
-		 const char *cfg, const char *dbg, struct nvif_client *client)
+nvif_client_init(struct nvif_client *parent, const char *name, u64 device,
+		 struct nvif_client *client)
 {
+	struct nvif_client_v0 args = { .device = device };
 	struct {
 		struct nvif_ioctl_v0 ioctl;
 		struct nvif_ioctl_nop_v0 nop;
-	} args = {};
-	int ret, i;
+	} nop = {};
+	int ret;

-	ret = nvif_object_init(NULL, 0, 0, NULL, 0, &client->object);
+	strncpy(args.name, name, sizeof(args.name));
+	ret = nvif_object_init(parent != client ? &parent->object : NULL,
+			       0, NVIF_CLASS_CLIENT, &args, sizeof(args),
+			       &client->object);
 	if (ret)
 		return ret;
@@ -85,19 +80,11 @@ nvif_client_init(const char *driver, const char *name, u64 device,
 	client->object.handle = ~0;
 	client->route = NVIF_IOCTL_V0_ROUTE_NVIF;
 	client->super = true;
-
-	for (i = 0, ret = -EINVAL; (client->driver = nvif_drivers[i]); i++) {
-		if (!driver || !strcmp(client->driver->name, driver)) {
-			ret = client->driver->init(name, device, cfg, dbg,
-						   &client->object.priv);
-			if (!ret || driver)
-				break;
-		}
-	}
+	client->driver = parent->driver;

 	if (ret == 0) {
-		ret = nvif_client_ioctl(client, &args, sizeof(args));
-		client->version = args.nop.version;
+		ret = nvif_client_ioctl(client, &nop, sizeof(nop));
+		client->version = nop.nop.version;
 	}

 	if (ret)

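The nvif_client_init() rework above means only the root client probes a driver backend; every further client is created as a child object of an existing client and simply inherits parent->driver, with the nop ioctl then used to read back the interface version. A minimal sketch of that delegation, with hypothetical types rather than the nvif ones:

    /* Sketch: child clients borrow the parent's backend instead of
     * re-probing it.  Hypothetical types, for illustration only. */
    #include <stddef.h>

    struct backend { const char *name; };

    struct client {
        const struct backend *driver;
    };

    static int client_init(struct client *parent, struct client *client)
    {
        /* no driver probe here: the root client already did it */
        client->driver = parent ? parent->driver : NULL;
        return client->driver ? 0 : -1;
    }
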
drivers/gpu/drm/nouveau/nvif/driver.c (new file, 58 lines)
@@ -0,0 +1,58 @@
+/*
+ * Copyright 2016 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+#include <nvif/driver.h>
+#include <nvif/client.h>
+
+static const struct nvif_driver *
+nvif_driver[] = {
+#ifdef __KERNEL__
+	&nvif_driver_nvkm,
+#else
+	&nvif_driver_drm,
+	&nvif_driver_lib,
+	&nvif_driver_null,
+#endif
+	NULL
+};
+
+int
+nvif_driver_init(const char *drv, const char *cfg, const char *dbg,
+		 const char *name, u64 device, struct nvif_client *client)
+{
+	int ret = -EINVAL, i;
+
+	for (i = 0; (client->driver = nvif_driver[i]); i++) {
+		if (!drv || !strcmp(client->driver->name, drv)) {
+			ret = client->driver->init(name, device, cfg, dbg,
+						   &client->object.priv);
+			if (ret == 0)
+				break;
+			client->driver->fini(client->object.priv);
+		}
+	}
+
+	if (ret == 0)
+		ret = nvif_client_init(client, name, device, client);
+	return ret;
+}

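nvif_driver_init() above walks a NULL-terminated table of backends, takes the first whose init() succeeds (or only the one matching an explicitly requested name), and tears a backend back down when its init() fails. The same shape as a standalone program, with a made-up backend table:

    /* Sketch: name-or-first-working backend selection, as in
     * nvif_driver_init() above.  The table content is made up. */
    #include <stdio.h>
    #include <string.h>

    struct drv { const char *name; int (*init)(void); void (*fini)(void); };

    static int  null_init(void) { return 0; }
    static void null_fini(void) { }
    static const struct drv null_drv = { "null", null_init, null_fini };

    static const struct drv *drivers[] = { &null_drv, NULL };

    static const struct drv *driver_pick(const char *want)
    {
        int i;
        for (i = 0; drivers[i]; i++) {
            if (want && strcmp(drivers[i]->name, want))
                continue;
            if (drivers[i]->init() == 0)
                return drivers[i];  /* first working backend wins */
            drivers[i]->fini();     /* failed probe: clean up */
        }
        return NULL;
    }

    int main(void)
    {
        const struct drv *d = driver_pick(NULL);
        printf("selected: %s\n", d ? d->name : "(none)");
        return 0;
    }
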
@@ -1,3 +1,4 @@
 include $(src)/nvkm/core/Kbuild
+include $(src)/nvkm/falcon/Kbuild
 include $(src)/nvkm/subdev/Kbuild
 include $(src)/nvkm/engine/Kbuild

@@ -31,6 +31,43 @@
 #include <nvif/if0000.h>
 #include <nvif/unpack.h>

+static int
+nvkm_uclient_new(const struct nvkm_oclass *oclass, void *argv, u32 argc,
+		 struct nvkm_object **pobject)
+{
+	union {
+		struct nvif_client_v0 v0;
+	} *args = argv;
+	struct nvkm_client *client;
+	int ret = -ENOSYS;
+
+	if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))){
+		args->v0.name[sizeof(args->v0.name) - 1] = 0;
+		ret = nvkm_client_new(args->v0.name, args->v0.device, NULL,
+				      NULL, oclass->client->ntfy, &client);
+		if (ret)
+			return ret;
+	} else
+		return ret;
+
+	client->object.client = oclass->client;
+	client->object.handle = oclass->handle;
+	client->object.route  = oclass->route;
+	client->object.token  = oclass->token;
+	client->object.object = oclass->object;
+	client->debug = oclass->client->debug;
+	*pobject = &client->object;
+	return 0;
+}
+
+const struct nvkm_sclass
+nvkm_uclient_sclass = {
+	.oclass = NVIF_CLASS_CLIENT,
+	.minver = 0,
+	.maxver = 0,
+	.ctor = nvkm_uclient_new,
+};
+
 struct nvkm_client_notify {
 	struct nvkm_client *client;
 	struct nvkm_notify n;
@@ -138,17 +175,30 @@ nvkm_client_notify_new(struct nvkm_object *object,
 	return ret;
 }

+static const struct nvkm_object_func nvkm_client;
+struct nvkm_client *
+nvkm_client_search(struct nvkm_client *client, u64 handle)
+{
+	struct nvkm_object *object;
+
+	object = nvkm_object_search(client, handle, &nvkm_client);
+	if (IS_ERR(object))
+		return (void *)object;
+
+	return nvkm_client(object);
+}
+
 static int
-nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
+nvkm_client_mthd_devlist(struct nvkm_client *client, void *data, u32 size)
 {
 	union {
-		struct nv_client_devlist_v0 v0;
+		struct nvif_client_devlist_v0 v0;
 	} *args = data;
 	int ret = -ENOSYS;

-	nvif_ioctl(object, "client devlist size %d\n", size);
+	nvif_ioctl(&client->object, "client devlist size %d\n", size);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
-		nvif_ioctl(object, "client devlist vers %d count %d\n",
+		nvif_ioctl(&client->object, "client devlist vers %d count %d\n",
 			   args->v0.version, args->v0.count);
 		if (size == sizeof(args->v0.device[0]) * args->v0.count) {
 			ret = nvkm_device_list(args->v0.device, args->v0.count);
@@ -167,9 +217,10 @@ nvkm_client_mthd_devlist(struct nvkm_object *object, void *data, u32 size)
 static int
 nvkm_client_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
+	struct nvkm_client *client = nvkm_client(object);
 	switch (mthd) {
-	case NV_CLIENT_DEVLIST:
-		return nvkm_client_mthd_devlist(object, data, size);
+	case NVIF_CLIENT_V0_DEVLIST:
+		return nvkm_client_mthd_devlist(client, data, size);
 	default:
 		break;
 	}
@@ -190,7 +241,8 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
 	const struct nvkm_sclass *sclass;

 	switch (index) {
-	case 0: sclass = &nvkm_udevice_sclass; break;
+	case 0: sclass = &nvkm_uclient_sclass; break;
+	case 1: sclass = &nvkm_udevice_sclass; break;
 	default:
 		return -EINVAL;
 	}
@@ -200,110 +252,54 @@ nvkm_client_child_get(struct nvkm_object *object, int index,
 	return 0;
 }

-static const struct nvkm_object_func
-nvkm_client_object_func = {
-	.mthd = nvkm_client_mthd,
-	.sclass = nvkm_client_child_get,
-};
-
-void
-nvkm_client_remove(struct nvkm_client *client, struct nvkm_object *object)
+static int
+nvkm_client_fini(struct nvkm_object *object, bool suspend)
 {
-	if (!RB_EMPTY_NODE(&object->node))
-		rb_erase(&object->node, &client->objroot);
-}
-
-bool
-nvkm_client_insert(struct nvkm_client *client, struct nvkm_object *object)
-{
-	struct rb_node **ptr = &client->objroot.rb_node;
-	struct rb_node *parent = NULL;
-
-	while (*ptr) {
-		struct nvkm_object *this =
-			container_of(*ptr, typeof(*this), node);
-		parent = *ptr;
-		if (object->object < this->object)
-			ptr = &parent->rb_left;
-		else
-		if (object->object > this->object)
-			ptr = &parent->rb_right;
-		else
-			return false;
-	}
-
-	rb_link_node(&object->node, parent, ptr);
-	rb_insert_color(&object->node, &client->objroot);
-	return true;
-}
-
-struct nvkm_object *
-nvkm_client_search(struct nvkm_client *client, u64 handle)
-{
-	struct rb_node *node = client->objroot.rb_node;
-	while (node) {
-		struct nvkm_object *object =
-			container_of(node, typeof(*object), node);
-		if (handle < object->object)
-			node = node->rb_left;
-		else
-		if (handle > object->object)
-			node = node->rb_right;
-		else
-			return object;
-	}
-	return NULL;
-}
-
-int
-nvkm_client_fini(struct nvkm_client *client, bool suspend)
-{
-	struct nvkm_object *object = &client->object;
+	struct nvkm_client *client = nvkm_client(object);
 	const char *name[2] = { "fini", "suspend" };
 	int i;
 	nvif_debug(object, "%s notify\n", name[suspend]);
 	for (i = 0; i < ARRAY_SIZE(client->notify); i++)
 		nvkm_client_notify_put(client, i);
-	return nvkm_object_fini(&client->object, suspend);
+	return 0;
 }

-int
-nvkm_client_init(struct nvkm_client *client)
+static void *
+nvkm_client_dtor(struct nvkm_object *object)
 {
-	return nvkm_object_init(&client->object);
-}
-
-void
-nvkm_client_del(struct nvkm_client **pclient)
-{
-	struct nvkm_client *client = *pclient;
+	struct nvkm_client *client = nvkm_client(object);
 	int i;
-	if (client) {
-		nvkm_client_fini(client, false);
-		for (i = 0; i < ARRAY_SIZE(client->notify); i++)
-			nvkm_client_notify_del(client, i);
-		nvkm_object_dtor(&client->object);
-		kfree(*pclient);
-		*pclient = NULL;
-	}
+	for (i = 0; i < ARRAY_SIZE(client->notify); i++)
+		nvkm_client_notify_del(client, i);
+	return client;
 }

+static const struct nvkm_object_func
+nvkm_client = {
+	.dtor = nvkm_client_dtor,
+	.fini = nvkm_client_fini,
+	.mthd = nvkm_client_mthd,
+	.sclass = nvkm_client_child_get,
+};
+
 int
 nvkm_client_new(const char *name, u64 device, const char *cfg,
-		const char *dbg, struct nvkm_client **pclient)
+		const char *dbg,
+		int (*ntfy)(const void *, u32, const void *, u32),
+		struct nvkm_client **pclient)
 {
-	struct nvkm_oclass oclass = {};
+	struct nvkm_oclass oclass = { .base = nvkm_uclient_sclass };
 	struct nvkm_client *client;

 	if (!(client = *pclient = kzalloc(sizeof(*client), GFP_KERNEL)))
 		return -ENOMEM;
 	oclass.client = client;

-	nvkm_object_ctor(&nvkm_client_object_func, &oclass, &client->object);
+	nvkm_object_ctor(&nvkm_client, &oclass, &client->object);
 	snprintf(client->name, sizeof(client->name), "%s", name);
 	client->device = device;
 	client->debug = nvkm_dbgopt(dbg, "CLIENT");
 	client->objroot = RB_ROOT;
-	client->dmaroot = RB_ROOT;
+	client->ntfy = ntfy;
 	return 0;
 }

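With the hunks above, a client is itself a first-class object: teardown and suspend go through the same dtor/fini function table as everything else, and nvkm_uclient_sclass lets one client construct another through the ordinary object ctor path. A small sketch of the function-table object pattern, with illustrative types:

    /* Sketch: objects driven by a const function table, as nvkm_client
     * now is (dtor/fini/mthd/sclass).  Illustrative types only. */
    #include <stdlib.h>

    struct object;
    struct object_func {
        void *(*dtor)(struct object *);
        int   (*fini)(struct object *, int suspend);
    };

    struct object { const struct object_func *func; };

    static int object_fini(struct object *o, int suspend)
    {
        return o->func->fini ? o->func->fini(o, suspend) : 0;
    }

    static void object_del(struct object **po)
    {
        if (*po) {
            void *data = (*po)->func->dtor ? (*po)->func->dtor(*po) : *po;
            free(data);  /* dtor returns the allocation to free */
            *po = NULL;
        }
    }
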
@@ -27,6 +27,14 @@

 #include <subdev/fb.h>

+bool
+nvkm_engine_chsw_load(struct nvkm_engine *engine)
+{
+	if (engine->func->chsw_load)
+		return engine->func->chsw_load(engine);
+	return false;
+}
+
 void
 nvkm_engine_unref(struct nvkm_engine **pengine)
 {

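nvkm_engine_chsw_load() is the usual optional-hook-with-default shape: engines that implement chsw_load() are asked whether a context switch is currently loading, and everything else conservatively reports false. In isolation:

    /* Sketch: optional per-engine hook with a safe default, matching
     * nvkm_engine_chsw_load() above.  Types are illustrative. */
    #include <stdbool.h>

    struct engine;
    struct engine_func { bool (*chsw_load)(struct engine *); };
    struct engine { const struct engine_func *func; };

    static bool engine_chsw_load(struct engine *engine)
    {
        if (engine->func->chsw_load)
            return engine->func->chsw_load(engine);
        return false;  /* no hook: assume not loading */
    }
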
@@ -29,7 +29,8 @@
 #include <nvif/ioctl.h>

 static int
-nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_nop(struct nvkm_client *client,
+	       struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_nop_v0 v0;
@@ -46,7 +47,8 @@ nvkm_ioctl_nop(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_sclass(struct nvkm_client *client,
+		  struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_sclass_v0 v0;
@@ -78,12 +80,12 @@ nvkm_ioctl_sclass(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
+nvkm_ioctl_new(struct nvkm_client *client,
+	       struct nvkm_object *parent, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_new_v0 v0;
 	} *args = data;
-	struct nvkm_client *client = parent->client;
 	struct nvkm_object *object = NULL;
 	struct nvkm_oclass oclass;
 	int ret = -ENOSYS, i = 0;
@@ -104,9 +106,11 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)

 	do {
 		memset(&oclass, 0x00, sizeof(oclass));
-		oclass.client = client;
 		oclass.handle = args->v0.handle;
+		oclass.route  = args->v0.route;
+		oclass.token  = args->v0.token;
 		oclass.object = args->v0.object;
+		oclass.client = client;
 		oclass.parent = parent;
 		ret = parent->func->sclass(parent, i++, &oclass);
 		if (ret)
@@ -125,10 +129,7 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
 	ret = nvkm_object_init(object);
 	if (ret == 0) {
 		list_add(&object->head, &parent->tree);
-		object->route = args->v0.route;
-		object->token = args->v0.token;
-		object->object = args->v0.object;
-		if (nvkm_client_insert(client, object)) {
+		if (nvkm_object_insert(object)) {
 			client->data = object;
 			return 0;
 		}
@@ -142,7 +143,8 @@ nvkm_ioctl_new(struct nvkm_object *parent, void *data, u32 size)
 }

 static int
-nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_del(struct nvkm_client *client,
+	       struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_del none;
@@ -156,11 +158,12 @@ nvkm_ioctl_del(struct nvkm_object *object, void *data, u32 size)
 		nvkm_object_del(&object);
 	}

-	return ret;
+	return ret ? ret : 1;
 }

 static int
-nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_mthd(struct nvkm_client *client,
+		struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_mthd_v0 v0;
@@ -179,7 +182,8 @@ nvkm_ioctl_mthd(struct nvkm_object *object, void *data, u32 size)


 static int
-nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_rd(struct nvkm_client *client,
+	      struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_rd_v0 v0;
@@ -218,7 +222,8 @@ nvkm_ioctl_rd(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_wr(struct nvkm_client *client,
+	      struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_wr_v0 v0;
@@ -246,7 +251,8 @@ nvkm_ioctl_wr(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_map(struct nvkm_client *client,
+	       struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_map_v0 v0;
@@ -264,7 +270,8 @@ nvkm_ioctl_map(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_unmap(struct nvkm_client *client,
+		 struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_unmap none;
@@ -280,7 +287,8 @@ nvkm_ioctl_unmap(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_ntfy_new(struct nvkm_client *client,
+		    struct nvkm_object *object, void *data, u32 size)
 {
 	union {
 		struct nvif_ioctl_ntfy_new_v0 v0;
@@ -306,9 +314,9 @@ nvkm_ioctl_ntfy_new(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_ntfy_del(struct nvkm_client *client,
+		    struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_del_v0 v0;
 	} *args = data;
@@ -325,9 +333,9 @@ nvkm_ioctl_ntfy_del(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_ntfy_get(struct nvkm_client *client,
+		    struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_get_v0 v0;
 	} *args = data;
@@ -344,9 +352,9 @@ nvkm_ioctl_ntfy_get(struct nvkm_object *object, void *data, u32 size)
 }

 static int
-nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)
+nvkm_ioctl_ntfy_put(struct nvkm_client *client,
+		    struct nvkm_object *object, void *data, u32 size)
 {
-	struct nvkm_client *client = object->client;
 	union {
 		struct nvif_ioctl_ntfy_put_v0 v0;
 	} *args = data;
@@ -364,7 +372,7 @@ nvkm_ioctl_ntfy_put(struct nvkm_object *object, void *data, u32 size)

 static struct {
 	int version;
-	int (*func)(struct nvkm_object *, void *, u32);
+	int (*func)(struct nvkm_client *, struct nvkm_object *, void *, u32);
 }
 nvkm_ioctl_v0[] = {
 	{ 0x00, nvkm_ioctl_nop },
@@ -389,13 +397,10 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,
 	struct nvkm_object *object;
 	int ret;

-	if (handle)
-		object = nvkm_client_search(client, handle);
-	else
-		object = &client->object;
-	if (unlikely(!object)) {
+	object = nvkm_object_search(client, handle, NULL);
+	if (IS_ERR(object)) {
 		nvif_ioctl(&client->object, "object not found\n");
-		return -ENOENT;
+		return PTR_ERR(object);
 	}

 	if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) {
@@ -407,7 +412,7 @@ nvkm_ioctl_path(struct nvkm_client *client, u64 handle, u32 type,

 	if (ret = -EINVAL, type < ARRAY_SIZE(nvkm_ioctl_v0)) {
 		if (nvkm_ioctl_v0[type].version == 0)
-			ret = nvkm_ioctl_v0[type].func(object, data, size);
+			ret = nvkm_ioctl_v0[type].func(client, object, data, size);
 	}

 	return ret;
@@ -436,12 +441,13 @@ nvkm_ioctl(struct nvkm_client *client, bool supervisor,
 				    &args->v0.route, &args->v0.token);
 	}

-	nvif_ioctl(object, "return %d\n", ret);
-	if (hack) {
-		*hack = client->data;
-		client->data = NULL;
+	if (ret != 1) {
+		nvif_ioctl(object, "return %d\n", ret);
+		if (hack) {
+			*hack = client->data;
+			client->data = NULL;
+		}
 	}

 	client->super = false;
 	return ret;
 }

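Two related changes above deserve a note: every ioctl handler now takes the client explicitly instead of digging it out of the object, and nvkm_ioctl_del() returns 1 on success so the caller knows the object (possibly the client itself) is gone and skips the post-ioctl bookkeeping that would otherwise touch it. The sentinel pattern on its own:

    /* Sketch: return 1 as "target destroyed, don't touch it again",
     * mirroring nvkm_ioctl_del()/nvkm_ioctl() above. */
    #include <stdio.h>

    static int op_del(void) { return 1; }  /* success, object destroyed */
    static int op_nop(void) { return 0; }  /* success, object still live */

    static int dispatch(int (*op)(void))
    {
        int ret = op();
        if (ret != 1)  /* only touch post-op state if the object survived */
            printf("return %d\n", ret);
        return ret;
    }

    int main(void)
    {
        dispatch(op_nop);
        dispatch(op_del);
        return 0;
    }
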
@@ -147,6 +147,7 @@ nvkm_mm_head(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 		if (!this)
 			return -ENOMEM;

+		this->next = NULL;
 		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;
@@ -225,6 +226,7 @@ nvkm_mm_tail(struct nvkm_mm *mm, u8 heap, u8 type, u32 size_max, u32 size_min,
 		if (!this)
 			return -ENOMEM;

+		this->next = NULL;
 		this->type = type;
 		list_del(&this->fl_entry);
 		*pnode = this;

@@ -25,6 +25,65 @@
 #include <core/client.h>
 #include <core/engine.h>

+struct nvkm_object *
+nvkm_object_search(struct nvkm_client *client, u64 handle,
+		   const struct nvkm_object_func *func)
+{
+	struct nvkm_object *object;
+
+	if (handle) {
+		struct rb_node *node = client->objroot.rb_node;
+		while (node) {
+			object = rb_entry(node, typeof(*object), node);
+			if (handle < object->object)
+				node = node->rb_left;
+			else
+			if (handle > object->object)
+				node = node->rb_right;
+			else
+				goto done;
+		}
+		return ERR_PTR(-ENOENT);
+	} else {
+		object = &client->object;
+	}
+
+done:
+	if (unlikely(func && object->func != func))
+		return ERR_PTR(-EINVAL);
+	return object;
+}
+
+void
+nvkm_object_remove(struct nvkm_object *object)
+{
+	if (!RB_EMPTY_NODE(&object->node))
+		rb_erase(&object->node, &object->client->objroot);
+}
+
+bool
+nvkm_object_insert(struct nvkm_object *object)
+{
+	struct rb_node **ptr = &object->client->objroot.rb_node;
+	struct rb_node *parent = NULL;
+
+	while (*ptr) {
+		struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node);
+		parent = *ptr;
+		if (object->object < this->object)
+			ptr = &parent->rb_left;
+		else
+		if (object->object > this->object)
+			ptr = &parent->rb_right;
+		else
+			return false;
+	}
+
+	rb_link_node(&object->node, parent, ptr);
+	rb_insert_color(&object->node, &object->client->objroot);
+	return true;
+}
+
 int
 nvkm_object_mthd(struct nvkm_object *object, u32 mthd, void *data, u32 size)
 {
@@ -214,7 +273,7 @@ nvkm_object_del(struct nvkm_object **pobject)
 	struct nvkm_object *object = *pobject;
 	if (object && !WARN_ON(!object->func)) {
 		*pobject = nvkm_object_dtor(object);
-		nvkm_client_remove(object->client, object);
+		nvkm_object_remove(object);
 		list_del(&object->head);
 		kfree(*pobject);
 		*pobject = NULL;
@@ -230,6 +289,9 @@ nvkm_object_ctor(const struct nvkm_object_func *func,
 	object->engine = nvkm_engine_ref(oclass->engine);
 	object->oclass = oclass->base.oclass;
 	object->handle = oclass->handle;
+	object->route  = oclass->route;
+	object->token  = oclass->token;
+	object->object = oclass->object;
 	INIT_LIST_HEAD(&object->head);
 	INIT_LIST_HEAD(&object->tree);
 	RB_CLEAR_NODE(&object->node);

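The rb-tree that used to be private to the client code now lives with the objects themselves: nvkm_object_insert()/search() key on the userspace handle (object->object), with handle 0 short-circuiting to the client's own object and the optional func pointer acting as a type check. A self-contained sketch of the keyed lookup, using a plain unbalanced BST where the kernel uses an rb-tree:

    /* Sketch: handle-keyed insert/search.  An unbalanced BST stands in
     * for the kernel rb-tree used above; the logic is the same. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stddef.h>

    struct object {
        uint64_t handle;
        struct object *l, *r;
    };

    static bool object_insert(struct object **root, struct object *obj)
    {
        while (*root) {
            if (obj->handle < (*root)->handle)
                root = &(*root)->l;
            else if (obj->handle > (*root)->handle)
                root = &(*root)->r;
            else
                return false;  /* duplicate handle rejected */
        }
        obj->l = obj->r = NULL;
        *root = obj;
        return true;
    }

    static struct object *object_search(struct object *root, uint64_t handle)
    {
        while (root) {
            if (handle < root->handle)
                root = root->l;
            else if (handle > root->handle)
                root = root->r;
            else
                return root;
        }
        return NULL;  /* caller maps this to -ENOENT */
    }
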
@@ -993,7 +993,7 @@ nv92_chipset = {
 	.mc = g84_mc_new,
 	.mmu = nv50_mmu_new,
 	.mxm = nv50_mxm_new,
-	.pci = g84_pci_new,
+	.pci = g92_pci_new,
 	.therm = g84_therm_new,
 	.timer = nv41_timer_new,
 	.volt = nv40_volt_new,
@@ -2138,6 +2138,7 @@ nv12b_chipset = {
 	.ltc = gm200_ltc_new,
 	.mc = gk20a_mc_new,
 	.mmu = gf100_mmu_new,
+	.pmu = gm20b_pmu_new,
 	.secboot = gm20b_secboot_new,
 	.timer = gk20a_timer_new,
 	.top = gk104_top_new,

@@ -137,7 +137,6 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
 		    const struct nvkm_oclass *oclass,
 		    struct nvkm_object **pobject)
 {
-	struct nvkm_device *device = root->disp->base.engine.subdev.device;
 	struct nvkm_client *client = oclass->client;
 	struct nvkm_dmaobj *dmaobj;
 	struct nv50_disp_dmac *chan;
@@ -153,9 +152,9 @@ nv50_disp_dmac_new_(const struct nv50_disp_dmac_func *func,
 	if (ret)
 		return ret;

-	dmaobj = nvkm_dma_search(device->dma, client, push);
-	if (!dmaobj)
-		return -ENOENT;
+	dmaobj = nvkm_dmaobj_search(client, push);
+	if (IS_ERR(dmaobj))
+		return PTR_ERR(dmaobj);

 	if (dmaobj->limit - dmaobj->start != 0xfff)
 		return -EINVAL;

@@ -38,13 +38,6 @@ g94_sor_loff(struct nvkm_output_dp *outp)
 	return g94_sor_soff(outp) + !(outp->base.info.sorconf.link & 1) * 0x80;
 }

-/*******************************************************************************
- * TMDS/LVDS
- ******************************************************************************/
-static const struct nvkm_output_func
-g94_sor_output_func = {
-};
-
 /*******************************************************************************
  * DisplayPort
  ******************************************************************************/

@@ -28,24 +28,6 @@

 #include <nvif/class.h>

-struct nvkm_dmaobj *
-nvkm_dma_search(struct nvkm_dma *dma, struct nvkm_client *client, u64 object)
-{
-	struct rb_node *node = client->dmaroot.rb_node;
-	while (node) {
-		struct nvkm_dmaobj *dmaobj =
-			container_of(node, typeof(*dmaobj), rb);
-		if (object < dmaobj->handle)
-			node = node->rb_left;
-		else
-		if (object > dmaobj->handle)
-			node = node->rb_right;
-		else
-			return dmaobj;
-	}
-	return NULL;
-}
-
 static int
 nvkm_dma_oclass_new(struct nvkm_device *device,
 		    const struct nvkm_oclass *oclass, void *data, u32 size,
@@ -53,34 +35,12 @@ nvkm_dma_oclass_new(struct nvkm_device *device,
 {
 	struct nvkm_dma *dma = nvkm_dma(oclass->engine);
 	struct nvkm_dmaobj *dmaobj = NULL;
-	struct nvkm_client *client = oclass->client;
-	struct rb_node **ptr = &client->dmaroot.rb_node;
-	struct rb_node *parent = NULL;
 	int ret;

 	ret = dma->func->class_new(dma, oclass, data, size, &dmaobj);
 	if (dmaobj)
 		*pobject = &dmaobj->object;
-	if (ret)
-		return ret;
-
-	dmaobj->handle = oclass->object;
-
-	while (*ptr) {
-		struct nvkm_dmaobj *obj = container_of(*ptr, typeof(*obj), rb);
-		parent = *ptr;
-		if (dmaobj->handle < obj->handle)
-			ptr = &parent->rb_left;
-		else
-		if (dmaobj->handle > obj->handle)
-			ptr = &parent->rb_right;
-		else
-			return -EEXIST;
-	}
-
-	rb_link_node(&dmaobj->rb, parent, ptr);
-	rb_insert_color(&dmaobj->rb, &client->dmaroot);
-	return 0;
+	return ret;
 }

 static const struct nvkm_device_oclass

@@ -31,6 +31,19 @@
 #include <nvif/cl0002.h>
 #include <nvif/unpack.h>

+static const struct nvkm_object_func nvkm_dmaobj_func;
+struct nvkm_dmaobj *
+nvkm_dmaobj_search(struct nvkm_client *client, u64 handle)
+{
+	struct nvkm_object *object;
+
+	object = nvkm_object_search(client, handle, &nvkm_dmaobj_func);
+	if (IS_ERR(object))
+		return (void *)object;
+
+	return nvkm_dmaobj(object);
+}
+
 static int
 nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
 		 int align, struct nvkm_gpuobj **pgpuobj)
@@ -42,10 +55,7 @@ nvkm_dmaobj_bind(struct nvkm_object *base, struct nvkm_gpuobj *gpuobj,
 static void *
 nvkm_dmaobj_dtor(struct nvkm_object *base)
 {
-	struct nvkm_dmaobj *dmaobj = nvkm_dmaobj(base);
-	if (!RB_EMPTY_NODE(&dmaobj->rb))
-		rb_erase(&dmaobj->rb, &dmaobj->object.client->dmaroot);
-	return dmaobj;
+	return nvkm_dmaobj(base);
 }

 static const struct nvkm_object_func
@@ -74,7 +84,6 @@ nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
 	nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
 	dmaobj->func = func;
 	dmaobj->dma = dma;
-	RB_CLEAR_NODE(&dmaobj->rb);

 	nvif_ioctl(parent, "create dma size %d\n", *psize);
 	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {

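nvkm_dmaobj_search() above shows the payoff of the generic lookup: one tree walk plus a type check against the dmaobj function table, with failures reported as ERR_PTR values instead of a bare NULL (so -ENOENT and -EINVAL stay distinguishable). The error-pointer idiom, with the macros re-implemented for user space since the kernel's aren't available there:

    /* Sketch: ERR_PTR-style returns plus a type check, as in
     * nvkm_object_search()/nvkm_dmaobj_search() above.  The macros are
     * hand-rolled stand-ins for the kernel's. */
    #include <stdint.h>
    #include <stddef.h>

    #define ERR_PTR(err) ((void *)(intptr_t)(err))
    #define PTR_ERR(ptr) ((int)(intptr_t)(ptr))
    #define IS_ERR(ptr)  ((uintptr_t)(ptr) >= (uintptr_t)-4095)

    struct object_func { int id; };
    struct object { const struct object_func *func; };

    static struct object *typed_search(struct object *found,
                                       const struct object_func *want)
    {
        if (!found)
            return ERR_PTR(-2);   /* -ENOENT: no such handle */
        if (want && found->func != want)
            return ERR_PTR(-22);  /* -EINVAL: object of wrong type */
        return found;
    }
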
@@ -31,6 +31,17 @@
 #include <nvif/event.h>
 #include <nvif/unpack.h>

+void
+nvkm_fifo_recover_chan(struct nvkm_fifo *fifo, int chid)
+{
+	unsigned long flags;
+	if (WARN_ON(!fifo->func->recover_chan))
+		return;
+	spin_lock_irqsave(&fifo->lock, flags);
+	fifo->func->recover_chan(fifo, chid);
+	spin_unlock_irqrestore(&fifo->lock, flags);
+}
+
 void
 nvkm_fifo_pause(struct nvkm_fifo *fifo, unsigned long *flags)
 {
@@ -54,19 +65,29 @@ nvkm_fifo_chan_put(struct nvkm_fifo *fifo, unsigned long flags,
 	}
 }

+struct nvkm_fifo_chan *
+nvkm_fifo_chan_inst_locked(struct nvkm_fifo *fifo, u64 inst)
+{
+	struct nvkm_fifo_chan *chan;
+	list_for_each_entry(chan, &fifo->chan, head) {
+		if (chan->inst->addr == inst) {
+			list_del(&chan->head);
+			list_add(&chan->head, &fifo->chan);
+			return chan;
+		}
+	}
+	return NULL;
+}
+
 struct nvkm_fifo_chan *
 nvkm_fifo_chan_inst(struct nvkm_fifo *fifo, u64 inst, unsigned long *rflags)
 {
 	struct nvkm_fifo_chan *chan;
 	unsigned long flags;
 	spin_lock_irqsave(&fifo->lock, flags);
-	list_for_each_entry(chan, &fifo->chan, head) {
-		if (chan->inst->addr == inst) {
-			list_del(&chan->head);
-			list_add(&chan->head, &fifo->chan);
-			*rflags = flags;
-			return chan;
-		}
+	if ((chan = nvkm_fifo_chan_inst_locked(fifo, inst))) {
+		*rflags = flags;
+		return chan;
 	}
 	spin_unlock_irqrestore(&fifo->lock, flags);
 	return NULL;

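nvkm_fifo_chan_inst_locked() keeps the trick the old inline loop had: on a hit, the channel is moved to the head of the list, so the channels looked up most recently are found fastest next time. The move-to-front lookup as a standalone routine:

    /* Sketch: singly-linked lookup with move-to-front, the behaviour of
     * nvkm_fifo_chan_inst_locked() above (which uses a kernel list). */
    #include <stdint.h>
    #include <stddef.h>

    struct chan { uint64_t inst; struct chan *next; };

    static struct chan *chan_lookup(struct chan **head, uint64_t inst)
    {
        struct chan **pp = head, *c;
        while ((c = *pp)) {
            if (c->inst == inst) {
                *pp = c->next;    /* unlink from current position */
                c->next = *head;  /* relink at the front */
                *head = c;
                return c;
            }
            pp = &c->next;
        }
        return NULL;
    }
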
@@ -90,9 +111,34 @@ nvkm_fifo_chan_chid(struct nvkm_fifo *fifo, int chid, unsigned long *rflags)
 	return NULL;
 }

+void
+nvkm_fifo_kevent(struct nvkm_fifo *fifo, int chid)
+{
+	nvkm_event_send(&fifo->kevent, 1, chid, NULL, 0);
+}
+
 static int
-nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
-		     struct nvkm_notify *notify)
+nvkm_fifo_kevent_ctor(struct nvkm_object *object, void *data, u32 size,
+		      struct nvkm_notify *notify)
+{
+	struct nvkm_fifo_chan *chan = nvkm_fifo_chan(object);
+	if (size == 0) {
+		notify->size  = 0;
+		notify->types = 1;
+		notify->index = chan->chid;
+		return 0;
+	}
+	return -ENOSYS;
+}
+
+static const struct nvkm_event_func
+nvkm_fifo_kevent_func = {
+	.ctor = nvkm_fifo_kevent_ctor,
+};
+
+static int
+nvkm_fifo_cevent_ctor(struct nvkm_object *object, void *data, u32 size,
+		      struct nvkm_notify *notify)
 {
 	if (size == 0) {
 		notify->size  = 0;
@@ -104,10 +150,16 @@ nvkm_fifo_event_ctor(struct nvkm_object *object, void *data, u32 size,
 }

 static const struct nvkm_event_func
-nvkm_fifo_event_func = {
-	.ctor = nvkm_fifo_event_ctor,
+nvkm_fifo_cevent_func = {
+	.ctor = nvkm_fifo_cevent_ctor,
 };

+void
+nvkm_fifo_cevent(struct nvkm_fifo *fifo)
+{
+	nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
+}
+
 static void
 nvkm_fifo_uevent_fini(struct nvkm_event *event, int type, int index)
 {
@@ -241,6 +293,7 @@ nvkm_fifo_dtor(struct nvkm_engine *engine)
 	void *data = fifo;
 	if (fifo->func->dtor)
 		data = fifo->func->dtor(fifo);
+	nvkm_event_fini(&fifo->kevent);
 	nvkm_event_fini(&fifo->cevent);
 	nvkm_event_fini(&fifo->uevent);
 	return data;
@@ -283,5 +336,9 @@ nvkm_fifo_ctor(const struct nvkm_fifo_func *func, struct nvkm_device *device,
 		return ret;
 	}

-	return nvkm_event_init(&nvkm_fifo_event_func, 1, 1, &fifo->cevent);
+	ret = nvkm_event_init(&nvkm_fifo_cevent_func, 1, 1, &fifo->cevent);
+	if (ret)
+		return ret;
+
+	return nvkm_event_init(&nvkm_fifo_kevent_func, 1, nr, &fifo->kevent);
 }

|
||||
|
||||
/* allocate push buffer ctxdma instance */
|
||||
if (push) {
|
||||
dmaobj = nvkm_dma_search(device->dma, oclass->client, push);
|
||||
if (!dmaobj)
|
||||
return -ENOENT;
|
||||
dmaobj = nvkm_dmaobj_search(client, push);
|
||||
if (IS_ERR(dmaobj))
|
||||
return PTR_ERR(dmaobj);
|
||||
|
||||
ret = nvkm_object_bind(&dmaobj->object, chan->inst, -16,
|
||||
&chan->push);
|
||||
@ -410,6 +410,6 @@ nvkm_fifo_chan_ctor(const struct nvkm_fifo_chan_func *func,
|
||||
base + user * chan->chid;
|
||||
chan->size = user;
|
||||
|
||||
nvkm_event_send(&fifo->cevent, 1, 0, NULL, 0);
|
||||
nvkm_fifo_cevent(fifo);
|
||||
return 0;
|
||||
}
|
||||
|
@@ -29,5 +29,5 @@ struct nvkm_fifo_chan_oclass {
 	struct nvkm_sclass base;
 };

-int g84_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
+int gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *, u32, struct nvkm_event **);
 #endif

@@ -30,12 +30,12 @@

 #include <nvif/cl826e.h>

-int
+static int
 g84_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
 		   struct nvkm_event **pevent)
 {
 	switch (type) {
-	case G82_CHANNEL_DMA_V0_NTFY_UEVENT:
+	case NV826E_V0_NTFY_NON_STALL_INTERRUPT:
 		*pevent = &chan->fifo->uevent;
 		return 0;
 	default:

@@ -68,7 +68,14 @@ gf100_fifo_runlist_commit(struct gf100_fifo *fifo)
 	}
 	nvkm_done(cur);

-	target = (nvkm_memory_target(cur) == NVKM_MEM_TARGET_HOST) ? 0x3 : 0x0;
+	switch (nvkm_memory_target(cur)) {
+	case NVKM_MEM_TARGET_VRAM: target = 0; break;
+	case NVKM_MEM_TARGET_NCOH: target = 3; break;
+	default:
+		mutex_unlock(&subdev->mutex);
+		WARN_ON(1);
+		return;
+	}

 	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(cur) >> 12) |
 				    (target << 28));
@@ -183,6 +190,7 @@ gf100_fifo_recover(struct gf100_fifo *fifo, struct nvkm_engine *engine,
 	if (engine != &fifo->base.engine)
 		fifo->recover.mask |= 1ULL << engine->subdev.index;
 	schedule_work(&fifo->recover.work);
+	nvkm_fifo_kevent(&fifo->base, chid);
 }

 static const struct nvkm_enum

@@ -27,11 +27,71 @@
 #include <core/client.h>
 #include <core/gpuobj.h>
 #include <subdev/bar.h>
+#include <subdev/timer.h>
+#include <subdev/top.h>
 #include <engine/sw.h>

 #include <nvif/class.h>

+struct gk104_fifo_engine_status {
+	bool busy;
+	bool faulted;
+	bool chsw;
+	bool save;
+	bool load;
+	struct {
+		bool tsg;
+		u32 id;
+	} prev, next, *chan;
+};
+
+static void
+gk104_fifo_engine_status(struct gk104_fifo *fifo, int engn,
+			 struct gk104_fifo_engine_status *status)
+{
+	struct nvkm_engine *engine = fifo->engine[engn].engine;
+	struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
+	struct nvkm_device *device = subdev->device;
+	u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
+
+	status->busy     = !!(stat & 0x80000000);
+	status->faulted  = !!(stat & 0x40000000);
+	status->next.tsg = !!(stat & 0x10000000);
+	status->next.id  =   (stat & 0x0fff0000) >> 16;
+	status->chsw     = !!(stat & 0x00008000);
+	status->save     = !!(stat & 0x00004000);
+	status->load     = !!(stat & 0x00002000);
+	status->prev.tsg = !!(stat & 0x00001000);
+	status->prev.id  =   (stat & 0x00000fff);
+	status->chan     = NULL;
+
+	if (status->busy && status->chsw) {
+		if (status->load && status->save) {
+			if (engine && nvkm_engine_chsw_load(engine))
+				status->chan = &status->next;
+			else
+				status->chan = &status->prev;
+		} else
+		if (status->load) {
+			status->chan = &status->next;
+		} else {
+			status->chan = &status->prev;
+		}
+	} else
+	if (status->load) {
+		status->chan = &status->prev;
+	}
+
+	nvkm_debug(subdev, "engine %02d: busy %d faulted %d chsw %d "
+			   "save %d load %d %sid %d%s-> %sid %d%s\n",
+		   engn, status->busy, status->faulted,
+		   status->chsw, status->save, status->load,
+		   status->prev.tsg ? "tsg" : "ch", status->prev.id,
+		   status->chan == &status->prev ? "*" : " ",
+		   status->next.tsg ? "tsg" : "ch", status->next.id,
+		   status->chan == &status->next ? "*" : " ");
+}
+
 static int
 gk104_fifo_class_get(struct nvkm_fifo *base, int index,
 		     const struct nvkm_fifo_chan_oclass **psclass)

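gk104_fifo_engine_status() turns one register read into a decoded snapshot, and the chan pointer selects whichever of prev/next actually owns the engine mid-switch (consulting the engine's chsw_load() hook when both save and load are set). The raw field extraction can be exercised in isolation; this sketch uses exactly the masks from the function above (field meanings as stated in the diff, not independently verified):

    /* Sketch: decoding the 0x002640 + engn*0x08 status word with the
     * masks from gk104_fifo_engine_status() above. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct engine_status {
        bool busy, faulted, chsw, save, load;
        bool next_tsg, prev_tsg;
        uint32_t next_id, prev_id;
    };

    static void decode(uint32_t stat, struct engine_status *s)
    {
        s->busy     = stat & 0x80000000;
        s->faulted  = stat & 0x40000000;
        s->next_tsg = stat & 0x10000000;
        s->next_id  = (stat & 0x0fff0000) >> 16;
        s->chsw     = stat & 0x00008000;
        s->save     = stat & 0x00004000;
        s->load     = stat & 0x00002000;
        s->prev_tsg = stat & 0x00001000;
        s->prev_id  =  stat & 0x00000fff;
    }

    int main(void)
    {
        struct engine_status s;
        decode(0x80012345, &s);  /* busy + load, next id 1, prev id 0x345 */
        printf("busy %d chsw %d load %d prev %u next %u\n",
               s.busy, s.chsw, s.load, s.prev_id, s.next_id);
        return 0;
    }
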
@@ -83,10 +143,13 @@ gk104_fifo_runlist_commit(struct gk104_fifo *fifo, int runl)
 	}
 	nvkm_done(mem);

-	if (nvkm_memory_target(mem) == NVKM_MEM_TARGET_VRAM)
-		target = 0;
-	else
-		target = 3;
+	switch (nvkm_memory_target(mem)) {
+	case NVKM_MEM_TARGET_VRAM: target = 0; break;
+	case NVKM_MEM_TARGET_NCOH: target = 3; break;
+	default:
+		WARN_ON(1);
+		return;
+	}

 	nvkm_wr32(device, 0x002270, (nvkm_memory_addr(mem) >> 12) |
 				    (target << 28));

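Both runlist commit paths (gf100 above, gk104 here) now reject memory targets they cannot express and then program 0x002270 with the runlist base in 4KiB units or'd with the aperture target in the top nibble. The packing step, per the layout written in these hunks:

    /* Sketch: runlist base register packing as in the hunks above
     * (base >> 12, target in bits 31:28).  Layout taken from the diff. */
    #include <stdint.h>

    enum { TARGET_VRAM = 0, TARGET_NCOH = 3 };

    static uint32_t runlist_base(uint64_t addr, unsigned int target)
    {
        return (uint32_t)(addr >> 12) | (target << 28);
    }
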
@ -149,31 +212,137 @@ gk104_fifo_recover_work(struct work_struct *w)
|
||||
nvkm_mask(device, 0x002630, runm, 0x00000000);
|
||||
}
|
||||
|
||||
static void gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn);
|
||||
|
||||
static void
|
||||
gk104_fifo_recover(struct gk104_fifo *fifo, struct nvkm_engine *engine,
|
||||
struct gk104_fifo_chan *chan)
|
||||
gk104_fifo_recover_runl(struct gk104_fifo *fifo, int runl)
|
||||
{
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
u32 chid = chan->base.chid;
|
||||
int engn;
|
||||
const u32 runm = BIT(runl);
|
||||
|
||||
nvkm_error(subdev, "%s engine fault on channel %d, recovering...\n",
|
||||
nvkm_subdev_name[engine->subdev.index], chid);
|
||||
assert_spin_locked(&fifo->base.lock);
|
||||
if (fifo->recover.runm & runm)
|
||||
return;
|
||||
fifo->recover.runm |= runm;
|
||||
|
||||
nvkm_mask(device, 0x800004 + (chid * 0x08), 0x00000800, 0x00000800);
|
||||
list_del_init(&chan->head);
|
||||
chan->killed = true;
|
||||
/* Block runlist to prevent channel assignment(s) from changing. */
|
||||
nvkm_mask(device, 0x002630, runm, runm);
|
||||
|
||||
for (engn = 0; engn < fifo->engine_nr; engn++) {
|
||||
if (fifo->engine[engn].engine == engine) {
|
||||
fifo->recover.engm |= BIT(engn);
|
||||
/* Schedule recovery. */
|
||||
nvkm_warn(subdev, "runlist %d: scheduled for recovery\n", runl);
|
||||
schedule_work(&fifo->recover.work);
|
||||
}
|
||||
|
||||
static void
|
||||
gk104_fifo_recover_chan(struct nvkm_fifo *base, int chid)
|
||||
{
|
||||
struct gk104_fifo *fifo = gk104_fifo(base);
|
||||
struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
|
||||
struct nvkm_device *device = subdev->device;
|
||||
const u32 stat = nvkm_rd32(device, 0x800004 + (chid * 0x08));
|
||||
const u32 runl = (stat & 0x000f0000) >> 16;
|
||||
const bool used = (stat & 0x00000001);
|
||||
unsigned long engn, engm = fifo->runlist[runl].engm;
|
||||
struct gk104_fifo_chan *chan;
|
||||
|
||||
assert_spin_locked(&fifo->base.lock);
|
||||
if (!used)
|
||||
return;
|
||||
|
||||
/* Lookup SW state for channel, and mark it as dead. */
|
||||
list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
|
||||
if (chan->base.chid == chid) {
|
||||
list_del_init(&chan->head);
|
||||
chan->killed = true;
|
||||
nvkm_fifo_kevent(&fifo->base, chid);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
fifo->recover.runm |= BIT(chan->runl);
|
||||
/* Disable channel. */
|
||||
nvkm_wr32(device, 0x800004 + (chid * 0x08), stat | 0x00000800);
|
||||
nvkm_warn(subdev, "channel %d: killed\n", chid);
|
||||
|
||||
/* Block channel assignments from changing during recovery. */
|
||||
gk104_fifo_recover_runl(fifo, runl);
|
||||
|
||||
/* Schedule recovery for any engines the channel is on. */
|
||||
for_each_set_bit(engn, &engm, fifo->engine_nr) {
|
||||
struct gk104_fifo_engine_status status;
|
||||
gk104_fifo_engine_status(fifo, engn, &status);
|
||||
if (!status.chan || status.chan->id != chid)
|
||||
continue;
|
||||
gk104_fifo_recover_engn(fifo, engn);
|
||||
}
|
||||
}
|
||||
static void
gk104_fifo_recover_engn(struct gk104_fifo *fifo, int engn)
{
    struct nvkm_engine *engine = fifo->engine[engn].engine;
    struct nvkm_subdev *subdev = &fifo->base.engine.subdev;
    struct nvkm_device *device = subdev->device;
    const u32 runl = fifo->engine[engn].runl;
    const u32 engm = BIT(engn);
    struct gk104_fifo_engine_status status;
    int mmui = -1;

    assert_spin_locked(&fifo->base.lock);
    if (fifo->recover.engm & engm)
        return;
    fifo->recover.engm |= engm;

    /* Block channel assignments from changing during recovery. */
    gk104_fifo_recover_runl(fifo, runl);

    /* Determine which channel (if any) is currently on the engine. */
    gk104_fifo_engine_status(fifo, engn, &status);
    if (status.chan) {
        /* The channel is no longer viable, kill it. */
        gk104_fifo_recover_chan(&fifo->base, status.chan->id);
    }

    /* Determine MMU fault ID for the engine, if we're not being
     * called from the fault handler already.
     */
    if (!status.faulted && engine) {
        mmui = nvkm_top_fault_id(device, engine->subdev.index);
        if (mmui < 0) {
            const struct nvkm_enum *en = fifo->func->fault.engine;
            for (; en && en->name; en++) {
                if (en->data2 == engine->subdev.index) {
                    mmui = en->value;
                    break;
                }
            }
        }
        WARN_ON(mmui < 0);
    }

    /* Trigger an MMU fault for the engine.
     *
     * It's not clear why this is needed, but nvgpu does something similar,
     * and it makes recovery from CTXSW_TIMEOUT a lot more reliable.
     */
    if (mmui >= 0) {
        nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000100 | mmui);

        /* Wait for fault to trigger. */
        nvkm_msec(device, 2000,
            gk104_fifo_engine_status(fifo, engn, &status);
            if (status.faulted)
                break;
        );

        /* Release MMU fault trigger, and ACK the fault. */
        nvkm_wr32(device, 0x002a30 + (engn * 0x04), 0x00000000);
        nvkm_wr32(device, 0x00259c, BIT(mmui));
        nvkm_wr32(device, 0x002100, 0x10000000);
    }

    /* Schedule recovery. */
    nvkm_warn(subdev, "engine %d: scheduled for recovery\n", engn);
    schedule_work(&fifo->recover.work);
}

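The gk104_fifo_engine_status() helper and its status structure come from an earlier patch in this series ("separate out engine status parsing"); the defining hunk is not shown here, but the fields used above imply roughly this shape (an assumption, reconstructed from the uses in this diff):

    /* Assumed shape, reconstructed from the uses in this diff. */
    struct gk104_fifo_engine_status {
        bool busy;
        bool faulted;
        bool chsw;
        bool save;
        bool load;
        struct {
            bool tsg;
            u32 id;
        } prev, next, *chan; /* chan points at prev or next, or is NULL */
    };
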
@ -211,34 +380,30 @@ static void
gk104_fifo_intr_sched_ctxsw(struct gk104_fifo *fifo)
{
    struct nvkm_device *device = fifo->base.engine.subdev.device;
    struct gk104_fifo_chan *chan;
    unsigned long flags;
    unsigned long flags, engm = 0;
    u32 engn;

    /* We need to ACK the SCHED_ERROR here, and prevent it reasserting,
     * as MMU_FAULT cannot be triggered while it's pending.
     */
    spin_lock_irqsave(&fifo->base.lock, flags);
    for (engn = 0; engn < fifo->engine_nr; engn++) {
        struct nvkm_engine *engine = fifo->engine[engn].engine;
        int runl = fifo->engine[engn].runl;
        u32 stat = nvkm_rd32(device, 0x002640 + (engn * 0x08));
        u32 busy = (stat & 0x80000000);
        u32 next = (stat & 0x0fff0000) >> 16;
        u32 chsw = (stat & 0x00008000);
        u32 save = (stat & 0x00004000);
        u32 load = (stat & 0x00002000);
        u32 prev = (stat & 0x00000fff);
        u32 chid = load ? next : prev;
        (void)save;
    nvkm_mask(device, 0x002140, 0x00000100, 0x00000000);
    nvkm_wr32(device, 0x002100, 0x00000100);

        if (!busy || !chsw)
    for (engn = 0; engn < fifo->engine_nr; engn++) {
        struct gk104_fifo_engine_status status;

        gk104_fifo_engine_status(fifo, engn, &status);
        if (!status.busy || !status.chsw)
            continue;

        list_for_each_entry(chan, &fifo->runlist[runl].chan, head) {
            if (chan->base.chid == chid && engine) {
                gk104_fifo_recover(fifo, engine, chan);
                break;
            }
        }
        engm |= BIT(engn);
    }

    for_each_set_bit(engn, &engm, fifo->engine_nr)
        gk104_fifo_recover_engn(fifo, engn);

    nvkm_mask(device, 0x002140, 0x00000100, 0x00000100);
    spin_unlock_irqrestore(&fifo->base.lock, flags);
}

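Note the ordering in the rewritten handler: SCHED_ERROR (bit 0x100 of 0x002140/0x002100) is masked and ACKed before any recovery is attempted, because the forced MMU fault used by recover_engn() cannot trigger while a SCHED_ERROR is still pending, and the interrupt is only unmasked again once every hung engine has been queued for recovery.
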
@ -301,6 +466,7 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
    struct nvkm_fifo_chan *chan;
    unsigned long flags;
    char gpcid[8] = "", en[16] = "";
    int engn;

    er = nvkm_enum_find(fifo->func->fault.reason, reason);
    eu = nvkm_enum_find(fifo->func->fault.engine, unit);
@ -342,7 +508,8 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
        snprintf(en, sizeof(en), "%s", eu->name);
    }

    chan = nvkm_fifo_chan_inst(&fifo->base, (u64)inst << 12, &flags);
    spin_lock_irqsave(&fifo->base.lock, flags);
    chan = nvkm_fifo_chan_inst_locked(&fifo->base, (u64)inst << 12);

    nvkm_error(subdev,
               "%s fault at %010llx engine %02x [%s] client %02x [%s%s] "
@ -353,9 +520,23 @@ gk104_fifo_intr_fault(struct gk104_fifo *fifo, int unit)
               (u64)inst << 12,
               chan ? chan->object.client->name : "unknown");

    if (engine && chan)
        gk104_fifo_recover(fifo, engine, (void *)chan);
    nvkm_fifo_chan_put(&fifo->base, flags, &chan);

    /* Kill the channel that caused the fault. */
    if (chan)
        gk104_fifo_recover_chan(&fifo->base, chan->chid);

    /* Channel recovery will probably have already done this for the
     * correct engine(s), but just in case we can't find the channel
     * information...
     */
    for (engn = 0; engn < fifo->engine_nr && engine; engn++) {
        if (fifo->engine[engn].engine == engine) {
            gk104_fifo_recover_engn(fifo, engn);
            break;
        }
    }

    spin_unlock_irqrestore(&fifo->base.lock, flags);
}

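A locking change worth noting in the hunk above: the fault handler previously looked the channel up with nvkm_fifo_chan_inst(), which took the fifo lock internally and handed it back via 'flags' for a later nvkm_fifo_chan_put(). It now takes fifo->base.lock explicitly and uses the new nvkm_fifo_chan_inst_locked() variant, so the lock is held across both the lookup and the recover_chan()/recover_engn() calls, which assert_spin_locked() on it.
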
static const struct nvkm_bitfield gk104_fifo_pbdma_intr_0[] = {
@ -716,6 +897,7 @@ gk104_fifo_ = {
    .intr = gk104_fifo_intr,
    .uevent_init = gk104_fifo_uevent_init,
    .uevent_fini = gk104_fifo_uevent_fini,
    .recover_chan = gk104_fifo_recover_chan,
    .class_get = gk104_fifo_class_get,
};

@ -32,6 +32,23 @@
#include <nvif/cl906f.h>
#include <nvif/unpack.h>

int
gf100_fifo_chan_ntfy(struct nvkm_fifo_chan *chan, u32 type,
                     struct nvkm_event **pevent)
{
    switch (type) {
    case NV906F_V0_NTFY_NON_STALL_INTERRUPT:
        *pevent = &chan->fifo->uevent;
        return 0;
    case NV906F_V0_NTFY_KILLED:
        *pevent = &chan->fifo->kevent;
        return 0;
    default:
        break;
    }
    return -EINVAL;
}

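This gives gf100-era channel classes a notifier for the new kevent: a client that registers for NV906F_V0_NTFY_KILLED is signalled via nvkm_fifo_kevent() when recovery marks its channel killed (see gk104_fifo_recover_chan() above), so it can stop submitting instead of waiting for a timeout.
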
static u32
gf100_fifo_gpfifo_engine_addr(struct nvkm_engine *engine)
{
@ -184,7 +201,7 @@ gf100_fifo_gpfifo_func = {
    .dtor = gf100_fifo_gpfifo_dtor,
    .init = gf100_fifo_gpfifo_init,
    .fini = gf100_fifo_gpfifo_fini,
    .ntfy = g84_fifo_chan_ntfy,
    .ntfy = gf100_fifo_chan_ntfy,
    .engine_ctor = gf100_fifo_gpfifo_engine_ctor,
    .engine_dtor = gf100_fifo_gpfifo_engine_dtor,
    .engine_init = gf100_fifo_gpfifo_engine_init,

@ -50,6 +50,7 @@ gk104_fifo_gpfifo_kick(struct gk104_fifo_chan *chan)
    ) < 0) {
        nvkm_error(subdev, "channel %d [%s] kick timeout\n",
                   chan->base.chid, client->name);
        nvkm_fifo_recover_chan(&fifo->base, chan->base.chid);
        ret = -ETIMEDOUT;
    }
    mutex_unlock(&subdev->mutex);
@ -213,7 +214,7 @@ gk104_fifo_gpfifo_func = {
    .dtor = gk104_fifo_gpfifo_dtor,
    .init = gk104_fifo_gpfifo_init,
    .fini = gk104_fifo_gpfifo_fini,
    .ntfy = g84_fifo_chan_ntfy,
    .ntfy = gf100_fifo_chan_ntfy,
    .engine_ctor = gk104_fifo_gpfifo_engine_ctor,
    .engine_dtor = gk104_fifo_gpfifo_engine_dtor,
    .engine_init = gk104_fifo_gpfifo_engine_init,

@ -6,6 +6,12 @@
int nvkm_fifo_ctor(const struct nvkm_fifo_func *, struct nvkm_device *,
                   int index, int nr, struct nvkm_fifo *);
void nvkm_fifo_uevent(struct nvkm_fifo *);
void nvkm_fifo_cevent(struct nvkm_fifo *);
void nvkm_fifo_kevent(struct nvkm_fifo *, int chid);
void nvkm_fifo_recover_chan(struct nvkm_fifo *, int chid);

struct nvkm_fifo_chan *
nvkm_fifo_chan_inst_locked(struct nvkm_fifo *, u64 inst);

struct nvkm_fifo_chan_oclass;
struct nvkm_fifo_func {
@ -18,6 +24,7 @@ struct nvkm_fifo_func {
    void (*start)(struct nvkm_fifo *, unsigned long *);
    void (*uevent_init)(struct nvkm_fifo *);
    void (*uevent_fini)(struct nvkm_fifo *);
    void (*recover_chan)(struct nvkm_fifo *, int chid);
    int (*class_get)(struct nvkm_fifo *, int index,
                     const struct nvkm_fifo_chan_oclass **);
    const struct nvkm_fifo_chan_oclass *chan[];

@ -25,6 +25,15 @@

#include <engine/fifo.h>

static bool
nvkm_gr_chsw_load(struct nvkm_engine *engine)
{
    struct nvkm_gr *gr = nvkm_gr(engine);
    if (gr->func->chsw_load)
        return gr->func->chsw_load(gr);
    return false;
}

static void
nvkm_gr_tile(struct nvkm_engine *engine, int region, struct nvkm_fb_tile *tile)
{
@ -106,6 +115,15 @@ nvkm_gr_init(struct nvkm_engine *engine)
    return gr->func->init(gr);
}

static int
nvkm_gr_fini(struct nvkm_engine *engine, bool suspend)
{
    struct nvkm_gr *gr = nvkm_gr(engine);
    if (gr->func->fini)
        return gr->func->fini(gr, suspend);
    return 0;
}

static void *
nvkm_gr_dtor(struct nvkm_engine *engine)
{
@ -120,8 +138,10 @@ nvkm_gr = {
    .dtor = nvkm_gr_dtor,
    .oneinit = nvkm_gr_oneinit,
    .init = nvkm_gr_init,
    .fini = nvkm_gr_fini,
    .intr = nvkm_gr_intr,
    .tile = nvkm_gr_tile,
    .chsw_load = nvkm_gr_chsw_load,
    .fifo.cclass = nvkm_gr_cclass_new,
    .fifo.sclass = nvkm_gr_oclass_get,
};

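nvkm_gr_chsw_load() is the engine-level hook this series adds for the fifo code: it forwards to the gr backend's chsw_load() implementation (gf100's version appears below) and defaults to false when a backend does not provide one. nvkm_gr also gains a fini() wrapper here so backends like gf100 can release resources, such as their falcons, on suspend.
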
@ -25,6 +25,8 @@

#include <subdev/timer.h>

#include <nvif/class.h>

static const struct nvkm_bitfield nv50_gr_status[] = {
    { 0x00000001, "BUSY" }, /* set when any bit is set */
    { 0x00000002, "DISPATCH" },
@ -180,11 +182,11 @@ g84_gr = {
    .tlb_flush = g84_gr_tlb_flush,
    .units = nv50_gr_units,
    .sclass = {
        { -1, -1, 0x0030, &nv50_gr_object },
        { -1, -1, 0x502d, &nv50_gr_object },
        { -1, -1, 0x5039, &nv50_gr_object },
        { -1, -1, 0x50c0, &nv50_gr_object },
        { -1, -1, 0x8297, &nv50_gr_object },
        { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
        { -1, -1, NV50_TWOD, &nv50_gr_object },
        { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
        { -1, -1, NV50_COMPUTE, &nv50_gr_object },
        { -1, -1, G82_TESLA, &nv50_gr_object },
        {}
    }
};

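In this hunk and the gt200/gt215/mcp79/mcp89 hunks further below, the raw Tesla-era class IDs (0x0030, 0x502d, 0x5039, ...) are replaced by their symbolic names from <nvif/class.h> (NV_NULL_CLASS, NV50_TWOD, NV50_MEMORY_TO_MEMORY_FORMAT, ...), which is why each file gains the extra #include near its top.
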
@ -702,6 +702,22 @@ gf100_gr_pack_mmio[] = {
 * PGRAPH engine/subdev functions
 ******************************************************************************/

static bool
gf100_gr_chsw_load(struct nvkm_gr *base)
{
    struct gf100_gr *gr = gf100_gr(base);
    if (!gr->firmware) {
        u32 trace = nvkm_rd32(gr->base.engine.subdev.device, 0x40981c);
        if (trace & 0x00000040)
            return true;
    } else {
        u32 mthd = nvkm_rd32(gr->base.engine.subdev.device, 0x409808);
        if (mthd & 0x00080000)
            return true;
    }
    return false;
}

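The two branches cover the two ctxsw ucode variants: the internal-ucode path checks a bit in the trace register at 0x40981c, while the external-firmware path checks the FECS status word at 0x409808; the exact bit meanings are ucode-defined. A rough sketch of how the fifo's engine-status parser can consume this, based on the chid selection logic in the old sched_ctxsw code above (nvkm_engine_chsw_load() is the presumed core helper named by the commit "add engine method to assist in determining chsw direction"; treat it as an assumption):

    /* Sketch (assumption): pick the hung chid during a channel switch. */
    if (status.chsw && nvkm_engine_chsw_load(engine))
        status.chan = &status.next; /* switch is loading: blame incoming channel */
    else
        status.chan = &status.prev; /* otherwise: blame outgoing channel */
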
int
gf100_gr_rops(struct gf100_gr *gr)
{
@ -1136,7 +1152,7 @@ gf100_gr_trap_intr(struct gf100_gr *gr)
    if (trap & 0x00000008) {
        u32 stat = nvkm_rd32(device, 0x408030);

        nvkm_snprintbf(error, sizeof(error), gf100_m2mf_error,
        nvkm_snprintbf(error, sizeof(error), gf100_ccache_error,
                       stat & 0x3fffffff);
        nvkm_error(subdev, "CCACHE %08x [%s]\n", stat, error);
        nvkm_wr32(device, 0x408030, 0xc0000000);
@ -1391,26 +1407,11 @@ gf100_gr_intr(struct nvkm_gr *base)
}

static void
gf100_gr_init_fw(struct gf100_gr *gr, u32 fuc_base,
gf100_gr_init_fw(struct nvkm_falcon *falcon,
                 struct gf100_gr_fuc *code, struct gf100_gr_fuc *data)
{
    struct nvkm_device *device = gr->base.engine.subdev.device;
    int i;

    nvkm_wr32(device, fuc_base + 0x01c0, 0x01000000);
    for (i = 0; i < data->size / 4; i++)
        nvkm_wr32(device, fuc_base + 0x01c4, data->data[i]);

    nvkm_wr32(device, fuc_base + 0x0180, 0x01000000);
    for (i = 0; i < code->size / 4; i++) {
        if ((i & 0x3f) == 0)
            nvkm_wr32(device, fuc_base + 0x0188, i >> 6);
        nvkm_wr32(device, fuc_base + 0x0184, code->data[i]);
    }

    /* code must be padded to 0x40 words */
    for (; i & 0x3f; i++)
        nvkm_wr32(device, fuc_base + 0x0184, 0);
    nvkm_falcon_load_dmem(falcon, data->data, 0x0, data->size, 0);
    nvkm_falcon_load_imem(falcon, code->data, 0x0, code->size, 0, 0, false);
}

static void
@ -1455,162 +1456,149 @@ gf100_gr_init_csdata(struct gf100_gr *gr,
    nvkm_wr32(device, falcon + 0x01c4, star + 4);
}

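gf100_gr_init_fw() loses its register-banging body here: instead of streaming ucode through the per-falcon DMEM (base + 0x01c0/0x01c4) and IMEM (base + 0x0180/0x0184/0x0188) ports by hand, it delegates to the common nvkm_falcon_load_dmem()/nvkm_falcon_load_imem() helpers, so concerns like the 0x40-word code padding seen in the removed loop presumably live in the shared loader now.
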
int
gf100_gr_init_ctxctl(struct gf100_gr *gr)
/* Initialize context from an external (secure or not) firmware */
static int
gf100_gr_init_ctxctl_ext(struct gf100_gr *gr)
{
    struct nvkm_subdev *subdev = &gr->base.engine.subdev;
    struct nvkm_device *device = subdev->device;
    struct nvkm_secboot *sb = device->secboot;
    int ret = 0;

    /* load fuc microcode */
    nvkm_mc_unk260(device, 0);

    /* securely-managed falcons must be reset using secure boot */
    if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
        ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
    else
        gf100_gr_init_fw(gr->fecs, &gr->fuc409c, &gr->fuc409d);
    if (ret)
        return ret;

    if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
        ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
    else
        gf100_gr_init_fw(gr->gpccs, &gr->fuc41ac, &gr->fuc41ad);
    if (ret)
        return ret;

    nvkm_mc_unk260(device, 1);

    /* start both of them running */
    nvkm_wr32(device, 0x409840, 0xffffffff);
    nvkm_wr32(device, 0x41a10c, 0x00000000);
    nvkm_wr32(device, 0x40910c, 0x00000000);

    nvkm_falcon_start(gr->gpccs);
    nvkm_falcon_start(gr->fecs);

    if (nvkm_msec(device, 2000,
        if (nvkm_rd32(device, 0x409800) & 0x00000001)
            break;
    ) < 0)
        return -EBUSY;

    nvkm_wr32(device, 0x409840, 0xffffffff);
    nvkm_wr32(device, 0x409500, 0x7fffffff);
    nvkm_wr32(device, 0x409504, 0x00000021);

    nvkm_wr32(device, 0x409840, 0xffffffff);
    nvkm_wr32(device, 0x409500, 0x00000000);
    nvkm_wr32(device, 0x409504, 0x00000010);
    if (nvkm_msec(device, 2000,
        if ((gr->size = nvkm_rd32(device, 0x409800)))
            break;
    ) < 0)
        return -EBUSY;

    nvkm_wr32(device, 0x409840, 0xffffffff);
    nvkm_wr32(device, 0x409500, 0x00000000);
    nvkm_wr32(device, 0x409504, 0x00000016);
    if (nvkm_msec(device, 2000,
        if (nvkm_rd32(device, 0x409800))
            break;
    ) < 0)
        return -EBUSY;

    nvkm_wr32(device, 0x409840, 0xffffffff);
    nvkm_wr32(device, 0x409500, 0x00000000);
    nvkm_wr32(device, 0x409504, 0x00000025);
    if (nvkm_msec(device, 2000,
        if (nvkm_rd32(device, 0x409800))
            break;
    ) < 0)
        return -EBUSY;

    if (device->chipset >= 0xe0) {
        nvkm_wr32(device, 0x409800, 0x00000000);
        nvkm_wr32(device, 0x409500, 0x00000001);
        nvkm_wr32(device, 0x409504, 0x00000030);
        if (nvkm_msec(device, 2000,
            if (nvkm_rd32(device, 0x409800))
                break;
        ) < 0)
            return -EBUSY;

        nvkm_wr32(device, 0x409810, 0xb00095c8);
        nvkm_wr32(device, 0x409800, 0x00000000);
        nvkm_wr32(device, 0x409500, 0x00000001);
        nvkm_wr32(device, 0x409504, 0x00000031);
        if (nvkm_msec(device, 2000,
            if (nvkm_rd32(device, 0x409800))
                break;
        ) < 0)
            return -EBUSY;

        nvkm_wr32(device, 0x409810, 0x00080420);
        nvkm_wr32(device, 0x409800, 0x00000000);
        nvkm_wr32(device, 0x409500, 0x00000001);
        nvkm_wr32(device, 0x409504, 0x00000032);
        if (nvkm_msec(device, 2000,
            if (nvkm_rd32(device, 0x409800))
                break;
        ) < 0)
            return -EBUSY;

        nvkm_wr32(device, 0x409614, 0x00000070);
        nvkm_wr32(device, 0x409614, 0x00000770);
        nvkm_wr32(device, 0x40802c, 0x00000001);
    }

    if (gr->data == NULL) {
        int ret = gf100_grctx_generate(gr);
        if (ret) {
            nvkm_error(subdev, "failed to construct context\n");
            return ret;
        }
    }

    return 0;
}

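The repeated register dance above is the FECS method interface: arm the mailbox (0x409840), write the method argument (0x409500) and method number (0x409504), then poll the mailbox result (0x409800) for completion. A hypothetical helper capturing the pattern (sketch only; this diff open-codes each call, and any such wrapper name is an assumption):

    /* Hypothetical helper: submit a FECS ucode method and wait for its ack. */
    static int
    gf100_gr_fecs_mthd(struct nvkm_device *device, u32 mthd, u32 data)
    {
        nvkm_wr32(device, 0x409840, 0xffffffff); /* arm mailbox */
        nvkm_wr32(device, 0x409500, data);       /* method argument */
        nvkm_wr32(device, 0x409504, mthd);       /* method number, kicks ucode */
        return nvkm_msec(device, 2000,
            if (nvkm_rd32(device, 0x409800))     /* ucode wrote a result */
                break;
        ) < 0 ? -EBUSY : 0;
    }
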
static int
gf100_gr_init_ctxctl_int(struct gf100_gr *gr)
{
    const struct gf100_grctx_func *grctx = gr->func->grctx;
    struct nvkm_subdev *subdev = &gr->base.engine.subdev;
    struct nvkm_device *device = subdev->device;
    struct nvkm_secboot *sb = device->secboot;
    int i;
    int ret = 0;

    if (gr->firmware) {
        /* load fuc microcode */
        nvkm_mc_unk260(device, 0);

        /* securely-managed falcons must be reset using secure boot */
        if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
            ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_FECS);
        else
            gf100_gr_init_fw(gr, 0x409000, &gr->fuc409c,
                             &gr->fuc409d);
        if (ret)
            return ret;

        if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
            ret = nvkm_secboot_reset(sb, NVKM_SECBOOT_FALCON_GPCCS);
        else
            gf100_gr_init_fw(gr, 0x41a000, &gr->fuc41ac,
                             &gr->fuc41ad);
        if (ret)
            return ret;

        nvkm_mc_unk260(device, 1);

        /* start both of them running */
        nvkm_wr32(device, 0x409840, 0xffffffff);
        nvkm_wr32(device, 0x41a10c, 0x00000000);
        nvkm_wr32(device, 0x40910c, 0x00000000);

        if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_GPCCS))
            nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_GPCCS);
        else
            nvkm_wr32(device, 0x41a100, 0x00000002);
        if (nvkm_secboot_is_managed(sb, NVKM_SECBOOT_FALCON_FECS))
            nvkm_secboot_start(sb, NVKM_SECBOOT_FALCON_FECS);
        else
            nvkm_wr32(device, 0x409100, 0x00000002);
        if (nvkm_msec(device, 2000,
            if (nvkm_rd32(device, 0x409800) & 0x00000001)
                break;
        ) < 0)
            return -EBUSY;

        nvkm_wr32(device, 0x409840, 0xffffffff);
        nvkm_wr32(device, 0x409500, 0x7fffffff);
        nvkm_wr32(device, 0x409504, 0x00000021);

        nvkm_wr32(device, 0x409840, 0xffffffff);
        nvkm_wr32(device, 0x409500, 0x00000000);
        nvkm_wr32(device, 0x409504, 0x00000010);
        if (nvkm_msec(device, 2000,
            if ((gr->size = nvkm_rd32(device, 0x409800)))
                break;
        ) < 0)
            return -EBUSY;

        nvkm_wr32(device, 0x409840, 0xffffffff);
        nvkm_wr32(device, 0x409500, 0x00000000);
        nvkm_wr32(device, 0x409504, 0x00000016);
        if (nvkm_msec(device, 2000,
            if (nvkm_rd32(device, 0x409800))
                break;
        ) < 0)
            return -EBUSY;

        nvkm_wr32(device, 0x409840, 0xffffffff);
        nvkm_wr32(device, 0x409500, 0x00000000);
        nvkm_wr32(device, 0x409504, 0x00000025);
        if (nvkm_msec(device, 2000,
            if (nvkm_rd32(device, 0x409800))
                break;
        ) < 0)
            return -EBUSY;

        if (device->chipset >= 0xe0) {
            nvkm_wr32(device, 0x409800, 0x00000000);
            nvkm_wr32(device, 0x409500, 0x00000001);
            nvkm_wr32(device, 0x409504, 0x00000030);
            if (nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x409800))
                    break;
            ) < 0)
                return -EBUSY;

            nvkm_wr32(device, 0x409810, 0xb00095c8);
            nvkm_wr32(device, 0x409800, 0x00000000);
            nvkm_wr32(device, 0x409500, 0x00000001);
            nvkm_wr32(device, 0x409504, 0x00000031);
            if (nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x409800))
                    break;
            ) < 0)
                return -EBUSY;

            nvkm_wr32(device, 0x409810, 0x00080420);
            nvkm_wr32(device, 0x409800, 0x00000000);
            nvkm_wr32(device, 0x409500, 0x00000001);
            nvkm_wr32(device, 0x409504, 0x00000032);
            if (nvkm_msec(device, 2000,
                if (nvkm_rd32(device, 0x409800))
                    break;
            ) < 0)
                return -EBUSY;

            nvkm_wr32(device, 0x409614, 0x00000070);
            nvkm_wr32(device, 0x409614, 0x00000770);
            nvkm_wr32(device, 0x40802c, 0x00000001);
        }

        if (gr->data == NULL) {
            int ret = gf100_grctx_generate(gr);
            if (ret) {
                nvkm_error(subdev, "failed to construct context\n");
                return ret;
            }
        }

        return 0;
    } else
    if (!gr->func->fecs.ucode) {
        return -ENOSYS;
    }

    /* load HUB microcode */
    nvkm_mc_unk260(device, 0);
    nvkm_wr32(device, 0x4091c0, 0x01000000);
    for (i = 0; i < gr->func->fecs.ucode->data.size / 4; i++)
        nvkm_wr32(device, 0x4091c4, gr->func->fecs.ucode->data.data[i]);

    nvkm_wr32(device, 0x409180, 0x01000000);
    for (i = 0; i < gr->func->fecs.ucode->code.size / 4; i++) {
        if ((i & 0x3f) == 0)
            nvkm_wr32(device, 0x409188, i >> 6);
        nvkm_wr32(device, 0x409184, gr->func->fecs.ucode->code.data[i]);
    }
    nvkm_falcon_load_dmem(gr->fecs, gr->func->fecs.ucode->data.data, 0x0,
                          gr->func->fecs.ucode->data.size, 0);
    nvkm_falcon_load_imem(gr->fecs, gr->func->fecs.ucode->code.data, 0x0,
                          gr->func->fecs.ucode->code.size, 0, 0, false);

    /* load GPC microcode */
    nvkm_wr32(device, 0x41a1c0, 0x01000000);
    for (i = 0; i < gr->func->gpccs.ucode->data.size / 4; i++)
        nvkm_wr32(device, 0x41a1c4, gr->func->gpccs.ucode->data.data[i]);

    nvkm_wr32(device, 0x41a180, 0x01000000);
    for (i = 0; i < gr->func->gpccs.ucode->code.size / 4; i++) {
        if ((i & 0x3f) == 0)
            nvkm_wr32(device, 0x41a188, i >> 6);
        nvkm_wr32(device, 0x41a184, gr->func->gpccs.ucode->code.data[i]);
    }
    nvkm_falcon_load_dmem(gr->gpccs, gr->func->gpccs.ucode->data.data, 0x0,
                          gr->func->gpccs.ucode->data.size, 0);
    nvkm_falcon_load_imem(gr->gpccs, gr->func->gpccs.ucode->code.data, 0x0,
                          gr->func->gpccs.ucode->code.size, 0, 0, false);
    nvkm_mc_unk260(device, 1);

    /* load register lists */
@ -1642,6 +1630,19 @@ gf100_gr_init_ctxctl(struct gf100_gr *gr)
    return 0;
}

int
gf100_gr_init_ctxctl(struct gf100_gr *gr)
{
    int ret;

    if (gr->firmware)
        ret = gf100_gr_init_ctxctl_ext(gr);
    else
        ret = gf100_gr_init_ctxctl_int(gr);

    return ret;
}

static int
gf100_gr_oneinit(struct nvkm_gr *base)
{
@ -1711,10 +1712,32 @@ static int
gf100_gr_init_(struct nvkm_gr *base)
{
    struct gf100_gr *gr = gf100_gr(base);
    struct nvkm_subdev *subdev = &base->engine.subdev;
    int ret;

    nvkm_pmu_pgob(gr->base.engine.subdev.device->pmu, false);

    ret = nvkm_falcon_get(gr->fecs, subdev);
    if (ret)
        return ret;

    ret = nvkm_falcon_get(gr->gpccs, subdev);
    if (ret)
        return ret;

    return gr->func->init(gr);
}

static int
gf100_gr_fini_(struct nvkm_gr *base, bool suspend)
{
    struct gf100_gr *gr = gf100_gr(base);
    struct nvkm_subdev *subdev = &gr->base.engine.subdev;
    nvkm_falcon_put(gr->gpccs, subdev);
    nvkm_falcon_put(gr->fecs, subdev);
    return 0;
}

void
gf100_gr_dtor_fw(struct gf100_gr_fuc *fuc)
{
@ -1737,6 +1760,9 @@ gf100_gr_dtor(struct nvkm_gr *base)
    gr->func->dtor(gr);
    kfree(gr->data);

    nvkm_falcon_del(&gr->gpccs);
    nvkm_falcon_del(&gr->fecs);

    gf100_gr_dtor_fw(&gr->fuc409c);
    gf100_gr_dtor_fw(&gr->fuc409d);
    gf100_gr_dtor_fw(&gr->fuc41ac);
@ -1755,10 +1781,12 @@ gf100_gr_ = {
    .dtor = gf100_gr_dtor,
    .oneinit = gf100_gr_oneinit,
    .init = gf100_gr_init_,
    .fini = gf100_gr_fini_,
    .intr = gf100_gr_intr,
    .units = gf100_gr_units,
    .chan_new = gf100_gr_chan_new,
    .object_get = gf100_gr_object_get,
    .chsw_load = gf100_gr_chsw_load,
};

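The init/fini pair now brackets engine use of the shared falcons: nvkm_falcon_get() claims FECS then GPCCS for the gr subdev before gr->func->init() runs (failing init if either falcon is already held), and nvkm_falcon_put() releases them again on fini. The falcon objects themselves are created with nvkm_falcon_v1_new() in the ctor below and destroyed with nvkm_falcon_del() in the dtor above.
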
int
@ -1828,6 +1856,7 @@ int
gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
              int index, struct gf100_gr *gr)
{
    struct nvkm_subdev *subdev = &gr->base.engine.subdev;
    int ret;

    gr->func = func;
@ -1840,7 +1869,11 @@ gf100_gr_ctor(const struct gf100_gr_func *func, struct nvkm_device *device,
    if (ret)
        return ret;

    return 0;
    ret = nvkm_falcon_v1_new(subdev, "FECS", 0x409000, &gr->fecs);
    if (ret)
        return ret;

    return nvkm_falcon_v1_new(subdev, "GPCCS", 0x41a000, &gr->gpccs);
}

int

@ -29,6 +29,7 @@
#include <core/gpuobj.h>
#include <subdev/ltc.h>
#include <subdev/mmu.h>
#include <engine/falcon.h>

#define GPC_MAX 32
#define TPC_MAX_PER_GPC 8
@ -75,6 +76,8 @@ struct gf100_gr {
    const struct gf100_gr_func *func;
    struct nvkm_gr base;

    struct nvkm_falcon *fecs;
    struct nvkm_falcon *gpccs;
    struct gf100_gr_fuc fuc409c;
    struct gf100_gr_fuc fuc409d;
    struct gf100_gr_fuc fuc41ac;

@ -23,6 +23,8 @@
 */
#include "nv50.h"

#include <nvif/class.h>

static const struct nvkm_gr_func
gt200_gr = {
    .init = nv50_gr_init,
@ -31,11 +33,11 @@ gt200_gr = {
    .tlb_flush = g84_gr_tlb_flush,
    .units = nv50_gr_units,
    .sclass = {
        { -1, -1, 0x0030, &nv50_gr_object },
        { -1, -1, 0x502d, &nv50_gr_object },
        { -1, -1, 0x5039, &nv50_gr_object },
        { -1, -1, 0x50c0, &nv50_gr_object },
        { -1, -1, 0x8397, &nv50_gr_object },
        { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
        { -1, -1, NV50_TWOD, &nv50_gr_object },
        { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
        { -1, -1, NV50_COMPUTE, &nv50_gr_object },
        { -1, -1, GT200_TESLA, &nv50_gr_object },
        {}
    }
};

@ -23,6 +23,8 @@
 */
#include "nv50.h"

#include <nvif/class.h>

static const struct nvkm_gr_func
gt215_gr = {
    .init = nv50_gr_init,
@ -31,12 +33,12 @@ gt215_gr = {
    .tlb_flush = g84_gr_tlb_flush,
    .units = nv50_gr_units,
    .sclass = {
        { -1, -1, 0x0030, &nv50_gr_object },
        { -1, -1, 0x502d, &nv50_gr_object },
        { -1, -1, 0x5039, &nv50_gr_object },
        { -1, -1, 0x50c0, &nv50_gr_object },
        { -1, -1, 0x8597, &nv50_gr_object },
        { -1, -1, 0x85c0, &nv50_gr_object },
        { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
        { -1, -1, NV50_TWOD, &nv50_gr_object },
        { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
        { -1, -1, NV50_COMPUTE, &nv50_gr_object },
        { -1, -1, GT214_TESLA, &nv50_gr_object },
        { -1, -1, GT214_COMPUTE, &nv50_gr_object },
        {}
    }
};

@ -23,6 +23,8 @@
 */
#include "nv50.h"

#include <nvif/class.h>

static const struct nvkm_gr_func
mcp79_gr = {
    .init = nv50_gr_init,
@ -30,11 +32,11 @@ mcp79_gr = {
    .chan_new = nv50_gr_chan_new,
    .units = nv50_gr_units,
    .sclass = {
        { -1, -1, 0x0030, &nv50_gr_object },
        { -1, -1, 0x502d, &nv50_gr_object },
        { -1, -1, 0x5039, &nv50_gr_object },
        { -1, -1, 0x50c0, &nv50_gr_object },
        { -1, -1, 0x8397, &nv50_gr_object },
        { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
        { -1, -1, NV50_TWOD, &nv50_gr_object },
        { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
        { -1, -1, NV50_COMPUTE, &nv50_gr_object },
        { -1, -1, GT200_TESLA, &nv50_gr_object },
        {}
    }
};

@ -23,6 +23,8 @@
 */
#include "nv50.h"

#include <nvif/class.h>

static const struct nvkm_gr_func
mcp89_gr = {
    .init = nv50_gr_init,
@ -31,12 +33,12 @@ mcp89_gr = {
    .tlb_flush = g84_gr_tlb_flush,
    .units = nv50_gr_units,
    .sclass = {
        { -1, -1, 0x0030, &nv50_gr_object },
        { -1, -1, 0x502d, &nv50_gr_object },
        { -1, -1, 0x5039, &nv50_gr_object },
        { -1, -1, 0x50c0, &nv50_gr_object },
        { -1, -1, 0x85c0, &nv50_gr_object },
        { -1, -1, 0x8697, &nv50_gr_object },
        { -1, -1, NV_NULL_CLASS, &nv50_gr_object },
        { -1, -1, NV50_TWOD, &nv50_gr_object },
        { -1, -1, NV50_MEMORY_TO_MEMORY_FORMAT, &nv50_gr_object },
        { -1, -1, NV50_COMPUTE, &nv50_gr_object },
        { -1, -1, GT214_COMPUTE, &nv50_gr_object },
        { -1, -1, GT21A_TESLA, &nv50_gr_object },
        {}
    }
};