Mirror of https://github.com/FEX-Emu/linux.git, synced 2024-12-17 22:41:25 +00:00
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6
* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (35 commits)
  drm/radeon/kms: add definitions for v4 power tables
  drm/radeon/kms: never combine LVDS with another encoder
  drm/radeon/kms: Check module arguments to be valid V2
  drm/radeon/kms: Avoid crash when trying to cleanup uninitialized structure
  drm/radeon/kms: add cvt mode if we only have lvds w/h and no edid (v4)
  drm/radeon/kms: add 3DC compression support
  drm/radeon/kms: allow rendering while no colorbuffer is set on r300
  drm/radeon/kms: enable memory clock reading on legacy (V2)
  drm/radeon/kms: prevent parallel AtomBIOS calls
  drm/radeon/kms: set proper default tv standard
  drm/radeon/kms: fix legacy rmx
  drm/radeon/kms/atom: fill in proper defines for digital setup
  drm/kms: silencing a false positive warning.
  drm/mm: fix logic for selection of best fit block
  drm/vmwgfx: Use TTM handles instead of SIDs as user-space surface handles.
  drm/vmwgfx: Return -ERESTARTSYS when interrupted by a signal.
  drm/vmwgfx: Fix unlocked ioctl and add proper access control
  drm/radeon: fix build on 64-bit with some compilers.
  drivers/gpu: Use kzalloc for allocating only one thing
  DRM: Rename clamp variable
  ...
This commit is contained in:
commit f42ecb2808
@@ -434,11 +434,11 @@ static int drm_version(struct drm_device *dev, void *data,
  * Looks up the ioctl function in the ::ioctls table, checking for root
  * previleges if so required, and dispatches to the respective function.
  */
-int drm_ioctl(struct inode *inode, struct file *filp,
+long drm_ioctl(struct file *filp,
 	      unsigned int cmd, unsigned long arg)
 {
 	struct drm_file *file_priv = filp->private_data;
-	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_device *dev;
 	struct drm_ioctl_desc *ioctl;
 	drm_ioctl_t *func;
 	unsigned int nr = DRM_IOCTL_NR(cmd);
@@ -446,6 +446,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 	char stack_kdata[128];
 	char *kdata = NULL;
 
+	dev = file_priv->minor->dev;
 	atomic_inc(&dev->ioctl_count);
 	atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
 	++file_priv->ioctl_count;
@@ -501,7 +502,13 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 			goto err_i1;
 		}
 	}
-	retcode = func(dev, kdata, file_priv);
+	if (ioctl->flags & DRM_UNLOCKED)
+		retcode = func(dev, kdata, file_priv);
+	else {
+		lock_kernel();
+		retcode = func(dev, kdata, file_priv);
+		unlock_kernel();
+	}
 
 	if (cmd & IOC_OUT) {
 		if (copy_to_user((void __user *)arg, kdata,
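The hunk above is the core of the merge's ioctl rework: drm_ioctl() moves from the old file_operations->ioctl prototype, which took an inode and ran under the Big Kernel Lock, to the unlocked_ioctl prototype, and the BKL is now taken inside the dispatcher only for handlers that are not flagged DRM_UNLOCKED. A rough standalone C sketch of that dispatch pattern follows; the names, flag value, and the pthread mutex standing in for the BKL are illustrative assumptions, not the kernel's actual drm_ioctl_desc machinery.

#include <stddef.h>
#include <pthread.h>

#define MY_UNLOCKED 0x1            /* hypothetical analogue of DRM_UNLOCKED */

typedef int (*ioctl_func_t)(void *dev, void *data, void *file_priv);

struct ioctl_desc {
	ioctl_func_t func;
	unsigned int flags;
};

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for the BKL */

/* Dispatch one ioctl: handlers flagged MY_UNLOCKED run concurrently,
 * everything else is serialized behind the single big lock. */
static int dispatch(const struct ioctl_desc *ioctl,
		    void *dev, void *kdata, void *file_priv)
{
	int retcode;

	if (ioctl->flags & MY_UNLOCKED) {
		retcode = ioctl->func(dev, kdata, file_priv);
	} else {
		pthread_mutex_lock(&big_lock);
		retcode = ioctl->func(dev, kdata, file_priv);
		pthread_mutex_unlock(&big_lock);
	}
	return retcode;
}

static int noop_handler(void *dev, void *data, void *file_priv) { return 0; }

int main(void)
{
	struct ioctl_desc desc = { noop_handler, MY_UNLOCKED };
	return dispatch(&desc, NULL, NULL, NULL);
}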
@@ -913,7 +913,7 @@ static int drm_cvt_modes(struct drm_connector *connector,
 	const int rates[] = { 60, 85, 75, 60, 50 };
 
 	for (i = 0; i < 4; i++) {
-		int width, height;
+		int uninitialized_var(width), height;
 		cvt = &(timing->data.other_data.data.cvt[i]);
 
 		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
@@ -104,7 +104,7 @@ static int compat_drm_version(struct file *file, unsigned int cmd,
 			 &version->desc))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
+	err = drm_ioctl(file,
 			DRM_IOCTL_VERSION, (unsigned long)version);
 	if (err)
 		return err;
@@ -145,8 +145,7 @@ static int compat_drm_getunique(struct file *file, unsigned int cmd,
 			 &u->unique))
 		return -EFAULT;
 
-	err = drm_ioctl(file->f_path.dentry->d_inode, file,
-			DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
+	err = drm_ioctl(file, DRM_IOCTL_GET_UNIQUE, (unsigned long)u);
 	if (err)
 		return err;
 
@ -174,8 +173,7 @@ static int compat_drm_setunique(struct file *file, unsigned int cmd,
|
||||
&u->unique))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
|
||||
return drm_ioctl(file, DRM_IOCTL_SET_UNIQUE, (unsigned long)u);
|
||||
}
|
||||
|
||||
typedef struct drm_map32 {
|
||||
@ -205,8 +203,7 @@ static int compat_drm_getmap(struct file *file, unsigned int cmd,
|
||||
if (__put_user(idx, &map->offset))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_GET_MAP, (unsigned long)map);
|
||||
err = drm_ioctl(file, DRM_IOCTL_GET_MAP, (unsigned long)map);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -246,8 +243,7 @@ static int compat_drm_addmap(struct file *file, unsigned int cmd,
|
||||
|| __put_user(m32.flags, &map->flags))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_ADD_MAP, (unsigned long)map);
|
||||
err = drm_ioctl(file, DRM_IOCTL_ADD_MAP, (unsigned long)map);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -284,8 +280,7 @@ static int compat_drm_rmmap(struct file *file, unsigned int cmd,
|
||||
if (__put_user((void *)(unsigned long)handle, &map->handle))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RM_MAP, (unsigned long)map);
|
||||
return drm_ioctl(file, DRM_IOCTL_RM_MAP, (unsigned long)map);
|
||||
}
|
||||
|
||||
typedef struct drm_client32 {
|
||||
@ -314,8 +309,7 @@ static int compat_drm_getclient(struct file *file, unsigned int cmd,
|
||||
if (__put_user(idx, &client->idx))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_GET_CLIENT, (unsigned long)client);
|
||||
err = drm_ioctl(file, DRM_IOCTL_GET_CLIENT, (unsigned long)client);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -351,8 +345,7 @@ static int compat_drm_getstats(struct file *file, unsigned int cmd,
|
||||
if (!access_ok(VERIFY_WRITE, stats, sizeof(*stats)))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_GET_STATS, (unsigned long)stats);
|
||||
err = drm_ioctl(file, DRM_IOCTL_GET_STATS, (unsigned long)stats);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -395,8 +388,7 @@ static int compat_drm_addbufs(struct file *file, unsigned int cmd,
|
||||
|| __put_user(agp_start, &buf->agp_start))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
|
||||
err = drm_ioctl(file, DRM_IOCTL_ADD_BUFS, (unsigned long)buf);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -427,8 +419,7 @@ static int compat_drm_markbufs(struct file *file, unsigned int cmd,
|
||||
|| __put_user(b32.high_mark, &buf->high_mark))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
|
||||
return drm_ioctl(file, DRM_IOCTL_MARK_BUFS, (unsigned long)buf);
|
||||
}
|
||||
|
||||
typedef struct drm_buf_info32 {
|
||||
@ -469,8 +460,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
|
||||
|| __put_user(list, &request->list))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_INFO_BUFS, (unsigned long)request);
|
||||
err = drm_ioctl(file, DRM_IOCTL_INFO_BUFS, (unsigned long)request);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -531,8 +521,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
|
||||
|| __put_user(list, &request->list))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_MAP_BUFS, (unsigned long)request);
|
||||
err = drm_ioctl(file, DRM_IOCTL_MAP_BUFS, (unsigned long)request);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -578,8 +567,7 @@ static int compat_drm_freebufs(struct file *file, unsigned int cmd,
|
||||
&request->list))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_FREE_BUFS, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_FREE_BUFS, (unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_ctx_priv_map32 {
|
||||
@ -605,8 +593,7 @@ static int compat_drm_setsareactx(struct file *file, unsigned int cmd,
|
||||
&request->handle))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_SET_SAREA_CTX, (unsigned long)request);
|
||||
}
|
||||
|
||||
static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
|
||||
@ -628,8 +615,7 @@ static int compat_drm_getsareactx(struct file *file, unsigned int cmd,
|
||||
if (__put_user(ctx_id, &request->ctx_id))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
|
||||
err = drm_ioctl(file, DRM_IOCTL_GET_SAREA_CTX, (unsigned long)request);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -664,8 +650,7 @@ static int compat_drm_resctx(struct file *file, unsigned int cmd,
|
||||
&res->contexts))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RES_CTX, (unsigned long)res);
|
||||
err = drm_ioctl(file, DRM_IOCTL_RES_CTX, (unsigned long)res);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -718,8 +703,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
|
||||
&d->request_sizes))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_DMA, (unsigned long)d);
|
||||
err = drm_ioctl(file, DRM_IOCTL_DMA, (unsigned long)d);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -751,8 +735,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
|
||||
if (put_user(m32.mode, &mode->mode))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
|
||||
return drm_ioctl(file, DRM_IOCTL_AGP_ENABLE, (unsigned long)mode);
|
||||
}
|
||||
|
||||
typedef struct drm_agp_info32 {
|
||||
@ -781,8 +764,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
|
||||
if (!access_ok(VERIFY_WRITE, info, sizeof(*info)))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_AGP_INFO, (unsigned long)info);
|
||||
err = drm_ioctl(file, DRM_IOCTL_AGP_INFO, (unsigned long)info);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -827,16 +809,14 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
|
||||
|| __put_user(req32.type, &request->type))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
|
||||
err = drm_ioctl(file, DRM_IOCTL_AGP_ALLOC, (unsigned long)request);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
if (__get_user(req32.handle, &request->handle)
|
||||
|| __get_user(req32.physical, &request->physical)
|
||||
|| copy_to_user(argp, &req32, sizeof(req32))) {
|
||||
drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_AGP_FREE, (unsigned long)request);
|
||||
drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
|
||||
return -EFAULT;
|
||||
}
|
||||
|
||||
@ -856,8 +836,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
|
||||
|| __put_user(handle, &request->handle))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_AGP_FREE, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_AGP_FREE, (unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_agp_binding32 {
|
||||
@ -881,8 +860,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
|
||||
|| __put_user(req32.offset, &request->offset))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_AGP_BIND, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_AGP_BIND, (unsigned long)request);
|
||||
}
|
||||
|
||||
static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
|
||||
@ -898,8 +876,7 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
|
||||
|| __put_user(handle, &request->handle))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_AGP_UNBIND, (unsigned long)request);
|
||||
}
|
||||
#endif /* __OS_HAS_AGP */
|
||||
|
||||
@ -923,8 +900,7 @@ static int compat_drm_sg_alloc(struct file *file, unsigned int cmd,
|
||||
|| __put_user(x, &request->size))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_SG_ALLOC, (unsigned long)request);
|
||||
err = drm_ioctl(file, DRM_IOCTL_SG_ALLOC, (unsigned long)request);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@ -950,8 +926,7 @@ static int compat_drm_sg_free(struct file *file, unsigned int cmd,
|
||||
|| __put_user(x << PAGE_SHIFT, &request->handle))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_SG_FREE, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_SG_FREE, (unsigned long)request);
|
||||
}
|
||||
|
||||
#if defined(CONFIG_X86) || defined(CONFIG_IA64)
|
||||
@ -981,8 +956,7 @@ static int compat_drm_update_draw(struct file *file, unsigned int cmd,
|
||||
__put_user(update32.data, &request->data))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
|
||||
err = drm_ioctl(file, DRM_IOCTL_UPDATE_DRAW, (unsigned long)request);
|
||||
return err;
|
||||
}
|
||||
#endif
|
||||
@ -1023,8 +997,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
|
||||
|| __put_user(req32.request.signal, &request->request.signal))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
|
||||
err = drm_ioctl(file, DRM_IOCTL_WAIT_VBLANK, (unsigned long)request);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
@@ -1094,16 +1067,14 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	 * than always failing.
 	 */
 	if (nr >= ARRAY_SIZE(drm_compat_ioctls))
-		return drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
+		return drm_ioctl(filp, cmd, arg);
 
 	fn = drm_compat_ioctls[nr];
 
-	lock_kernel();	/* XXX for now */
 	if (fn != NULL)
 		ret = (*fn) (filp, cmd, arg);
 	else
-		ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
-	unlock_kernel();
+		ret = drm_ioctl(filp, cmd, arg);
 
 	return ret;
 }
@@ -358,7 +358,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 		if (entry->size >= size + wasted) {
 			if (!best_match)
 				return entry;
-			if (size < best_size) {
+			if (entry->size < best_size) {
 				best = entry;
 				best_size = entry->size;
 			}
@@ -408,7 +408,7 @@ struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
 		if (entry->size >= size + wasted) {
 			if (!best_match)
 				return entry;
-			if (size < best_size) {
+			if (entry->size < best_size) {
 				best = entry;
 				best_size = entry->size;
 			}
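These two hunks are the "drm/mm: fix logic for selection of best fit block" change from the shortlog: with best_match set, the scan is meant to remember the smallest hole that still fits, but it compared the caller's requested size against best_size, so the first fitting hole was never displaced. A small self-contained C sketch of the corrected best-fit scan, using a simplified hole list rather than the kernel's drm_mm structures:

#include <stddef.h>

struct hole {
	size_t size;
	struct hole *next;
};

/* Return the smallest hole that can hold `size` bytes (best fit),
 * or the first fitting hole when best_fit is 0 (first fit). */
static struct hole *search_free(struct hole *list, size_t size, int best_fit)
{
	struct hole *best = NULL;
	size_t best_size = (size_t)-1;
	struct hole *entry;

	for (entry = list; entry; entry = entry->next) {
		if (entry->size < size)
			continue;
		if (!best_fit)
			return entry;
		if (entry->size < best_size) {  /* compare the hole, not the request */
			best = entry;
			best_size = entry->size;
		}
	}
	return best;
}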
@@ -408,6 +408,11 @@ static int ch7006_probe(struct i2c_client *client, const struct i2c_device_id *i
 
 	ch7006_info(client, "Detected version ID: %x\n", val);
 
+	/* I don't know what this is for, but otherwise I get no
+	 * signal.
+	 */
+	ch7006_write(client, 0x3d, 0x0);
+
 	return 0;
 
 fail:
@@ -427,11 +427,6 @@ void ch7006_state_load(struct i2c_client *client,
 	ch7006_load_reg(client, state, CH7006_SUBC_INC7);
 	ch7006_load_reg(client, state, CH7006_PLL_CONTROL);
 	ch7006_load_reg(client, state, CH7006_CALC_SUBC_INC0);
-
-	/* I don't know what this is for, but otherwise I get no
-	 * signal.
-	 */
-	ch7006_write(client, 0x3d, 0x0);
 }
 
 void ch7006_state_save(struct i2c_client *client,
@ -115,7 +115,7 @@ static int i810_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
|
||||
static const struct file_operations i810_buffer_fops = {
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = i810_mmap_buffers,
|
||||
.fasync = drm_fasync,
|
||||
};
|
||||
|
@ -59,7 +59,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -117,7 +117,7 @@ static int i830_mmap_buffers(struct file *filp, struct vm_area_struct *vma)
|
||||
static const struct file_operations i830_buffer_fops = {
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = i830_mmap_buffers,
|
||||
.fasync = drm_fasync,
|
||||
};
|
||||
|
@ -70,7 +70,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -329,7 +329,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_gem_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -66,8 +66,7 @@ static int compat_i915_batchbuffer(struct file *file, unsigned int cmd,
|
||||
&batchbuffer->cliprects))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_I915_BATCHBUFFER,
|
||||
return drm_ioctl(file, DRM_IOCTL_I915_BATCHBUFFER,
|
||||
(unsigned long)batchbuffer);
|
||||
}
|
||||
|
||||
@ -102,8 +101,8 @@ static int compat_i915_cmdbuffer(struct file *file, unsigned int cmd,
|
||||
&cmdbuffer->cliprects))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_I915_CMDBUFFER, (unsigned long)cmdbuffer);
|
||||
return drm_ioctl(file, DRM_IOCTL_I915_CMDBUFFER,
|
||||
(unsigned long)cmdbuffer);
|
||||
}
|
||||
|
||||
typedef struct drm_i915_irq_emit32 {
|
||||
@ -125,8 +124,8 @@ static int compat_i915_irq_emit(struct file *file, unsigned int cmd,
|
||||
&request->irq_seq))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_I915_IRQ_EMIT, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_I915_IRQ_EMIT,
|
||||
(unsigned long)request);
|
||||
}
|
||||
typedef struct drm_i915_getparam32 {
|
||||
int param;
|
||||
@ -149,8 +148,8 @@ static int compat_i915_getparam(struct file *file, unsigned int cmd,
|
||||
&request->value))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_I915_GETPARAM, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_I915_GETPARAM,
|
||||
(unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_i915_mem_alloc32 {
|
||||
@ -178,8 +177,8 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
|
||||
&request->region_offset))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_I915_ALLOC, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_I915_ALLOC,
|
||||
(unsigned long)request);
|
||||
}
|
||||
|
||||
drm_ioctl_compat_t *i915_compat_ioctls[] = {
|
||||
@ -211,12 +210,10 @@ long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
|
||||
fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
|
||||
|
||||
lock_kernel(); /* XXX for now */
|
||||
if (fn != NULL)
|
||||
ret = (*fn) (filp, cmd, arg);
|
||||
else
|
||||
ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
|
||||
unlock_kernel();
|
||||
ret = drm_ioctl(filp, cmd, arg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -68,7 +68,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -100,8 +100,7 @@ static int compat_mga_init(struct file *file, unsigned int cmd,
|
||||
if (err)
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_MGA_INIT, (unsigned long)init);
|
||||
return drm_ioctl(file, DRM_IOCTL_MGA_INIT, (unsigned long)init);
|
||||
}
|
||||
|
||||
typedef struct drm_mga_getparam32 {
|
||||
@ -125,8 +124,7 @@ static int compat_mga_getparam(struct file *file, unsigned int cmd,
|
||||
&getparam->value))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
|
||||
return drm_ioctl(file, DRM_IOCTL_MGA_GETPARAM, (unsigned long)getparam);
|
||||
}
|
||||
|
||||
typedef struct drm_mga_drm_bootstrap32 {
|
||||
@ -166,8 +164,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
|
||||
|| __put_user(dma_bootstrap32.agp_size, &dma_bootstrap->agp_size))
|
||||
return -EFAULT;
|
||||
|
||||
err = drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_MGA_DMA_BOOTSTRAP,
|
||||
err = drm_ioctl(file, DRM_IOCTL_MGA_DMA_BOOTSTRAP,
|
||||
(unsigned long)dma_bootstrap);
|
||||
if (err)
|
||||
return err;
|
||||
@ -220,12 +217,10 @@ long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
|
||||
fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
|
||||
|
||||
lock_kernel(); /* XXX for now */
|
||||
if (fn != NULL)
|
||||
ret = (*fn) (filp, cmd, arg);
|
||||
else
|
||||
ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
|
||||
unlock_kernel();
|
||||
ret = drm_ioctl(filp, cmd, arg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@@ -8,14 +8,15 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_sgdma.o nouveau_dma.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
-             nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
-             nouveau_dp.o \
+             nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
+             nouveau_dp.o nouveau_grctx.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv40_fb.o \
              nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
              nv04_graph.o nv10_graph.o nv20_graph.o \
              nv40_graph.o nv50_graph.o \
+             nv40_grctx.o \
              nv04_instmem.o nv50_instmem.o \
              nv50_crtc.o nv50_dac.o nv50_sor.o \
              nv50_cursor.o nv50_display.o nv50_fbcon.o \
File diff suppressed because it is too large
@ -227,6 +227,7 @@ struct nvbios {
|
||||
|
||||
uint16_t pll_limit_tbl_ptr;
|
||||
uint16_t ram_restrict_tbl_ptr;
|
||||
uint8_t ram_restrict_group_count;
|
||||
|
||||
uint16_t some_script_ptr; /* BIT I + 14 */
|
||||
uint16_t init96_tbl_ptr; /* BIT I + 16 */
|
||||
|
@ -154,6 +154,11 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
|
||||
nvbo->placement.busy_placement = nvbo->placements;
|
||||
nvbo->placement.num_placement = n;
|
||||
nvbo->placement.num_busy_placement = n;
|
||||
|
||||
if (nvbo->pin_refcnt) {
|
||||
while (n--)
|
||||
nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
|
||||
}
|
||||
}
|
||||
|
||||
int
|
||||
@ -400,10 +405,16 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
|
||||
struct nouveau_bo *nvbo = nouveau_bo(bo);
|
||||
|
||||
switch (bo->mem.mem_type) {
|
||||
case TTM_PL_VRAM:
|
||||
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
|
||||
TTM_PL_FLAG_SYSTEM);
|
||||
break;
|
||||
default:
|
||||
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
|
||||
break;
|
||||
}
|
||||
|
||||
*pl = nvbo->placement;
|
||||
}
|
||||
|
||||
|
||||
@ -455,11 +466,8 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
|
||||
int ret;
|
||||
|
||||
chan = nvbo->channel;
|
||||
if (!chan || nvbo->tile_flags || nvbo->no_vm) {
|
||||
if (!chan || nvbo->tile_flags || nvbo->no_vm)
|
||||
chan = dev_priv->channel;
|
||||
if (!chan)
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
src_offset = old_mem->mm_node->start << PAGE_SHIFT;
|
||||
dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
|
||||
@ -625,7 +633,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE)
|
||||
if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
|
||||
!dev_priv->channel)
|
||||
return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
|
||||
|
||||
if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
|
||||
|
@ -86,7 +86,7 @@ nouveau_connector_destroy(struct drm_connector *drm_connector)
|
||||
struct nouveau_connector *connector = nouveau_connector(drm_connector);
|
||||
struct drm_device *dev = connector->base.dev;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
if (!connector)
|
||||
return;
|
||||
@ -420,7 +420,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector)
|
||||
/* Use preferred mode if there is one.. */
|
||||
list_for_each_entry(mode, &connector->base.probed_modes, head) {
|
||||
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
|
||||
NV_DEBUG(dev, "native mode from preferred\n");
|
||||
NV_DEBUG_KMS(dev, "native mode from preferred\n");
|
||||
return drm_mode_duplicate(dev, mode);
|
||||
}
|
||||
}
|
||||
@ -445,7 +445,7 @@ nouveau_connector_native_mode(struct nouveau_connector *connector)
|
||||
largest = mode;
|
||||
}
|
||||
|
||||
NV_DEBUG(dev, "native mode from largest: %dx%d@%d\n",
|
||||
NV_DEBUG_KMS(dev, "native mode from largest: %dx%d@%d\n",
|
||||
high_w, high_h, high_v);
|
||||
return largest ? drm_mode_duplicate(dev, largest) : NULL;
|
||||
}
|
||||
@ -725,7 +725,7 @@ nouveau_connector_create(struct drm_device *dev, int index, int type)
|
||||
struct drm_encoder *encoder;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
|
||||
if (!nv_connector)
|
||||
|
@ -187,7 +187,7 @@ nouveau_dp_link_train_adjust(struct drm_encoder *encoder, uint8_t *config)
|
||||
if (ret)
|
||||
return false;
|
||||
|
||||
NV_DEBUG(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
|
||||
NV_DEBUG_KMS(dev, "\t\tadjust 0x%02x 0x%02x\n", request[0], request[1]);
|
||||
|
||||
/* Keep all lanes at the same level.. */
|
||||
for (i = 0; i < nv_encoder->dp.link_nr; i++) {
|
||||
@ -228,7 +228,7 @@ nouveau_dp_link_train_commit(struct drm_encoder *encoder, uint8_t *config)
|
||||
int or = nv_encoder->or, link = !(nv_encoder->dcb->sorconf.link & 1);
|
||||
int dpe_headerlen, ret, i;
|
||||
|
||||
NV_DEBUG(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
|
||||
NV_DEBUG_KMS(dev, "\t\tconfig 0x%02x 0x%02x 0x%02x 0x%02x\n",
|
||||
config[0], config[1], config[2], config[3]);
|
||||
|
||||
dpe = nouveau_bios_dp_table(dev, nv_encoder->dcb, &dpe_headerlen);
|
||||
@ -276,12 +276,12 @@ nouveau_dp_link_train(struct drm_encoder *encoder)
|
||||
bool cr_done, cr_max_vs, eq_done;
|
||||
int ret = 0, i, tries, voltage;
|
||||
|
||||
NV_DEBUG(dev, "link training!!\n");
|
||||
NV_DEBUG_KMS(dev, "link training!!\n");
|
||||
train:
|
||||
cr_done = eq_done = false;
|
||||
|
||||
/* set link configuration */
|
||||
NV_DEBUG(dev, "\tbegin train: bw %d, lanes %d\n",
|
||||
NV_DEBUG_KMS(dev, "\tbegin train: bw %d, lanes %d\n",
|
||||
nv_encoder->dp.link_bw, nv_encoder->dp.link_nr);
|
||||
|
||||
ret = nouveau_dp_link_bw_set(encoder, nv_encoder->dp.link_bw);
|
||||
@ -297,7 +297,7 @@ train:
|
||||
return false;
|
||||
|
||||
/* clock recovery */
|
||||
NV_DEBUG(dev, "\tbegin cr\n");
|
||||
NV_DEBUG_KMS(dev, "\tbegin cr\n");
|
||||
ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_1);
|
||||
if (ret)
|
||||
goto stop;
|
||||
@ -314,7 +314,7 @@ train:
|
||||
ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 2);
|
||||
if (ret)
|
||||
break;
|
||||
NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
|
||||
NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
|
||||
status[0], status[1]);
|
||||
|
||||
cr_done = true;
|
||||
@ -346,7 +346,7 @@ train:
|
||||
goto stop;
|
||||
|
||||
/* channel equalisation */
|
||||
NV_DEBUG(dev, "\tbegin eq\n");
|
||||
NV_DEBUG_KMS(dev, "\tbegin eq\n");
|
||||
ret = nouveau_dp_link_train_set(encoder, DP_TRAINING_PATTERN_2);
|
||||
if (ret)
|
||||
goto stop;
|
||||
@ -357,7 +357,7 @@ train:
|
||||
ret = auxch_rd(encoder, DP_LANE0_1_STATUS, status, 3);
|
||||
if (ret)
|
||||
break;
|
||||
NV_DEBUG(dev, "\t\tstatus: 0x%02x 0x%02x\n",
|
||||
NV_DEBUG_KMS(dev, "\t\tstatus: 0x%02x 0x%02x\n",
|
||||
status[0], status[1]);
|
||||
|
||||
eq_done = true;
|
||||
@ -395,9 +395,9 @@ stop:
|
||||
|
||||
/* retry at a lower setting, if possible */
|
||||
if (!ret && !(eq_done && cr_done)) {
|
||||
NV_DEBUG(dev, "\twe failed\n");
|
||||
NV_DEBUG_KMS(dev, "\twe failed\n");
|
||||
if (nv_encoder->dp.link_bw != DP_LINK_BW_1_62) {
|
||||
NV_DEBUG(dev, "retry link training at low rate\n");
|
||||
NV_DEBUG_KMS(dev, "retry link training at low rate\n");
|
||||
nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
|
||||
goto train;
|
||||
}
|
||||
@ -418,7 +418,7 @@ nouveau_dp_detect(struct drm_encoder *encoder)
|
||||
if (ret)
|
||||
return false;
|
||||
|
||||
NV_DEBUG(dev, "encoder: link_bw %d, link_nr %d\n"
|
||||
NV_DEBUG_KMS(dev, "encoder: link_bw %d, link_nr %d\n"
|
||||
"display: link_bw %d, link_nr %d version 0x%02x\n",
|
||||
nv_encoder->dcb->dpconf.link_bw,
|
||||
nv_encoder->dcb->dpconf.link_nr,
|
||||
@ -446,7 +446,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
|
||||
uint32_t tmp, ctrl, stat = 0, data32[4] = {};
|
||||
int ret = 0, i, index = auxch->rd;
|
||||
|
||||
NV_DEBUG(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
|
||||
NV_DEBUG_KMS(dev, "ch %d cmd %d addr 0x%x len %d\n", index, cmd, addr, data_nr);
|
||||
|
||||
tmp = nv_rd32(dev, NV50_AUXCH_CTRL(auxch->rd));
|
||||
nv_wr32(dev, NV50_AUXCH_CTRL(auxch->rd), tmp | 0x00100000);
|
||||
@ -472,7 +472,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
|
||||
if (!(cmd & 1)) {
|
||||
memcpy(data32, data, data_nr);
|
||||
for (i = 0; i < 4; i++) {
|
||||
NV_DEBUG(dev, "wr %d: 0x%08x\n", i, data32[i]);
|
||||
NV_DEBUG_KMS(dev, "wr %d: 0x%08x\n", i, data32[i]);
|
||||
nv_wr32(dev, NV50_AUXCH_DATA_OUT(index, i), data32[i]);
|
||||
}
|
||||
}
|
||||
@ -504,7 +504,7 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
|
||||
if (cmd & 1) {
|
||||
for (i = 0; i < 4; i++) {
|
||||
data32[i] = nv_rd32(dev, NV50_AUXCH_DATA_IN(index, i));
|
||||
NV_DEBUG(dev, "rd %d: 0x%08x\n", i, data32[i]);
|
||||
NV_DEBUG_KMS(dev, "rd %d: 0x%08x\n", i, data32[i]);
|
||||
}
|
||||
memcpy(data, data32, data_nr);
|
||||
}
|
||||
|
@ -35,6 +35,10 @@
|
||||
|
||||
#include "drm_pciids.h"
|
||||
|
||||
MODULE_PARM_DESC(ctxfw, "Use external firmware blob for grctx init (NV40)");
|
||||
int nouveau_ctxfw = 0;
|
||||
module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
|
||||
|
||||
MODULE_PARM_DESC(noagp, "Disable AGP");
|
||||
int nouveau_noagp;
|
||||
module_param_named(noagp, nouveau_noagp, int, 0400);
|
||||
@ -273,7 +277,7 @@ nouveau_pci_resume(struct pci_dev *pdev)
|
||||
|
||||
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
|
||||
chan = dev_priv->fifos[i];
|
||||
if (!chan)
|
||||
if (!chan || !chan->pushbuf_bo)
|
||||
continue;
|
||||
|
||||
for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
|
||||
@ -341,7 +345,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = nouveau_ttm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -54,6 +54,7 @@ struct nouveau_fpriv {
|
||||
#include "nouveau_drm.h"
|
||||
#include "nouveau_reg.h"
|
||||
#include "nouveau_bios.h"
|
||||
struct nouveau_grctx;
|
||||
|
||||
#define MAX_NUM_DCB_ENTRIES 16
|
||||
|
||||
@ -317,6 +318,7 @@ struct nouveau_pgraph_engine {
|
||||
bool accel_blocked;
|
||||
void *ctxprog;
|
||||
void *ctxvals;
|
||||
int grctx_size;
|
||||
|
||||
int (*init)(struct drm_device *);
|
||||
void (*takedown)(struct drm_device *);
|
||||
@ -647,6 +649,7 @@ extern int nouveau_fbpercrtc;
|
||||
extern char *nouveau_tv_norm;
|
||||
extern int nouveau_reg_debug;
|
||||
extern char *nouveau_vbios;
|
||||
extern int nouveau_ctxfw;
|
||||
|
||||
/* nouveau_state.c */
|
||||
extern void nouveau_preclose(struct drm_device *dev, struct drm_file *);
|
||||
@ -959,9 +962,7 @@ extern int nv40_graph_create_context(struct nouveau_channel *);
|
||||
extern void nv40_graph_destroy_context(struct nouveau_channel *);
|
||||
extern int nv40_graph_load_context(struct nouveau_channel *);
|
||||
extern int nv40_graph_unload_context(struct drm_device *);
|
||||
extern int nv40_grctx_init(struct drm_device *);
|
||||
extern void nv40_grctx_fini(struct drm_device *);
|
||||
extern void nv40_grctx_vals_load(struct drm_device *, struct nouveau_gpuobj *);
|
||||
extern void nv40_grctx_init(struct nouveau_grctx *);
|
||||
|
||||
/* nv50_graph.c */
|
||||
extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
|
||||
@ -975,6 +976,12 @@ extern int nv50_graph_load_context(struct nouveau_channel *);
|
||||
extern int nv50_graph_unload_context(struct drm_device *);
|
||||
extern void nv50_graph_context_switch(struct drm_device *);
|
||||
|
||||
/* nouveau_grctx.c */
|
||||
extern int nouveau_grctx_prog_load(struct drm_device *);
|
||||
extern void nouveau_grctx_vals_load(struct drm_device *,
|
||||
struct nouveau_gpuobj *);
|
||||
extern void nouveau_grctx_fini(struct drm_device *);
|
||||
|
||||
/* nv04_instmem.c */
|
||||
extern int nv04_instmem_init(struct drm_device *);
|
||||
extern void nv04_instmem_takedown(struct drm_device *);
|
||||
@@ -1207,14 +1214,24 @@ static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
 	pci_name(d->pdev), ##arg)
 #ifndef NV_DEBUG_NOTRACE
 #define NV_DEBUG(d, fmt, arg...) do { \
-	if (drm_debug) { \
+	if (drm_debug & DRM_UT_DRIVER) { \
 		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
 			  __LINE__, ##arg); \
 	} \
 } while (0)
+#define NV_DEBUG_KMS(d, fmt, arg...) do { \
+	if (drm_debug & DRM_UT_KMS) { \
+		NV_PRINTK(KERN_DEBUG, d, "%s:%d - " fmt, __func__, \
+			  __LINE__, ##arg); \
+	} \
+} while (0)
 #else
 #define NV_DEBUG(d, fmt, arg...) do { \
-	if (drm_debug) \
+	if (drm_debug & DRM_UT_DRIVER) \
 		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
 } while (0)
+#define NV_DEBUG_KMS(d, fmt, arg...) do { \
+	if (drm_debug & DRM_UT_KMS) \
+		NV_PRINTK(KERN_DEBUG, d, fmt, ##arg); \
+} while (0)
 #endif
|
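The nouveau_drv.h hunk above splits nouveau's debug macros by category: NV_DEBUG now prints only when drm_debug has the DRM_UT_DRIVER bit set, and the new NV_DEBUG_KMS prints only under DRM_UT_KMS, so modesetting output can be enabled independently of general driver debugging. A small userspace C sketch of the same bitmask-gated logging pattern; the bit values and macro names here are invented for illustration and are not the kernel's DRM_UT_* definitions.

#include <stdio.h>

/* Hypothetical debug categories, analogous to DRM_UT_DRIVER / DRM_UT_KMS. */
#define UT_DRIVER 0x1
#define UT_KMS    0x2

static unsigned int debug_mask;   /* set from a module/CLI option in real code */

#define DBG_DRIVER(fmt, ...) do {                             \
	if (debug_mask & UT_DRIVER)                           \
		fprintf(stderr, "[drv] " fmt, ##__VA_ARGS__); \
} while (0)

#define DBG_KMS(fmt, ...) do {                                \
	if (debug_mask & UT_KMS)                              \
		fprintf(stderr, "[kms] " fmt, ##__VA_ARGS__); \
} while (0)

int main(void)
{
	debug_mask = UT_KMS;          /* enable only modesetting messages */
	DBG_DRIVER("not printed\n");
	DBG_KMS("native mode from preferred\n");
	return 0;
}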
@ -58,7 +58,7 @@ nouveau_fbcon_sync(struct fb_info *info)
|
||||
struct nouveau_channel *chan = dev_priv->channel;
|
||||
int ret, i;
|
||||
|
||||
if (!chan->accel_done ||
|
||||
if (!chan || !chan->accel_done ||
|
||||
info->state != FBINFO_STATE_RUNNING ||
|
||||
info->flags & FBINFO_HWACCEL_DISABLED)
|
||||
return 0;
|
||||
@ -318,14 +318,16 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
|
||||
par->nouveau_fb = nouveau_fb;
|
||||
par->dev = dev;
|
||||
|
||||
switch (dev_priv->card_type) {
|
||||
case NV_50:
|
||||
nv50_fbcon_accel_init(info);
|
||||
break;
|
||||
default:
|
||||
nv04_fbcon_accel_init(info);
|
||||
break;
|
||||
};
|
||||
if (dev_priv->channel) {
|
||||
switch (dev_priv->card_type) {
|
||||
case NV_50:
|
||||
nv50_fbcon_accel_init(info);
|
||||
break;
|
||||
default:
|
||||
nv04_fbcon_accel_init(info);
|
||||
break;
|
||||
};
|
||||
}
|
||||
|
||||
nouveau_fbcon_zfill(dev);
|
||||
|
||||
@ -347,7 +349,7 @@ out:
|
||||
int
|
||||
nouveau_fbcon_probe(struct drm_device *dev)
|
||||
{
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
return drm_fb_helper_single_fb_probe(dev, 32, nouveau_fbcon_create);
|
||||
}
|
||||
|
drivers/gpu/drm/nouveau/nouveau_grctx.c (new file, 161 lines)
@ -0,0 +1,161 @@
|
||||
/*
|
||||
* Copyright 2009 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "nouveau_drv.h"
|
||||
|
||||
struct nouveau_ctxprog {
|
||||
uint32_t signature;
|
||||
uint8_t version;
|
||||
uint16_t length;
|
||||
uint32_t data[];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct nouveau_ctxvals {
|
||||
uint32_t signature;
|
||||
uint8_t version;
|
||||
uint32_t length;
|
||||
struct {
|
||||
uint32_t offset;
|
||||
uint32_t value;
|
||||
} data[];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
int
|
||||
nouveau_grctx_prog_load(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
const int chipset = dev_priv->chipset;
|
||||
const struct firmware *fw;
|
||||
const struct nouveau_ctxprog *cp;
|
||||
const struct nouveau_ctxvals *cv;
|
||||
char name[32];
|
||||
int ret, i;
|
||||
|
||||
if (pgraph->accel_blocked)
|
||||
return -ENODEV;
|
||||
|
||||
if (!pgraph->ctxprog) {
|
||||
sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
|
||||
ret = request_firmware(&fw, name, &dev->pdev->dev);
|
||||
if (ret) {
|
||||
NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
|
||||
if (!pgraph->ctxprog) {
|
||||
NV_ERROR(dev, "OOM copying ctxprog\n");
|
||||
release_firmware(fw);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(pgraph->ctxprog, fw->data, fw->size);
|
||||
|
||||
cp = pgraph->ctxprog;
|
||||
if (le32_to_cpu(cp->signature) != 0x5043564e ||
|
||||
cp->version != 0 ||
|
||||
le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
|
||||
NV_ERROR(dev, "ctxprog invalid\n");
|
||||
release_firmware(fw);
|
||||
nouveau_grctx_fini(dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
release_firmware(fw);
|
||||
}
|
||||
|
||||
if (!pgraph->ctxvals) {
|
||||
sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
|
||||
ret = request_firmware(&fw, name, &dev->pdev->dev);
|
||||
if (ret) {
|
||||
NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
|
||||
nouveau_grctx_fini(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
|
||||
if (!pgraph->ctxprog) {
|
||||
NV_ERROR(dev, "OOM copying ctxprog\n");
|
||||
release_firmware(fw);
|
||||
nouveau_grctx_fini(dev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(pgraph->ctxvals, fw->data, fw->size);
|
||||
|
||||
cv = (void *)pgraph->ctxvals;
|
||||
if (le32_to_cpu(cv->signature) != 0x5643564e ||
|
||||
cv->version != 0 ||
|
||||
le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
|
||||
NV_ERROR(dev, "ctxvals invalid\n");
|
||||
release_firmware(fw);
|
||||
nouveau_grctx_fini(dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
release_firmware(fw);
|
||||
}
|
||||
|
||||
cp = pgraph->ctxprog;
|
||||
|
||||
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
|
||||
for (i = 0; i < le16_to_cpu(cp->length); i++)
|
||||
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
|
||||
le32_to_cpu(cp->data[i]));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_grctx_fini(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
|
||||
if (pgraph->ctxprog) {
|
||||
kfree(pgraph->ctxprog);
|
||||
pgraph->ctxprog = NULL;
|
||||
}
|
||||
|
||||
if (pgraph->ctxvals) {
|
||||
kfree(pgraph->ctxprog);
|
||||
pgraph->ctxvals = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
nouveau_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
struct nouveau_ctxvals *cv = pgraph->ctxvals;
|
||||
int i;
|
||||
|
||||
if (!cv)
|
||||
return;
|
||||
|
||||
for (i = 0; i < le32_to_cpu(cv->length); i++)
|
||||
nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
|
||||
le32_to_cpu(cv->data[i].value));
|
||||
}
|
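The new nouveau_grctx.c above adds an optional firmware path for PGRAPH context programs: each blob is fetched with request_firmware(), copied, and rejected unless a small packed header (signature, version, payload length in words) is consistent with the file size, before the payload is written out word by word. A standalone sketch of that header-validation step, under invented constants; the real driver checks what appear to be little-endian "NVCP"/"NVCV" signatures and uses le32_to_cpu/le16_to_cpu, which are omitted here for brevity.

#include <stdint.h>
#include <stddef.h>

/* Hypothetical blob header mirroring the ctxprog layout shown above:
 * a 32-bit magic, an 8-bit version, and a payload length in 32-bit words. */
struct blob_header {
	uint32_t signature;
	uint8_t  version;
	uint16_t length;          /* number of 32-bit payload words */
	uint32_t data[];
} __attribute__((packed));

#define BLOB_MAGIC   0x50435643u  /* made-up value, not the driver's */
#define BLOB_VERSION 0

/* Return 0 if the blob is well formed, -1 otherwise.  `size` is the total
 * file size; the payload must account for everything after the 7-byte
 * header, exactly as the ctxprog check in the diff does. */
static int blob_validate(const struct blob_header *hdr, size_t size)
{
	if (size < sizeof(*hdr))
		return -1;
	if (hdr->signature != BLOB_MAGIC || hdr->version != BLOB_VERSION)
		return -1;
	if (hdr->length != (size - 7) / 4)
		return -1;
	return 0;
}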
drivers/gpu/drm/nouveau/nouveau_grctx.h (new file, 133 lines)
@ -0,0 +1,133 @@
|
||||
#ifndef __NOUVEAU_GRCTX_H__
|
||||
#define __NOUVEAU_GRCTX_H__
|
||||
|
||||
struct nouveau_grctx {
|
||||
struct drm_device *dev;
|
||||
|
||||
enum {
|
||||
NOUVEAU_GRCTX_PROG,
|
||||
NOUVEAU_GRCTX_VALS
|
||||
} mode;
|
||||
void *data;
|
||||
|
||||
uint32_t ctxprog_max;
|
||||
uint32_t ctxprog_len;
|
||||
uint32_t ctxprog_reg;
|
||||
int ctxprog_label[32];
|
||||
uint32_t ctxvals_pos;
|
||||
uint32_t ctxvals_base;
|
||||
};
|
||||
|
||||
#ifdef CP_CTX
|
||||
static inline void
|
||||
cp_out(struct nouveau_grctx *ctx, uint32_t inst)
|
||||
{
|
||||
uint32_t *ctxprog = ctx->data;
|
||||
|
||||
if (ctx->mode != NOUVEAU_GRCTX_PROG)
|
||||
return;
|
||||
|
||||
BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max);
|
||||
ctxprog[ctx->ctxprog_len++] = inst;
|
||||
}
|
||||
|
||||
static inline void
|
||||
cp_lsr(struct nouveau_grctx *ctx, uint32_t val)
|
||||
{
|
||||
cp_out(ctx, CP_LOAD_SR | val);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cp_ctx(struct nouveau_grctx *ctx, uint32_t reg, uint32_t length)
|
||||
{
|
||||
ctx->ctxprog_reg = (reg - 0x00400000) >> 2;
|
||||
|
||||
ctx->ctxvals_base = ctx->ctxvals_pos;
|
||||
ctx->ctxvals_pos = ctx->ctxvals_base + length;
|
||||
|
||||
if (length > (CP_CTX_COUNT >> CP_CTX_COUNT_SHIFT)) {
|
||||
cp_lsr(ctx, length);
|
||||
length = 0;
|
||||
}
|
||||
|
||||
cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg);
|
||||
}
|
||||
|
||||
static inline void
|
||||
cp_name(struct nouveau_grctx *ctx, int name)
|
||||
{
|
||||
uint32_t *ctxprog = ctx->data;
|
||||
int i;
|
||||
|
||||
if (ctx->mode != NOUVEAU_GRCTX_PROG)
|
||||
return;
|
||||
|
||||
ctx->ctxprog_label[name] = ctx->ctxprog_len;
|
||||
for (i = 0; i < ctx->ctxprog_len; i++) {
|
||||
if ((ctxprog[i] & 0xfff00000) != 0xff400000)
|
||||
continue;
|
||||
if ((ctxprog[i] & CP_BRA_IP) != ((name) << CP_BRA_IP_SHIFT))
|
||||
continue;
|
||||
ctxprog[i] = (ctxprog[i] & 0x00ff00ff) |
|
||||
(ctx->ctxprog_len << CP_BRA_IP_SHIFT);
|
||||
}
|
||||
}
|
||||
|
||||
static inline void
|
||||
_cp_bra(struct nouveau_grctx *ctx, u32 mod, int flag, int state, int name)
|
||||
{
|
||||
int ip = 0;
|
||||
|
||||
if (mod != 2) {
|
||||
ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT;
|
||||
if (ip == 0)
|
||||
ip = 0xff000000 | (name << CP_BRA_IP_SHIFT);
|
||||
}
|
||||
|
||||
cp_out(ctx, CP_BRA | (mod << 18) | ip | flag |
|
||||
(state ? 0 : CP_BRA_IF_CLEAR));
|
||||
}
|
||||
#define cp_bra(c,f,s,n) _cp_bra((c), 0, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
|
||||
#ifdef CP_BRA_MOD
|
||||
#define cp_cal(c,f,s,n) _cp_bra((c), 1, CP_FLAG_##f, CP_FLAG_##f##_##s, n)
|
||||
#define cp_ret(c,f,s) _cp_bra((c), 2, CP_FLAG_##f, CP_FLAG_##f##_##s, 0)
|
||||
#endif
|
||||
|
||||
static inline void
|
||||
_cp_wait(struct nouveau_grctx *ctx, int flag, int state)
|
||||
{
|
||||
cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0));
|
||||
}
|
||||
#define cp_wait(c,f,s) _cp_wait((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
|
||||
|
||||
static inline void
|
||||
_cp_set(struct nouveau_grctx *ctx, int flag, int state)
|
||||
{
|
||||
cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0));
|
||||
}
|
||||
#define cp_set(c,f,s) _cp_set((c), CP_FLAG_##f, CP_FLAG_##f##_##s)
|
||||
|
||||
static inline void
|
||||
cp_pos(struct nouveau_grctx *ctx, int offset)
|
||||
{
|
||||
ctx->ctxvals_pos = offset;
|
||||
ctx->ctxvals_base = ctx->ctxvals_pos;
|
||||
|
||||
cp_lsr(ctx, ctx->ctxvals_pos);
|
||||
cp_out(ctx, CP_SET_CONTEXT_POINTER);
|
||||
}
|
||||
|
||||
static inline void
|
||||
gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
|
||||
{
|
||||
if (ctx->mode != NOUVEAU_GRCTX_VALS)
|
||||
return;
|
||||
|
||||
reg = (reg - 0x00400000) / 4;
|
||||
reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
|
||||
|
||||
nv_wo32(ctx->dev, ctx->data, reg, val);
|
||||
}
|
||||
#endif
|
||||
|
||||
#endif
|
@ -61,12 +61,10 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
|
||||
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
|
||||
fn = nouveau_compat_ioctls[nr - DRM_COMMAND_BASE];
|
||||
#endif
|
||||
lock_kernel(); /* XXX for now */
|
||||
if (fn != NULL)
|
||||
ret = (*fn)(filp, cmd, arg);
|
||||
else
|
||||
ret = drm_ioctl(filp->f_dentry->d_inode, filp, cmd, arg);
|
||||
unlock_kernel();
|
||||
ret = drm_ioctl(filp, cmd, arg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -299,94 +299,13 @@ nouveau_vga_set_decode(void *priv, bool state)
|
||||
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
|
||||
}
|
||||
|
||||
int
|
||||
nouveau_card_init(struct drm_device *dev)
|
||||
static int
|
||||
nouveau_card_init_channel(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_engine *engine;
|
||||
struct nouveau_gpuobj *gpuobj;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
|
||||
|
||||
if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
|
||||
return 0;
|
||||
|
||||
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
|
||||
|
||||
/* Initialise internal driver API hooks */
|
||||
ret = nouveau_init_engine_ptrs(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
engine = &dev_priv->engine;
|
||||
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
|
||||
|
||||
/* Parse BIOS tables / Run init tables if card not POSTed */
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = nouveau_bios_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = nouveau_gpuobj_early_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Initialise instance memory, must happen before mem_init so we
|
||||
* know exactly how much VRAM we're able to use for "normal"
|
||||
* purposes.
|
||||
*/
|
||||
ret = engine->instmem.init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* Setup the memory manager */
|
||||
ret = nouveau_mem_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = nouveau_gpuobj_init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* PMC */
|
||||
ret = engine->mc.init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* PTIMER */
|
||||
ret = engine->timer.init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* PFB */
|
||||
ret = engine->fb.init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* PGRAPH */
|
||||
ret = engine->graph.init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* PFIFO */
|
||||
ret = engine->fifo.init(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* this call irq_preinstall, register irq handler and
|
||||
* call irq_postinstall
|
||||
*/
|
||||
ret = drm_irq_install(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
ret = drm_vblank_init(dev, 0);
|
||||
if (ret)
|
||||
return ret;
|
||||
|
||||
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
|
||||
|
||||
ret = nouveau_channel_alloc(dev, &dev_priv->channel,
|
||||
(struct drm_file *)-2,
|
||||
NvDmaFB, NvDmaTT);
|
||||
@ -399,39 +318,133 @@ nouveau_card_init(struct drm_device *dev)
|
||||
NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
|
||||
&gpuobj);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto out_err;
|
||||
|
||||
ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaVRAM,
|
||||
gpuobj, NULL);
|
||||
if (ret) {
|
||||
nouveau_gpuobj_del(dev, &gpuobj);
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
gpuobj = NULL;
|
||||
ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
|
||||
dev_priv->gart_info.aper_size,
|
||||
NV_DMA_ACCESS_RW, &gpuobj, NULL);
|
||||
if (ret)
|
||||
return ret;
|
||||
goto out_err;
|
||||
|
||||
ret = nouveau_gpuobj_ref_add(dev, dev_priv->channel, NvDmaGART,
|
||||
gpuobj, NULL);
|
||||
if (ret) {
|
||||
nouveau_gpuobj_del(dev, &gpuobj);
|
||||
return ret;
|
||||
if (ret)
|
||||
goto out_err;
|
||||
|
||||
return 0;
|
||||
out_err:
|
||||
nouveau_gpuobj_del(dev, &gpuobj);
|
||||
nouveau_channel_free(dev_priv->channel);
|
||||
dev_priv->channel = NULL;
|
||||
return ret;
|
||||
}
|
||||
|
||||
int
|
||||
nouveau_card_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_engine *engine;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "prev state = %d\n", dev_priv->init_state);
|
||||
|
||||
if (dev_priv->init_state == NOUVEAU_CARD_INIT_DONE)
|
||||
return 0;
|
||||
|
||||
vga_client_register(dev->pdev, dev, NULL, nouveau_vga_set_decode);
|
||||
|
||||
/* Initialise internal driver API hooks */
|
||||
ret = nouveau_init_engine_ptrs(dev);
|
||||
if (ret)
|
||||
goto out;
|
||||
engine = &dev_priv->engine;
|
||||
dev_priv->init_state = NOUVEAU_CARD_INIT_FAILED;
|
||||
|
||||
/* Parse BIOS tables / Run init tables if card not POSTed */
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
ret = nouveau_bios_init(dev);
|
||||
if (ret)
|
||||
goto out;
|
||||
}
|
||||
|
||||
ret = nouveau_gpuobj_early_init(dev);
|
||||
if (ret)
|
||||
goto out_bios;
|
||||
|
||||
/* Initialise instance memory, must happen before mem_init so we
|
||||
* know exactly how much VRAM we're able to use for "normal"
|
||||
* purposes.
|
||||
*/
|
||||
ret = engine->instmem.init(dev);
|
||||
if (ret)
|
||||
goto out_gpuobj_early;
|
||||
|
||||
/* Setup the memory manager */
|
||||
ret = nouveau_mem_init(dev);
|
||||
if (ret)
|
||||
goto out_instmem;
|
||||
|
||||
ret = nouveau_gpuobj_init(dev);
|
||||
if (ret)
|
||||
goto out_mem;
|
||||
|
||||
/* PMC */
|
||||
ret = engine->mc.init(dev);
|
||||
if (ret)
|
||||
goto out_gpuobj;
|
||||
|
||||
/* PTIMER */
|
||||
ret = engine->timer.init(dev);
|
||||
if (ret)
|
||||
goto out_mc;
|
||||
|
||||
/* PFB */
|
||||
ret = engine->fb.init(dev);
|
||||
if (ret)
|
||||
goto out_timer;
|
||||
|
||||
/* PGRAPH */
|
||||
ret = engine->graph.init(dev);
|
||||
if (ret)
|
||||
goto out_fb;
|
||||
|
||||
/* PFIFO */
|
||||
ret = engine->fifo.init(dev);
|
||||
if (ret)
|
||||
goto out_graph;
|
||||
|
||||
/* this call irq_preinstall, register irq handler and
|
||||
* call irq_postinstall
|
||||
*/
|
||||
ret = drm_irq_install(dev);
|
||||
if (ret)
|
||||
goto out_fifo;
|
||||
|
||||
ret = drm_vblank_init(dev, 0);
|
||||
if (ret)
|
||||
goto out_irq;
|
||||
|
||||
/* what about PVIDEO/PCRTC/PRAMDAC etc? */
|
||||
|
||||
if (!engine->graph.accel_blocked) {
|
||||
ret = nouveau_card_init_channel(dev);
|
||||
if (ret)
|
||||
goto out_irq;
|
||||
}
|
||||
|
||||
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
|
||||
if (dev_priv->card_type >= NV_50) {
|
||||
if (dev_priv->card_type >= NV_50)
|
||||
ret = nv50_display_create(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
} else {
|
||||
else
|
||||
ret = nv04_display_create(dev);
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
if (ret)
|
||||
goto out_irq;
|
||||
}
|
||||
|
||||
ret = nouveau_backlight_init(dev);
|
||||
@ -444,6 +457,32 @@ nouveau_card_init(struct drm_device *dev)
|
||||
drm_helper_initial_config(dev);
|
||||
|
||||
return 0;
|
||||
|
||||
out_irq:
|
||||
drm_irq_uninstall(dev);
|
||||
out_fifo:
|
||||
engine->fifo.takedown(dev);
|
||||
out_graph:
|
||||
engine->graph.takedown(dev);
|
||||
out_fb:
|
||||
engine->fb.takedown(dev);
|
||||
out_timer:
|
||||
engine->timer.takedown(dev);
|
||||
out_mc:
|
||||
engine->mc.takedown(dev);
|
||||
out_gpuobj:
|
||||
nouveau_gpuobj_takedown(dev);
|
||||
out_mem:
|
||||
nouveau_mem_close(dev);
|
||||
out_instmem:
|
||||
engine->instmem.takedown(dev);
|
||||
out_gpuobj_early:
|
||||
nouveau_gpuobj_late_takedown(dev);
|
||||
out_bios:
|
||||
nouveau_bios_takedown(dev);
|
||||
out:
|
||||
vga_client_register(dev->pdev, NULL, NULL, NULL);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void nouveau_card_takedown(struct drm_device *dev)
|
||||
|
@ -143,10 +143,10 @@ static void nv_crtc_calc_state_ext(struct drm_crtc *crtc, struct drm_display_mod
|
||||
state->pllsel |= nv_crtc->index ? PLLSEL_VPLL2_MASK : PLLSEL_VPLL1_MASK;
|
||||
|
||||
if (pv->NM2)
|
||||
NV_TRACE(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
|
||||
NV_DEBUG_KMS(dev, "vpll: n1 %d n2 %d m1 %d m2 %d log2p %d\n",
|
||||
pv->N1, pv->N2, pv->M1, pv->M2, pv->log2P);
|
||||
else
|
||||
NV_TRACE(dev, "vpll: n %d m %d log2p %d\n",
|
||||
NV_DEBUG_KMS(dev, "vpll: n %d m %d log2p %d\n",
|
||||
pv->N1, pv->M1, pv->log2P);
|
||||
|
||||
nv_crtc->cursor.set_offset(nv_crtc, nv_crtc->cursor.offset);
|
||||
@ -160,7 +160,7 @@ nv_crtc_dpms(struct drm_crtc *crtc, int mode)
|
||||
unsigned char seq1 = 0, crtc17 = 0;
|
||||
unsigned char crtc1A;
|
||||
|
||||
NV_TRACE(dev, "Setting dpms mode %d on CRTC %d\n", mode,
|
||||
NV_DEBUG_KMS(dev, "Setting dpms mode %d on CRTC %d\n", mode,
|
||||
nv_crtc->index);
|
||||
|
||||
if (nv_crtc->last_dpms == mode) /* Don't do unnecesary mode changes. */
|
||||
@ -603,7 +603,7 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
|
||||
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
|
||||
NV_DEBUG(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
|
||||
NV_DEBUG_KMS(dev, "CTRC mode on CRTC %d:\n", nv_crtc->index);
|
||||
drm_mode_debug_printmodeline(adjusted_mode);
|
||||
|
||||
/* unlock must come after turning off FP_TG_CONTROL in output_prepare */
|
||||
@ -703,7 +703,7 @@ static void nv_crtc_destroy(struct drm_crtc *crtc)
|
||||
{
|
||||
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
|
||||
|
||||
NV_DEBUG(crtc->dev, "\n");
|
||||
NV_DEBUG_KMS(crtc->dev, "\n");
|
||||
|
||||
if (!nv_crtc)
|
||||
return;
|
||||
|
@@ -205,7 +205,7 @@ out:
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_CLOCK_INDEX, saved_seq1);

	if (blue == 0x18) {
		NV_TRACE(dev, "Load detected on head A\n");
		NV_INFO(dev, "Load detected on head A\n");
		return connector_status_connected;
	}

@@ -350,14 +350,10 @@ static void nv04_dac_mode_set(struct drm_encoder *encoder,
				struct drm_display_mode *mode,
				struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int head = nouveau_crtc(encoder->crtc)->index;

	NV_TRACE(dev, "%s called for encoder %d\n", __func__,
		nv_encoder->dcb->index);

	if (nv_gf4_disp_arch(dev)) {
		struct drm_encoder *rebind;
		uint32_t dac_offset = nv04_dac_output_offset(encoder);

@@ -466,7 +462,7 @@ static void nv04_dac_destroy(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

	NV_DEBUG(encoder->dev, "\n");
	NV_DEBUG_KMS(encoder->dev, "\n");

	drm_encoder_cleanup(encoder);
	kfree(nv_encoder);
@ -261,7 +261,7 @@ static void nv04_dfp_mode_set(struct drm_encoder *encoder,
|
||||
struct drm_display_mode *output_mode = &nv_encoder->mode;
|
||||
uint32_t mode_ratio, panel_ratio;
|
||||
|
||||
NV_DEBUG(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
|
||||
NV_DEBUG_KMS(dev, "Output mode on CRTC %d:\n", nv_crtc->index);
|
||||
drm_mode_debug_printmodeline(output_mode);
|
||||
|
||||
/* Initialize the FP registers in this CRTC. */
|
||||
@ -413,7 +413,9 @@ static void nv04_dfp_commit(struct drm_encoder *encoder)
|
||||
struct dcb_entry *dcbe = nv_encoder->dcb;
|
||||
int head = nouveau_crtc(encoder->crtc)->index;
|
||||
|
||||
NV_TRACE(dev, "%s called for encoder %d\n", __func__, nv_encoder->dcb->index);
|
||||
NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n",
|
||||
drm_get_connector_name(&nouveau_encoder_connector_get(nv_encoder)->base),
|
||||
nv_crtc->index, '@' + ffs(nv_encoder->dcb->or));
|
||||
|
||||
if (dcbe->type == OUTPUT_TMDS)
|
||||
run_tmds_table(dev, dcbe, head, nv_encoder->mode.clock);
|
||||
@ -550,7 +552,7 @@ static void nv04_dfp_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
||||
|
||||
NV_DEBUG(encoder->dev, "\n");
|
||||
NV_DEBUG_KMS(encoder->dev, "\n");
|
||||
|
||||
drm_encoder_cleanup(encoder);
|
||||
kfree(nv_encoder);
|
||||
|
@ -99,10 +99,11 @@ nv04_display_create(struct drm_device *dev)
|
||||
uint16_t connector[16] = { 0 };
|
||||
int i, ret;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
if (nv_two_heads(dev))
|
||||
nv04_display_store_initial_head_owner(dev);
|
||||
nouveau_hw_save_vga_fonts(dev, 1);
|
||||
|
||||
drm_mode_config_init(dev);
|
||||
drm_mode_create_scaling_mode_property(dev);
|
||||
@ -203,8 +204,6 @@ nv04_display_create(struct drm_device *dev)
|
||||
/* Save previous state */
|
||||
NVLockVgaCrtcs(dev, false);
|
||||
|
||||
nouveau_hw_save_vga_fonts(dev, 1);
|
||||
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
|
||||
crtc->funcs->save(crtc);
|
||||
|
||||
@ -223,7 +222,7 @@ nv04_display_destroy(struct drm_device *dev)
|
||||
struct drm_encoder *encoder;
|
||||
struct drm_crtc *crtc;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
/* Turn every CRTC off. */
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
|
||||
@ -246,9 +245,9 @@ nv04_display_destroy(struct drm_device *dev)
|
||||
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
|
||||
crtc->funcs->restore(crtc);
|
||||
|
||||
nouveau_hw_save_vga_fonts(dev, 0);
|
||||
|
||||
drm_mode_config_cleanup(dev);
|
||||
|
||||
nouveau_hw_save_vga_fonts(dev, 0);
|
||||
}
|
||||
|
||||
void
|
||||
|
@ -543,7 +543,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
|
||||
|
||||
nv_wi32(dev, instance, tmp);
|
||||
nv_wr32(dev, NV04_PGRAPH_CTX_SWITCH1, tmp);
|
||||
nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + subc, tmp);
|
||||
nv_wr32(dev, NV04_PGRAPH_CTX_CACHE1 + (subc<<2), tmp);
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
@ -389,49 +389,50 @@ struct graph_state {
|
||||
int nv10[ARRAY_SIZE(nv10_graph_ctx_regs)];
|
||||
int nv17[ARRAY_SIZE(nv17_graph_ctx_regs)];
|
||||
struct pipe_state pipe_state;
|
||||
uint32_t lma_window[4];
|
||||
};
|
||||
|
||||
#define PIPE_SAVE(dev, state, addr) \
|
||||
do { \
|
||||
int __i; \
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
|
||||
for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
|
||||
state[__i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
|
||||
} while (0)
|
||||
|
||||
#define PIPE_RESTORE(dev, state, addr) \
|
||||
do { \
|
||||
int __i; \
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
|
||||
for (__i = 0; __i < ARRAY_SIZE(state); __i++) \
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, state[__i]); \
|
||||
} while (0)
|
||||
|
||||
static void nv10_graph_save_pipe(struct nouveau_channel *chan)
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
|
||||
struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
|
||||
int i;
|
||||
#define PIPE_SAVE(addr) \
|
||||
do { \
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
|
||||
for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
|
||||
fifo_pipe_state->pipe_##addr[i] = nv_rd32(dev, NV10_PGRAPH_PIPE_DATA); \
|
||||
} while (0)
|
||||
struct pipe_state *pipe = &pgraph_ctx->pipe_state;
|
||||
|
||||
PIPE_SAVE(0x4400);
|
||||
PIPE_SAVE(0x0200);
|
||||
PIPE_SAVE(0x6400);
|
||||
PIPE_SAVE(0x6800);
|
||||
PIPE_SAVE(0x6c00);
|
||||
PIPE_SAVE(0x7000);
|
||||
PIPE_SAVE(0x7400);
|
||||
PIPE_SAVE(0x7800);
|
||||
PIPE_SAVE(0x0040);
|
||||
PIPE_SAVE(0x0000);
|
||||
|
||||
#undef PIPE_SAVE
|
||||
PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x6400, 0x6400);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x6800, 0x6800);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x6c00, 0x6c00);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x7000, 0x7000);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x7400, 0x7400);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x7800, 0x7800);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x0040, 0x0040);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x0000, 0x0000);
|
||||
}
|
||||
|
||||
static void nv10_graph_load_pipe(struct nouveau_channel *chan)
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct graph_state *pgraph_ctx = chan->pgraph_ctx;
|
||||
struct pipe_state *fifo_pipe_state = &pgraph_ctx->pipe_state;
|
||||
int i;
|
||||
struct pipe_state *pipe = &pgraph_ctx->pipe_state;
|
||||
uint32_t xfmode0, xfmode1;
|
||||
#define PIPE_RESTORE(addr) \
|
||||
do { \
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, addr); \
|
||||
for (i = 0; i < ARRAY_SIZE(fifo_pipe_state->pipe_##addr); i++) \
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, fifo_pipe_state->pipe_##addr[i]); \
|
||||
} while (0)
|
||||
|
||||
int i;
|
||||
|
||||
nouveau_wait_for_idle(dev);
|
||||
/* XXX check haiku comments */
|
||||
@ -457,24 +458,22 @@ static void nv10_graph_load_pipe(struct nouveau_channel *chan)
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
|
||||
|
||||
|
||||
PIPE_RESTORE(0x0200);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
|
||||
nouveau_wait_for_idle(dev);
|
||||
|
||||
/* restore XFMODE */
|
||||
nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
|
||||
nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
|
||||
PIPE_RESTORE(0x6400);
|
||||
PIPE_RESTORE(0x6800);
|
||||
PIPE_RESTORE(0x6c00);
|
||||
PIPE_RESTORE(0x7000);
|
||||
PIPE_RESTORE(0x7400);
|
||||
PIPE_RESTORE(0x7800);
|
||||
PIPE_RESTORE(0x4400);
|
||||
PIPE_RESTORE(0x0000);
|
||||
PIPE_RESTORE(0x0040);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x6400, 0x6400);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x6800, 0x6800);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x6c00, 0x6c00);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x7000, 0x7000);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x7400, 0x7400);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x7800, 0x7800);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x0000, 0x0000);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x0040, 0x0040);
|
||||
nouveau_wait_for_idle(dev);
|
||||
|
||||
#undef PIPE_RESTORE
|
||||
}
|
||||
|
||||
static void nv10_graph_create_pipe(struct nouveau_channel *chan)
|
||||
@ -832,6 +831,9 @@ int nv10_graph_init(struct drm_device *dev)
|
||||
(1<<31));
|
||||
if (dev_priv->chipset >= 0x17) {
|
||||
nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x1f000000);
|
||||
nv_wr32(dev, 0x400a10, 0x3ff3fb6);
|
||||
nv_wr32(dev, 0x400838, 0x2f8684);
|
||||
nv_wr32(dev, 0x40083c, 0x115f3f);
|
||||
nv_wr32(dev, 0x004006b0, 0x40000020);
|
||||
} else
|
||||
nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);
|
||||
@ -867,6 +869,115 @@ void nv10_graph_takedown(struct drm_device *dev)
|
||||
{
|
||||
}
|
||||
|
||||
static int
|
||||
nv17_graph_mthd_lma_window(struct nouveau_channel *chan, int grclass,
|
||||
int mthd, uint32_t data)
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct graph_state *ctx = chan->pgraph_ctx;
|
||||
struct pipe_state *pipe = &ctx->pipe_state;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
uint32_t pipe_0x0040[1], pipe_0x64c0[8], pipe_0x6a80[3], pipe_0x6ab0[3];
|
||||
uint32_t xfmode0, xfmode1;
|
||||
int i;
|
||||
|
||||
ctx->lma_window[(mthd - 0x1638) / 4] = data;
|
||||
|
||||
if (mthd != 0x1644)
|
||||
return 0;
|
||||
|
||||
nouveau_wait_for_idle(dev);
|
||||
|
||||
PIPE_SAVE(dev, pipe_0x0040, 0x0040);
|
||||
PIPE_SAVE(dev, pipe->pipe_0x0200, 0x0200);
|
||||
|
||||
PIPE_RESTORE(dev, ctx->lma_window, 0x6790);
|
||||
|
||||
nouveau_wait_for_idle(dev);
|
||||
|
||||
xfmode0 = nv_rd32(dev, NV10_PGRAPH_XFMODE0);
|
||||
xfmode1 = nv_rd32(dev, NV10_PGRAPH_XFMODE1);
|
||||
|
||||
PIPE_SAVE(dev, pipe->pipe_0x4400, 0x4400);
|
||||
PIPE_SAVE(dev, pipe_0x64c0, 0x64c0);
|
||||
PIPE_SAVE(dev, pipe_0x6ab0, 0x6ab0);
|
||||
PIPE_SAVE(dev, pipe_0x6a80, 0x6a80);
|
||||
|
||||
nouveau_wait_for_idle(dev);
|
||||
|
||||
nv_wr32(dev, NV10_PGRAPH_XFMODE0, 0x10000000);
|
||||
nv_wr32(dev, NV10_PGRAPH_XFMODE1, 0x00000000);
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000064c0);
|
||||
for (i = 0; i < 4; i++)
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
|
||||
for (i = 0; i < 4; i++)
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
|
||||
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006ab0);
|
||||
for (i = 0; i < 3; i++)
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x3f800000);
|
||||
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00006a80);
|
||||
for (i = 0; i < 3; i++)
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
|
||||
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x00000040);
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000008);
|
||||
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x0200, 0x0200);
|
||||
|
||||
nouveau_wait_for_idle(dev);
|
||||
|
||||
PIPE_RESTORE(dev, pipe_0x0040, 0x0040);
|
||||
|
||||
nv_wr32(dev, NV10_PGRAPH_XFMODE0, xfmode0);
|
||||
nv_wr32(dev, NV10_PGRAPH_XFMODE1, xfmode1);
|
||||
|
||||
PIPE_RESTORE(dev, pipe_0x64c0, 0x64c0);
|
||||
PIPE_RESTORE(dev, pipe_0x6ab0, 0x6ab0);
|
||||
PIPE_RESTORE(dev, pipe_0x6a80, 0x6a80);
|
||||
PIPE_RESTORE(dev, pipe->pipe_0x4400, 0x4400);
|
||||
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_ADDRESS, 0x000000c0);
|
||||
nv_wr32(dev, NV10_PGRAPH_PIPE_DATA, 0x00000000);
|
||||
|
||||
nouveau_wait_for_idle(dev);
|
||||
|
||||
pgraph->fifo_access(dev, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int
|
||||
nv17_graph_mthd_lma_enable(struct nouveau_channel *chan, int grclass,
|
||||
int mthd, uint32_t data)
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
|
||||
nouveau_wait_for_idle(dev);
|
||||
|
||||
nv_wr32(dev, NV10_PGRAPH_DEBUG_4,
|
||||
nv_rd32(dev, NV10_PGRAPH_DEBUG_4) | 0x1 << 8);
|
||||
nv_wr32(dev, 0x004006b0,
|
||||
nv_rd32(dev, 0x004006b0) | 0x8 << 24);
|
||||
|
||||
pgraph->fifo_access(dev, true);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct nouveau_pgraph_object_method nv17_graph_celsius_mthds[] = {
|
||||
{ 0x1638, nv17_graph_mthd_lma_window },
|
||||
{ 0x163c, nv17_graph_mthd_lma_window },
|
||||
{ 0x1640, nv17_graph_mthd_lma_window },
|
||||
{ 0x1644, nv17_graph_mthd_lma_window },
|
||||
{ 0x1658, nv17_graph_mthd_lma_enable },
|
||||
{}
|
||||
};
|
||||
|
||||
struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
|
||||
{ 0x0030, false, NULL }, /* null */
|
||||
{ 0x0039, false, NULL }, /* m2mf */
|
||||
@ -887,6 +998,6 @@ struct nouveau_pgraph_object_class nv10_graph_grclass[] = {
|
||||
{ 0x0095, false, NULL }, /* multitex_tri */
|
||||
{ 0x0056, false, NULL }, /* celcius (nv10) */
|
||||
{ 0x0096, false, NULL }, /* celcius (nv11) */
|
||||
{ 0x0099, false, NULL }, /* celcius (nv17) */
|
||||
{ 0x0099, false, nv17_graph_celsius_mthds }, /* celcius (nv17) */
|
||||
{}
|
||||
};
|
||||
|
@ -219,7 +219,7 @@ static void nv17_tv_dpms(struct drm_encoder *encoder, int mode)
|
||||
return;
|
||||
nouveau_encoder(encoder)->last_dpms = mode;
|
||||
|
||||
NV_TRACE(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
|
||||
NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n",
|
||||
mode, nouveau_encoder(encoder)->dcb->index);
|
||||
|
||||
regs->ptv_200 &= ~1;
|
||||
@ -619,7 +619,7 @@ static void nv17_tv_destroy(struct drm_encoder *encoder)
|
||||
{
|
||||
struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
|
||||
|
||||
NV_DEBUG(encoder->dev, "\n");
|
||||
NV_DEBUG_KMS(encoder->dev, "\n");
|
||||
|
||||
drm_encoder_cleanup(encoder);
|
||||
kfree(tv_enc);
|
||||
|
@ -24,36 +24,10 @@
|
||||
*
|
||||
*/
|
||||
|
||||
#include <linux/firmware.h>
|
||||
|
||||
#include "drmP.h"
|
||||
#include "drm.h"
|
||||
#include "nouveau_drv.h"
|
||||
|
||||
MODULE_FIRMWARE("nouveau/nv40.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv40.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv41.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv41.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv42.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv42.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv43.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv43.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv44.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv44.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv46.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv46.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv47.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv47.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv49.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv49.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv4a.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv4a.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv4b.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv4b.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv4c.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv4c.ctxvals");
|
||||
MODULE_FIRMWARE("nouveau/nv4e.ctxprog");
|
||||
MODULE_FIRMWARE("nouveau/nv4e.ctxvals");
|
||||
#include "nouveau_grctx.h"
|
||||
|
||||
struct nouveau_channel *
|
||||
nv40_graph_channel(struct drm_device *dev)
|
||||
@ -83,27 +57,30 @@ nv40_graph_create_context(struct nouveau_channel *chan)
|
||||
{
|
||||
struct drm_device *dev = chan->dev;
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_gpuobj *ctx;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
int ret;
|
||||
|
||||
/* Allocate a 175KiB block of PRAMIN to store the context. This
|
||||
* is massive overkill for a lot of chipsets, but it should be safe
|
||||
* until we're able to implement this properly (will happen at more
|
||||
* or less the same time we're able to write our own context programs.
|
||||
*/
|
||||
ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 175*1024, 16,
|
||||
NVOBJ_FLAG_ZERO_ALLOC,
|
||||
&chan->ramin_grctx);
|
||||
ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pgraph->grctx_size,
|
||||
16, NVOBJ_FLAG_ZERO_ALLOC,
|
||||
&chan->ramin_grctx);
|
||||
if (ret)
|
||||
return ret;
|
||||
ctx = chan->ramin_grctx->gpuobj;
|
||||
|
||||
/* Initialise default context values */
|
||||
dev_priv->engine.instmem.prepare_access(dev, true);
|
||||
nv40_grctx_vals_load(dev, ctx);
|
||||
nv_wo32(dev, ctx, 0, ctx->im_pramin->start);
|
||||
dev_priv->engine.instmem.finish_access(dev);
|
||||
if (!pgraph->ctxprog) {
|
||||
struct nouveau_grctx ctx = {};
|
||||
|
||||
ctx.dev = chan->dev;
|
||||
ctx.mode = NOUVEAU_GRCTX_VALS;
|
||||
ctx.data = chan->ramin_grctx->gpuobj;
|
||||
nv40_grctx_init(&ctx);
|
||||
} else {
|
||||
nouveau_grctx_vals_load(dev, chan->ramin_grctx->gpuobj);
|
||||
}
|
||||
nv_wo32(dev, chan->ramin_grctx->gpuobj, 0,
|
||||
chan->ramin_grctx->gpuobj->im_pramin->start);
|
||||
dev_priv->engine.instmem.finish_access(dev);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -204,139 +181,6 @@ nv40_graph_unload_context(struct drm_device *dev)
|
||||
return ret;
|
||||
}
|
||||
|
||||
struct nouveau_ctxprog {
|
||||
uint32_t signature;
|
||||
uint8_t version;
|
||||
uint16_t length;
|
||||
uint32_t data[];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
struct nouveau_ctxvals {
|
||||
uint32_t signature;
|
||||
uint8_t version;
|
||||
uint32_t length;
|
||||
struct {
|
||||
uint32_t offset;
|
||||
uint32_t value;
|
||||
} data[];
|
||||
} __attribute__ ((packed));
|
||||
|
||||
int
|
||||
nv40_grctx_init(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
const int chipset = dev_priv->chipset;
|
||||
const struct firmware *fw;
|
||||
const struct nouveau_ctxprog *cp;
|
||||
const struct nouveau_ctxvals *cv;
|
||||
char name[32];
|
||||
int ret, i;
|
||||
|
||||
pgraph->accel_blocked = true;
|
||||
|
||||
if (!pgraph->ctxprog) {
|
||||
sprintf(name, "nouveau/nv%02x.ctxprog", chipset);
|
||||
ret = request_firmware(&fw, name, &dev->pdev->dev);
|
||||
if (ret) {
|
||||
NV_ERROR(dev, "No ctxprog for NV%02x\n", chipset);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pgraph->ctxprog = kmalloc(fw->size, GFP_KERNEL);
|
||||
if (!pgraph->ctxprog) {
|
||||
NV_ERROR(dev, "OOM copying ctxprog\n");
|
||||
release_firmware(fw);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(pgraph->ctxprog, fw->data, fw->size);
|
||||
|
||||
cp = pgraph->ctxprog;
|
||||
if (le32_to_cpu(cp->signature) != 0x5043564e ||
|
||||
cp->version != 0 ||
|
||||
le16_to_cpu(cp->length) != ((fw->size - 7) / 4)) {
|
||||
NV_ERROR(dev, "ctxprog invalid\n");
|
||||
release_firmware(fw);
|
||||
nv40_grctx_fini(dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
release_firmware(fw);
|
||||
}
|
||||
|
||||
if (!pgraph->ctxvals) {
|
||||
sprintf(name, "nouveau/nv%02x.ctxvals", chipset);
|
||||
ret = request_firmware(&fw, name, &dev->pdev->dev);
|
||||
if (ret) {
|
||||
NV_ERROR(dev, "No ctxvals for NV%02x\n", chipset);
|
||||
nv40_grctx_fini(dev);
|
||||
return ret;
|
||||
}
|
||||
|
||||
pgraph->ctxvals = kmalloc(fw->size, GFP_KERNEL);
|
||||
if (!pgraph->ctxprog) {
|
||||
NV_ERROR(dev, "OOM copying ctxprog\n");
|
||||
release_firmware(fw);
|
||||
nv40_grctx_fini(dev);
|
||||
return -ENOMEM;
|
||||
}
|
||||
memcpy(pgraph->ctxvals, fw->data, fw->size);
|
||||
|
||||
cv = (void *)pgraph->ctxvals;
|
||||
if (le32_to_cpu(cv->signature) != 0x5643564e ||
|
||||
cv->version != 0 ||
|
||||
le32_to_cpu(cv->length) != ((fw->size - 9) / 8)) {
|
||||
NV_ERROR(dev, "ctxvals invalid\n");
|
||||
release_firmware(fw);
|
||||
nv40_grctx_fini(dev);
|
||||
return -EINVAL;
|
||||
}
|
||||
release_firmware(fw);
|
||||
}
|
||||
|
||||
cp = pgraph->ctxprog;
|
||||
|
||||
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
|
||||
for (i = 0; i < le16_to_cpu(cp->length); i++)
|
||||
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA,
|
||||
le32_to_cpu(cp->data[i]));
|
||||
|
||||
pgraph->accel_blocked = false;
|
||||
return 0;
|
||||
}
|
||||
|
||||
void
|
||||
nv40_grctx_fini(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
|
||||
if (pgraph->ctxprog) {
|
||||
kfree(pgraph->ctxprog);
|
||||
pgraph->ctxprog = NULL;
|
||||
}
|
||||
|
||||
if (pgraph->ctxvals) {
|
||||
kfree(pgraph->ctxprog);
|
||||
pgraph->ctxvals = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
nv40_grctx_vals_load(struct drm_device *dev, struct nouveau_gpuobj *ctx)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
|
||||
struct nouveau_ctxvals *cv = pgraph->ctxvals;
|
||||
int i;
|
||||
|
||||
if (!cv)
|
||||
return;
|
||||
|
||||
for (i = 0; i < le32_to_cpu(cv->length); i++)
|
||||
nv_wo32(dev, ctx, le32_to_cpu(cv->data[i].offset),
|
||||
le32_to_cpu(cv->data[i].value));
|
||||
}
|
||||
|
||||
/*
|
||||
* G70 0x47
|
||||
* G71 0x49
|
||||
@ -359,7 +203,26 @@ nv40_graph_init(struct drm_device *dev)
|
||||
nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |
|
||||
NV_PMC_ENABLE_PGRAPH);
|
||||
|
||||
nv40_grctx_init(dev);
|
||||
if (nouveau_ctxfw) {
|
||||
nouveau_grctx_prog_load(dev);
|
||||
dev_priv->engine.graph.grctx_size = 175 * 1024;
|
||||
}
|
||||
|
||||
if (!dev_priv->engine.graph.ctxprog) {
|
||||
struct nouveau_grctx ctx = {};
|
||||
uint32_t cp[256];
|
||||
|
||||
ctx.dev = dev;
|
||||
ctx.mode = NOUVEAU_GRCTX_PROG;
|
||||
ctx.data = cp;
|
||||
ctx.ctxprog_max = 256;
|
||||
nv40_grctx_init(&ctx);
|
||||
dev_priv->engine.graph.grctx_size = ctx.ctxvals_pos * 4;
|
||||
|
||||
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_INDEX, 0);
|
||||
for (i = 0; i < ctx.ctxprog_len; i++)
|
||||
nv_wr32(dev, NV40_PGRAPH_CTXCTL_UCODE_DATA, cp[i]);
|
||||
}
|
||||
|
||||
/* No context present currently */
|
||||
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
|
||||
@ -539,6 +402,7 @@ nv40_graph_init(struct drm_device *dev)
|
||||
|
||||
void nv40_graph_takedown(struct drm_device *dev)
|
||||
{
|
||||
nouveau_grctx_fini(dev);
|
||||
}
|
||||
|
||||
struct nouveau_pgraph_object_class nv40_graph_grclass[] = {
|
||||
drivers/gpu/drm/nouveau/nv40_grctx.c (new file, 678 lines)
@@ -0,0 +1,678 @@
|
||||
/*
|
||||
* Copyright 2009 Red Hat Inc.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the "Software"),
|
||||
* to deal in the Software without restriction, including without limitation
|
||||
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
|
||||
* and/or sell copies of the Software, and to permit persons to whom the
|
||||
* Software is furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
|
||||
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
|
||||
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
* OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
* Authors: Ben Skeggs
|
||||
*/
|
||||
|
||||
/* NVIDIA context programs handle a number of other conditions which are
|
||||
* not implemented in our versions. It's not clear why NVIDIA context
|
||||
* programs have this code, nor whether it's strictly necessary for
|
||||
* correct operation. We'll implement additional handling if/when we
|
||||
* discover it's necessary.
|
||||
*
|
||||
* - On context save, NVIDIA set 0x400314 bit 0 to 1 if the "3D state"
|
||||
* flag is set, this gets saved into the context.
|
||||
* - On context save, the context program for all cards load nsource
|
||||
* into a flag register and check for ILLEGAL_MTHD. If it's set,
|
||||
* opcode 0x60000d is called before resuming normal operation.
|
||||
* - Some context programs check more conditions than the above. NV44
|
||||
* checks: ((nsource & 0x0857) || (0x400718 & 0x0100) || (intr & 0x0001))
|
||||
* and calls 0x60000d before resuming normal operation.
|
||||
* - At the very beginning of NVIDIA's context programs, flag 9 is checked
|
||||
* and if true 0x800001 is called with count=0, pos=0, the flag is cleared
|
||||
* and then the ctxprog is aborted. It looks like a complicated NOP,
|
||||
* its purpose is unknown.
|
||||
* - In the section of code that loads the per-vs state, NVIDIA check
|
||||
* flag 10. If it's set, they only transfer the small 0x300 byte block
|
||||
* of state + the state for a single vs as opposed to the state for
|
||||
* all vs units. It doesn't seem likely that it'll occur in normal
|
||||
* operation, especially seeing as it appears NVIDIA may have screwed
|
||||
* up the ctxprogs for some cards and have an invalid instruction
|
||||
* rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
|
||||
* - There's a number of places where context offset 0 (where we place
|
||||
* the PRAMIN offset of the context) is loaded into either 0x408000,
|
||||
* 0x408004 or 0x408008. Not sure what's up there either.
|
||||
* - The ctxprogs for some cards save 0x400a00 again during the cleanup
|
||||
* path for auto-loadctx.
|
||||
*/
|
||||
|
||||
#define CP_FLAG_CLEAR 0
|
||||
#define CP_FLAG_SET 1
|
||||
#define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0)
|
||||
#define CP_FLAG_SWAP_DIRECTION_LOAD 0
|
||||
#define CP_FLAG_SWAP_DIRECTION_SAVE 1
|
||||
#define CP_FLAG_USER_SAVE ((0 * 32) + 5)
|
||||
#define CP_FLAG_USER_SAVE_NOT_PENDING 0
|
||||
#define CP_FLAG_USER_SAVE_PENDING 1
|
||||
#define CP_FLAG_USER_LOAD ((0 * 32) + 6)
|
||||
#define CP_FLAG_USER_LOAD_NOT_PENDING 0
|
||||
#define CP_FLAG_USER_LOAD_PENDING 1
|
||||
#define CP_FLAG_STATUS ((3 * 32) + 0)
|
||||
#define CP_FLAG_STATUS_IDLE 0
|
||||
#define CP_FLAG_STATUS_BUSY 1
|
||||
#define CP_FLAG_AUTO_SAVE ((3 * 32) + 4)
|
||||
#define CP_FLAG_AUTO_SAVE_NOT_PENDING 0
|
||||
#define CP_FLAG_AUTO_SAVE_PENDING 1
|
||||
#define CP_FLAG_AUTO_LOAD ((3 * 32) + 5)
|
||||
#define CP_FLAG_AUTO_LOAD_NOT_PENDING 0
|
||||
#define CP_FLAG_AUTO_LOAD_PENDING 1
|
||||
#define CP_FLAG_UNK54 ((3 * 32) + 6)
|
||||
#define CP_FLAG_UNK54_CLEAR 0
|
||||
#define CP_FLAG_UNK54_SET 1
|
||||
#define CP_FLAG_ALWAYS ((3 * 32) + 8)
|
||||
#define CP_FLAG_ALWAYS_FALSE 0
|
||||
#define CP_FLAG_ALWAYS_TRUE 1
|
||||
#define CP_FLAG_UNK57 ((3 * 32) + 9)
|
||||
#define CP_FLAG_UNK57_CLEAR 0
|
||||
#define CP_FLAG_UNK57_SET 1
|
||||
|
||||
#define CP_CTX                   0x00100000
#define CP_CTX_COUNT             0x000fc000
#define CP_CTX_COUNT_SHIFT               14
#define CP_CTX_REG               0x00003fff
#define CP_LOAD_SR               0x00200000
#define CP_LOAD_SR_VALUE         0x000fffff
#define CP_BRA                   0x00400000
#define CP_BRA_IP                0x0000ff00
#define CP_BRA_IP_SHIFT                   8
#define CP_BRA_IF_CLEAR          0x00000080
#define CP_BRA_FLAG              0x0000007f
#define CP_WAIT                  0x00500000
#define CP_WAIT_SET              0x00000080
#define CP_WAIT_FLAG             0x0000007f
#define CP_SET                   0x00700000
#define CP_SET_1                 0x00000080
#define CP_SET_FLAG              0x0000007f
#define CP_NEXT_TO_SWAP          0x00600007
#define CP_NEXT_TO_CURRENT       0x00600009
#define CP_SET_CONTEXT_POINTER   0x0060000a
#define CP_END                   0x0060000e
#define CP_LOAD_MAGIC_UNK01      0x00800001 /* unknown */
#define CP_LOAD_MAGIC_NV44TCL    0x00800029 /* per-vs state (0x4497) */
#define CP_LOAD_MAGIC_NV40TCL    0x00800041 /* per-vs state (0x4097) */
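These opcodes and field masks describe how each 32-bit ctxprog word is packed. As a minimal editorial sketch, built only from the defines above (the helper names are hypothetical, not taken from the patch, and it assumes the register field is a dword index relative to the 0x400000 PGRAPH base), two of the encodings could be assembled like this:

#include <stdint.h>

/* Hypothetical helpers, sketched purely from the CP_* defines above. */
static inline uint32_t cp_pack_ctx(uint32_t reg, uint32_t count)
{
	/* CP_CTX: transfer <count> PGRAPH registers starting at <reg>;
	 * assumed to be stored as a dword index from the 0x400000 base. */
	return CP_CTX |
	       ((count << CP_CTX_COUNT_SHIFT) & CP_CTX_COUNT) |
	       (((reg - 0x00400000) >> 2) & CP_CTX_REG);
}

static inline uint32_t cp_pack_bra(uint32_t target_ip, uint32_t flag, int if_clear)
{
	/* CP_BRA: branch to <target_ip> when <flag> is set (or clear). */
	return CP_BRA |
	       ((target_ip << CP_BRA_IP_SHIFT) & CP_BRA_IP) |
	       (if_clear ? CP_BRA_IF_CLEAR : 0) |
	       (flag & CP_BRA_FLAG);
}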
|
||||
|
||||
#include "drmP.h"
|
||||
#include "nouveau_drv.h"
|
||||
#include "nouveau_grctx.h"
|
||||
|
||||
/* TODO:
 *  - get vs count from 0x1540
 *  - document unimplemented bits compared to nvidia
 *  - nsource handling
 *  - R0 & 0x0200 handling
 *  - single-vs handling
 *  - 400314 bit 0
 */

static int
nv40_graph_4097(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if ((dev_priv->chipset & 0xf0) == 0x60)
		return 0;

	return !!(0x0baf & (1 << dev_priv->chipset));
}
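For reference, 0x0baf has bits 0-3, 5, 7-9 and 11 set. Read against the low nibble of the chipset id (an editorial assumption, the patch itself does not spell this out), that would report the 0x4097 (NV40TCL) class present on NV40/41/42/43/45/47/48/49/4B and absent on the remaining NV4x and the NV6x parts rejected above. A tiny standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const unsigned mask = 0x0baf;	/* literal from nv40_graph_4097() above */
	int nib;

	for (nib = 0; nib < 16; nib++)
		printf("chipset 0x4%x: 0x4097 %s\n", nib,
		       (mask & (1u << nib)) ? "present" : "absent");
	return 0;
}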
static int
|
||||
nv40_graph_vs_count(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x47:
|
||||
case 0x49:
|
||||
case 0x4b:
|
||||
return 8;
|
||||
case 0x40:
|
||||
return 6;
|
||||
case 0x41:
|
||||
case 0x42:
|
||||
return 5;
|
||||
case 0x43:
|
||||
case 0x44:
|
||||
case 0x46:
|
||||
case 0x4a:
|
||||
return 3;
|
||||
case 0x4c:
|
||||
case 0x4e:
|
||||
case 0x67:
|
||||
default:
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
enum cp_label {
|
||||
cp_check_load = 1,
|
||||
cp_setup_auto_load,
|
||||
cp_setup_load,
|
||||
cp_setup_save,
|
||||
cp_swap_state,
|
||||
cp_swap_state3d_3_is_save,
|
||||
cp_prepare_exit,
|
||||
cp_exit,
|
||||
};
|
||||
|
||||
static void
|
||||
nv40_graph_construct_general(struct nouveau_grctx *ctx)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
|
||||
int i;
|
||||
|
||||
cp_ctx(ctx, 0x4000a4, 1);
|
||||
gr_def(ctx, 0x4000a4, 0x00000008);
|
||||
cp_ctx(ctx, 0x400144, 58);
|
||||
gr_def(ctx, 0x400144, 0x00000001);
|
||||
cp_ctx(ctx, 0x400314, 1);
|
||||
gr_def(ctx, 0x400314, 0x00000000);
|
||||
cp_ctx(ctx, 0x400400, 10);
|
||||
cp_ctx(ctx, 0x400480, 10);
|
||||
cp_ctx(ctx, 0x400500, 19);
|
||||
gr_def(ctx, 0x400514, 0x00040000);
|
||||
gr_def(ctx, 0x400524, 0x55555555);
|
||||
gr_def(ctx, 0x400528, 0x55555555);
|
||||
gr_def(ctx, 0x40052c, 0x55555555);
|
||||
gr_def(ctx, 0x400530, 0x55555555);
|
||||
cp_ctx(ctx, 0x400560, 6);
|
||||
gr_def(ctx, 0x400568, 0x0000ffff);
|
||||
gr_def(ctx, 0x40056c, 0x0000ffff);
|
||||
cp_ctx(ctx, 0x40057c, 5);
|
||||
cp_ctx(ctx, 0x400710, 3);
|
||||
gr_def(ctx, 0x400710, 0x20010001);
|
||||
gr_def(ctx, 0x400714, 0x0f73ef00);
|
||||
cp_ctx(ctx, 0x400724, 1);
|
||||
gr_def(ctx, 0x400724, 0x02008821);
|
||||
cp_ctx(ctx, 0x400770, 3);
|
||||
if (dev_priv->chipset == 0x40) {
|
||||
cp_ctx(ctx, 0x400814, 4);
|
||||
cp_ctx(ctx, 0x400828, 5);
|
||||
cp_ctx(ctx, 0x400840, 5);
|
||||
gr_def(ctx, 0x400850, 0x00000040);
|
||||
cp_ctx(ctx, 0x400858, 4);
|
||||
gr_def(ctx, 0x400858, 0x00000040);
|
||||
gr_def(ctx, 0x40085c, 0x00000040);
|
||||
gr_def(ctx, 0x400864, 0x80000000);
|
||||
cp_ctx(ctx, 0x40086c, 9);
|
||||
gr_def(ctx, 0x40086c, 0x80000000);
|
||||
gr_def(ctx, 0x400870, 0x80000000);
|
||||
gr_def(ctx, 0x400874, 0x80000000);
|
||||
gr_def(ctx, 0x400878, 0x80000000);
|
||||
gr_def(ctx, 0x400888, 0x00000040);
|
||||
gr_def(ctx, 0x40088c, 0x80000000);
|
||||
cp_ctx(ctx, 0x4009c0, 8);
|
||||
gr_def(ctx, 0x4009cc, 0x80000000);
|
||||
gr_def(ctx, 0x4009dc, 0x80000000);
|
||||
} else {
|
||||
cp_ctx(ctx, 0x400840, 20);
|
||||
if (!nv40_graph_4097(ctx->dev)) {
|
||||
for (i = 0; i < 8; i++)
|
||||
gr_def(ctx, 0x400860 + (i * 4), 0x00000001);
|
||||
}
|
||||
gr_def(ctx, 0x400880, 0x00000040);
|
||||
gr_def(ctx, 0x400884, 0x00000040);
|
||||
gr_def(ctx, 0x400888, 0x00000040);
|
||||
cp_ctx(ctx, 0x400894, 11);
|
||||
gr_def(ctx, 0x400894, 0x00000040);
|
||||
if (nv40_graph_4097(ctx->dev)) {
|
||||
for (i = 0; i < 8; i++)
|
||||
gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000);
|
||||
}
|
||||
cp_ctx(ctx, 0x4008e0, 2);
|
||||
cp_ctx(ctx, 0x4008f8, 2);
|
||||
if (dev_priv->chipset == 0x4c ||
|
||||
(dev_priv->chipset & 0xf0) == 0x60)
|
||||
cp_ctx(ctx, 0x4009f8, 1);
|
||||
}
|
||||
cp_ctx(ctx, 0x400a00, 73);
|
||||
gr_def(ctx, 0x400b0c, 0x0b0b0b0c);
|
||||
cp_ctx(ctx, 0x401000, 4);
|
||||
cp_ctx(ctx, 0x405004, 1);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x47:
|
||||
case 0x49:
|
||||
case 0x4b:
|
||||
cp_ctx(ctx, 0x403448, 1);
|
||||
gr_def(ctx, 0x403448, 0x00001010);
|
||||
break;
|
||||
default:
|
||||
cp_ctx(ctx, 0x403440, 1);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x40:
|
||||
gr_def(ctx, 0x403440, 0x00000010);
|
||||
break;
|
||||
case 0x44:
|
||||
case 0x46:
|
||||
case 0x4a:
|
||||
gr_def(ctx, 0x403440, 0x00003010);
|
||||
break;
|
||||
case 0x41:
|
||||
case 0x42:
|
||||
case 0x43:
|
||||
case 0x4c:
|
||||
case 0x4e:
|
||||
case 0x67:
|
||||
default:
|
||||
gr_def(ctx, 0x403440, 0x00001010);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
nv40_graph_construct_state3d(struct nouveau_grctx *ctx)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
|
||||
int i;
|
||||
|
||||
if (dev_priv->chipset == 0x40) {
|
||||
cp_ctx(ctx, 0x401880, 51);
|
||||
gr_def(ctx, 0x401940, 0x00000100);
|
||||
} else
|
||||
if (dev_priv->chipset == 0x46 || dev_priv->chipset == 0x47 ||
|
||||
dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b) {
|
||||
cp_ctx(ctx, 0x401880, 32);
|
||||
for (i = 0; i < 16; i++)
|
||||
gr_def(ctx, 0x401880 + (i * 4), 0x00000111);
|
||||
if (dev_priv->chipset == 0x46)
|
||||
cp_ctx(ctx, 0x401900, 16);
|
||||
cp_ctx(ctx, 0x401940, 3);
|
||||
}
|
||||
cp_ctx(ctx, 0x40194c, 18);
|
||||
gr_def(ctx, 0x401954, 0x00000111);
|
||||
gr_def(ctx, 0x401958, 0x00080060);
|
||||
gr_def(ctx, 0x401974, 0x00000080);
|
||||
gr_def(ctx, 0x401978, 0xffff0000);
|
||||
gr_def(ctx, 0x40197c, 0x00000001);
|
||||
gr_def(ctx, 0x401990, 0x46400000);
|
||||
if (dev_priv->chipset == 0x40) {
|
||||
cp_ctx(ctx, 0x4019a0, 2);
|
||||
cp_ctx(ctx, 0x4019ac, 5);
|
||||
} else {
|
||||
cp_ctx(ctx, 0x4019a0, 1);
|
||||
cp_ctx(ctx, 0x4019b4, 3);
|
||||
}
|
||||
gr_def(ctx, 0x4019bc, 0xffff0000);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x46:
|
||||
case 0x47:
|
||||
case 0x49:
|
||||
case 0x4b:
|
||||
cp_ctx(ctx, 0x4019c0, 18);
|
||||
for (i = 0; i < 16; i++)
|
||||
gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888);
|
||||
break;
|
||||
}
|
||||
cp_ctx(ctx, 0x401a08, 8);
|
||||
gr_def(ctx, 0x401a10, 0x0fff0000);
|
||||
gr_def(ctx, 0x401a14, 0x0fff0000);
|
||||
gr_def(ctx, 0x401a1c, 0x00011100);
|
||||
cp_ctx(ctx, 0x401a2c, 4);
|
||||
cp_ctx(ctx, 0x401a44, 26);
|
||||
for (i = 0; i < 16; i++)
|
||||
gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000);
|
||||
gr_def(ctx, 0x401a8c, 0x4b7fffff);
|
||||
if (dev_priv->chipset == 0x40) {
|
||||
cp_ctx(ctx, 0x401ab8, 3);
|
||||
} else {
|
||||
cp_ctx(ctx, 0x401ab8, 1);
|
||||
cp_ctx(ctx, 0x401ac0, 1);
|
||||
}
|
||||
cp_ctx(ctx, 0x401ad0, 8);
|
||||
gr_def(ctx, 0x401ad0, 0x30201000);
|
||||
gr_def(ctx, 0x401ad4, 0x70605040);
|
||||
gr_def(ctx, 0x401ad8, 0xb8a89888);
|
||||
gr_def(ctx, 0x401adc, 0xf8e8d8c8);
|
||||
cp_ctx(ctx, 0x401b10, dev_priv->chipset == 0x40 ? 2 : 1);
|
||||
gr_def(ctx, 0x401b10, 0x40100000);
|
||||
cp_ctx(ctx, 0x401b18, dev_priv->chipset == 0x40 ? 6 : 5);
|
||||
gr_def(ctx, 0x401b28, dev_priv->chipset == 0x40 ?
|
||||
0x00000004 : 0x00000000);
|
||||
cp_ctx(ctx, 0x401b30, 25);
|
||||
gr_def(ctx, 0x401b34, 0x0000ffff);
|
||||
gr_def(ctx, 0x401b68, 0x435185d6);
|
||||
gr_def(ctx, 0x401b6c, 0x2155b699);
|
||||
gr_def(ctx, 0x401b70, 0xfedcba98);
|
||||
gr_def(ctx, 0x401b74, 0x00000098);
|
||||
gr_def(ctx, 0x401b84, 0xffffffff);
|
||||
gr_def(ctx, 0x401b88, 0x00ff7000);
|
||||
gr_def(ctx, 0x401b8c, 0x0000ffff);
|
||||
if (dev_priv->chipset != 0x44 && dev_priv->chipset != 0x4a &&
|
||||
dev_priv->chipset != 0x4e)
|
||||
cp_ctx(ctx, 0x401b94, 1);
|
||||
cp_ctx(ctx, 0x401b98, 8);
|
||||
gr_def(ctx, 0x401b9c, 0x00ff0000);
|
||||
cp_ctx(ctx, 0x401bc0, 9);
|
||||
gr_def(ctx, 0x401be0, 0x00ffff00);
|
||||
cp_ctx(ctx, 0x401c00, 192);
|
||||
for (i = 0; i < 16; i++) { /* fragment texture units */
|
||||
gr_def(ctx, 0x401c40 + (i * 4), 0x00018488);
|
||||
gr_def(ctx, 0x401c80 + (i * 4), 0x00028202);
|
||||
gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4);
|
||||
gr_def(ctx, 0x401d40 + (i * 4), 0x01012000);
|
||||
gr_def(ctx, 0x401d80 + (i * 4), 0x00080008);
|
||||
gr_def(ctx, 0x401e00 + (i * 4), 0x00100008);
|
||||
}
|
||||
for (i = 0; i < 4; i++) { /* vertex texture units */
|
||||
gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80);
|
||||
gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202);
|
||||
gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008);
|
||||
gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008);
|
||||
}
|
||||
cp_ctx(ctx, 0x400f5c, 3);
|
||||
gr_def(ctx, 0x400f5c, 0x00000002);
|
||||
cp_ctx(ctx, 0x400f84, 1);
|
||||
}
|
||||
|
||||
static void
|
||||
nv40_graph_construct_state3d_2(struct nouveau_grctx *ctx)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
|
||||
int i;
|
||||
|
||||
cp_ctx(ctx, 0x402000, 1);
|
||||
cp_ctx(ctx, 0x402404, dev_priv->chipset == 0x40 ? 1 : 2);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x40:
|
||||
gr_def(ctx, 0x402404, 0x00000001);
|
||||
break;
|
||||
case 0x4c:
|
||||
case 0x4e:
|
||||
case 0x67:
|
||||
gr_def(ctx, 0x402404, 0x00000020);
|
||||
break;
|
||||
case 0x46:
|
||||
case 0x49:
|
||||
case 0x4b:
|
||||
gr_def(ctx, 0x402404, 0x00000421);
|
||||
break;
|
||||
default:
|
||||
gr_def(ctx, 0x402404, 0x00000021);
|
||||
}
|
||||
if (dev_priv->chipset != 0x40)
|
||||
gr_def(ctx, 0x402408, 0x030c30c3);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x44:
|
||||
case 0x46:
|
||||
case 0x4a:
|
||||
case 0x4c:
|
||||
case 0x4e:
|
||||
case 0x67:
|
||||
cp_ctx(ctx, 0x402440, 1);
|
||||
gr_def(ctx, 0x402440, 0x00011001);
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
cp_ctx(ctx, 0x402480, dev_priv->chipset == 0x40 ? 8 : 9);
|
||||
gr_def(ctx, 0x402488, 0x3e020200);
|
||||
gr_def(ctx, 0x40248c, 0x00ffffff);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x40:
|
||||
gr_def(ctx, 0x402490, 0x60103f00);
|
||||
break;
|
||||
case 0x47:
|
||||
gr_def(ctx, 0x402490, 0x40103f00);
|
||||
break;
|
||||
case 0x41:
|
||||
case 0x42:
|
||||
case 0x49:
|
||||
case 0x4b:
|
||||
gr_def(ctx, 0x402490, 0x20103f00);
|
||||
break;
|
||||
default:
|
||||
gr_def(ctx, 0x402490, 0x0c103f00);
|
||||
break;
|
||||
}
|
||||
gr_def(ctx, 0x40249c, dev_priv->chipset <= 0x43 ?
|
||||
0x00020000 : 0x00040000);
|
||||
cp_ctx(ctx, 0x402500, 31);
|
||||
gr_def(ctx, 0x402530, 0x00008100);
|
||||
if (dev_priv->chipset == 0x40)
|
||||
cp_ctx(ctx, 0x40257c, 6);
|
||||
cp_ctx(ctx, 0x402594, 16);
|
||||
cp_ctx(ctx, 0x402800, 17);
|
||||
gr_def(ctx, 0x402800, 0x00000001);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x47:
|
||||
case 0x49:
|
||||
case 0x4b:
|
||||
cp_ctx(ctx, 0x402864, 1);
|
||||
gr_def(ctx, 0x402864, 0x00001001);
|
||||
cp_ctx(ctx, 0x402870, 3);
|
||||
gr_def(ctx, 0x402878, 0x00000003);
|
||||
if (dev_priv->chipset != 0x47) { /* belong at end!! */
|
||||
cp_ctx(ctx, 0x402900, 1);
|
||||
cp_ctx(ctx, 0x402940, 1);
|
||||
cp_ctx(ctx, 0x402980, 1);
|
||||
cp_ctx(ctx, 0x4029c0, 1);
|
||||
cp_ctx(ctx, 0x402a00, 1);
|
||||
cp_ctx(ctx, 0x402a40, 1);
|
||||
cp_ctx(ctx, 0x402a80, 1);
|
||||
cp_ctx(ctx, 0x402ac0, 1);
|
||||
}
|
||||
break;
|
||||
case 0x40:
|
||||
cp_ctx(ctx, 0x402844, 1);
|
||||
gr_def(ctx, 0x402844, 0x00000001);
|
||||
cp_ctx(ctx, 0x402850, 1);
|
||||
break;
|
||||
default:
|
||||
cp_ctx(ctx, 0x402844, 1);
|
||||
gr_def(ctx, 0x402844, 0x00001001);
|
||||
cp_ctx(ctx, 0x402850, 2);
|
||||
gr_def(ctx, 0x402854, 0x00000003);
|
||||
break;
|
||||
}
|
||||
|
||||
cp_ctx(ctx, 0x402c00, 4);
|
||||
gr_def(ctx, 0x402c00, dev_priv->chipset == 0x40 ?
|
||||
0x80800001 : 0x00888001);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x47:
|
||||
case 0x49:
|
||||
case 0x4b:
|
||||
cp_ctx(ctx, 0x402c20, 40);
|
||||
for (i = 0; i < 32; i++)
|
||||
gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff);
|
||||
cp_ctx(ctx, 0x4030b8, 13);
|
||||
gr_def(ctx, 0x4030dc, 0x00000005);
|
||||
gr_def(ctx, 0x4030e8, 0x0000ffff);
|
||||
break;
|
||||
default:
|
||||
cp_ctx(ctx, 0x402c10, 4);
|
||||
if (dev_priv->chipset == 0x40)
|
||||
cp_ctx(ctx, 0x402c20, 36);
|
||||
else
|
||||
if (dev_priv->chipset <= 0x42)
|
||||
cp_ctx(ctx, 0x402c20, 24);
|
||||
else
|
||||
if (dev_priv->chipset <= 0x4a)
|
||||
cp_ctx(ctx, 0x402c20, 16);
|
||||
else
|
||||
cp_ctx(ctx, 0x402c20, 8);
|
||||
cp_ctx(ctx, 0x402cb0, dev_priv->chipset == 0x40 ? 12 : 13);
|
||||
gr_def(ctx, 0x402cd4, 0x00000005);
|
||||
if (dev_priv->chipset != 0x40)
|
||||
gr_def(ctx, 0x402ce0, 0x0000ffff);
|
||||
break;
|
||||
}
|
||||
|
||||
cp_ctx(ctx, 0x403400, dev_priv->chipset == 0x40 ? 4 : 3);
|
||||
cp_ctx(ctx, 0x403410, dev_priv->chipset == 0x40 ? 4 : 3);
|
||||
cp_ctx(ctx, 0x403420, nv40_graph_vs_count(ctx->dev));
|
||||
for (i = 0; i < nv40_graph_vs_count(ctx->dev); i++)
|
||||
gr_def(ctx, 0x403420 + (i * 4), 0x00005555);
|
||||
|
||||
if (dev_priv->chipset != 0x40) {
|
||||
cp_ctx(ctx, 0x403600, 1);
|
||||
gr_def(ctx, 0x403600, 0x00000001);
|
||||
}
|
||||
cp_ctx(ctx, 0x403800, 1);
|
||||
|
||||
cp_ctx(ctx, 0x403c18, 1);
|
||||
gr_def(ctx, 0x403c18, 0x00000001);
|
||||
switch (dev_priv->chipset) {
|
||||
case 0x46:
|
||||
case 0x47:
|
||||
case 0x49:
|
||||
case 0x4b:
|
||||
cp_ctx(ctx, 0x405018, 1);
|
||||
gr_def(ctx, 0x405018, 0x08e00001);
|
||||
cp_ctx(ctx, 0x405c24, 1);
|
||||
gr_def(ctx, 0x405c24, 0x000e3000);
|
||||
break;
|
||||
}
|
||||
if (dev_priv->chipset != 0x4e)
|
||||
cp_ctx(ctx, 0x405800, 11);
|
||||
cp_ctx(ctx, 0x407000, 1);
|
||||
}
|
||||
|
||||
static void
|
||||
nv40_graph_construct_state3d_3(struct nouveau_grctx *ctx)
|
||||
{
|
||||
int len = nv40_graph_4097(ctx->dev) ? 0x0684 : 0x0084;
|
||||
|
||||
cp_out (ctx, 0x300000);
|
||||
cp_lsr (ctx, len - 4);
|
||||
cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save);
|
||||
cp_lsr (ctx, len);
|
||||
cp_name(ctx, cp_swap_state3d_3_is_save);
|
||||
cp_out (ctx, 0x800001);
|
||||
|
||||
ctx->ctxvals_pos += len;
|
||||
}
|
||||
|
||||
static void
nv40_graph_construct_shader(struct nouveau_grctx *ctx)
{
	struct drm_device *dev = ctx->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *obj = ctx->data;
	int vs, vs_nr, vs_len, vs_nr_b0, vs_nr_b1, b0_offset, b1_offset;
	int offset, i;

	vs_nr    = nv40_graph_vs_count(ctx->dev);
	vs_nr_b0 = 363;
	vs_nr_b1 = dev_priv->chipset == 0x40 ? 128 : 64;
	if (dev_priv->chipset == 0x40) {
		b0_offset = 0x2200/4; /* 33a0 */
		b1_offset = 0x55a0/4; /* 1500 */
		vs_len = 0x6aa0/4;
	} else
	if (dev_priv->chipset == 0x41 || dev_priv->chipset == 0x42) {
		b0_offset = 0x2200/4; /* 2200 */
		b1_offset = 0x4400/4; /* 0b00 */
		vs_len = 0x4f00/4;
	} else {
		b0_offset = 0x1d40/4; /* 2200 */
		b1_offset = 0x3f40/4; /* 0b00 : 0a40 */
		vs_len = nv40_graph_4097(dev) ? 0x4a40/4 : 0x4980/4;
	}

	cp_lsr(ctx, vs_len * vs_nr + 0x300/4);
	cp_out(ctx, nv40_graph_4097(dev) ? 0x800041 : 0x800029);

	offset = ctx->ctxvals_pos;
	ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len));

	if (ctx->mode != NOUVEAU_GRCTX_VALS)
		return;

	offset += 0x0280/4;
	for (i = 0; i < 16; i++, offset += 2)
		nv_wo32(dev, obj, offset, 0x3f800000);

	for (vs = 0; vs < vs_nr; vs++, offset += vs_len) {
		for (i = 0; i < vs_nr_b0 * 6; i += 6)
			nv_wo32(dev, obj, offset + b0_offset + i, 0x00000001);
		for (i = 0; i < vs_nr_b1 * 4; i += 4)
			nv_wo32(dev, obj, offset + b1_offset + i, 0x3f800000);
	}
}
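A quick editorial sanity check of the sizes chosen above, using the chipset 0x40 branch: vs_nr is 6 per nv40_graph_vs_count() and vs_len is 0x6aa0/4 dwords, so the per-vs shader block reserved by cp_lsr() comes to roughly 160 KiB, which accounts for most of the fixed 175 KiB grctx allocation this series replaces with the computed grctx_size:

#include <stdio.h>

int main(void)
{
	/* Values taken from the chipset == 0x40 branch above. */
	unsigned vs_nr  = 6;
	unsigned vs_len = 0x6aa0 / 4;			/* dwords per vs unit */
	unsigned dwords = vs_nr * vs_len + 0x300 / 4;	/* length fed to cp_lsr() */

	/* prints 0xa0b0 dwords, 164544 bytes, ~160 KiB */
	printf("shader block: 0x%x dwords = %u bytes (~%u KiB)\n",
	       dwords, dwords * 4, dwords * 4 / 1024);
	return 0;
}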
|
||||
|
||||
void
|
||||
nv40_grctx_init(struct nouveau_grctx *ctx)
|
||||
{
|
||||
/* decide whether we're loading/unloading the context */
|
||||
cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);
|
||||
cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save);
|
||||
|
||||
cp_name(ctx, cp_check_load);
|
||||
cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load);
|
||||
cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load);
|
||||
cp_bra (ctx, ALWAYS, TRUE, cp_exit);
|
||||
|
||||
/* setup for context load */
|
||||
cp_name(ctx, cp_setup_auto_load);
|
||||
cp_wait(ctx, STATUS, IDLE);
|
||||
cp_out (ctx, CP_NEXT_TO_SWAP);
|
||||
cp_name(ctx, cp_setup_load);
|
||||
cp_wait(ctx, STATUS, IDLE);
|
||||
cp_set (ctx, SWAP_DIRECTION, LOAD);
|
||||
cp_out (ctx, 0x00910880); /* ?? */
|
||||
cp_out (ctx, 0x00901ffe); /* ?? */
|
||||
cp_out (ctx, 0x01940000); /* ?? */
|
||||
cp_lsr (ctx, 0x20);
|
||||
cp_out (ctx, 0x0060000b); /* ?? */
|
||||
cp_wait(ctx, UNK57, CLEAR);
|
||||
cp_out (ctx, 0x0060000c); /* ?? */
|
||||
cp_bra (ctx, ALWAYS, TRUE, cp_swap_state);
|
||||
|
||||
/* setup for context save */
|
||||
cp_name(ctx, cp_setup_save);
|
||||
cp_set (ctx, SWAP_DIRECTION, SAVE);
|
||||
|
||||
/* general PGRAPH state */
|
||||
cp_name(ctx, cp_swap_state);
|
||||
cp_pos (ctx, 0x00020/4);
|
||||
nv40_graph_construct_general(ctx);
|
||||
cp_wait(ctx, STATUS, IDLE);
|
||||
|
||||
/* 3D state, block 1 */
|
||||
cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit);
|
||||
nv40_graph_construct_state3d(ctx);
|
||||
cp_wait(ctx, STATUS, IDLE);
|
||||
|
||||
/* 3D state, block 2 */
|
||||
nv40_graph_construct_state3d_2(ctx);
|
||||
|
||||
/* Some other block of "random" state */
|
||||
nv40_graph_construct_state3d_3(ctx);
|
||||
|
||||
/* Per-vertex shader state */
|
||||
cp_pos (ctx, ctx->ctxvals_pos);
|
||||
nv40_graph_construct_shader(ctx);
|
||||
|
||||
/* pre-exit state updates */
|
||||
cp_name(ctx, cp_prepare_exit);
|
||||
cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load);
|
||||
cp_bra (ctx, USER_SAVE, PENDING, cp_exit);
|
||||
cp_out (ctx, CP_NEXT_TO_CURRENT);
|
||||
|
||||
cp_name(ctx, cp_exit);
|
||||
cp_set (ctx, USER_SAVE, NOT_PENDING);
|
||||
cp_set (ctx, USER_LOAD, NOT_PENDING);
|
||||
cp_out (ctx, CP_END);
|
||||
}
|
||||
|
@ -45,7 +45,7 @@ nv50_crtc_lut_load(struct drm_crtc *crtc)
|
||||
void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
|
||||
int i;
|
||||
|
||||
NV_DEBUG(crtc->dev, "\n");
|
||||
NV_DEBUG_KMS(crtc->dev, "\n");
|
||||
|
||||
for (i = 0; i < 256; i++) {
|
||||
writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
|
||||
@ -68,8 +68,8 @@ nv50_crtc_blank(struct nouveau_crtc *nv_crtc, bool blanked)
|
||||
struct nouveau_channel *evo = dev_priv->evo;
|
||||
int index = nv_crtc->index, ret;
|
||||
|
||||
NV_DEBUG(dev, "index %d\n", nv_crtc->index);
|
||||
NV_DEBUG(dev, "%s\n", blanked ? "blanked" : "unblanked");
|
||||
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
|
||||
NV_DEBUG_KMS(dev, "%s\n", blanked ? "blanked" : "unblanked");
|
||||
|
||||
if (blanked) {
|
||||
nv_crtc->cursor.hide(nv_crtc, false);
|
||||
@ -139,7 +139,7 @@ nv50_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
|
||||
struct nouveau_channel *evo = dev_priv->evo;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
ret = RING_SPACE(evo, 2 + (update ? 2 : 0));
|
||||
if (ret) {
|
||||
@ -193,7 +193,7 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, int scaling_mode, bool update)
|
||||
uint32_t outX, outY, horiz, vert;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
switch (scaling_mode) {
|
||||
case DRM_MODE_SCALE_NONE:
|
||||
@ -301,7 +301,7 @@ nv50_crtc_destroy(struct drm_crtc *crtc)
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
if (!crtc)
|
||||
return;
|
||||
@ -433,7 +433,7 @@ nv50_crtc_prepare(struct drm_crtc *crtc)
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct drm_encoder *encoder;
|
||||
|
||||
NV_DEBUG(dev, "index %d\n", nv_crtc->index);
|
||||
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
|
||||
|
||||
/* Disconnect all unused encoders. */
|
||||
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
|
||||
@ -458,7 +458,7 @@ nv50_crtc_commit(struct drm_crtc *crtc)
|
||||
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "index %d\n", nv_crtc->index);
|
||||
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
|
||||
|
||||
nv50_crtc_blank(nv_crtc, false);
|
||||
|
||||
@ -497,7 +497,7 @@ nv50_crtc_do_mode_set_base(struct drm_crtc *crtc, int x, int y,
|
||||
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
|
||||
int ret, format;
|
||||
|
||||
NV_DEBUG(dev, "index %d\n", nv_crtc->index);
|
||||
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
|
||||
|
||||
switch (drm_fb->depth) {
|
||||
case 8:
|
||||
@ -612,7 +612,7 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
|
||||
|
||||
*nv_crtc->mode = *adjusted_mode;
|
||||
|
||||
NV_DEBUG(dev, "index %d\n", nv_crtc->index);
|
||||
NV_DEBUG_KMS(dev, "index %d\n", nv_crtc->index);
|
||||
|
||||
hsync_dur = adjusted_mode->hsync_end - adjusted_mode->hsync_start;
|
||||
vsync_dur = adjusted_mode->vsync_end - adjusted_mode->vsync_start;
|
||||
@ -706,7 +706,7 @@ nv50_crtc_create(struct drm_device *dev, int index)
|
||||
struct nouveau_crtc *nv_crtc = NULL;
|
||||
int ret, i;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
|
||||
if (!nv_crtc)
|
||||
|
@ -41,7 +41,7 @@ nv50_cursor_show(struct nouveau_crtc *nv_crtc, bool update)
|
||||
struct drm_device *dev = nv_crtc->base.dev;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
if (update && nv_crtc->cursor.visible)
|
||||
return;
|
||||
@ -76,7 +76,7 @@ nv50_cursor_hide(struct nouveau_crtc *nv_crtc, bool update)
|
||||
struct drm_device *dev = nv_crtc->base.dev;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
if (update && !nv_crtc->cursor.visible)
|
||||
return;
|
||||
@ -116,7 +116,7 @@ nv50_cursor_set_pos(struct nouveau_crtc *nv_crtc, int x, int y)
|
||||
static void
|
||||
nv50_cursor_set_offset(struct nouveau_crtc *nv_crtc, uint32_t offset)
|
||||
{
|
||||
NV_DEBUG(nv_crtc->base.dev, "\n");
|
||||
NV_DEBUG_KMS(nv_crtc->base.dev, "\n");
|
||||
if (offset == nv_crtc->cursor.offset)
|
||||
return;
|
||||
|
||||
@ -143,7 +143,7 @@ nv50_cursor_fini(struct nouveau_crtc *nv_crtc)
|
||||
struct drm_device *dev = nv_crtc->base.dev;
|
||||
int idx = nv_crtc->index;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx), 0);
|
||||
if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(idx),
|
||||
|
@ -44,7 +44,7 @@ nv50_dac_disconnect(struct nouveau_encoder *nv_encoder)
|
||||
struct nouveau_channel *evo = dev_priv->evo;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "Disconnecting DAC %d\n", nv_encoder->or);
|
||||
NV_DEBUG_KMS(dev, "Disconnecting DAC %d\n", nv_encoder->or);
|
||||
|
||||
ret = RING_SPACE(evo, 2);
|
||||
if (ret) {
|
||||
@ -81,11 +81,11 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
|
||||
/* Use bios provided value if possible. */
|
||||
if (dev_priv->vbios->dactestval) {
|
||||
load_pattern = dev_priv->vbios->dactestval;
|
||||
NV_DEBUG(dev, "Using bios provided load_pattern of %d\n",
|
||||
NV_DEBUG_KMS(dev, "Using bios provided load_pattern of %d\n",
|
||||
load_pattern);
|
||||
} else {
|
||||
load_pattern = 340;
|
||||
NV_DEBUG(dev, "Using default load_pattern of %d\n",
|
||||
NV_DEBUG_KMS(dev, "Using default load_pattern of %d\n",
|
||||
load_pattern);
|
||||
}
|
||||
|
||||
@ -103,9 +103,9 @@ nv50_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
|
||||
status = connector_status_connected;
|
||||
|
||||
if (status == connector_status_connected)
|
||||
NV_DEBUG(dev, "Load was detected on output with or %d\n", or);
|
||||
NV_DEBUG_KMS(dev, "Load was detected on output with or %d\n", or);
|
||||
else
|
||||
NV_DEBUG(dev, "Load was not detected on output with or %d\n", or);
|
||||
NV_DEBUG_KMS(dev, "Load was not detected on output with or %d\n", or);
|
||||
|
||||
return status;
|
||||
}
|
||||
@ -118,7 +118,7 @@ nv50_dac_dpms(struct drm_encoder *encoder, int mode)
|
||||
uint32_t val;
|
||||
int or = nv_encoder->or;
|
||||
|
||||
NV_DEBUG(dev, "or %d mode %d\n", or, mode);
|
||||
NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
|
||||
|
||||
/* wait for it to be done */
|
||||
if (!nv_wait(NV50_PDISPLAY_DAC_DPMS_CTRL(or),
|
||||
@ -173,7 +173,7 @@ nv50_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
||||
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
||||
struct nouveau_connector *connector;
|
||||
|
||||
NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
|
||||
NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
|
||||
|
||||
connector = nouveau_encoder_connector_get(nv_encoder);
|
||||
if (!connector) {
|
||||
@ -213,7 +213,7 @@ nv50_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
||||
uint32_t mode_ctl = 0, mode_ctl2 = 0;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "or %d\n", nv_encoder->or);
|
||||
NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
|
||||
|
||||
nv50_dac_dpms(encoder, DRM_MODE_DPMS_ON);
|
||||
|
||||
@ -264,7 +264,7 @@ nv50_dac_destroy(struct drm_encoder *encoder)
|
||||
if (!encoder)
|
||||
return;
|
||||
|
||||
NV_DEBUG(encoder->dev, "\n");
|
||||
NV_DEBUG_KMS(encoder->dev, "\n");
|
||||
|
||||
drm_encoder_cleanup(encoder);
|
||||
kfree(nv_encoder);
|
||||
@ -280,7 +280,7 @@ nv50_dac_create(struct drm_device *dev, struct dcb_entry *entry)
|
||||
struct nouveau_encoder *nv_encoder;
|
||||
struct drm_encoder *encoder;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
NV_INFO(dev, "Detected a DAC output\n");
|
||||
|
||||
nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
|
||||
|
@ -188,7 +188,7 @@ nv50_display_init(struct drm_device *dev)
|
||||
uint64_t start;
|
||||
int ret, i;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
|
||||
/*
|
||||
@ -232,7 +232,7 @@ nv50_display_init(struct drm_device *dev)
|
||||
nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
|
||||
/* RAM is clamped to 256 MiB. */
|
||||
ram_amount = nouveau_mem_fb_amount(dev);
|
||||
NV_DEBUG(dev, "ram_amount %d\n", ram_amount);
|
||||
NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
|
||||
if (ram_amount > 256*1024*1024)
|
||||
ram_amount = 256*1024*1024;
|
||||
nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
|
||||
@ -398,7 +398,7 @@ static int nv50_display_disable(struct drm_device *dev)
|
||||
struct drm_crtc *drm_crtc;
|
||||
int ret, i;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
|
||||
struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
|
||||
@ -469,7 +469,7 @@ int nv50_display_create(struct drm_device *dev)
|
||||
uint32_t connector[16] = {};
|
||||
int ret, i;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
/* init basic kernel modesetting */
|
||||
drm_mode_config_init(dev);
|
||||
@ -573,7 +573,7 @@ int nv50_display_destroy(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
drm_mode_config_cleanup(dev);
|
||||
|
||||
@ -617,7 +617,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
|
||||
* CRTC separately, and submission will be blocked by the GPU
|
||||
* until we handle each in turn.
|
||||
*/
|
||||
NV_DEBUG(dev, "0x610030: 0x%08x\n", unk30);
|
||||
NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
|
||||
head = ffs((unk30 >> 9) & 3) - 1;
|
||||
if (head < 0)
|
||||
return -EINVAL;
|
||||
@ -661,7 +661,7 @@ nv50_display_irq_head(struct drm_device *dev, int *phead,
|
||||
or = i;
|
||||
}
|
||||
|
||||
NV_DEBUG(dev, "type %d, or %d\n", type, or);
|
||||
NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
|
||||
if (type == OUTPUT_ANY) {
|
||||
NV_ERROR(dev, "unknown encoder!!\n");
|
||||
return -1;
|
||||
@ -811,7 +811,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
|
||||
pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
|
||||
script = nv50_display_script_select(dev, dcbent, pclk);
|
||||
|
||||
NV_DEBUG(dev, "head %d pxclk: %dKHz\n", head, pclk);
|
||||
NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);
|
||||
|
||||
if (dcbent->type != OUTPUT_DP)
|
||||
nouveau_bios_run_display_table(dev, dcbent, 0, -2);
|
||||
@ -870,7 +870,7 @@ nv50_display_irq_handler_bh(struct work_struct *work)
|
||||
uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
|
||||
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
|
||||
|
||||
NV_DEBUG(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
|
||||
NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);
|
||||
|
||||
if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
|
||||
nv50_display_unk10_handler(dev);
|
||||
@ -974,7 +974,7 @@ nv50_display_irq_handler(struct drm_device *dev)
|
||||
uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
|
||||
uint32_t clock;
|
||||
|
||||
NV_DEBUG(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
|
||||
NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);
|
||||
|
||||
if (!intr0 && !(intr1 & ~delayed))
|
||||
break;
|
||||
|
@ -416,7 +416,7 @@ nv50_fifo_unload_context(struct drm_device *dev)
|
||||
NV_DEBUG(dev, "\n");
|
||||
|
||||
chid = pfifo->channel_id(dev);
|
||||
if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
|
||||
if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
|
||||
return 0;
|
||||
|
||||
chan = dev_priv->fifos[chid];
|
||||
|
@ -107,9 +107,13 @@ nv50_graph_init_regs(struct drm_device *dev)
|
||||
static int
|
||||
nv50_graph_init_ctxctl(struct drm_device *dev)
|
||||
{
|
||||
struct drm_nouveau_private *dev_priv = dev->dev_private;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
|
||||
nv40_grctx_init(dev);
|
||||
nouveau_grctx_prog_load(dev);
|
||||
if (!dev_priv->engine.graph.ctxprog)
|
||||
dev_priv->engine.graph.accel_blocked = true;
|
||||
|
||||
nv_wr32(dev, 0x400320, 4);
|
||||
nv_wr32(dev, NV40_PGRAPH_CTXCTL_CUR, 0);
|
||||
@ -140,7 +144,7 @@ void
|
||||
nv50_graph_takedown(struct drm_device *dev)
|
||||
{
|
||||
NV_DEBUG(dev, "\n");
|
||||
nv40_grctx_fini(dev);
|
||||
nouveau_grctx_fini(dev);
|
||||
}
|
||||
|
||||
void
|
||||
@ -207,7 +211,7 @@ nv50_graph_create_context(struct nouveau_channel *chan)
|
||||
dev_priv->engine.instmem.finish_access(dev);
|
||||
|
||||
dev_priv->engine.instmem.prepare_access(dev, true);
|
||||
nv40_grctx_vals_load(dev, ctx);
|
||||
nouveau_grctx_vals_load(dev, ctx);
|
||||
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
|
||||
if ((dev_priv->chipset & 0xf0) == 0xa0)
|
||||
nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
|
||||
|
@ -44,7 +44,7 @@ nv50_sor_disconnect(struct nouveau_encoder *nv_encoder)
|
||||
struct nouveau_channel *evo = dev_priv->evo;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "Disconnecting SOR %d\n", nv_encoder->or);
|
||||
NV_DEBUG_KMS(dev, "Disconnecting SOR %d\n", nv_encoder->or);
|
||||
|
||||
ret = RING_SPACE(evo, 2);
|
||||
if (ret) {
|
||||
@ -70,7 +70,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder)
|
||||
}
|
||||
|
||||
if (dpe->script0) {
|
||||
NV_DEBUG(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
|
||||
NV_DEBUG_KMS(dev, "SOR-%d: running DP script 0\n", nv_encoder->or);
|
||||
nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script0),
|
||||
nv_encoder->dcb);
|
||||
}
|
||||
@ -79,7 +79,7 @@ nv50_sor_dp_link_train(struct drm_encoder *encoder)
|
||||
NV_ERROR(dev, "SOR-%d: link training failed\n", nv_encoder->or);
|
||||
|
||||
if (dpe->script1) {
|
||||
NV_DEBUG(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
|
||||
NV_DEBUG_KMS(dev, "SOR-%d: running DP script 1\n", nv_encoder->or);
|
||||
nouveau_bios_run_init_table(dev, le16_to_cpu(dpe->script1),
|
||||
nv_encoder->dcb);
|
||||
}
|
||||
@ -93,7 +93,7 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
|
||||
uint32_t val;
|
||||
int or = nv_encoder->or;
|
||||
|
||||
NV_DEBUG(dev, "or %d mode %d\n", or, mode);
|
||||
NV_DEBUG_KMS(dev, "or %d mode %d\n", or, mode);
|
||||
|
||||
/* wait for it to be done */
|
||||
if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_CTRL(or),
|
||||
@ -142,7 +142,7 @@ nv50_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
||||
struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
|
||||
struct nouveau_connector *connector;
|
||||
|
||||
NV_DEBUG(encoder->dev, "or %d\n", nv_encoder->or);
|
||||
NV_DEBUG_KMS(encoder->dev, "or %d\n", nv_encoder->or);
|
||||
|
||||
connector = nouveau_encoder_connector_get(nv_encoder);
|
||||
if (!connector) {
|
||||
@ -182,7 +182,7 @@ nv50_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
|
||||
uint32_t mode_ctl = 0;
|
||||
int ret;
|
||||
|
||||
NV_DEBUG(dev, "or %d\n", nv_encoder->or);
|
||||
NV_DEBUG_KMS(dev, "or %d\n", nv_encoder->or);
|
||||
|
||||
nv50_sor_dpms(encoder, DRM_MODE_DPMS_ON);
|
||||
|
||||
@ -246,7 +246,7 @@ nv50_sor_destroy(struct drm_encoder *encoder)
|
||||
if (!encoder)
|
||||
return;
|
||||
|
||||
NV_DEBUG(encoder->dev, "\n");
|
||||
NV_DEBUG_KMS(encoder->dev, "\n");
|
||||
|
||||
drm_encoder_cleanup(encoder);
|
||||
|
||||
@ -265,7 +265,7 @@ nv50_sor_create(struct drm_device *dev, struct dcb_entry *entry)
|
||||
bool dum;
|
||||
int type;
|
||||
|
||||
NV_DEBUG(dev, "\n");
|
||||
NV_DEBUG_KMS(dev, "\n");
|
||||
|
||||
switch (entry->type) {
|
||||
case OUTPUT_TMDS:
|
||||
|
@ -64,7 +64,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -95,8 +95,7 @@ static int compat_r128_init(struct file *file, unsigned int cmd,
|
||||
&init->agp_textures_offset))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_R128_INIT, (unsigned long)init);
|
||||
return drm_ioctl(file, DRM_IOCTL_R128_INIT, (unsigned long)init);
|
||||
}
|
||||
|
||||
typedef struct drm_r128_depth32 {
|
||||
@ -129,8 +128,7 @@ static int compat_r128_depth(struct file *file, unsigned int cmd,
|
||||
&depth->mask))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
|
||||
return drm_ioctl(file, DRM_IOCTL_R128_DEPTH, (unsigned long)depth);
|
||||
|
||||
}
|
||||
|
||||
@ -153,8 +151,7 @@ static int compat_r128_stipple(struct file *file, unsigned int cmd,
|
||||
&stipple->mask))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
|
||||
return drm_ioctl(file, DRM_IOCTL_R128_STIPPLE, (unsigned long)stipple);
|
||||
}
|
||||
|
||||
typedef struct drm_r128_getparam32 {
|
||||
@ -178,8 +175,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
|
||||
&getparam->value))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
|
||||
return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
|
||||
}
|
||||
|
||||
drm_ioctl_compat_t *r128_compat_ioctls[] = {
|
||||
@ -210,12 +206,10 @@ long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
|
||||
fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
|
||||
|
||||
lock_kernel(); /* XXX for now */
|
||||
if (fn != NULL)
|
||||
ret = (*fn) (filp, cmd, arg);
|
||||
else
|
||||
ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
|
||||
unlock_kernel();
|
||||
ret = drm_ioctl(filp, cmd, arg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -58,6 +58,7 @@ typedef struct {
|
||||
} atom_exec_context;
|
||||
|
||||
int atom_debug = 0;
|
||||
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
|
||||
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
|
||||
|
||||
static uint32_t atom_arg_mask[8] =
|
||||
@ -573,7 +574,7 @@ static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
|
||||
else
|
||||
SDEBUG(" table: %d\n", idx);
|
||||
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
|
||||
atom_execute_table(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
|
||||
atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
|
||||
}
|
||||
|
||||
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
|
||||
@ -1040,7 +1041,7 @@ static struct {
|
||||
atom_op_shr, ATOM_ARG_MC}, {
|
||||
atom_op_debug, 0},};
|
||||
|
||||
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
|
||||
static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
|
||||
{
|
||||
int base = CU16(ctx->cmd_table + 4 + 2 * index);
|
||||
int len, ws, ps, ptr;
|
||||
@ -1092,6 +1093,13 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
|
||||
kfree(ectx.ws);
|
||||
}
|
||||
|
||||
void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
|
||||
{
|
||||
mutex_lock(&ctx->mutex);
|
||||
atom_execute_table_locked(ctx, index, params);
|
||||
mutex_unlock(&ctx->mutex);
|
||||
}
|
||||
|
||||
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
|
||||
|
||||
static void atom_index_iio(struct atom_context *ctx, int base)
|
||||
|
@ -120,6 +120,7 @@ struct card_info {
|
||||
|
||||
struct atom_context {
|
||||
struct card_info *card;
|
||||
struct mutex mutex;
|
||||
void *bios;
|
||||
uint32_t cmd_table, data_table;
|
||||
uint16_t *iio;
|
||||
|
@ -4690,6 +4690,205 @@ typedef struct _ATOM_POWERPLAY_INFO_V3 {
|
||||
ATOM_POWERMODE_INFO_V3 asPowerPlayInfo[ATOM_MAX_NUMBEROF_POWER_BLOCK];
|
||||
} ATOM_POWERPLAY_INFO_V3;
|
||||
|
||||
/* New PPlib */
|
||||
/**************************************************************************/
|
||||
typedef struct _ATOM_PPLIB_THERMALCONTROLLER
|
||||
|
||||
{
|
||||
UCHAR ucType; // one of ATOM_PP_THERMALCONTROLLER_*
|
||||
UCHAR ucI2cLine; // as interpreted by DAL I2C
|
||||
UCHAR ucI2cAddress;
|
||||
UCHAR ucFanParameters; // Fan Control Parameters.
|
||||
UCHAR ucFanMinRPM; // Fan Minimum RPM (hundreds) -- for display purposes only.
|
||||
UCHAR ucFanMaxRPM; // Fan Maximum RPM (hundreds) -- for display purposes only.
|
||||
UCHAR ucReserved; // ----
|
||||
UCHAR ucFlags; // to be defined
|
||||
} ATOM_PPLIB_THERMALCONTROLLER;
|
||||
|
||||
#define ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK 0x0f
|
||||
#define ATOM_PP_FANPARAMETERS_NOFAN 0x80 // No fan is connected to this controller.
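/*
 * Editor's illustrative sketch, not part of the merged patch: per the two
 * masks above, ucFanParameters packs the tachometer pulses-per-revolution
 * count in the low nibble and a "no fan" flag in bit 7.  The helper name
 * is hypothetical.
 */
static void pplib_decode_fan(const ATOM_PPLIB_THERMALCONTROLLER *tc)
{
	if (tc->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) {
		printk(KERN_DEBUG "no fan connected to this controller\n");
		return;
	}
	printk(KERN_DEBUG "tachometer pulses per revolution: %u\n",
	       tc->ucFanParameters &
	       ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK);
}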
#define ATOM_PP_THERMALCONTROLLER_NONE 0
|
||||
#define ATOM_PP_THERMALCONTROLLER_LM63 1 // Not used by PPLib
|
||||
#define ATOM_PP_THERMALCONTROLLER_ADM1032 2 // Not used by PPLib
|
||||
#define ATOM_PP_THERMALCONTROLLER_ADM1030 3 // Not used by PPLib
|
||||
#define ATOM_PP_THERMALCONTROLLER_MUA6649 4 // Not used by PPLib
|
||||
#define ATOM_PP_THERMALCONTROLLER_LM64 5
|
||||
#define ATOM_PP_THERMALCONTROLLER_F75375 6 // Not used by PPLib
|
||||
#define ATOM_PP_THERMALCONTROLLER_RV6xx 7
|
||||
#define ATOM_PP_THERMALCONTROLLER_RV770 8
|
||||
#define ATOM_PP_THERMALCONTROLLER_ADT7473 9
|
||||
|
||||
typedef struct _ATOM_PPLIB_STATE
|
||||
{
|
||||
UCHAR ucNonClockStateIndex;
|
||||
UCHAR ucClockStateIndices[1]; // variable-sized
|
||||
} ATOM_PPLIB_STATE;
|
||||
|
||||
//// ATOM_PPLIB_POWERPLAYTABLE::ulPlatformCaps
|
||||
#define ATOM_PP_PLATFORM_CAP_BACKBIAS 1
|
||||
#define ATOM_PP_PLATFORM_CAP_POWERPLAY 2
|
||||
#define ATOM_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 4
|
||||
#define ATOM_PP_PLATFORM_CAP_ASPM_L0s 8
|
||||
#define ATOM_PP_PLATFORM_CAP_ASPM_L1 16
|
||||
#define ATOM_PP_PLATFORM_CAP_HARDWAREDC 32
|
||||
#define ATOM_PP_PLATFORM_CAP_GEMINIPRIMARY 64
|
||||
#define ATOM_PP_PLATFORM_CAP_STEPVDDC 128
|
||||
#define ATOM_PP_PLATFORM_CAP_VOLTAGECONTROL 256
|
||||
#define ATOM_PP_PLATFORM_CAP_SIDEPORTCONTROL 512
|
||||
#define ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1 1024
|
||||
#define ATOM_PP_PLATFORM_CAP_HTLINKCONTROL 2048
|
||||
|
||||
typedef struct _ATOM_PPLIB_POWERPLAYTABLE
|
||||
{
|
||||
ATOM_COMMON_TABLE_HEADER sHeader;
|
||||
|
||||
UCHAR ucDataRevision;
|
||||
|
||||
UCHAR ucNumStates;
|
||||
UCHAR ucStateEntrySize;
|
||||
UCHAR ucClockInfoSize;
|
||||
UCHAR ucNonClockSize;
|
||||
|
||||
// offset from start of this table to array of ucNumStates ATOM_PPLIB_STATE structures
|
||||
USHORT usStateArrayOffset;
|
||||
|
||||
// offset from start of this table to array of ASIC-specific structures,
|
||||
// currently ATOM_PPLIB_CLOCK_INFO.
|
||||
USHORT usClockInfoArrayOffset;
|
||||
|
||||
// offset from start of this table to array of ATOM_PPLIB_NONCLOCK_INFO
|
||||
USHORT usNonClockInfoArrayOffset;
|
||||
|
||||
USHORT usBackbiasTime; // in microseconds
|
||||
USHORT usVoltageTime; // in microseconds
|
||||
USHORT usTableSize; //the size of this structure, or the extended structure
|
||||
|
||||
ULONG ulPlatformCaps; // See ATOM_PPLIB_CAPS_*
|
||||
|
||||
ATOM_PPLIB_THERMALCONTROLLER sThermalController;
|
||||
|
||||
USHORT usBootClockInfoOffset;
|
||||
USHORT usBootNonClockInfoOffset;
} ATOM_PPLIB_POWERPLAYTABLE;
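/*
 * Editor's illustrative sketch, not part of the merged patch: the offsets in
 * ATOM_PPLIB_POWERPLAYTABLE are relative to the start of the table, and each
 * state entry is ucStateEntrySize bytes (ATOM_PPLIB_STATE is variable sized),
 * so indexing the state array could look like this.  The helper name is
 * hypothetical.
 */
static ATOM_PPLIB_STATE *pplib_state(ATOM_PPLIB_POWERPLAYTABLE *table,
				     UCHAR index)
{
	UCHAR *base = (UCHAR *)table;

	if (index >= table->ucNumStates)
		return NULL;

	return (ATOM_PPLIB_STATE *)(base + table->usStateArrayOffset +
				    index * table->ucStateEntrySize);
}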
//// ATOM_PPLIB_NONCLOCK_INFO::usClassification
|
||||
#define ATOM_PPLIB_CLASSIFICATION_UI_MASK 0x0007
|
||||
#define ATOM_PPLIB_CLASSIFICATION_UI_SHIFT 0
|
||||
#define ATOM_PPLIB_CLASSIFICATION_UI_NONE 0
|
||||
#define ATOM_PPLIB_CLASSIFICATION_UI_BATTERY 1
|
||||
#define ATOM_PPLIB_CLASSIFICATION_UI_BALANCED 3
|
||||
#define ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE 5
|
||||
// 2, 4, 6, 7 are reserved
|
||||
|
||||
#define ATOM_PPLIB_CLASSIFICATION_BOOT 0x0008
|
||||
#define ATOM_PPLIB_CLASSIFICATION_THERMAL 0x0010
|
||||
#define ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE 0x0020
|
||||
#define ATOM_PPLIB_CLASSIFICATION_REST 0x0040
|
||||
#define ATOM_PPLIB_CLASSIFICATION_FORCED 0x0080
|
||||
#define ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE 0x0100
|
||||
#define ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE 0x0200
|
||||
#define ATOM_PPLIB_CLASSIFICATION_UVDSTATE 0x0400
|
||||
#define ATOM_PPLIB_CLASSIFICATION_3DLOW 0x0800
|
||||
#define ATOM_PPLIB_CLASSIFICATION_ACPI 0x1000
|
||||
// remaining 3 bits are reserved
|
||||
|
||||
//// ATOM_PPLIB_NONCLOCK_INFO::ulCapsAndSettings
|
||||
#define ATOM_PPLIB_SINGLE_DISPLAY_ONLY 0x00000001
|
||||
#define ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK 0x00000002
|
||||
|
||||
// 0 is 2.5Gb/s, 1 is 5Gb/s
|
||||
#define ATOM_PPLIB_PCIE_LINK_SPEED_MASK 0x00000004
|
||||
#define ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT 2
|
||||
|
||||
// lanes - 1: 1, 2, 4, 8, 12, 16 permitted by PCIE spec
|
||||
#define ATOM_PPLIB_PCIE_LINK_WIDTH_MASK 0x000000F8
|
||||
#define ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT 3
|
||||
|
||||
// lookup into reduced refresh-rate table
|
||||
#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_MASK 0x00000F00
|
||||
#define ATOM_PPLIB_LIMITED_REFRESHRATE_VALUE_SHIFT 8
|
||||
|
||||
#define ATOM_PPLIB_LIMITED_REFRESHRATE_UNLIMITED 0
|
||||
#define ATOM_PPLIB_LIMITED_REFRESHRATE_50HZ 1
|
||||
// 2-15 TBD as needed.
|
||||
|
||||
#define ATOM_PPLIB_SOFTWARE_DISABLE_LOADBALANCING 0x00001000
|
||||
#define ATOM_PPLIB_SOFTWARE_ENABLE_SLEEP_FOR_TIMESTAMPS 0x00002000
|
||||
#define ATOM_PPLIB_ENABLE_VARIBRIGHT 0x00008000
|
||||
|
||||
#define ATOM_PPLIB_DISALLOW_ON_DC 0x00004000
|
||||
|
||||
// Contained in an array starting at the offset
|
||||
// in ATOM_PPLIB_POWERPLAYTABLE::usNonClockInfoArrayOffset.
|
||||
// referenced from ATOM_PPLIB_STATE_INFO::ucNonClockStateIndex
|
||||
typedef struct _ATOM_PPLIB_NONCLOCK_INFO
|
||||
{
|
||||
USHORT usClassification;
|
||||
UCHAR ucMinTemperature;
|
||||
UCHAR ucMaxTemperature;
|
||||
ULONG ulCapsAndSettings;
|
||||
UCHAR ucRequiredPower;
|
||||
UCHAR ucUnused1[3];
} ATOM_PPLIB_NONCLOCK_INFO;
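/*
 * Editor's illustrative sketch, not part of the merged patch: decoding the
 * PCIE fields of ulCapsAndSettings with the mask/shift definitions above.
 * The link width is stored as "lanes - 1" and the speed bit selects between
 * 2.5 Gb/s and 5 Gb/s.  The helper name is hypothetical.
 */
static void pplib_decode_pcie(const ATOM_PPLIB_NONCLOCK_INFO *info)
{
	ULONG caps = info->ulCapsAndSettings;
	unsigned int lanes = ((caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >>
			      ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
	unsigned int gen2 = (caps & ATOM_PPLIB_PCIE_LINK_SPEED_MASK) >>
			    ATOM_PPLIB_PCIE_LINK_SPEED_SHIFT;

	printk(KERN_DEBUG "pcie link: x%u at %s\n",
	       lanes, gen2 ? "5.0 Gb/s" : "2.5 Gb/s");
}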
// Contained in an array starting at the offset
|
||||
// in ATOM_PPLIB_POWERPLAYTABLE::usClockInfoArrayOffset.
|
||||
// referenced from ATOM_PPLIB_STATE::ucClockStateIndices
|
||||
typedef struct _ATOM_PPLIB_R600_CLOCK_INFO
|
||||
{
|
||||
USHORT usEngineClockLow;
|
||||
UCHAR ucEngineClockHigh;
|
||||
|
||||
USHORT usMemoryClockLow;
|
||||
UCHAR ucMemoryClockHigh;
|
||||
|
||||
USHORT usVDDC;
|
||||
USHORT usUnused1;
|
||||
USHORT usUnused2;
|
||||
|
||||
ULONG ulFlags; // ATOM_PPLIB_R600_FLAGS_*
} ATOM_PPLIB_R600_CLOCK_INFO;
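/*
 * Editor's illustrative sketch, not part of the merged patch: the clocks
 * above are split into a 16-bit low word and an 8-bit high byte, so the
 * full engine clock is reassembled like this (the memory clock works the
 * same way).  The helper name is hypothetical.
 */
static ULONG pplib_r600_engine_clock(const ATOM_PPLIB_R600_CLOCK_INFO *ci)
{
	return ((ULONG)ci->ucEngineClockHigh << 16) | ci->usEngineClockLow;
}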
// ulFlags in ATOM_PPLIB_R600_CLOCK_INFO
|
||||
#define ATOM_PPLIB_R600_FLAGS_PCIEGEN2 1
|
||||
#define ATOM_PPLIB_R600_FLAGS_UVDSAFE 2
|
||||
#define ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE 4
|
||||
#define ATOM_PPLIB_R600_FLAGS_MEMORY_ODT_OFF 8
|
||||
#define ATOM_PPLIB_R600_FLAGS_MEMORY_DLL_OFF 16
|
||||
|
||||
typedef struct _ATOM_PPLIB_RS780_CLOCK_INFO
|
||||
|
||||
{
|
||||
USHORT usLowEngineClockLow; // Low Engine clock in MHz (the same way as on the R600).
|
||||
UCHAR ucLowEngineClockHigh;
|
||||
USHORT usHighEngineClockLow; // High Engine clock in MHz.
|
||||
UCHAR ucHighEngineClockHigh;
|
||||
USHORT usMemoryClockLow; // For now one of the ATOM_PPLIB_RS780_SPMCLK_XXXX constants.
UCHAR ucMemoryClockHigh; // Currently unused.
UCHAR ucPadding; // For proper alignment and size.
|
||||
USHORT usVDDC; // For the 780, use: None, Low, High, Variable
|
||||
UCHAR ucMaxHTLinkWidth; // From SBIOS - {2, 4, 8, 16}
UCHAR ucMinHTLinkWidth; // From SBIOS - {2, 4, 8, 16}. Effective only if CDLW enabled. Minimum downstream width could be bigger due to display BW requirement.
|
||||
USHORT usHTLinkFreq; // See definition ATOM_PPLIB_RS780_HTLINKFREQ_xxx or in MHz(>=200).
|
||||
ULONG ulFlags;
|
||||
} ATOM_PPLIB_RS780_CLOCK_INFO;
|
||||
|
||||
#define ATOM_PPLIB_RS780_VOLTAGE_NONE 0
|
||||
#define ATOM_PPLIB_RS780_VOLTAGE_LOW 1
|
||||
#define ATOM_PPLIB_RS780_VOLTAGE_HIGH 2
|
||||
#define ATOM_PPLIB_RS780_VOLTAGE_VARIABLE 3
|
||||
|
||||
#define ATOM_PPLIB_RS780_SPMCLK_NONE 0 // We cannot change the side port memory clock, leave it as it is.
|
||||
#define ATOM_PPLIB_RS780_SPMCLK_LOW 1
|
||||
#define ATOM_PPLIB_RS780_SPMCLK_HIGH 2
|
||||
|
||||
#define ATOM_PPLIB_RS780_HTLINKFREQ_NONE 0
|
||||
#define ATOM_PPLIB_RS780_HTLINKFREQ_LOW 1
|
||||
#define ATOM_PPLIB_RS780_HTLINKFREQ_HIGH 2
|
||||
|
||||
/**************************************************************************/
|
||||
/* Following definitions are for compatibility issues in different SW components. */
|
@ -2881,6 +2881,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
|
||||
|
||||
for (i = 0; i < track->num_cb; i++) {
|
||||
if (track->cb[i].robj == NULL) {
|
||||
if (!(track->fastfill || track->color_channel_mask ||
|
||||
track->blend_read_enable)) {
|
||||
continue;
|
||||
}
|
||||
DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
@ -67,13 +67,15 @@ struct r100_cs_track {
|
||||
unsigned immd_dwords;
|
||||
unsigned num_arrays;
|
||||
unsigned max_indx;
|
||||
unsigned color_channel_mask;
|
||||
struct r100_cs_track_array arrays[11];
|
||||
struct r100_cs_track_cb cb[R300_MAX_CB];
|
||||
struct r100_cs_track_cb zb;
|
||||
struct r100_cs_track_texture textures[R300_TRACK_MAX_TEXTURE];
|
||||
bool z_enabled;
|
||||
bool separate_cube;
|
||||
|
||||
bool fastfill;
|
||||
bool blend_read_enable;
|
||||
};
|
||||
|
||||
int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track);
|
||||
|
@ -887,6 +887,14 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
track->textures[i].cpp = 1;
|
||||
track->textures[i].compress_format = R100_TRACK_COMP_DXT1;
|
||||
break;
|
||||
case R300_TX_FORMAT_ATI2N:
|
||||
if (p->rdev->family < CHIP_R420) {
|
||||
DRM_ERROR("Invalid texture format %u\n",
|
||||
(idx_value & 0x1F));
|
||||
return -EINVAL;
|
||||
}
|
||||
/* The same rules apply as for DXT3/5. */
|
||||
/* Pass through. */
|
||||
case R300_TX_FORMAT_DXT3:
|
||||
case R300_TX_FORMAT_DXT5:
|
||||
track->textures[i].cpp = 1;
|
||||
@ -951,6 +959,16 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
track->textures[i].width_11 = tmp;
|
||||
tmp = ((idx_value >> 16) & 1) << 11;
|
||||
track->textures[i].height_11 = tmp;
|
||||
|
||||
/* ATI1N */
|
||||
if (idx_value & (1 << 14)) {
|
||||
/* The same rules apply as for DXT1. */
|
||||
track->textures[i].compress_format =
|
||||
R100_TRACK_COMP_DXT1;
|
||||
}
|
||||
} else if (idx_value & (1 << 14)) {
|
||||
DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
break;
|
||||
case 0x4480:
|
||||
@ -992,6 +1010,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
|
||||
}
|
||||
ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
|
||||
break;
|
||||
case 0x4e0c:
|
||||
/* RB3D_COLOR_CHANNEL_MASK */
|
||||
track->color_channel_mask = idx_value;
|
||||
break;
|
||||
case 0x4d1c:
|
||||
/* ZB_BW_CNTL */
|
||||
track->fastfill = !!(idx_value & (1 << 2));
|
||||
break;
|
||||
case 0x4e04:
|
||||
/* RB3D_BLENDCNTL */
|
||||
track->blend_read_enable = !!(idx_value & (1 << 2));
|
||||
break;
|
||||
case 0x4be8:
|
||||
/* valid register only on RV530 */
|
||||
if (p->rdev->family == CHIP_RV530)
|
||||
|
@ -990,7 +990,7 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
|
||||
int sz;
|
||||
int addr;
|
||||
int type;
|
||||
int clamp;
|
||||
int isclamp;
|
||||
int stride;
|
||||
RING_LOCALS;
|
||||
|
||||
@ -999,10 +999,10 @@ static inline int r300_emit_r500fp(drm_radeon_private_t *dev_priv,
|
||||
addr = ((header.r500fp.adrhi_flags & 1) << 8) | header.r500fp.adrlo;
|
||||
|
||||
type = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_TYPE);
|
||||
clamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
|
||||
isclamp = !!(header.r500fp.adrhi_flags & R500FP_CONSTANT_CLAMP);
|
||||
|
||||
addr |= (type << 16);
|
||||
addr |= (clamp << 17);
|
||||
addr |= (isclamp << 17);
|
||||
|
||||
stride = type ? 4 : 6;
|
||||
|
||||
|
@ -900,6 +900,7 @@
|
||||
# define R300_TX_FORMAT_FL_I32 0x1B
|
||||
# define R300_TX_FORMAT_FL_I32A32 0x1C
|
||||
# define R300_TX_FORMAT_FL_R32G32B32A32 0x1D
|
||||
# define R300_TX_FORMAT_ATI2N 0x1F
|
||||
/* alpha modes, convenience mostly */
|
||||
/* if you have alpha, pick constant appropriate to the
|
||||
number of channels (1 for I8, 2 for I8A8, 4 for R8G8B8A8, etc */
|
||||
|
@ -170,7 +170,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
|
||||
idx, relocs_chunk->length_dw);
|
||||
return -EINVAL;
|
||||
}
|
||||
*cs_reloc = &p->relocs[0];
|
||||
*cs_reloc = p->relocs;
|
||||
(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
|
||||
(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
|
||||
return 0;
|
||||
@ -717,7 +717,7 @@ static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
|
||||
if (p->chunk_relocs_idx == -1) {
|
||||
return 0;
|
||||
}
|
||||
p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
|
||||
p->relocs = kzalloc(sizeof(struct radeon_cs_reloc), GFP_KERNEL);
|
||||
if (p->relocs == NULL) {
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
@ -162,6 +162,7 @@ struct radeon_fence_driver {
|
||||
struct list_head created;
|
||||
struct list_head emited;
|
||||
struct list_head signaled;
|
||||
bool initialized;
|
||||
};
|
||||
|
||||
struct radeon_fence {
|
||||
@ -202,8 +203,9 @@ struct radeon_surface_reg {
|
||||
struct radeon_mman {
|
||||
struct ttm_bo_global_ref bo_global_ref;
|
||||
struct ttm_global_reference mem_global_ref;
|
||||
bool mem_global_referenced;
|
||||
struct ttm_bo_device bdev;
|
||||
bool mem_global_referenced;
|
||||
bool initialized;
|
||||
};
|
||||
|
||||
struct radeon_bo {
|
||||
|
@ -33,6 +33,7 @@
|
||||
*/
|
||||
uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev);
|
||||
void radeon_legacy_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock);
|
||||
uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev);
|
||||
void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
|
||||
|
||||
uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev);
|
||||
@ -106,7 +107,7 @@ static struct radeon_asic r100_asic = {
|
||||
.copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
.get_memory_clock = NULL,
|
||||
.get_memory_clock = &radeon_legacy_get_memory_clock,
|
||||
.set_memory_clock = NULL,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_legacy_set_clock_gating,
|
||||
@ -166,7 +167,7 @@ static struct radeon_asic r300_asic = {
|
||||
.copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
.get_memory_clock = NULL,
|
||||
.get_memory_clock = &radeon_legacy_get_memory_clock,
|
||||
.set_memory_clock = NULL,
|
||||
.set_pcie_lanes = &rv370_set_pcie_lanes,
|
||||
.set_clock_gating = &radeon_legacy_set_clock_gating,
|
||||
@ -259,7 +260,7 @@ static struct radeon_asic rs400_asic = {
|
||||
.copy = &r100_copy_blit,
|
||||
.get_engine_clock = &radeon_legacy_get_engine_clock,
|
||||
.set_engine_clock = &radeon_legacy_set_engine_clock,
|
||||
.get_memory_clock = NULL,
|
||||
.get_memory_clock = &radeon_legacy_get_memory_clock,
|
||||
.set_memory_clock = NULL,
|
||||
.set_pcie_lanes = NULL,
|
||||
.set_clock_gating = &radeon_legacy_set_clock_gating,
|
||||
|
@ -745,8 +745,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
|
||||
else
|
||||
radeon_add_legacy_encoder(dev,
|
||||
radeon_get_encoder_id(dev,
|
||||
(1 <<
|
||||
i),
|
||||
(1 << i),
|
||||
dac),
|
||||
(1 << i));
|
||||
}
|
||||
@ -758,32 +757,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
|
||||
if (bios_connectors[j].valid && (i != j)) {
|
||||
if (bios_connectors[i].line_mux ==
|
||||
bios_connectors[j].line_mux) {
|
||||
if (((bios_connectors[i].
|
||||
devices &
|
||||
(ATOM_DEVICE_DFP_SUPPORT))
|
||||
&& (bios_connectors[j].
|
||||
devices &
|
||||
(ATOM_DEVICE_CRT_SUPPORT)))
|
||||
||
|
||||
((bios_connectors[j].
|
||||
devices &
|
||||
(ATOM_DEVICE_DFP_SUPPORT))
|
||||
&& (bios_connectors[i].
|
||||
devices &
|
||||
(ATOM_DEVICE_CRT_SUPPORT)))) {
|
||||
bios_connectors[i].
|
||||
devices |=
|
||||
bios_connectors[j].
|
||||
devices;
|
||||
bios_connectors[i].
|
||||
connector_type =
|
||||
DRM_MODE_CONNECTOR_DVII;
|
||||
if (bios_connectors[j].devices &
|
||||
(ATOM_DEVICE_DFP_SUPPORT))
|
||||
/* make sure not to combine LVDS */
|
||||
if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
bios_connectors[i].line_mux = 53;
|
||||
bios_connectors[i].ddc_bus.valid = false;
|
||||
continue;
|
||||
}
|
||||
if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
bios_connectors[j].line_mux = 53;
|
||||
bios_connectors[j].ddc_bus.valid = false;
|
||||
continue;
|
||||
}
|
||||
/* combine analog and digital for DVI-I */
|
||||
if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
|
||||
(bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
|
||||
((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
|
||||
(bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
|
||||
bios_connectors[i].devices |=
|
||||
bios_connectors[j].devices;
|
||||
bios_connectors[i].connector_type =
|
||||
DRM_MODE_CONNECTOR_DVII;
|
||||
if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
|
||||
bios_connectors[i].hpd =
|
||||
bios_connectors[j].hpd;
|
||||
bios_connectors[j].
|
||||
valid = false;
|
||||
bios_connectors[j].valid = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -1234,6 +1231,61 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
|
||||
return true;
|
||||
}
|
||||
|
||||
enum radeon_tv_std
|
||||
radeon_atombios_get_tv_info(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_mode_info *mode_info = &rdev->mode_info;
|
||||
int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
|
||||
uint16_t data_offset;
|
||||
uint8_t frev, crev;
|
||||
struct _ATOM_ANALOG_TV_INFO *tv_info;
|
||||
enum radeon_tv_std tv_std = TV_STD_NTSC;
|
||||
|
||||
atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
|
||||
|
||||
tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
|
||||
|
||||
switch (tv_info->ucTV_BootUpDefaultStandard) {
|
||||
case ATOM_TV_NTSC:
|
||||
tv_std = TV_STD_NTSC;
|
||||
DRM_INFO("Default TV standard: NTSC\n");
|
||||
break;
|
||||
case ATOM_TV_NTSCJ:
|
||||
tv_std = TV_STD_NTSC_J;
|
||||
DRM_INFO("Default TV standard: NTSC-J\n");
|
||||
break;
|
||||
case ATOM_TV_PAL:
|
||||
tv_std = TV_STD_PAL;
|
||||
DRM_INFO("Default TV standard: PAL\n");
|
||||
break;
|
||||
case ATOM_TV_PALM:
|
||||
tv_std = TV_STD_PAL_M;
|
||||
DRM_INFO("Default TV standard: PAL-M\n");
|
||||
break;
|
||||
case ATOM_TV_PALN:
|
||||
tv_std = TV_STD_PAL_N;
|
||||
DRM_INFO("Default TV standard: PAL-N\n");
|
||||
break;
|
||||
case ATOM_TV_PALCN:
|
||||
tv_std = TV_STD_PAL_CN;
|
||||
DRM_INFO("Default TV standard: PAL-CN\n");
|
||||
break;
|
||||
case ATOM_TV_PAL60:
|
||||
tv_std = TV_STD_PAL_60;
|
||||
DRM_INFO("Default TV standard: PAL-60\n");
|
||||
break;
|
||||
case ATOM_TV_SECAM:
|
||||
tv_std = TV_STD_SECAM;
|
||||
DRM_INFO("Default TV standard: SECAM\n");
|
||||
break;
|
||||
default:
|
||||
tv_std = TV_STD_NTSC;
|
||||
DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
|
||||
break;
|
||||
}
|
||||
return tv_std;
|
||||
}
|
||||
|
||||
struct radeon_encoder_tv_dac *
|
||||
radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
|
||||
{
|
||||
@ -1269,6 +1321,7 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
|
||||
dac = dac_info->ucDAC2_NTSC_DAC_Adjustment;
|
||||
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
|
||||
|
||||
tv_dac->tv_std = radeon_atombios_get_tv_info(rdev);
|
||||
}
|
||||
return tv_dac;
|
||||
}
|
||||
|
@ -62,7 +62,7 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
|
||||
}
|
||||
|
||||
/* 10 khz */
|
||||
static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
|
||||
uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
|
||||
{
|
||||
struct radeon_pll *mpll = &rdev->clock.mpll;
|
||||
uint32_t fb_div, ref_div, post_div, mclk;
|
||||
|
@ -634,11 +634,10 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
|
||||
return p_dac;
|
||||
}
|
||||
|
||||
static enum radeon_tv_std
|
||||
radeon_combios_get_tv_info(struct radeon_encoder *encoder)
|
||||
enum radeon_tv_std
|
||||
radeon_combios_get_tv_info(struct radeon_device *rdev)
|
||||
{
|
||||
struct drm_device *dev = encoder->base.dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
struct drm_device *dev = rdev->ddev;
|
||||
uint16_t tv_info;
|
||||
enum radeon_tv_std tv_std = TV_STD_NTSC;
|
||||
|
||||
@ -779,7 +778,7 @@ struct radeon_encoder_tv_dac *radeon_combios_get_tv_dac_info(struct
|
||||
tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20);
|
||||
found = 1;
|
||||
}
|
||||
tv_dac->tv_std = radeon_combios_get_tv_info(encoder);
|
||||
tv_dac->tv_std = radeon_combios_get_tv_info(rdev);
|
||||
}
|
||||
if (!found) {
|
||||
/* then check CRT table */
|
||||
|
@ -208,6 +208,18 @@ static struct drm_display_mode *radeon_fp_native_mode(struct drm_encoder *encode
|
||||
drm_mode_set_name(mode);
|
||||
|
||||
DRM_DEBUG("Adding native panel mode %s\n", mode->name);
|
||||
} else if (native_mode->hdisplay != 0 &&
|
||||
native_mode->vdisplay != 0) {
|
||||
/* mac laptops without an edid */
|
||||
/* Note that this is not necessarily the exact panel mode,
|
||||
* but an approximation based on the cvt formula. For these
|
||||
* systems we should ideally read the mode info out of the
|
||||
* registers or add a mode table, but this works and is much
|
||||
* simpler.
|
||||
*/
|
||||
mode = drm_cvt_mode(dev, native_mode->hdisplay, native_mode->vdisplay, 60, true, false, false);
|
||||
mode->type = DRM_MODE_TYPE_PREFERRED | DRM_MODE_TYPE_DRIVER;
|
||||
DRM_DEBUG("Adding cvt approximation of native panel mode %s\n", mode->name);
|
||||
}
|
||||
return mode;
|
||||
}
|
||||
@ -1171,7 +1183,7 @@ radeon_add_atom_connector(struct drm_device *dev,
|
||||
1);
|
||||
drm_connector_attach_property(&radeon_connector->base,
|
||||
rdev->mode_info.tv_std_property,
|
||||
1);
|
||||
radeon_atombios_get_tv_info(rdev));
|
||||
}
|
||||
break;
|
||||
case DRM_MODE_CONNECTOR_LVDS:
|
||||
@ -1315,7 +1327,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
|
||||
1);
|
||||
drm_connector_attach_property(&radeon_connector->base,
|
||||
rdev->mode_info.tv_std_property,
|
||||
1);
|
||||
radeon_combios_get_tv_info(rdev));
|
||||
}
|
||||
break;
|
||||
case DRM_MODE_CONNECTOR_LVDS:
|
||||
|
@ -391,6 +391,12 @@ int radeon_asic_init(struct radeon_device *rdev)
|
||||
/* FIXME: not supported yet */
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (rdev->flags & RADEON_IS_IGP) {
|
||||
rdev->asic->get_memory_clock = NULL;
|
||||
rdev->asic->set_memory_clock = NULL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -481,6 +487,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
|
||||
atom_card_info->pll_write = cail_pll_write;
|
||||
|
||||
rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
|
||||
mutex_init(&rdev->mode_info.atom_context->mutex);
|
||||
radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
|
||||
atom_allocate_fb_scratch(rdev->mode_info.atom_context);
|
||||
return 0;
|
||||
@ -539,9 +546,72 @@ void radeon_agp_disable(struct radeon_device *rdev)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Radeon device.
|
||||
*/
|
||||
void radeon_check_arguments(struct radeon_device *rdev)
|
||||
{
|
||||
/* vramlimit must be a power of two */
|
||||
switch (radeon_vram_limit) {
|
||||
case 0:
|
||||
case 4:
|
||||
case 8:
|
||||
case 16:
|
||||
case 32:
|
||||
case 64:
|
||||
case 128:
|
||||
case 256:
|
||||
case 512:
|
||||
case 1024:
|
||||
case 2048:
|
||||
case 4096:
|
||||
break;
|
||||
default:
|
||||
dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
|
||||
radeon_vram_limit);
|
||||
radeon_vram_limit = 0;
|
||||
break;
|
||||
}
|
||||
radeon_vram_limit = radeon_vram_limit << 20;
|
||||
/* gtt size must be power of two and greater or equal to 32M */
|
||||
switch (radeon_gart_size) {
|
||||
case 4:
|
||||
case 8:
|
||||
case 16:
|
||||
dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
|
||||
radeon_gart_size);
|
||||
radeon_gart_size = 512;
|
||||
break;
|
||||
case 32:
|
||||
case 64:
|
||||
case 128:
|
||||
case 256:
|
||||
case 512:
|
||||
case 1024:
|
||||
case 2048:
|
||||
case 4096:
|
||||
break;
|
||||
default:
|
||||
dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
|
||||
radeon_gart_size);
|
||||
radeon_gart_size = 512;
|
||||
break;
|
||||
}
|
||||
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
|
||||
/* AGP mode can only be -1, 1, 2, 4, 8 */
|
||||
switch (radeon_agpmode) {
|
||||
case -1:
|
||||
case 0:
|
||||
case 1:
|
||||
case 2:
|
||||
case 4:
|
||||
case 8:
|
||||
break;
|
||||
default:
|
||||
dev_warn(rdev->dev, "invalid AGP mode %d (valid mode: "
|
||||
"-1, 0, 1, 2, 4, 8)\n", radeon_agpmode);
|
||||
radeon_agpmode = 0;
|
||||
break;
|
||||
}
}
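/*
 * Editor's reading aid, not part of the merged patch: the switch statements
 * above enumerate the accepted powers of two explicitly; an equivalent
 * validity check for the vram limit could use is_power_of_2() from
 * <linux/log2.h>.  The helper below is hypothetical.
 */
static bool radeon_vram_limit_valid(int vram_limit)
{
	return vram_limit == 0 ||
	       (vram_limit >= 4 && vram_limit <= 4096 &&
		is_power_of_2(vram_limit));
}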
int radeon_device_init(struct radeon_device *rdev,
|
||||
struct drm_device *ddev,
|
||||
struct pci_dev *pdev,
|
||||
@ -580,9 +650,9 @@ int radeon_device_init(struct radeon_device *rdev,
|
||||
|
||||
/* Set asic functions */
|
||||
r = radeon_asic_init(rdev);
|
||||
if (r) {
|
||||
if (r)
|
||||
return r;
|
||||
}
|
||||
radeon_check_arguments(rdev);
|
||||
|
||||
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
|
||||
radeon_agp_disable(rdev);
|
||||
|
@ -739,7 +739,7 @@ static struct drm_prop_enum_list radeon_tv_std_enum_list[] =
|
||||
{ TV_STD_SECAM, "secam" },
|
||||
};
|
||||
|
||||
int radeon_modeset_create_props(struct radeon_device *rdev)
|
||||
static int radeon_modeset_create_props(struct radeon_device *rdev)
|
||||
{
|
||||
int i, sz;
|
||||
|
||||
|
@ -196,7 +196,7 @@ static struct drm_driver driver_old = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
@ -284,7 +284,7 @@ static struct drm_driver kms_driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = radeon_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -233,6 +233,8 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
|
||||
if (!ASIC_IS_AVIVO(rdev)) {
|
||||
adjusted_mode->hdisplay = mode->hdisplay;
|
||||
adjusted_mode->vdisplay = mode->vdisplay;
|
||||
adjusted_mode->crtc_hdisplay = mode->hdisplay;
|
||||
adjusted_mode->crtc_vdisplay = mode->vdisplay;
|
||||
}
|
||||
adjusted_mode->base.id = mode_id;
|
||||
}
|
||||
@ -495,9 +497,9 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
|
||||
args.v1.ucMisc |= PANEL_ENCODER_MISC_HDMI_TYPE;
|
||||
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
|
||||
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
if (dig->lvds_misc & (1 << 0))
|
||||
if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
|
||||
args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
|
||||
if (dig->lvds_misc & (1 << 1))
|
||||
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
|
||||
args.v1.ucMisc |= (1 << 1);
|
||||
} else {
|
||||
if (dig_connector->linkb)
|
||||
@ -524,18 +526,18 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
|
||||
args.v2.ucTemporal = 0;
|
||||
args.v2.ucFRC = 0;
|
||||
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
|
||||
if (dig->lvds_misc & (1 << 0))
|
||||
if (dig->lvds_misc & ATOM_PANEL_MISC_DUAL)
|
||||
args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
|
||||
if (dig->lvds_misc & (1 << 5)) {
|
||||
if (dig->lvds_misc & ATOM_PANEL_MISC_SPATIAL) {
|
||||
args.v2.ucSpatial = PANEL_ENCODER_SPATIAL_DITHER_EN;
|
||||
if (dig->lvds_misc & (1 << 1))
|
||||
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
|
||||
args.v2.ucSpatial |= PANEL_ENCODER_SPATIAL_DITHER_DEPTH;
|
||||
}
|
||||
if (dig->lvds_misc & (1 << 6)) {
|
||||
if (dig->lvds_misc & ATOM_PANEL_MISC_TEMPORAL) {
|
||||
args.v2.ucTemporal = PANEL_ENCODER_TEMPORAL_DITHER_EN;
|
||||
if (dig->lvds_misc & (1 << 1))
|
||||
if (dig->lvds_misc & ATOM_PANEL_MISC_888RGB)
|
||||
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_DITHER_DEPTH;
|
||||
if (((dig->lvds_misc >> 2) & 0x3) == 2)
|
||||
if (((dig->lvds_misc >> ATOM_PANEL_MISC_GREY_LEVEL_SHIFT) & 0x3) == 2)
|
||||
args.v2.ucTemporal |= PANEL_ENCODER_TEMPORAL_LEVEL_4;
|
||||
}
|
||||
} else {
|
||||
|
@ -324,7 +324,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
|
||||
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
|
||||
r = radeon_scratch_get(rdev, &rdev->fence_drv.scratch_reg);
|
||||
if (r) {
|
||||
DRM_ERROR("Fence failed to get a scratch register.");
|
||||
dev_err(rdev->dev, "fence failed to get scratch register\n");
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
return r;
|
||||
}
|
||||
@ -335,9 +335,10 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
|
||||
INIT_LIST_HEAD(&rdev->fence_drv.signaled);
|
||||
rdev->fence_drv.count_timeout = 0;
|
||||
init_waitqueue_head(&rdev->fence_drv.queue);
|
||||
rdev->fence_drv.initialized = true;
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
if (radeon_debugfs_fence_init(rdev)) {
|
||||
DRM_ERROR("Failed to register debugfs file for fence !\n");
|
||||
dev_err(rdev->dev, "fence debugfs file creation failed\n");
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
@ -346,11 +347,13 @@ void radeon_fence_driver_fini(struct radeon_device *rdev)
|
||||
{
|
||||
unsigned long irq_flags;
|
||||
|
||||
if (!rdev->fence_drv.initialized)
|
||||
return;
|
||||
wake_up_all(&rdev->fence_drv.queue);
|
||||
write_lock_irqsave(&rdev->fence_drv.lock, irq_flags);
|
||||
radeon_scratch_free(rdev, rdev->fence_drv.scratch_reg);
|
||||
write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
|
||||
DRM_INFO("radeon: fence finalized\n");
|
||||
rdev->fence_drv.initialized = false;
|
||||
}
|
||||
|
||||
|
||||
|
@ -92,8 +92,7 @@ static int compat_radeon_cp_init(struct file *file, unsigned int cmd,
|
||||
&init->gart_textures_offset))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_CP_INIT, (unsigned long)init);
|
||||
}
|
||||
|
||||
typedef struct drm_radeon_clear32 {
|
||||
@ -125,8 +124,7 @@ static int compat_radeon_cp_clear(struct file *file, unsigned int cmd,
|
||||
&clr->depth_boxes))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_CLEAR, (unsigned long)clr);
|
||||
}
|
||||
|
||||
typedef struct drm_radeon_stipple32 {
|
||||
@ -149,8 +147,7 @@ static int compat_radeon_cp_stipple(struct file *file, unsigned int cmd,
|
||||
&request->mask))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_STIPPLE, (unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_radeon_tex_image32 {
|
||||
@ -204,8 +201,7 @@ static int compat_radeon_cp_texture(struct file *file, unsigned int cmd,
|
||||
&image->data))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_TEXTURE, (unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_radeon_vertex2_32 {
|
||||
@ -238,8 +234,7 @@ static int compat_radeon_cp_vertex2(struct file *file, unsigned int cmd,
|
||||
&request->prim))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_VERTEX2, (unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_radeon_cmd_buffer32 {
|
||||
@ -268,8 +263,7 @@ static int compat_radeon_cp_cmdbuf(struct file *file, unsigned int cmd,
|
||||
&request->boxes))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_CMDBUF, (unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_radeon_getparam32 {
|
||||
@ -293,8 +287,7 @@ static int compat_radeon_cp_getparam(struct file *file, unsigned int cmd,
|
||||
&request->value))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_GETPARAM, (unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_radeon_mem_alloc32 {
|
||||
@ -322,8 +315,7 @@ static int compat_radeon_mem_alloc(struct file *file, unsigned int cmd,
|
||||
&request->region_offset))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_ALLOC, (unsigned long)request);
|
||||
}
|
||||
|
||||
typedef struct drm_radeon_irq_emit32 {
|
||||
@ -345,8 +337,7 @@ static int compat_radeon_irq_emit(struct file *file, unsigned int cmd,
|
||||
&request->irq_seq))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_path.dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_IRQ_EMIT, (unsigned long)request);
|
||||
}
|
||||
|
||||
/* The two 64-bit arches where alignof(u64)==4 in 32-bit code */
|
||||
@ -372,8 +363,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
|
||||
&request->value))
|
||||
return -EFAULT;
|
||||
|
||||
return drm_ioctl(file->f_dentry->d_inode, file,
|
||||
DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
|
||||
return drm_ioctl(file, DRM_IOCTL_RADEON_SETPARAM, (unsigned long) request);
|
||||
}
|
||||
#else
|
||||
#define compat_radeon_cp_setparam NULL
|
||||
@ -413,12 +403,10 @@ long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
|
||||
if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
|
||||
fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
|
||||
|
||||
lock_kernel(); /* XXX for now */
|
||||
if (fn != NULL)
|
||||
ret = (*fn) (filp, cmd, arg);
|
||||
else
|
||||
ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
|
||||
unlock_kernel();
|
||||
ret = drm_ioctl(filp, cmd, arg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
@ -431,9 +419,7 @@ long radeon_kms_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long
|
||||
if (nr < DRM_COMMAND_BASE)
|
||||
return drm_compat_ioctl(filp, cmd, arg);
|
||||
|
||||
lock_kernel(); /* XXX for now */
|
||||
ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
|
||||
unlock_kernel();
|
||||
ret = drm_ioctl(filp, cmd, arg);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
@ -43,8 +43,7 @@ static void radeon_overscan_setup(struct drm_crtc *crtc,
|
||||
}
|
||||
|
||||
static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
|
||||
struct drm_display_mode *mode,
|
||||
struct drm_display_mode *adjusted_mode)
|
||||
struct drm_display_mode *mode)
|
||||
{
|
||||
struct drm_device *dev = crtc->dev;
|
||||
struct radeon_device *rdev = dev->dev_private;
|
||||
@ -1059,7 +1058,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
|
||||
radeon_set_pll(crtc, adjusted_mode);
|
||||
radeon_overscan_setup(crtc, adjusted_mode);
|
||||
if (radeon_crtc->crtc_id == 0) {
|
||||
radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
|
||||
radeon_legacy_rmx_mode_set(crtc, adjusted_mode);
|
||||
} else {
|
||||
if (radeon_crtc->rmx_type != RMX_OFF) {
|
||||
/* FIXME: only first crtc has rmx what should we
|
||||
|
@ -207,6 +207,8 @@ static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
|
||||
*adjusted_mode = *native_mode;
|
||||
adjusted_mode->hdisplay = mode->hdisplay;
|
||||
adjusted_mode->vdisplay = mode->vdisplay;
|
||||
adjusted_mode->crtc_hdisplay = mode->hdisplay;
|
||||
adjusted_mode->crtc_vdisplay = mode->vdisplay;
|
||||
adjusted_mode->base.id = mode_id;
|
||||
}
|
||||
|
||||
|
@ -88,6 +88,7 @@ enum radeon_tv_std {
|
||||
TV_STD_SCART_PAL,
|
||||
TV_STD_SECAM,
|
||||
TV_STD_PAL_CN,
|
||||
TV_STD_PAL_N,
|
||||
};
|
||||
|
||||
/* radeon gpio-based i2c
|
||||
@ -395,6 +396,11 @@ struct radeon_framebuffer {
|
||||
struct drm_gem_object *obj;
|
||||
};
|
||||
|
||||
extern enum radeon_tv_std
|
||||
radeon_combios_get_tv_info(struct radeon_device *rdev);
|
||||
extern enum radeon_tv_std
|
||||
radeon_atombios_get_tv_info(struct radeon_device *rdev);
|
||||
|
||||
extern void radeon_connector_hotplug(struct drm_connector *connector);
|
||||
extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
|
||||
extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
|
||||
|
@ -42,8 +42,8 @@ void radeon_test_moves(struct radeon_device *rdev)
|
||||
/* Number of tests =
|
||||
* (Total GTT - IB pool - writeback page - ring buffer) / test size
|
||||
*/
|
||||
n = (rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
|
||||
rdev->cp.ring_size) / size;
|
||||
n = ((u32)(rdev->mc.gtt_size - RADEON_IB_POOL_SIZE*64*1024 - RADEON_GPU_PAGE_SIZE -
|
||||
rdev->cp.ring_size)) / size;
|
||||
|
||||
gtt_obj = kzalloc(n * sizeof(*gtt_obj), GFP_KERNEL);
|
||||
if (!gtt_obj) {
|
||||
|
@ -494,6 +494,7 @@ int radeon_ttm_init(struct radeon_device *rdev)
|
||||
DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
|
||||
return r;
|
||||
}
|
||||
rdev->mman.initialized = true;
|
||||
r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
|
||||
rdev->mc.real_vram_size >> PAGE_SHIFT);
|
||||
if (r) {
|
||||
@ -541,6 +542,8 @@ void radeon_ttm_fini(struct radeon_device *rdev)
|
||||
{
|
||||
int r;
|
||||
|
||||
if (!rdev->mman.initialized)
|
||||
return;
|
||||
if (rdev->stollen_vga_memory) {
|
||||
r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
|
||||
if (r == 0) {
|
||||
@ -554,6 +557,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
|
||||
ttm_bo_device_release(&rdev->mman.bdev);
|
||||
radeon_gart_fini(rdev);
|
||||
radeon_ttm_global_fini(rdev);
|
||||
rdev->mman.initialized = false;
|
||||
DRM_INFO("radeon: ttm finalized\n");
|
||||
}
|
||||
|
||||
|
@ -50,7 +50,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -80,7 +80,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -48,7 +48,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -58,7 +58,7 @@ static struct drm_driver driver = {
|
||||
.owner = THIS_MODULE,
|
||||
.open = drm_open,
|
||||
.release = drm_release,
|
||||
.ioctl = drm_ioctl,
|
||||
.unlocked_ioctl = drm_ioctl,
|
||||
.mmap = drm_mmap,
|
||||
.poll = drm_poll,
|
||||
.fasync = drm_fasync,
|
||||
|
@ -103,37 +103,39 @@
|
||||
*/
|
||||
|
||||
static struct drm_ioctl_desc vmw_ioctls[] = {
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl, 0),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_GET_PARAM, vmw_getparam_ioctl,
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_ALLOC_DMABUF, vmw_dmabuf_alloc_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_DMABUF, vmw_dmabuf_unref_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_CURSOR_BYPASS,
|
||||
vmw_kms_cursor_bypass_ioctl, 0),
|
||||
vmw_kms_cursor_bypass_ioctl,
|
||||
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
|
||||
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_CONTROL_STREAM, vmw_overlay_ioctl,
|
||||
0),
|
||||
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_CLAIM_STREAM, vmw_stream_claim_ioctl,
|
||||
0),
|
||||
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_STREAM, vmw_stream_unref_ioctl,
|
||||
0),
|
||||
DRM_MASTER | DRM_CONTROL_ALLOW | DRM_UNLOCKED),
|
||||
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_CONTEXT, vmw_context_define_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_CONTEXT, vmw_context_destroy_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_CREATE_SURFACE, vmw_surface_define_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_UNREF_SURFACE, vmw_surface_destroy_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_REF_SURFACE, vmw_surface_reference_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_EXECBUF, vmw_execbuf_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FIFO_DEBUG, vmw_fifo_debug_ioctl,
|
||||
0),
|
||||
DRM_AUTH | DRM_ROOT_ONLY | DRM_MASTER | DRM_UNLOCKED),
|
||||
VMW_IOCTL_DEF(DRM_IOCTL_VMW_FENCE_WAIT, vmw_fence_wait_ioctl,
|
||||
0)
|
||||
DRM_AUTH | DRM_UNLOCKED)
|
||||
};
|
||||
|
||||
static struct pci_device_id vmw_pci_id_list[] = {
|
||||
@ -460,11 +462,9 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
|
||||
struct drm_file *file_priv = filp->private_data;
|
||||
struct drm_device *dev = file_priv->minor->dev;
|
||||
unsigned int nr = DRM_IOCTL_NR(cmd);
|
||||
long ret;
|
||||
|
||||
/*
|
||||
* The driver private ioctls and TTM ioctls should be
|
||||
* thread-safe.
|
||||
* Do extra checking on driver private ioctls.
|
||||
*/
|
||||
|
||||
if ((nr >= DRM_COMMAND_BASE) && (nr < DRM_COMMAND_END)
|
||||
@ -477,18 +477,9 @@ static long vmw_unlocked_ioctl(struct file *filp, unsigned int cmd,
|
||||
nr - DRM_COMMAND_BASE);
|
||||
return -EINVAL;
|
||||
}
|
||||
return drm_ioctl(filp->f_path.dentry->d_inode,
|
||||
filp, cmd, arg);
|
||||
}
|
||||
|
||||
/*
|
||||
* Not all old drm ioctls are thread-safe.
|
||||
*/
|
||||
|
||||
lock_kernel();
|
||||
ret = drm_ioctl(filp->f_path.dentry->d_inode, filp, cmd, arg);
|
||||
unlock_kernel();
|
||||
return ret;
|
||||
return drm_ioctl(filp, cmd, arg);
|
||||
}
|
||||
|
||||
static int vmw_firstopen(struct drm_device *dev)
|
||||
|
@ -123,6 +123,7 @@ struct vmw_sw_context{
|
||||
uint32_t last_cid;
|
||||
bool cid_valid;
|
||||
uint32_t last_sid;
|
||||
uint32_t sid_translation;
|
||||
bool sid_valid;
|
||||
struct ttm_object_file *tfile;
|
||||
struct list_head validate_nodes;
|
||||
@ -317,9 +318,10 @@ extern void vmw_surface_res_free(struct vmw_resource *res);
|
||||
extern int vmw_surface_init(struct vmw_private *dev_priv,
|
||||
struct vmw_surface *srf,
|
||||
void (*res_free) (struct vmw_resource *res));
|
||||
extern int vmw_user_surface_lookup(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
int sid, struct vmw_surface **out);
|
||||
extern int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
uint32_t handle,
|
||||
struct vmw_surface **out);
|
||||
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
|
||||
@ -328,7 +330,7 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_surface_check(struct vmw_private *dev_priv,
|
||||
struct ttm_object_file *tfile,
|
||||
int id);
|
||||
uint32_t handle, int *id);
|
||||
extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
|
||||
extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *vmw_bo,
|
||||
|
@ -73,21 +73,32 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
|
||||
|
||||
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
|
||||
struct vmw_sw_context *sw_context,
|
||||
uint32_t sid)
|
||||
uint32_t *sid)
|
||||
{
|
||||
if (unlikely((!sw_context->sid_valid || sid != sw_context->last_sid) &&
|
||||
sid != SVGA3D_INVALID_ID)) {
|
||||
int ret = vmw_surface_check(dev_priv, sw_context->tfile, sid);
|
||||
if (*sid == SVGA3D_INVALID_ID)
|
||||
return 0;
|
||||
|
||||
if (unlikely((!sw_context->sid_valid ||
|
||||
*sid != sw_context->last_sid))) {
|
||||
int real_id;
|
||||
int ret = vmw_surface_check(dev_priv, sw_context->tfile,
|
||||
*sid, &real_id);
|
||||
|
||||
if (unlikely(ret != 0)) {
|
||||
DRM_ERROR("Could ot find or use surface %u\n",
|
||||
(unsigned) sid);
|
||||
DRM_ERROR("Could ot find or use surface 0x%08x "
|
||||
"address 0x%08lx\n",
|
||||
(unsigned int) *sid,
|
||||
(unsigned long) sid);
|
||||
return ret;
|
||||
}
|
||||
|
||||
sw_context->last_sid = sid;
|
||||
sw_context->last_sid = *sid;
|
||||
sw_context->sid_valid = true;
|
||||
}
|
||||
*sid = real_id;
|
||||
sw_context->sid_translation = real_id;
|
||||
} else
|
||||
*sid = sw_context->sid_translation;
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -107,7 +118,8 @@ static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
return ret;

cmd = container_of(header, struct vmw_sid_cmd, header);
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.target.sid);
ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
@ -121,10 +133,10 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
int ret;

cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
@ -138,10 +150,10 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
int ret;

cmd = container_of(header, struct vmw_sid_cmd, header);
ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.src.sid);
ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
if (unlikely(ret != 0))
return ret;
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.dest.sid);
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
@ -154,7 +166,7 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
} *cmd;

cmd = container_of(header, struct vmw_sid_cmd, header);
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.srcImage.sid);
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
@ -167,7 +179,7 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
} *cmd;

cmd = container_of(header, struct vmw_sid_cmd, header);
return vmw_cmd_sid_check(dev_priv, sw_context, cmd->body.sid);
return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}

static int vmw_cmd_dma(struct vmw_private *dev_priv,
@ -187,12 +199,7 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
uint32_t cur_validate_node;
struct ttm_validate_buffer *val_buf;


cmd = container_of(header, struct vmw_dma_cmd, header);
ret = vmw_cmd_sid_check(dev_priv, sw_context, cmd->dma.host.sid);
if (unlikely(ret != 0))
return ret;

handle = cmd->dma.guest.ptr.gmrId;
ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
@ -228,14 +235,23 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
++sw_context->cur_val_buf;
}

ret = vmw_user_surface_lookup(dev_priv, sw_context->tfile,
cmd->dma.host.sid, &srf);
ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
cmd->dma.host.sid, &srf);
if (ret) {
DRM_ERROR("could not find surface\n");
goto out_no_reloc;
}

/**
* Patch command stream with device SID.
*/

cmd->dma.host.sid = srf->res.id;
vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);
/**
* FIXME: May deadlock here when called from the
* command parsing code.
*/
vmw_surface_unreference(&srf);

out_no_reloc:
@ -243,6 +259,90 @@ out_no_reloc:
return ret;
}

static int vmw_cmd_draw(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_draw_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdDrawPrimitives body;
} *cmd;
SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
(unsigned long)header + sizeof(*cmd));
SVGA3dPrimitiveRange *range;
uint32_t i;
uint32_t maxnum;
int ret;

ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;

cmd = container_of(header, struct vmw_draw_cmd, header);
maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

if (unlikely(cmd->body.numVertexDecls > maxnum)) {
DRM_ERROR("Illegal number of vertex declarations.\n");
return -EINVAL;
}

for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
ret = vmw_cmd_sid_check(dev_priv, sw_context,
&decl->array.surfaceId);
if (unlikely(ret != 0))
return ret;
}

maxnum = (header->size - sizeof(cmd->body) -
cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
if (unlikely(cmd->body.numRanges > maxnum)) {
DRM_ERROR("Illegal number of index ranges.\n");
return -EINVAL;
}

range = (SVGA3dPrimitiveRange *) decl;
for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
ret = vmw_cmd_sid_check(dev_priv, sw_context,
&range->indexArray.surfaceId);
if (unlikely(ret != 0))
return ret;
}
return 0;
}

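vmw_cmd_draw above treats the counts inside SVGA3dCmdDrawPrimitives as untrusted: before walking the vertex declarations and primitive ranges it derives, from the size declared in the command header, the largest counts that can physically fit, and rejects anything larger. A self-contained sketch of that size arithmetic follows; the struct layouts are made-up stand-ins, not the SVGA structures.

#include <stdint.h>
#include <stdio.h>

/* Stub layouts, loosely shaped like a variable-length draw command. */
struct decl  { uint32_t surface_id; uint32_t stride; };
struct range { uint32_t index_surface; uint32_t prim_count; };
struct body  { uint32_t num_decls; uint32_t num_ranges; };

/*
 * cmd_size is the payload size claimed by the command header.  The counts
 * inside the body are untrusted, so clamp them against what can actually
 * fit before iterating over the trailing arrays.
 */
static int check_draw(const struct body *b, uint32_t cmd_size)
{
    uint32_t maxnum;

    if (cmd_size < sizeof(*b))
        return -1;              /* command too small to even hold the body */

    maxnum = (cmd_size - sizeof(*b)) / sizeof(struct decl);
    if (b->num_decls > maxnum)
        return -1;              /* declarations would overrun the command */

    maxnum = (cmd_size - sizeof(*b) -
              b->num_decls * sizeof(struct decl)) / sizeof(struct range);
    if (b->num_ranges > maxnum)
        return -1;              /* ranges would overrun the command */

    return 0;
}

int main(void)
{
    struct body ok  = { .num_decls = 2, .num_ranges = 1 };
    struct body bad = { .num_decls = 1000, .num_ranges = 1 };
    uint32_t size = sizeof(struct body) +
                    2 * sizeof(struct decl) + 1 * sizeof(struct range);

    printf("ok:  %d\n", check_draw(&ok, size));   /* 0  */
    printf("bad: %d\n", check_draw(&bad, size));  /* -1 */
    return 0;
}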
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
struct vmw_sw_context *sw_context,
SVGA3dCmdHeader *header)
{
struct vmw_tex_state_cmd {
SVGA3dCmdHeader header;
SVGA3dCmdSetTextureState state;
};

SVGA3dTextureState *last_state = (SVGA3dTextureState *)
((unsigned long) header + header->size + sizeof(header));
SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
int ret;

ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
if (unlikely(ret != 0))
return ret;

for (; cur_state < last_state; ++cur_state) {
if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
continue;

ret = vmw_cmd_sid_check(dev_priv, sw_context,
&cur_state->value);
if (unlikely(ret != 0))
return ret;
}

return 0;
}


typedef int (*vmw_cmd_func) (struct vmw_private *,
struct vmw_sw_context *,
@ -264,7 +364,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
&vmw_cmd_set_render_target_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
@ -276,7 +376,7 @@ static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_cid_check),
@ -291,6 +391,7 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
void *buf, uint32_t *size)
{
uint32_t cmd_id;
uint32_t size_remaining = *size;
SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
int ret;

@ -304,6 +405,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

cmd_id -= SVGA_3D_CMD_BASE;
if (unlikely(*size > size_remaining))
goto out_err;

if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
goto out_err;

@ -326,6 +430,7 @@ static int vmw_cmd_check_all(struct vmw_private *dev_priv,
int ret;

while (cur_size > 0) {
size = cur_size;
ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
if (unlikely(ret != 0))
return ret;
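Together, the new size_remaining check in vmw_cmd_check and the size = cur_size reset in vmw_cmd_check_all keep the parser inside the submitted buffer: each command's length comes from its own header, is clamped against what is left, and only then is the per-command handler picked from the function table. A self-contained sketch of that bounded walk and table dispatch (toy header layout and command ids, not the SVGA ones):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hdr { uint32_t id; uint32_t size; /* payload bytes that follow */ };

typedef int (*cmd_func)(const void *payload, uint32_t size);

static int cmd_nop(const void *p, uint32_t s)  { (void)p; (void)s; return 0; }
static int cmd_draw(const void *p, uint32_t s)
{
    (void)p;
    printf("draw, %u payload bytes\n", (unsigned) s);
    return 0;
}

static const cmd_func funcs[] = { cmd_nop, cmd_draw };   /* indexed by id */

/* Validate one command; on success *size is the full command length. */
static int check_one(const uint8_t *buf, uint32_t remaining, uint32_t *size)
{
    struct hdr h;

    if (remaining < sizeof(h))
        return -1;
    memcpy(&h, buf, sizeof(h));

    *size = sizeof(h) + h.size;
    if (*size > remaining)            /* claimed size overruns the buffer */
        return -1;
    if (h.id >= sizeof(funcs) / sizeof(funcs[0]))
        return -1;                    /* unknown command */

    return funcs[h.id](buf + sizeof(h), h.size);
}

static int check_all(const uint8_t *buf, uint32_t cur_size)
{
    while (cur_size > 0) {
        uint32_t size = cur_size;

        if (check_one(buf, cur_size, &size) != 0)
            return -1;
        buf += size;
        cur_size -= size;
    }
    return 0;
}

int main(void)
{
    uint8_t stream[sizeof(struct hdr) + 4];
    struct hdr h = { .id = 1, .size = 4 };

    memcpy(stream, &h, sizeof(h));
    memset(stream + sizeof(h), 0, 4);
    printf("result: %d\n", check_all(stream, sizeof(stream)));
    return 0;
}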
@ -386,7 +491,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
return 0;

ret = vmw_gmr_bind(dev_priv, bo);
if (likely(ret == 0 || ret == -ERESTART))
if (likely(ret == 0 || ret == -ERESTARTSYS))
return ret;


@ -429,7 +534,7 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,

ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
if (unlikely(ret != 0)) {
ret = -ERESTART;
ret = -ERESTARTSYS;
goto out_no_cmd_mutex;
}


@ -191,7 +191,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
}
schedule_timeout(1);
if (interruptible && signal_pending(current)) {
ret = -ERESTART;
ret = -ERESTARTSYS;
break;
}
}
@ -237,9 +237,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
(dev_priv->fifo_queue,
!vmw_fifo_is_full(dev_priv, bytes), timeout);

if (unlikely(ret == -ERESTARTSYS))
ret = -ERESTART;
else if (unlikely(ret == 0))
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;


@ -155,7 +155,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
TASK_UNINTERRUPTIBLE);
}
if (interruptible && signal_pending(current)) {
ret = -ERESTART;
ret = -ERESTARTSYS;
break;
}
}
@ -218,9 +218,7 @@ int vmw_wait_fence(struct vmw_private *dev_priv,
vmw_fence_signaled(dev_priv, sequence),
timeout);

if (unlikely(ret == -ERESTARTSYS))
ret = -ERESTART;
else if (unlikely(ret == 0))
if (unlikely(ret == 0))
ret = -EBUSY;
else if (likely(ret > 0))
ret = 0;
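The -ERESTART to -ERESTARTSYS conversions in these wait paths matter because -ERESTARTSYS is the value the kernel's signal code knows how to handle, restarting the interrupted ioctl or converting it to -EINTR, whereas the driver-private -ERESTART would have reached user space as an unexpected error code. After the change the interruptible waits simply pass -ERESTARTSYS through and only remap a timeout to -EBUSY. A small sketch of that result mapping, written as an ordinary user-space program (ERESTARTSYS is kernel-internal, so it is defined locally here):

#include <stdio.h>
#include <errno.h>

#define ERESTARTSYS 512  /* kernel-internal value from <linux/errno.h> */

/*
 * wait_event_interruptible_timeout()-style result:
 *   < 0  -> interrupted; already -ERESTARTSYS, pass it through unchanged
 *   == 0 -> timed out
 *   > 0  -> condition met before the timeout
 */
static int map_wait_result(long ret)
{
    if (ret == 0)
        return -EBUSY;
    if (ret > 0)
        return 0;
    return (int) ret;
}

int main(void)
{
    printf("%d %d %d\n",
           map_wait_result(25),             /* 0      */
           map_wait_result(0),              /* -EBUSY */
           map_wait_result(-ERESTARTSYS));  /* -512   */
    return 0;
}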
@ -106,8 +106,8 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
int ret;

if (handle) {
ret = vmw_user_surface_lookup(dev_priv, tfile,
handle, &surface);
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
handle, &surface);
if (!ret) {
if (!surface->snooper.image) {
DRM_ERROR("surface not suitable for cursor\n");
@ -704,8 +704,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
struct vmw_dma_buffer *bo = NULL;
int ret;

ret = vmw_user_surface_lookup(dev_priv, tfile,
mode_cmd->handle, &surface);
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
mode_cmd->handle, &surface);
if (ret)
goto try_dmabuf;


@ -488,28 +488,44 @@ static void vmw_user_surface_free(struct vmw_resource *res)
kfree(user_srf);
}

int vmw_user_surface_lookup(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int sid, struct vmw_surface **out)
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
uint32_t handle, struct vmw_surface **out)
{
struct vmw_resource *res;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct ttm_base_object *base;
int ret = -EINVAL;

res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, sid);
if (unlikely(res == NULL))
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL))
return -EINVAL;

if (res->res_free != &vmw_user_surface_free)
return -EINVAL;
if (unlikely(base->object_type != VMW_RES_SURFACE))
goto out_bad_resource;

srf = container_of(res, struct vmw_surface, res);
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (user_srf->base.tfile != tfile && !user_srf->base.shareable)
return -EPERM;
user_srf = container_of(base, struct vmw_user_surface, base);
srf = &user_srf->srf;
res = &srf->res;

read_lock(&dev_priv->resource_lock);

if (!res->avail || res->res_free != &vmw_user_surface_free) {
read_unlock(&dev_priv->resource_lock);
goto out_bad_resource;
}

kref_get(&res->kref);
read_unlock(&dev_priv->resource_lock);

*out = srf;
return 0;
ret = 0;

out_bad_resource:
ttm_base_object_unref(&base);

return ret;
}

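vmw_user_surface_lookup_handle is the core of the switch to TTM handles: the handle is resolved through the TTM base-object table, the object type is verified, and container_of recovers the embedding vmw_user_surface; only then, under the resource lock, is the underlying resource checked and referenced. A minimal sketch of the base-object-plus-container_of pattern with stub types (not the TTM structures):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

enum obj_type { OBJ_SURFACE, OBJ_BUFFER };

struct base_object {            /* what the handle table hands back */
    enum obj_type type;
    int refcount;
};

struct surface {
    int device_id;
};

struct user_surface {           /* surface embedded next to its base object */
    struct base_object base;
    struct surface srf;
};

/* Resolve an untyped base object to the surface that embeds it. */
static struct surface *surface_from_base(struct base_object *base)
{
    if (base->type != OBJ_SURFACE)
        return NULL;            /* handle names some other object type */
    return &container_of(base, struct user_surface, base)->srf;
}

int main(void)
{
    struct user_surface usrf = { .base = { OBJ_SURFACE, 1 }, .srf = { 42 } };
    struct surface *srf = surface_from_base(&usrf.base);

    printf("device id = %d\n", srf ? srf->device_id : -1);
    return 0;
}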
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
@ -526,35 +542,10 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_resource *res;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret = 0;

res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, arg->sid);
if (unlikely(res == NULL))
return -EINVAL;

if (res->res_free != &vmw_user_surface_free) {
ret = -EINVAL;
goto out;
}

srf = container_of(res, struct vmw_surface, res);
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
ret = -EPERM;
goto out;
}

ttm_ref_object_base_unref(tfile, user_srf->base.hash.key,
TTM_REF_USAGE);
out:
vmw_resource_unreference(&res);
return ret;
return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

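The destroy ioctl collapses to a single ttm_ref_object_base_unref because surface teardown now hangs off the base object's release path (vmw_user_surface_base_release in the hunk context above): once the caller's usage reference is dropped and no references remain, the release callback frees the surface. A generic sketch of that drop-last-reference-runs-release pattern, not the TTM API:

#include <stdio.h>
#include <stdlib.h>

struct object {
    int refcount;
    void (*release)(struct object *obj);   /* called when the last ref drops */
};

static void object_unref(struct object *obj)
{
    if (--obj->refcount == 0)
        obj->release(obj);
}

struct surface {
    struct object obj;
    char name[16];
};

static void surface_release(struct object *obj)
{
    /* obj is the first member of struct surface, so the cast is safe here */
    struct surface *srf = (struct surface *) obj;

    printf("releasing %s\n", srf->name);
    free(srf);
}

int main(void)
{
    struct surface *srf = malloc(sizeof(*srf));

    srf->obj.refcount = 2;                 /* e.g. creator plus one extra user */
    srf->obj.release = surface_release;
    snprintf(srf->name, sizeof(srf->name), "surf-1");

    object_unref(&srf->obj);               /* still referenced elsewhere */
    object_unref(&srf->obj);               /* last ref: release runs */
    return 0;
}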
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@ -649,7 +640,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
}
srf->snooper.crtc = NULL;

rep->sid = res->id;
rep->sid = user_srf->base.hash.key;
if (rep->sid == SVGA3D_INVALID_ID)
DRM_ERROR("Created bad Surface ID.\n");

vmw_resource_unreference(&res);
return 0;
out_err1:
@ -662,39 +656,33 @@ out_err0:
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
union drm_vmw_surface_reference_arg *arg =
(union drm_vmw_surface_reference_arg *)data;
struct drm_vmw_surface_arg *req = &arg->req;
struct drm_vmw_surface_create_req *rep = &arg->rep;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
struct vmw_resource *res;
struct vmw_surface *srf;
struct vmw_user_surface *user_srf;
struct drm_vmw_size __user *user_sizes;
int ret;
struct ttm_base_object *base;
int ret = -EINVAL;

res = vmw_resource_lookup(dev_priv, &dev_priv->surface_idr, req->sid);
if (unlikely(res == NULL))
base = ttm_base_object_lookup(tfile, req->sid);
if (unlikely(base == NULL)) {
DRM_ERROR("Could not find surface to reference.\n");
return -EINVAL;

if (res->res_free != &vmw_user_surface_free) {
ret = -EINVAL;
goto out;
}

srf = container_of(res, struct vmw_surface, res);
user_srf = container_of(srf, struct vmw_user_surface, srf);
if (user_srf->base.tfile != tfile && !user_srf->base.shareable) {
DRM_ERROR("Tried to reference none shareable surface\n");
ret = -EPERM;
goto out;
}
if (unlikely(base->object_type != VMW_RES_SURFACE))
goto out_bad_resource;

user_srf = container_of(base, struct vmw_user_surface, base);
srf = &user_srf->srf;

ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not add a reference to a surface.\n");
goto out;
goto out_no_reference;
}

rep->flags = srf->flags;
@ -706,40 +694,43 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
if (user_sizes)
ret = copy_to_user(user_sizes, srf->sizes,
srf->num_sizes * sizeof(*srf->sizes));
if (unlikely(ret != 0)) {
if (unlikely(ret != 0))
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
/**
* FIXME: Unreference surface here?
*/
goto out;
}
out:
vmw_resource_unreference(&res);
out_bad_resource:
out_no_reference:
ttm_base_object_unref(&base);

return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
struct ttm_object_file *tfile,
int id)
uint32_t handle, int *id)
{
struct vmw_resource *res;
int ret = 0;
struct ttm_base_object *base;
struct vmw_user_surface *user_srf;

read_lock(&dev_priv->resource_lock);
res = idr_find(&dev_priv->surface_idr, id);
if (res && res->avail) {
struct vmw_surface *srf =
container_of(res, struct vmw_surface, res);
struct vmw_user_surface *usrf =
container_of(srf, struct vmw_user_surface, srf);
int ret = -EPERM;

if (usrf->base.tfile != tfile && !usrf->base.shareable)
ret = -EPERM;
} else
ret = -EINVAL;
read_unlock(&dev_priv->resource_lock);
base = ttm_base_object_lookup(tfile, handle);
if (unlikely(base == NULL))
return -EINVAL;

if (unlikely(base->object_type != VMW_RES_SURFACE))
goto out_bad_surface;

user_srf = container_of(base, struct vmw_user_surface, base);
*id = user_srf->srf.res.id;
ret = 0;

out_bad_surface:
/**
* FIXME: May deadlock here when called from the
* command parsing code.
*/

ttm_base_object_unref(&base);
return ret;
}


@ -296,6 +296,7 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
#define DRM_MASTER 0x2
#define DRM_ROOT_ONLY 0x4
#define DRM_CONTROL_ALLOW 0x8
#define DRM_UNLOCKED 0x10

struct drm_ioctl_desc {
unsigned int cmd;
@ -1128,8 +1129,8 @@ static inline int drm_mtrr_del(int handle, unsigned long offset,
/* Driver support (drm_drv.h) */
extern int drm_init(struct drm_driver *driver);
extern void drm_exit(struct drm_driver *driver);
extern int drm_ioctl(struct inode *inode, struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern long drm_compat_ioctl(struct file *filp,
unsigned int cmd, unsigned long arg);
extern int drm_lastclose(struct drm_device *dev);
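The drmP.h hunk above tracks the change of drm_ioctl's exported prototype: the old entry point took an inode and returned int, while the new one drops the inode argument and returns long, which matches the shape of a file_operations .unlocked_ioctl handler rather than the BKL-protected .ioctl slot. The sketch below uses stub types (not the kernel's definitions) to show the two signatures side by side and how a driver would hook the handler into the unlocked slot:

#include <stdio.h>

/* Stub types only; the real definitions live in the kernel headers. */
struct inode { int i_ino; };
struct file  { void *private_data; };

/* Old entry point: int  (*ioctl)(struct inode *, struct file *, cmd, arg) */
/* New entry point: long (*unlocked_ioctl)(struct file *, cmd, arg)        */
struct file_operations_stub {
    int  (*ioctl)(struct inode *, struct file *, unsigned int, unsigned long);
    long (*unlocked_ioctl)(struct file *, unsigned int, unsigned long);
};

static long drm_ioctl_stub(struct file *filp, unsigned int cmd, unsigned long arg)
{
    (void) filp;
    printf("dispatching cmd 0x%x, arg 0x%lx without the BKL\n", cmd, arg);
    return 0;
}

int main(void)
{
    /* A driver would now wire the handler into .unlocked_ioctl, not .ioctl. */
    struct file_operations_stub fops = { .unlocked_ioctl = drm_ioctl_stub };
    struct file f = { 0 };

    return (int) fops.unlocked_ioctl(&f, 0x40, 0);
}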