Mirror of https://git.kernel.org/pub/scm/linux/kernel/git/herbert/cryptodev-2.6.git (synced 2026-04-21 04:53:46 -04:00)
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (476 commits)
  vmwgfx: Implement a proper GMR eviction mechanism
  drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2
  drm/radeon/kms: properly compute group_size on 6xx/7xx
  drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker
  drm/radeon/kms/evergreen: set the clear state to the blit state
  drm/radeon/kms: don't poll dac load detect.
  gpu: Add Intel GMA500(Poulsbo) Stub Driver
  drm/radeon/kms: MC vram map needs to be >= pci aperture size
  drm/radeon/kms: implement display watermark support for evergreen
  drm/radeon/kms/evergreen: add some additional safe regs v2
  drm/radeon/r600: fix tiling issues in CS checker.
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  ...

Fix up conflicts in
 - drivers/gpu/drm/i915/{i915_gem.c, i915/intel_overlay.c}: due to the
   new simplified stack-based kmap_atomic() interface
 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c: added .llseek entry due to BKL
   removal cleanups.
@@ -4,6 +4,6 @@ ccflags-y := -Iinclude/drm
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
	    vmwgfx_overlay.o vmwgfx_fence.o
	    vmwgfx_overlay.o vmwgfx_fence.o vmwgfx_gmrid_manager.o

obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
@@ -39,6 +39,9 @@ static uint32_t vram_ne_placement_flags = TTM_PL_FLAG_VRAM |
static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
	TTM_PL_FLAG_CACHED;

static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
	TTM_PL_FLAG_CACHED;

struct ttm_placement vmw_vram_placement = {
	.fpfn = 0,
	.lpfn = 0,
@@ -48,6 +51,20 @@ struct ttm_placement vmw_vram_placement = {
	.busy_placement = &vram_placement_flags
};

static uint32_t vram_gmr_placement_flags[] = {
	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
};

struct ttm_placement vmw_vram_gmr_placement = {
	.fpfn = 0,
	.lpfn = 0,
	.num_placement = 2,
	.placement = vram_gmr_placement_flags,
	.num_busy_placement = 1,
	.busy_placement = &gmr_placement_flags
};

struct ttm_placement vmw_vram_sys_placement = {
	.fpfn = 0,
	.lpfn = 0,
@@ -77,27 +94,52 @@ struct ttm_placement vmw_sys_placement = {
|
||||
|
||||
struct vmw_ttm_backend {
|
||||
struct ttm_backend backend;
|
||||
struct page **pages;
|
||||
unsigned long num_pages;
|
||||
struct vmw_private *dev_priv;
|
||||
int gmr_id;
|
||||
};
|
||||
|
||||
static int vmw_ttm_populate(struct ttm_backend *backend,
|
||||
unsigned long num_pages, struct page **pages,
|
||||
struct page *dummy_read_page)
|
||||
{
|
||||
struct vmw_ttm_backend *vmw_be =
|
||||
container_of(backend, struct vmw_ttm_backend, backend);
|
||||
|
||||
vmw_be->pages = pages;
|
||||
vmw_be->num_pages = num_pages;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vmw_ttm_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
|
||||
{
|
||||
return 0;
|
||||
struct vmw_ttm_backend *vmw_be =
|
||||
container_of(backend, struct vmw_ttm_backend, backend);
|
||||
|
||||
vmw_be->gmr_id = bo_mem->start;
|
||||
|
||||
return vmw_gmr_bind(vmw_be->dev_priv, vmw_be->pages,
|
||||
vmw_be->num_pages, vmw_be->gmr_id);
|
||||
}
|
||||
|
||||
static int vmw_ttm_unbind(struct ttm_backend *backend)
|
||||
{
|
||||
struct vmw_ttm_backend *vmw_be =
|
||||
container_of(backend, struct vmw_ttm_backend, backend);
|
||||
|
||||
vmw_gmr_unbind(vmw_be->dev_priv, vmw_be->gmr_id);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vmw_ttm_clear(struct ttm_backend *backend)
|
||||
{
|
||||
struct vmw_ttm_backend *vmw_be =
|
||||
container_of(backend, struct vmw_ttm_backend, backend);
|
||||
|
||||
vmw_be->pages = NULL;
|
||||
vmw_be->num_pages = 0;
|
||||
}
|
||||
|
||||
static void vmw_ttm_destroy(struct ttm_backend *backend)
|
||||
@@ -125,6 +167,7 @@ struct ttm_backend *vmw_ttm_backend_init(struct ttm_bo_device *bdev)
|
||||
return NULL;
|
||||
|
||||
vmw_be->backend.func = &vmw_ttm_func;
|
||||
vmw_be->dev_priv = container_of(bdev, struct vmw_private, bdev);
|
||||
|
||||
return &vmw_be->backend;
|
||||
}
|
||||
@@ -142,15 +185,28 @@ int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
	/* System memory */

	man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
	man->available_caching = TTM_PL_MASK_CACHING;
	man->available_caching = TTM_PL_FLAG_CACHED;
	man->default_caching = TTM_PL_FLAG_CACHED;
	break;
case TTM_PL_VRAM:
	/* "On-card" video ram */
	man->func = &ttm_bo_manager_func;
	man->gpu_offset = 0;
	man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE;
	man->available_caching = TTM_PL_MASK_CACHING;
	man->default_caching = TTM_PL_FLAG_WC;
	man->available_caching = TTM_PL_FLAG_CACHED;
	man->default_caching = TTM_PL_FLAG_CACHED;
	break;
case VMW_PL_GMR:
	/*
	 * "Guest Memory Regions" is an aperture like feature with
	 * one slot per bo. There is an upper limit of the number of
	 * slots as well as the bo size.
	 */
	man->func = &vmw_gmrid_manager_func;
	man->gpu_offset = 0;
	man->flags = TTM_MEMTYPE_FLAG_CMA | TTM_MEMTYPE_FLAG_MAPPABLE;
	man->available_caching = TTM_PL_FLAG_CACHED;
	man->default_caching = TTM_PL_FLAG_CACHED;
	break;
default:
	DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
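Note: the "Guest Memory Regions" case above sizes the new memory type in GMR ids rather than pages, so the id manager hands out one slot per buffer object. For context, the matching bring-up code from the vmw_driver_load hunk later in this diff reads:

	/* Register the GMR type; its "size" is the number of GMR ids. */
	dev_priv->has_gmr = true;
	if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
			   dev_priv->max_gmr_ids) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}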
@@ -174,18 +230,6 @@ static int vmw_verify_access(struct ttm_buffer_object *bo, struct file *filp)
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vmw_move_notify(struct ttm_buffer_object *bo,
|
||||
struct ttm_mem_reg *new_mem)
|
||||
{
|
||||
if (new_mem->mem_type != TTM_PL_SYSTEM)
|
||||
vmw_dmabuf_gmr_unbind(bo);
|
||||
}
|
||||
|
||||
static void vmw_swap_notify(struct ttm_buffer_object *bo)
|
||||
{
|
||||
vmw_dmabuf_gmr_unbind(bo);
|
||||
}
|
||||
|
||||
static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
|
||||
@@ -200,10 +244,10 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg
|
||||
return -EINVAL;
|
||||
switch (mem->mem_type) {
|
||||
case TTM_PL_SYSTEM:
|
||||
/* System memory */
|
||||
case VMW_PL_GMR:
|
||||
return 0;
|
||||
case TTM_PL_VRAM:
|
||||
mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
|
||||
mem->bus.offset = mem->start << PAGE_SHIFT;
|
||||
mem->bus.base = dev_priv->vram_start;
|
||||
mem->bus.is_iomem = true;
|
||||
break;
|
||||
@@ -276,8 +320,8 @@ struct ttm_bo_driver vmw_bo_driver = {
|
||||
.sync_obj_flush = vmw_sync_obj_flush,
|
||||
.sync_obj_unref = vmw_sync_obj_unref,
|
||||
.sync_obj_ref = vmw_sync_obj_ref,
|
||||
.move_notify = vmw_move_notify,
|
||||
.swap_notify = vmw_swap_notify,
|
||||
.move_notify = NULL,
|
||||
.swap_notify = NULL,
|
||||
.fault_reserve_notify = &vmw_ttm_fault_reserve_notify,
|
||||
.io_mem_reserve = &vmw_ttm_io_mem_reserve,
|
||||
.io_mem_free = &vmw_ttm_io_mem_free,
|
||||
|
||||
@@ -260,13 +260,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
idr_init(&dev_priv->context_idr);
|
||||
idr_init(&dev_priv->surface_idr);
|
||||
idr_init(&dev_priv->stream_idr);
|
||||
ida_init(&dev_priv->gmr_ida);
|
||||
mutex_init(&dev_priv->init_mutex);
|
||||
init_waitqueue_head(&dev_priv->fence_queue);
|
||||
init_waitqueue_head(&dev_priv->fifo_queue);
|
||||
atomic_set(&dev_priv->fence_queue_waiters, 0);
|
||||
atomic_set(&dev_priv->fifo_queue_waiters, 0);
|
||||
INIT_LIST_HEAD(&dev_priv->gmr_lru);
|
||||
|
||||
dev_priv->io_start = pci_resource_start(dev->pdev, 0);
|
||||
dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
|
||||
@@ -341,6 +339,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
|
||||
goto out_err2;
|
||||
}
|
||||
|
||||
dev_priv->has_gmr = true;
|
||||
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR,
|
||||
dev_priv->max_gmr_ids) != 0) {
|
||||
DRM_INFO("No GMR memory available. "
|
||||
"Graphics memory resources are very limited.\n");
|
||||
dev_priv->has_gmr = false;
|
||||
}
|
||||
|
||||
dev_priv->mmio_mtrr = drm_mtrr_add(dev_priv->mmio_start,
|
||||
dev_priv->mmio_size, DRM_MTRR_WC);
|
||||
|
||||
@@ -440,13 +446,14 @@ out_err4:
|
||||
out_err3:
|
||||
drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
|
||||
dev_priv->mmio_size, DRM_MTRR_WC);
|
||||
if (dev_priv->has_gmr)
|
||||
(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
|
||||
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
||||
out_err2:
|
||||
(void)ttm_bo_device_release(&dev_priv->bdev);
|
||||
out_err1:
|
||||
vmw_ttm_global_release(dev_priv);
|
||||
out_err0:
|
||||
ida_destroy(&dev_priv->gmr_ida);
|
||||
idr_destroy(&dev_priv->surface_idr);
|
||||
idr_destroy(&dev_priv->context_idr);
|
||||
idr_destroy(&dev_priv->stream_idr);
|
||||
@@ -478,10 +485,11 @@ static int vmw_driver_unload(struct drm_device *dev)
|
||||
iounmap(dev_priv->mmio_virt);
|
||||
drm_mtrr_del(dev_priv->mmio_mtrr, dev_priv->mmio_start,
|
||||
dev_priv->mmio_size, DRM_MTRR_WC);
|
||||
if (dev_priv->has_gmr)
|
||||
(void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
|
||||
(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
|
||||
(void)ttm_bo_device_release(&dev_priv->bdev);
|
||||
vmw_ttm_global_release(dev_priv);
|
||||
ida_destroy(&dev_priv->gmr_ida);
|
||||
idr_destroy(&dev_priv->surface_idr);
|
||||
idr_destroy(&dev_priv->context_idr);
|
||||
idr_destroy(&dev_priv->stream_idr);
|
||||
@@ -597,6 +605,8 @@ static void vmw_lastclose(struct drm_device *dev)
|
||||
static void vmw_master_init(struct vmw_master *vmaster)
|
||||
{
|
||||
ttm_lock_init(&vmaster->lock);
|
||||
INIT_LIST_HEAD(&vmaster->fb_surf);
|
||||
mutex_init(&vmaster->fb_surf_mutex);
|
||||
}
|
||||
|
||||
static int vmw_master_create(struct drm_device *dev,
|
||||
@@ -608,7 +618,7 @@ static int vmw_master_create(struct drm_device *dev,
|
||||
if (unlikely(vmaster == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
ttm_lock_init(&vmaster->lock);
|
||||
vmw_master_init(vmaster);
|
||||
ttm_lock_set_kill(&vmaster->lock, true, SIGTERM);
|
||||
master->driver_priv = vmaster;
|
||||
|
||||
@@ -699,6 +709,7 @@ static void vmw_master_drop(struct drm_device *dev,
|
||||
|
||||
vmw_fp->locked_master = drm_master_get(file_priv->master);
|
||||
ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
|
||||
vmw_kms_idle_workqueues(vmaster);
|
||||
|
||||
if (unlikely((ret != 0))) {
|
||||
DRM_ERROR("Unable to lock TTM at VT switch.\n");
|
||||
@@ -751,15 +762,16 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
|
||||
* Buffer contents is moved to swappable memory.
|
||||
*/
|
||||
ttm_bo_swapout_all(&dev_priv->bdev);
|
||||
|
||||
break;
|
||||
case PM_POST_HIBERNATION:
|
||||
case PM_POST_SUSPEND:
|
||||
case PM_POST_RESTORE:
|
||||
ttm_suspend_unlock(&vmaster->lock);
|
||||
|
||||
break;
|
||||
case PM_RESTORE_PREPARE:
|
||||
break;
|
||||
case PM_POST_RESTORE:
|
||||
break;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
@@ -770,21 +782,98 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
|
||||
* These might not be needed with the virtual SVGA device.
|
||||
*/
|
||||
|
||||
int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
|
||||
{
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||
|
||||
if (dev_priv->num_3d_resources != 0) {
|
||||
DRM_INFO("Can't suspend or hibernate "
|
||||
"while 3D resources are active.\n");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
pci_save_state(pdev);
|
||||
pci_disable_device(pdev);
|
||||
pci_set_power_state(pdev, PCI_D3hot);
|
||||
return 0;
|
||||
}
|
||||
|
||||
int vmw_pci_resume(struct pci_dev *pdev)
|
||||
static int vmw_pci_resume(struct pci_dev *pdev)
|
||||
{
|
||||
pci_set_power_state(pdev, PCI_D0);
|
||||
pci_restore_state(pdev);
|
||||
return pci_enable_device(pdev);
|
||||
}
|
||||
|
||||
static int vmw_pm_suspend(struct device *kdev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(kdev);
|
||||
struct pm_message dummy;
|
||||
|
||||
dummy.event = 0;
|
||||
|
||||
return vmw_pci_suspend(pdev, dummy);
|
||||
}
|
||||
|
||||
static int vmw_pm_resume(struct device *kdev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(kdev);
|
||||
|
||||
return vmw_pci_resume(pdev);
|
||||
}
|
||||
|
||||
static int vmw_pm_prepare(struct device *kdev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(kdev);
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||
|
||||
/**
|
||||
* Release 3d reference held by fbdev and potentially
|
||||
* stop fifo.
|
||||
*/
|
||||
dev_priv->suspended = true;
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_3d_resource_dec(dev_priv);
|
||||
|
||||
if (dev_priv->num_3d_resources != 0) {
|
||||
|
||||
DRM_INFO("Can't suspend or hibernate "
|
||||
"while 3D resources are active.\n");
|
||||
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_3d_resource_inc(dev_priv);
|
||||
dev_priv->suspended = false;
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vmw_pm_complete(struct device *kdev)
|
||||
{
|
||||
struct pci_dev *pdev = to_pci_dev(kdev);
|
||||
struct drm_device *dev = pci_get_drvdata(pdev);
|
||||
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||
|
||||
/**
|
||||
* Reclaim 3d reference held by fbdev and potentially
|
||||
* start fifo.
|
||||
*/
|
||||
if (dev_priv->enable_fb)
|
||||
vmw_3d_resource_inc(dev_priv);
|
||||
|
||||
dev_priv->suspended = false;
|
||||
}
|
||||
|
||||
static const struct dev_pm_ops vmw_pm_ops = {
|
||||
.prepare = vmw_pm_prepare,
|
||||
.complete = vmw_pm_complete,
|
||||
.suspend = vmw_pm_suspend,
|
||||
.resume = vmw_pm_resume,
|
||||
};
|
||||
|
||||
static struct drm_driver driver = {
|
||||
.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
|
||||
DRIVER_MODESET,
|
||||
@@ -798,8 +887,6 @@ static struct drm_driver driver = {
|
||||
.irq_handler = vmw_irq_handler,
|
||||
.get_vblank_counter = vmw_get_vblank_counter,
|
||||
.reclaim_buffers_locked = NULL,
|
||||
.get_map_ofs = drm_core_get_map_ofs,
|
||||
.get_reg_ofs = drm_core_get_reg_ofs,
|
||||
.ioctls = vmw_ioctls,
|
||||
.num_ioctls = DRM_ARRAY_SIZE(vmw_ioctls),
|
||||
.dma_quiescent = NULL, /*vmw_dma_quiescent, */
|
||||
@@ -821,15 +908,16 @@ static struct drm_driver driver = {
|
||||
.compat_ioctl = drm_compat_ioctl,
|
||||
#endif
|
||||
.llseek = noop_llseek,
|
||||
},
|
||||
},
|
||||
.pci_driver = {
|
||||
.name = VMWGFX_DRIVER_NAME,
|
||||
.id_table = vmw_pci_id_list,
|
||||
.probe = vmw_probe,
|
||||
.remove = vmw_remove,
|
||||
.suspend = vmw_pci_suspend,
|
||||
.resume = vmw_pci_resume
|
||||
},
|
||||
.name = VMWGFX_DRIVER_NAME,
|
||||
.id_table = vmw_pci_id_list,
|
||||
.probe = vmw_probe,
|
||||
.remove = vmw_remove,
|
||||
.driver = {
|
||||
.pm = &vmw_pm_ops
|
||||
}
|
||||
},
|
||||
.name = VMWGFX_DRIVER_NAME,
|
||||
.desc = VMWGFX_DRIVER_DESC,
|
||||
.date = VMWGFX_DRIVER_DATE,
|
||||
@@ -863,3 +951,7 @@ module_exit(vmwgfx_exit);
|
||||
MODULE_AUTHOR("VMware Inc. and others");
|
||||
MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device");
|
||||
MODULE_LICENSE("GPL and additional rights");
|
||||
MODULE_VERSION(__stringify(VMWGFX_DRIVER_MAJOR) "."
|
||||
__stringify(VMWGFX_DRIVER_MINOR) "."
|
||||
__stringify(VMWGFX_DRIVER_PATCHLEVEL) "."
|
||||
"0");
|
||||
|
||||
@@ -39,9 +39,9 @@
#include "ttm/ttm_execbuf_util.h"
#include "ttm/ttm_module.h"

#define VMWGFX_DRIVER_DATE "20100209"
#define VMWGFX_DRIVER_DATE "20100927"
#define VMWGFX_DRIVER_MAJOR 1
#define VMWGFX_DRIVER_MINOR 2
#define VMWGFX_DRIVER_MINOR 4
#define VMWGFX_DRIVER_PATCHLEVEL 0
#define VMWGFX_FILE_PAGE_OFFSET 0x00100000
#define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -49,6 +49,9 @@
#define VMWGFX_MAX_GMRS 2048
#define VMWGFX_MAX_DISPLAYS 16

#define VMW_PL_GMR TTM_PL_PRIV0
#define VMW_PL_FLAG_GMR TTM_PL_FLAG_PRIV0
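/*
 * VMW_PL_GMR maps GMR space onto TTM's first driver-private placement, so
 * GMR ids are managed by the same placement and eviction machinery as VRAM
 * and system memory; the limit on usable slots comes from max_gmr_ids, not
 * from a byte size.
 */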
struct vmw_fpriv {
|
||||
struct drm_master *locked_master;
|
||||
struct ttm_object_file *tfile;
|
||||
@@ -57,8 +60,6 @@ struct vmw_fpriv {
|
||||
struct vmw_dma_buffer {
|
||||
struct ttm_buffer_object base;
|
||||
struct list_head validate_list;
|
||||
struct list_head gmr_lru;
|
||||
uint32_t gmr_id;
|
||||
bool gmr_bound;
|
||||
uint32_t cur_validate_node;
|
||||
bool on_validate_list;
|
||||
@@ -151,6 +152,8 @@ struct vmw_overlay;
|
||||
|
||||
struct vmw_master {
|
||||
struct ttm_lock lock;
|
||||
struct mutex fb_surf_mutex;
|
||||
struct list_head fb_surf;
|
||||
};
|
||||
|
||||
struct vmw_vga_topology_state {
|
||||
@@ -182,6 +185,7 @@ struct vmw_private {
|
||||
uint32_t capabilities;
|
||||
uint32_t max_gmr_descriptors;
|
||||
uint32_t max_gmr_ids;
|
||||
bool has_gmr;
|
||||
struct mutex hw_mutex;
|
||||
|
||||
/*
|
||||
@@ -263,14 +267,6 @@ struct vmw_private {
|
||||
uint32_t val_seq;
|
||||
struct mutex cmdbuf_mutex;
|
||||
|
||||
/**
|
||||
* GMR management. Protected by the lru spinlock.
|
||||
*/
|
||||
|
||||
struct ida gmr_ida;
|
||||
struct list_head gmr_lru;
|
||||
|
||||
|
||||
/**
|
||||
* Operating mode.
|
||||
*/
|
||||
@@ -286,6 +282,7 @@ struct vmw_private {
|
||||
struct vmw_master *active_master;
|
||||
struct vmw_master fbdev_master;
|
||||
struct notifier_block pm_nb;
|
||||
bool suspended;
|
||||
|
||||
struct mutex release_mutex;
|
||||
uint32_t num_3d_resources;
|
||||
@@ -331,7 +328,9 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv);
|
||||
*/
|
||||
|
||||
extern int vmw_gmr_bind(struct vmw_private *dev_priv,
|
||||
struct ttm_buffer_object *bo);
|
||||
struct page *pages[],
|
||||
unsigned long num_pages,
|
||||
int gmr_id);
|
||||
extern void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id);
|
||||
|
||||
/**
|
||||
@@ -380,14 +379,10 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
|
||||
extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
|
||||
extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
|
||||
uint32_t id, struct vmw_dma_buffer **out);
|
||||
extern uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo);
|
||||
extern void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id);
|
||||
extern int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id);
|
||||
extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo);
|
||||
extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
|
||||
struct vmw_dma_buffer *bo);
|
||||
extern void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo);
|
||||
extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
|
||||
@@ -439,6 +434,7 @@ extern int vmw_mmap(struct file *filp, struct vm_area_struct *vma);
|
||||
extern struct ttm_placement vmw_vram_placement;
|
||||
extern struct ttm_placement vmw_vram_ne_placement;
|
||||
extern struct ttm_placement vmw_vram_sys_placement;
|
||||
extern struct ttm_placement vmw_vram_gmr_placement;
|
||||
extern struct ttm_placement vmw_sys_placement;
|
||||
extern struct ttm_bo_driver vmw_bo_driver;
|
||||
extern int vmw_dma_quiescent(struct drm_device *dev);
|
||||
@@ -518,6 +514,10 @@ void vmw_kms_write_svga(struct vmw_private *vmw_priv,
|
||||
unsigned bbp, unsigned depth);
|
||||
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
|
||||
struct drm_file *file_priv);
|
||||
void vmw_kms_idle_workqueues(struct vmw_master *vmaster);
|
||||
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
|
||||
uint32_t pitch,
|
||||
uint32_t height);
|
||||
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
|
||||
|
||||
/**
|
||||
@@ -536,6 +536,12 @@ int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id);
|
||||
int vmw_overlay_num_overlays(struct vmw_private *dev_priv);
|
||||
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
|
||||
|
||||
/**
|
||||
* GMR Id manager
|
||||
*/
|
||||
|
||||
extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
|
||||
|
||||
/**
|
||||
* Inline helper functions
|
||||
*/
|
||||
|
||||
@@ -538,8 +538,11 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
|
||||
reloc = &sw_context->relocs[i];
|
||||
validate = &sw_context->val_bufs[reloc->index];
|
||||
bo = validate->bo;
|
||||
reloc->location->offset += bo->offset;
|
||||
reloc->location->gmrId = vmw_dmabuf_gmr(bo);
|
||||
if (bo->mem.mem_type == TTM_PL_VRAM) {
|
||||
reloc->location->offset += bo->offset;
|
||||
reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
|
||||
} else
|
||||
reloc->location->gmrId = bo->mem.start;
|
||||
}
|
||||
vmw_free_relocations(sw_context);
|
||||
}
|
||||
@@ -563,25 +566,14 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
|
||||
{
|
||||
int ret;
|
||||
|
||||
if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
|
||||
return 0;
|
||||
|
||||
/**
|
||||
* Put BO in VRAM, only if there is space.
|
||||
* Put BO in VRAM if there is space, otherwise as a GMR.
|
||||
* If there is no space in VRAM and GMR ids are all used up,
|
||||
* start evicting GMRs to make room. If the DMA buffer can't be
|
||||
* used as a GMR, this will return -ENOMEM.
|
||||
*/
|
||||
|
||||
ret = ttm_bo_validate(bo, &vmw_vram_sys_placement, true, false, false);
|
||||
if (unlikely(ret == -ERESTARTSYS))
|
||||
return ret;
|
||||
|
||||
/**
|
||||
* Otherwise, set it up as GMR.
|
||||
*/
|
||||
|
||||
if (vmw_dmabuf_gmr(bo) != SVGA_GMR_NULL)
|
||||
return 0;
|
||||
|
||||
ret = vmw_gmr_bind(dev_priv, bo);
|
||||
ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
|
||||
if (likely(ret == 0 || ret == -ERESTARTSYS))
|
||||
return ret;
|
||||
|
||||
@@ -590,6 +582,7 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
|
||||
* previous contents.
|
||||
*/
|
||||
|
||||
DRM_INFO("Falling through to VRAM.\n");
|
||||
ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
|
||||
return ret;
|
||||
}
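/*
 * Net effect of the rewritten function above: the buffer is first validated
 * against &vmw_vram_gmr_placement (VRAM if there is room, otherwise a GMR
 * id), and only if that fails does the code fall back to plain
 * &vmw_vram_placement, this time evicting other VRAM contents.
 */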
|
||||
|
||||
@@ -144,6 +144,13 @@ static int vmw_fb_check_var(struct fb_var_screeninfo *var,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (!vmw_kms_validate_mode_vram(vmw_priv,
|
||||
info->fix.line_length,
|
||||
var->yoffset + var->yres)) {
|
||||
DRM_ERROR("Requested geom can not fit in framebuffer\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -205,6 +212,9 @@ static void vmw_fb_dirty_flush(struct vmw_fb_par *par)
|
||||
SVGAFifoCmdUpdate body;
|
||||
} *cmd;
|
||||
|
||||
if (vmw_priv->suspended)
|
||||
return;
|
||||
|
||||
spin_lock_irqsave(&par->dirty.lock, flags);
|
||||
if (!par->dirty.active) {
|
||||
spin_unlock_irqrestore(&par->dirty.lock, flags);
|
||||
@@ -616,7 +626,8 @@ int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
|
||||
goto err_unlock;
|
||||
|
||||
if (bo->mem.mem_type == TTM_PL_VRAM &&
|
||||
bo->mem.mm_node->start < bo->num_pages)
|
||||
bo->mem.start < bo->num_pages &&
|
||||
bo->mem.start > 0)
|
||||
(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
|
||||
false, false);
|
||||
|
||||
|
||||
@@ -146,7 +146,7 @@ static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
|
||||
*/
|
||||
|
||||
static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
|
||||
unsigned long num_pages)
|
||||
unsigned long num_pages)
|
||||
{
|
||||
unsigned long prev_pfn = ~(0UL);
|
||||
unsigned long pfn;
|
||||
@@ -163,45 +163,33 @@ static unsigned long vmw_gmr_count_descriptors(struct page *pages[],
|
||||
}
|
||||
|
||||
int vmw_gmr_bind(struct vmw_private *dev_priv,
|
||||
struct ttm_buffer_object *bo)
|
||||
struct page *pages[],
|
||||
unsigned long num_pages,
|
||||
int gmr_id)
|
||||
{
|
||||
struct ttm_tt *ttm = bo->ttm;
|
||||
unsigned long descriptors;
|
||||
int ret;
|
||||
uint32_t id;
|
||||
struct list_head desc_pages;
|
||||
int ret;
|
||||
|
||||
if (!(dev_priv->capabilities & SVGA_CAP_GMR))
|
||||
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
|
||||
return -EINVAL;
|
||||
|
||||
ret = ttm_tt_populate(ttm);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
descriptors = vmw_gmr_count_descriptors(ttm->pages, ttm->num_pages);
|
||||
if (unlikely(descriptors > dev_priv->max_gmr_descriptors))
|
||||
if (vmw_gmr_count_descriptors(pages, num_pages) >
|
||||
dev_priv->max_gmr_descriptors)
|
||||
return -EINVAL;
|
||||
|
||||
INIT_LIST_HEAD(&desc_pages);
|
||||
ret = vmw_gmr_build_descriptors(&desc_pages, ttm->pages,
|
||||
ttm->num_pages);
|
||||
|
||||
ret = vmw_gmr_build_descriptors(&desc_pages, pages, num_pages);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
ret = vmw_gmr_id_alloc(dev_priv, &id);
|
||||
if (unlikely(ret != 0))
|
||||
goto out_no_id;
|
||||
|
||||
vmw_gmr_fire_descriptors(dev_priv, id, &desc_pages);
|
||||
vmw_gmr_fire_descriptors(dev_priv, gmr_id, &desc_pages);
|
||||
vmw_gmr_free_descriptors(&desc_pages);
|
||||
vmw_dmabuf_set_gmr(bo, id);
|
||||
|
||||
return 0;
|
||||
|
||||
out_no_id:
|
||||
vmw_gmr_free_descriptors(&desc_pages);
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
|
||||
{
|
||||
mutex_lock(&dev_priv->hw_mutex);
|
||||
|
||||
drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c (new file, 137 lines)
@@ -0,0 +1,137 @@
|
||||
/**************************************************************************
|
||||
*
|
||||
* Copyright (c) 2007-2010 VMware, Inc., Palo Alto, CA., USA
|
||||
* All Rights Reserved.
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a
|
||||
* copy of this software and associated documentation files (the
|
||||
* "Software"), to deal in the Software without restriction, including
|
||||
* without limitation the rights to use, copy, modify, merge, publish,
|
||||
* distribute, sub license, and/or sell copies of the Software, and to
|
||||
* permit persons to whom the Software is furnished to do so, subject to
|
||||
* the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice (including the
|
||||
* next paragraph) shall be included in all copies or substantial portions
|
||||
* of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
|
||||
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
* USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
*
|
||||
**************************************************************************/
|
||||
/*
|
||||
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
|
||||
*/
|
||||
|
||||
#include "vmwgfx_drv.h"
|
||||
#include "ttm/ttm_module.h"
|
||||
#include "ttm/ttm_bo_driver.h"
|
||||
#include "ttm/ttm_placement.h"
|
||||
#include <linux/idr.h>
|
||||
#include <linux/spinlock.h>
|
||||
#include <linux/kernel.h>
|
||||
|
||||
struct vmwgfx_gmrid_man {
|
||||
spinlock_t lock;
|
||||
struct ida gmr_ida;
|
||||
uint32_t max_gmr_ids;
|
||||
};
|
||||
|
||||
static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
|
||||
struct ttm_buffer_object *bo,
|
||||
struct ttm_placement *placement,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct vmwgfx_gmrid_man *gman =
|
||||
(struct vmwgfx_gmrid_man *)man->priv;
|
||||
int ret;
|
||||
int id;
|
||||
|
||||
mem->mm_node = NULL;
|
||||
|
||||
do {
|
||||
if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock(&gman->lock);
|
||||
ret = ida_get_new(&gman->gmr_ida, &id);
|
||||
|
||||
if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
|
||||
ida_remove(&gman->gmr_ida, id);
|
||||
spin_unlock(&gman->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
spin_unlock(&gman->lock);
|
||||
|
||||
} while (ret == -EAGAIN);
|
||||
|
||||
if (likely(ret == 0)) {
|
||||
mem->mm_node = gman;
|
||||
mem->start = id;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
|
||||
struct ttm_mem_reg *mem)
|
||||
{
|
||||
struct vmwgfx_gmrid_man *gman =
|
||||
(struct vmwgfx_gmrid_man *)man->priv;
|
||||
|
||||
if (mem->mm_node) {
|
||||
spin_lock(&gman->lock);
|
||||
ida_remove(&gman->gmr_ida, mem->start);
|
||||
spin_unlock(&gman->lock);
|
||||
mem->mm_node = NULL;
|
||||
}
|
||||
}
|
||||
|
||||
static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
|
||||
unsigned long p_size)
|
||||
{
|
||||
struct vmwgfx_gmrid_man *gman =
|
||||
kzalloc(sizeof(*gman), GFP_KERNEL);
|
||||
|
||||
if (unlikely(gman == NULL))
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock_init(&gman->lock);
|
||||
ida_init(&gman->gmr_ida);
|
||||
gman->max_gmr_ids = p_size;
|
||||
man->priv = (void *) gman;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int vmw_gmrid_man_takedown(struct ttm_mem_type_manager *man)
|
||||
{
|
||||
struct vmwgfx_gmrid_man *gman =
|
||||
(struct vmwgfx_gmrid_man *)man->priv;
|
||||
|
||||
if (gman) {
|
||||
ida_destroy(&gman->gmr_ida);
|
||||
kfree(gman);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void vmw_gmrid_man_debug(struct ttm_mem_type_manager *man,
|
||||
const char *prefix)
|
||||
{
|
||||
printk(KERN_INFO "%s: No debug info available for the GMR "
|
||||
"id manager.\n", prefix);
|
||||
}
|
||||
|
||||
const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
|
||||
vmw_gmrid_man_init,
|
||||
vmw_gmrid_man_takedown,
|
||||
vmw_gmrid_man_get_node,
|
||||
vmw_gmrid_man_put_node,
|
||||
vmw_gmrid_man_debug
|
||||
};
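/*
 * The table above uses positional initializers. Written with designated
 * initializers (assuming the ttm_mem_type_manager_func field names of this
 * kernel generation: init, takedown, get_node, put_node, debug), it would
 * read roughly as:
 *
 *	const struct ttm_mem_type_manager_func vmw_gmrid_manager_func = {
 *		.init     = vmw_gmrid_man_init,
 *		.takedown = vmw_gmrid_man_takedown,
 *		.get_node = vmw_gmrid_man_get_node,
 *		.put_node = vmw_gmrid_man_put_node,
 *		.debug    = vmw_gmrid_man_debug,
 *	};
 */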
|
||||
@@ -54,6 +54,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
|
||||
case DRM_VMW_PARAM_FIFO_CAPS:
|
||||
param->value = dev_priv->fifo.capabilities;
|
||||
break;
|
||||
case DRM_VMW_PARAM_MAX_FB_SIZE:
|
||||
param->value = dev_priv->vram_size;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
|
||||
param->param);
|
||||
|
||||
@@ -332,18 +332,55 @@ struct vmw_framebuffer_surface {
|
||||
struct delayed_work d_work;
|
||||
struct mutex work_lock;
|
||||
bool present_fs;
|
||||
struct list_head head;
|
||||
struct drm_master *master;
|
||||
};
|
||||
|
||||
/**
|
||||
* vmw_kms_idle_workqueues - Flush workqueues on this master
|
||||
*
|
||||
* @vmaster - Pointer identifying the master, for the surfaces of which
|
||||
* we idle the dirty work queues.
|
||||
*
|
||||
* This function should be called with the ttm lock held in exclusive mode
|
||||
* to idle all dirty work queues before the fifo is taken down.
|
||||
*
|
||||
* The work task may actually requeue itself, but after the flush returns we're
|
||||
* sure that there's nothing to present, since the ttm lock is held in
|
||||
* exclusive mode, so the fifo will never get used.
|
||||
*/
|
||||
|
||||
void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
|
||||
{
|
||||
struct vmw_framebuffer_surface *entry;
|
||||
|
||||
mutex_lock(&vmaster->fb_surf_mutex);
|
||||
list_for_each_entry(entry, &vmaster->fb_surf, head) {
|
||||
if (cancel_delayed_work_sync(&entry->d_work))
|
||||
(void) entry->d_work.work.func(&entry->d_work.work);
|
||||
|
||||
(void) cancel_delayed_work_sync(&entry->d_work);
|
||||
}
|
||||
mutex_unlock(&vmaster->fb_surf_mutex);
|
||||
}
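/*
 * Caller side, from the vmw_master_drop hunk earlier in this diff: the ttm
 * lock is taken for the VT switch before the work queues are idled, so no
 * new dirty work can reach the fifo afterwards:
 *
 *	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
 *	vmw_kms_idle_workqueues(vmaster);
 */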
|
||||
|
||||
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
|
||||
{
|
||||
struct vmw_framebuffer_surface *vfb =
|
||||
struct vmw_framebuffer_surface *vfbs =
|
||||
vmw_framebuffer_to_vfbs(framebuffer);
|
||||
struct vmw_master *vmaster = vmw_master(vfbs->master);
|
||||
|
||||
cancel_delayed_work_sync(&vfb->d_work);
|
||||
|
||||
mutex_lock(&vmaster->fb_surf_mutex);
|
||||
list_del(&vfbs->head);
|
||||
mutex_unlock(&vmaster->fb_surf_mutex);
|
||||
|
||||
cancel_delayed_work_sync(&vfbs->d_work);
|
||||
drm_master_put(&vfbs->master);
|
||||
drm_framebuffer_cleanup(framebuffer);
|
||||
vmw_surface_unreference(&vfb->surface);
|
||||
vmw_surface_unreference(&vfbs->surface);
|
||||
|
||||
kfree(framebuffer);
|
||||
kfree(vfbs);
|
||||
}
|
||||
|
||||
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
|
||||
@@ -362,6 +399,12 @@ static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
|
||||
SVGA3dCopyRect cr;
|
||||
} *cmd;
|
||||
|
||||
/**
|
||||
* Strictly we should take the ttm_lock in read mode before accessing
|
||||
* the fifo, to make sure the fifo is present and up. However,
|
||||
* instead we flush all workqueues under the ttm lock in exclusive mode
|
||||
* before taking down the fifo.
|
||||
*/
|
||||
mutex_lock(&vfbs->work_lock);
|
||||
if (!vfbs->present_fs)
|
||||
goto out_unlock;
|
||||
@@ -392,17 +435,20 @@ out_unlock:
|
||||
|
||||
|
||||
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
|
||||
struct drm_file *file_priv,
|
||||
unsigned flags, unsigned color,
|
||||
struct drm_clip_rect *clips,
|
||||
unsigned num_clips)
|
||||
{
|
||||
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
|
||||
struct vmw_master *vmaster = vmw_master(file_priv->master);
|
||||
struct vmw_framebuffer_surface *vfbs =
|
||||
vmw_framebuffer_to_vfbs(framebuffer);
|
||||
struct vmw_surface *surf = vfbs->surface;
|
||||
struct drm_clip_rect norect;
|
||||
SVGA3dCopyRect *cr;
|
||||
int i, inc = 1;
|
||||
int ret;
|
||||
|
||||
struct {
|
||||
SVGA3dCmdHeader header;
|
||||
@@ -410,6 +456,13 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
|
||||
SVGA3dCopyRect cr;
|
||||
} *cmd;
|
||||
|
||||
if (unlikely(vfbs->master != file_priv->master))
|
||||
return -EINVAL;
|
||||
|
||||
ret = ttm_read_lock(&vmaster->lock, true);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
if (!num_clips ||
|
||||
!(dev_priv->fifo.capabilities &
|
||||
SVGA_FIFO_CAP_SCREEN_OBJECT)) {
|
||||
@@ -425,6 +478,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
|
||||
*/
|
||||
vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
|
||||
}
|
||||
ttm_read_unlock(&vmaster->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -442,6 +496,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
|
||||
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
|
||||
if (unlikely(cmd == NULL)) {
|
||||
DRM_ERROR("Fifo reserve failed.\n");
|
||||
ttm_read_unlock(&vmaster->lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@@ -461,7 +516,7 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
|
||||
}
|
||||
|
||||
vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
|
||||
|
||||
ttm_read_unlock(&vmaster->lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@@ -471,16 +526,57 @@ static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
|
||||
.create_handle = vmw_framebuffer_create_handle,
|
||||
};
|
||||
|
||||
int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
|
||||
struct vmw_surface *surface,
|
||||
struct vmw_framebuffer **out,
|
||||
unsigned width, unsigned height)
|
||||
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
|
||||
struct drm_file *file_priv,
|
||||
struct vmw_surface *surface,
|
||||
struct vmw_framebuffer **out,
|
||||
const struct drm_mode_fb_cmd
|
||||
*mode_cmd)
|
||||
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct vmw_framebuffer_surface *vfbs;
|
||||
enum SVGA3dSurfaceFormat format;
|
||||
struct vmw_master *vmaster = vmw_master(file_priv->master);
|
||||
int ret;
|
||||
|
||||
/*
|
||||
* Sanity checks.
|
||||
*/
|
||||
|
||||
if (unlikely(surface->mip_levels[0] != 1 ||
|
||||
surface->num_sizes != 1 ||
|
||||
surface->sizes[0].width < mode_cmd->width ||
|
||||
surface->sizes[0].height < mode_cmd->height ||
|
||||
surface->sizes[0].depth != 1)) {
|
||||
DRM_ERROR("Incompatible surface dimensions "
|
||||
"for requested mode.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
switch (mode_cmd->depth) {
|
||||
case 32:
|
||||
format = SVGA3D_A8R8G8B8;
|
||||
break;
|
||||
case 24:
|
||||
format = SVGA3D_X8R8G8B8;
|
||||
break;
|
||||
case 16:
|
||||
format = SVGA3D_R5G6B5;
|
||||
break;
|
||||
case 15:
|
||||
format = SVGA3D_A1R5G5B5;
|
||||
break;
|
||||
default:
|
||||
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (unlikely(format != surface->format)) {
|
||||
DRM_ERROR("Invalid surface format for requested mode.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
|
||||
if (!vfbs) {
|
||||
ret = -ENOMEM;
|
||||
@@ -498,16 +594,22 @@ int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
|
||||
}
|
||||
|
||||
/* XXX get the first 3 from the surface info */
|
||||
vfbs->base.base.bits_per_pixel = 32;
|
||||
vfbs->base.base.pitch = width * 32 / 4;
|
||||
vfbs->base.base.depth = 24;
|
||||
vfbs->base.base.width = width;
|
||||
vfbs->base.base.height = height;
|
||||
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
|
||||
vfbs->base.base.pitch = mode_cmd->pitch;
|
||||
vfbs->base.base.depth = mode_cmd->depth;
|
||||
vfbs->base.base.width = mode_cmd->width;
|
||||
vfbs->base.base.height = mode_cmd->height;
|
||||
vfbs->base.pin = &vmw_surface_dmabuf_pin;
|
||||
vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
|
||||
vfbs->surface = surface;
|
||||
vfbs->master = drm_master_get(file_priv->master);
|
||||
mutex_init(&vfbs->work_lock);
|
||||
|
||||
mutex_lock(&vmaster->fb_surf_mutex);
|
||||
INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
|
||||
list_add_tail(&vfbs->head, &vmaster->fb_surf);
|
||||
mutex_unlock(&vmaster->fb_surf_mutex);
|
||||
|
||||
*out = &vfbs->base;
|
||||
|
||||
return 0;
|
||||
@@ -544,18 +646,25 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
|
||||
}
|
||||
|
||||
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
|
||||
struct drm_file *file_priv,
|
||||
unsigned flags, unsigned color,
|
||||
struct drm_clip_rect *clips,
|
||||
unsigned num_clips)
|
||||
{
|
||||
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
|
||||
struct vmw_master *vmaster = vmw_master(file_priv->master);
|
||||
struct drm_clip_rect norect;
|
||||
int ret;
|
||||
struct {
|
||||
uint32_t header;
|
||||
SVGAFifoCmdUpdate body;
|
||||
} *cmd;
|
||||
int i, increment = 1;
|
||||
|
||||
ret = ttm_read_lock(&vmaster->lock, true);
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
if (!num_clips) {
|
||||
num_clips = 1;
|
||||
clips = &norect;
|
||||
@@ -570,6 +679,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
|
||||
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
|
||||
if (unlikely(cmd == NULL)) {
|
||||
DRM_ERROR("Fifo reserve failed.\n");
|
||||
ttm_read_unlock(&vmaster->lock);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
@@ -582,6 +692,7 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
|
||||
}
|
||||
|
||||
vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
|
||||
ttm_read_unlock(&vmaster->lock);
|
||||
|
||||
return 0;
|
||||
}
|
||||
@@ -659,16 +770,25 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
|
||||
return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
|
||||
}
|
||||
|
||||
int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *dmabuf,
|
||||
struct vmw_framebuffer **out,
|
||||
unsigned width, unsigned height)
|
||||
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
|
||||
struct vmw_dma_buffer *dmabuf,
|
||||
struct vmw_framebuffer **out,
|
||||
const struct drm_mode_fb_cmd
|
||||
*mode_cmd)
|
||||
|
||||
{
|
||||
struct drm_device *dev = dev_priv->dev;
|
||||
struct vmw_framebuffer_dmabuf *vfbd;
|
||||
unsigned int requested_size;
|
||||
int ret;
|
||||
|
||||
requested_size = mode_cmd->height * mode_cmd->pitch;
|
||||
if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
|
||||
DRM_ERROR("Screen buffer object size is too small "
|
||||
"for requested mode.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
|
||||
if (!vfbd) {
|
||||
ret = -ENOMEM;
|
||||
@@ -685,12 +805,11 @@ int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
|
||||
goto out_err3;
|
||||
}
|
||||
|
||||
/* XXX get the first 3 from the surface info */
|
||||
vfbd->base.base.bits_per_pixel = 32;
|
||||
vfbd->base.base.pitch = width * vfbd->base.base.bits_per_pixel / 8;
|
||||
vfbd->base.base.depth = 24;
|
||||
vfbd->base.base.width = width;
|
||||
vfbd->base.base.height = height;
|
||||
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
|
||||
vfbd->base.base.pitch = mode_cmd->pitch;
|
||||
vfbd->base.base.depth = mode_cmd->depth;
|
||||
vfbd->base.base.width = mode_cmd->width;
|
||||
vfbd->base.base.height = mode_cmd->height;
|
||||
vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
|
||||
vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
|
||||
vfbd->buffer = dmabuf;
|
||||
@@ -719,8 +838,25 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
|
||||
struct vmw_framebuffer *vfb = NULL;
|
||||
struct vmw_surface *surface = NULL;
|
||||
struct vmw_dma_buffer *bo = NULL;
|
||||
u64 required_size;
|
||||
int ret;
|
||||
|
||||
/**
|
||||
* This code should be conditioned on Screen Objects not being used.
|
||||
* If screen objects are used, we can allocate a GMR to hold the
|
||||
* requested framebuffer.
|
||||
*/
|
||||
|
||||
required_size = mode_cmd->pitch * mode_cmd->height;
|
||||
if (unlikely(required_size > (u64) dev_priv->vram_size)) {
|
||||
DRM_ERROR("VRAM size is too small for requested mode.\n");
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/**
|
||||
* End conditioned code.
|
||||
*/
|
||||
|
||||
ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
|
||||
mode_cmd->handle, &surface);
|
||||
if (ret)
|
||||
@@ -729,8 +865,8 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
|
||||
if (!surface->scanout)
|
||||
goto err_not_scanout;
|
||||
|
||||
ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
|
||||
mode_cmd->width, mode_cmd->height);
|
||||
ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv, surface,
|
||||
&vfb, mode_cmd);
|
||||
|
||||
/* vmw_user_surface_lookup takes one ref so does new_fb */
|
||||
vmw_surface_unreference(&surface);
|
||||
@@ -751,7 +887,7 @@ try_dmabuf:
|
||||
}
|
||||
|
||||
ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
|
||||
mode_cmd->width, mode_cmd->height);
|
||||
mode_cmd);
|
||||
|
||||
/* vmw_user_dmabuf_lookup takes one ref so does new_fb */
|
||||
vmw_dmabuf_unreference(&bo);
|
||||
@@ -889,6 +1025,9 @@ int vmw_kms_save_vga(struct vmw_private *vmw_priv)
|
||||
vmw_priv->num_displays = vmw_read(vmw_priv,
|
||||
SVGA_REG_NUM_GUEST_DISPLAYS);
|
||||
|
||||
if (vmw_priv->num_displays == 0)
|
||||
vmw_priv->num_displays = 1;
|
||||
|
||||
for (i = 0; i < vmw_priv->num_displays; ++i) {
|
||||
save = &vmw_priv->vga_save[i];
|
||||
vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
|
||||
@@ -997,6 +1136,13 @@ out_unlock:
|
||||
return ret;
|
||||
}
|
||||
|
||||
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
|
||||
uint32_t pitch,
|
||||
uint32_t height)
|
||||
{
|
||||
return ((u64) pitch * (u64) height) < (u64) dev_priv->vram_size;
|
||||
}
|
||||
|
||||
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
|
||||
{
|
||||
return 0;
|
||||
|
||||
@@ -427,7 +427,9 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
|
||||
{
|
||||
struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
|
||||
struct drm_device *dev = connector->dev;
|
||||
struct vmw_private *dev_priv = vmw_priv(dev);
|
||||
struct drm_display_mode *mode = NULL;
|
||||
struct drm_display_mode *bmode;
|
||||
struct drm_display_mode prefmode = { DRM_MODE("preferred",
|
||||
DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
@@ -443,22 +445,30 @@ static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
|
||||
mode->hdisplay = ldu->pref_width;
|
||||
mode->vdisplay = ldu->pref_height;
|
||||
mode->vrefresh = drm_mode_vrefresh(mode);
|
||||
drm_mode_probed_add(connector, mode);
|
||||
if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
|
||||
mode->vdisplay)) {
|
||||
drm_mode_probed_add(connector, mode);
|
||||
|
||||
if (ldu->pref_mode) {
|
||||
list_del_init(&ldu->pref_mode->head);
|
||||
drm_mode_destroy(dev, ldu->pref_mode);
|
||||
if (ldu->pref_mode) {
|
||||
list_del_init(&ldu->pref_mode->head);
|
||||
drm_mode_destroy(dev, ldu->pref_mode);
|
||||
}
|
||||
|
||||
ldu->pref_mode = mode;
|
||||
}
|
||||
|
||||
ldu->pref_mode = mode;
|
||||
}
|
||||
|
||||
for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
|
||||
if (vmw_ldu_connector_builtin[i].hdisplay > max_width ||
|
||||
vmw_ldu_connector_builtin[i].vdisplay > max_height)
|
||||
bmode = &vmw_ldu_connector_builtin[i];
|
||||
if (bmode->hdisplay > max_width ||
|
||||
bmode->vdisplay > max_height)
|
||||
continue;
|
||||
|
||||
mode = drm_mode_duplicate(dev, &vmw_ldu_connector_builtin[i]);
|
||||
if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
|
||||
bmode->vdisplay))
|
||||
continue;
|
||||
|
||||
mode = drm_mode_duplicate(dev, bmode);
|
||||
if (!mode)
|
||||
return 0;
|
||||
mode->vrefresh = drm_mode_vrefresh(mode);
|
||||
|
||||
@@ -765,28 +765,11 @@ static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
|
||||
return bo_user_size + page_array_size;
|
||||
}
|
||||
|
||||
void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
|
||||
struct ttm_bo_global *glob = bo->glob;
|
||||
struct vmw_private *dev_priv =
|
||||
container_of(bo->bdev, struct vmw_private, bdev);
|
||||
|
||||
if (vmw_bo->gmr_bound) {
|
||||
vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
|
||||
spin_lock(&glob->lru_lock);
|
||||
ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
|
||||
spin_unlock(&glob->lru_lock);
|
||||
vmw_bo->gmr_bound = false;
|
||||
}
|
||||
}
|
||||
|
||||
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
|
||||
struct ttm_bo_global *glob = bo->glob;
|
||||
|
||||
vmw_dmabuf_gmr_unbind(bo);
|
||||
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
|
||||
kfree(vmw_bo);
|
||||
}
|
||||
@@ -818,10 +801,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
|
||||
|
||||
memset(vmw_bo, 0, sizeof(*vmw_bo));
|
||||
|
||||
INIT_LIST_HEAD(&vmw_bo->gmr_lru);
|
||||
INIT_LIST_HEAD(&vmw_bo->validate_list);
|
||||
vmw_bo->gmr_id = 0;
|
||||
vmw_bo->gmr_bound = false;
|
||||
|
||||
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
|
||||
ttm_bo_type_device, placement,
|
||||
@@ -835,7 +815,6 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
|
||||
struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
|
||||
struct ttm_bo_global *glob = bo->glob;
|
||||
|
||||
vmw_dmabuf_gmr_unbind(bo);
|
||||
ttm_mem_global_free(glob->mem_glob, bo->acc_size);
|
||||
kfree(vmw_user_bo);
|
||||
}
|
||||
@@ -938,25 +917,6 @@ void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
|
||||
vmw_bo->on_validate_list = false;
|
||||
}
|
||||
|
||||
uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
|
||||
{
|
||||
struct vmw_dma_buffer *vmw_bo;
|
||||
|
||||
if (bo->mem.mem_type == TTM_PL_VRAM)
|
||||
return SVGA_GMR_FRAMEBUFFER;
|
||||
|
||||
vmw_bo = vmw_dma_buffer(bo);
|
||||
|
||||
return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
|
||||
}
|
||||
|
||||
void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
|
||||
{
|
||||
struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
|
||||
vmw_bo->gmr_bound = true;
|
||||
vmw_bo->gmr_id = id;
|
||||
}
|
||||
|
||||
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
|
||||
uint32_t handle, struct vmw_dma_buffer **out)
|
||||
{
|
||||
@@ -985,41 +945,6 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* TODO: Implement a gmr id eviction mechanism. Currently we just fail
|
||||
* when we're out of ids, causing GMR space to be allocated
|
||||
* out of VRAM.
|
||||
*/
|
||||
|
||||
int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
|
||||
{
|
||||
struct ttm_bo_global *glob = dev_priv->bdev.glob;
|
||||
int id;
|
||||
int ret;
|
||||
|
||||
do {
|
||||
if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
|
||||
return -ENOMEM;
|
||||
|
||||
spin_lock(&glob->lru_lock);
|
||||
ret = ida_get_new(&dev_priv->gmr_ida, &id);
|
||||
spin_unlock(&glob->lru_lock);
|
||||
} while (ret == -EAGAIN);
|
||||
|
||||
if (unlikely(ret != 0))
|
||||
return ret;
|
||||
|
||||
if (unlikely(id >= dev_priv->max_gmr_ids)) {
|
||||
spin_lock(&glob->lru_lock);
|
||||
ida_remove(&dev_priv->gmr_ida, id);
|
||||
spin_unlock(&glob->lru_lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
*p_id = (uint32_t) id;
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* Stream management
|
||||
*/