mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/
synced 2026-04-18 06:33:43 -04:00
drm/vmwgfx: Fix compat shader namespace
Contrary to the host-backed shader interface that has a per-context namespace for shaders, the compat shader namespace was per client (or rather, per file). Fix this so that the compat shader namespace is per context, and at the same time, make command-buffer-managed context resource management generic. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
This commit is contained in:
@@ -33,6 +33,7 @@ struct vmw_user_context {
 	struct ttm_base_object base;
 	struct vmw_resource res;
 	struct vmw_ctx_binding_state cbs;
+	struct vmw_cmdbuf_res_manager *man;
 };
 
 
@@ -103,7 +104,8 @@ static const vmw_scrub_func vmw_scrub_funcs[vmw_ctx_binding_max] = {
 
 static void vmw_hw_context_destroy(struct vmw_resource *res)
 {
-
+	struct vmw_user_context *uctx =
+		container_of(res, struct vmw_user_context, res);
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct {
 		SVGA3dCmdHeader header;
@@ -113,9 +115,9 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
 
 	if (res->func->destroy == vmw_gb_context_destroy) {
 		mutex_lock(&dev_priv->cmdbuf_mutex);
+		vmw_cmdbuf_res_man_destroy(uctx->man);
 		mutex_lock(&dev_priv->binding_mutex);
-		(void) vmw_context_binding_state_kill
-			(&container_of(res, struct vmw_user_context, res)->cbs);
+		(void) vmw_context_binding_state_kill(&uctx->cbs);
 		(void) vmw_gb_context_destroy(res);
 		mutex_unlock(&dev_priv->binding_mutex);
 		if (dev_priv->pinned_bo != NULL &&
@@ -152,13 +154,16 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 	ret = vmw_resource_init(dev_priv, res, true,
 				res_free, &vmw_gb_context_func);
 	res->backup_size = SVGA3D_CONTEXT_DATA_SIZE;
-	if (unlikely(ret != 0)) {
-		if (res_free)
-			res_free(res);
-		else
-			kfree(res);
-		return ret;
-	}
+	if (unlikely(ret != 0))
+		goto out_err;
+
+	if (dev_priv->has_mob) {
+		uctx->man = vmw_cmdbuf_res_man_create(dev_priv);
+		if (unlikely(IS_ERR(uctx->man))) {
+			ret = PTR_ERR(uctx->man);
+			uctx->man = NULL;
+			goto out_err;
+		}
+	}
 
 	memset(&uctx->cbs, 0, sizeof(uctx->cbs));
@@ -166,6 +171,13 @@ static int vmw_gb_context_init(struct vmw_private *dev_priv,
 
 	vmw_resource_activate(res, vmw_hw_context_destroy);
 	return 0;
+
+out_err:
+	if (res_free)
+		res_free(res);
+	else
+		kfree(res);
+	return ret;
 }
 
 static int vmw_context_init(struct vmw_private *dev_priv,
@@ -471,7 +483,8 @@ int vmw_context_define_ioctl(struct drm_device *dev, void *data,
 	 */
 
 	if (unlikely(vmw_user_context_size == 0))
-		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128;
+		vmw_user_context_size = ttm_round_pot(sizeof(*ctx)) + 128 +
+			((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0);
 
 	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
@@ -901,3 +914,8 @@ struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
 {
 	return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
 }
+
+struct vmw_cmdbuf_res_manager *vmw_context_res_man(struct vmw_resource *ctx)
+{
+	return container_of(ctx, struct vmw_user_context, res)->man;
+}
Reference in New Issue
Block a user