Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c')
 drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 1505 ++++++++++++--------------
 1 file changed, 643 insertions(+), 862 deletions(-)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 88b8178d4687..2ff7ba04d8c8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -36,6 +36,25 @@
 #define VMW_RES_HT_ORDER 12
 
 /*
+ * Helper macro to get dx_ctx_node if available otherwise print an error
+ * message. This is for use in command verifier function where if dx_ctx_node
+ * is not set then command is invalid.
+ */
+#define VMW_GET_CTX_NODE(__sw_context) \
+({ \
+        __sw_context->dx_ctx_node ? __sw_context->dx_ctx_node : ({ \
+                VMW_DEBUG_USER("SM context is not set at %s\n", __func__); \
+                __sw_context->dx_ctx_node; \
+        }); \
+})
+
+#define VMW_DECLARE_CMD_VAR(__var, __type) \
+        struct { \
+                SVGA3dCmdHeader header; \
+                __type body; \
+        } __var
+
+/**
  * struct vmw_relocation - Buffer object relocation
  *
  * @head: List head for the command submission context's relocation list
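Taken together, the two macros above absorb most of the boilerplate the per-command verifiers below keep repeating. A minimal sketch of a verifier built from them — the command type SVGA3dCmdDXFoo, its sid member, and the function name are placeholders for illustration, not part of this patch:

    static int vmw_cmd_dx_foo_check(struct vmw_private *dev_priv,
                                    struct vmw_sw_context *sw_context,
                                    SVGA3dCmdHeader *header)
    {
            /* Expands to: struct { SVGA3dCmdHeader header; SVGA3dCmdDXFoo body; } *cmd = ... */
            VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXFoo) =
                    container_of(header, typeof(*cmd), header);
            /* Evaluates to NULL when no DX context is set; the macro logs the failure itself. */
            struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);

            if (!ctx_node)
                    return -EINVAL;

            return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                                     VMW_RES_DIRTY_NONE, user_surface_converter,
                                     &cmd->body.sid, NULL);
    }

The typeof(*cmd) trick works because VMW_DECLARE_CMD_VAR declares the anonymous struct type together with the variable, so the declaration and the container_of() initialization can collapse into one statement — exactly the pattern the conversions later in this diff use.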
@@ -59,9 +78,8 @@ struct vmw_relocation {
  * command stream is replaced with the actual id after validation.
  * @vmw_res_rel_nop: NOP relocation. The command is unconditionally replaced
  * with a NOP.
- * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id
- * after validation is -1, the command is replaced with a NOP. Otherwise no
- * action.
+ * @vmw_res_rel_cond_nop: Conditional NOP relocation. If the resource id after
+ * validation is -1, the command is replaced with a NOP. Otherwise no action.
  */
 enum vmw_resource_relocation_type {
         vmw_res_rel_normal,
@@ -75,8 +93,8 @@ enum vmw_resource_relocation_type {
  *
  * @head: List head for the software context's relocation list.
  * @res: Non-ref-counted pointer to the resource.
- * @offset: Offset of single byte entries into the command buffer where the
- * id that needs fixup is located.
+ * @offset: Offset of single byte entries into the command buffer where the id
+ * that needs fixup is located.
  * @rel_type: Type of relocation.
  */
 struct vmw_resource_relocation {
@@ -86,8 +104,9 @@ struct vmw_resource_relocation {
         enum vmw_resource_relocation_type rel_type:3;
 };
 
-/*
+/**
  * struct vmw_ctx_validation_info - Extra validation metadata for contexts
+ *
  * @head: List head of context list
  * @ctx: The context resource
  * @cur: The context's persistent binding state
@@ -142,9 +161,10 @@ static size_t vmw_ptr_diff(void *a, void *b)
 
 /**
  * vmw_execbuf_bindings_commit - Commit modified binding state
+ *
  * @sw_context: The command submission context
- * @backoff: Whether this is part of the error path and binding state
- * changes should be ignored
+ * @backoff: Whether this is part of the error path and binding state changes
+ * should be ignored
  */
 static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
                                         bool backoff)
@@ -154,6 +174,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
         list_for_each_entry(entry, &sw_context->ctx_list, head) {
                 if (!backoff)
                         vmw_binding_state_commit(entry->cur, entry->staged);
+
                 if (entry->staged != sw_context->staged_bindings)
                         vmw_binding_state_free(entry->staged);
                 else
@@ -166,6 +187,7 @@ static void vmw_execbuf_bindings_commit(struct vmw_sw_context *sw_context,
 
 /**
  * vmw_bind_dx_query_mob - Bind the DX query MOB if referenced
+ *
  * @sw_context: The command submission context
  */
 static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
@@ -176,8 +198,8 @@ static void vmw_bind_dx_query_mob(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is
- * added to the validate list.
+ * vmw_cmd_ctx_first_setup - Perform the setup needed when a context is added to
+ * the validate list.
  *
  * @dev_priv: Pointer to the device private:
  * @sw_context: The command submission context
@@ -195,11 +217,8 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
                 goto out_err;
 
         if (!sw_context->staged_bindings) {
-                sw_context->staged_bindings =
-                        vmw_binding_state_alloc(dev_priv);
+                sw_context->staged_bindings = vmw_binding_state_alloc(dev_priv);
                 if (IS_ERR(sw_context->staged_bindings)) {
-                        DRM_ERROR("Failed to allocate context binding "
-                                  "information.\n");
                         ret = PTR_ERR(sw_context->staged_bindings);
                         sw_context->staged_bindings = NULL;
                         goto out_err;
@@ -209,8 +228,6 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
         if (sw_context->staged_bindings_inuse) {
                 node->staged = vmw_binding_state_alloc(dev_priv);
                 if (IS_ERR(node->staged)) {
-                        DRM_ERROR("Failed to allocate context binding "
-                                  "information.\n");
                         ret = PTR_ERR(node->staged);
                         node->staged = NULL;
                         goto out_err;
@@ -225,19 +242,20 @@ static int vmw_cmd_ctx_first_setup(struct vmw_private *dev_priv,
         list_add_tail(&node->head, &sw_context->ctx_list);
 
         return 0;
+
 out_err:
         return ret;
 }
 
 /**
- * vmw_execbuf_res_size - calculate extra size fore the resource validation
- * node
+ * vmw_execbuf_res_size - calculate extra size fore the resource validation node
+ *
  * @dev_priv: Pointer to the device private struct.
  * @res_type: The resource type.
  *
- * Guest-backed contexts and DX contexts require extra size to store
- * execbuf private information in the validation node. Typically the
- * binding manager associated data structures.
+ * Guest-backed contexts and DX contexts require extra size to store execbuf
+ * private information in the validation node. Typically the binding manager
+ * associated data structures.
 *
 * Returns: The extra size requirement based on resource type.
 */
@@ -254,8 +272,8 @@ static unsigned int vmw_execbuf_res_size(struct vmw_private *dev_priv,
  *
  * @rcache: Pointer to the entry to update.
  * @res: Pointer to the resource.
- * @private: Pointer to the execbuf-private space in the resource
- * validation node.
+ * @private: Pointer to the execbuf-private space in the resource validation
+ * node.
  */
 static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
                                       struct vmw_resource *res,
@@ -268,17 +286,19 @@ static void vmw_execbuf_rcache_update(struct vmw_res_cache_entry *rcache,
 }
 
 /**
- * vmw_execbuf_res_noref_val_add - Add a resource described by an
- * unreferenced rcu-protected pointer to the validation list.
+ * vmw_execbuf_res_noref_val_add - Add a resource described by an unreferenced
+ * rcu-protected pointer to the validation list.
+ *
  * @sw_context: Pointer to the software context.
  * @res: Unreferenced rcu-protected pointer to the resource.
+ * @dirty: Whether to change dirty status.
 *
- * Returns: 0 on success. Negative error code on failure. Typical error
- * codes are %-EINVAL on inconsistency and %-ESRCH if the resource was
- * doomed.
+ * Returns: 0 on success. Negative error code on failure. Typical error codes
+ * are %-EINVAL on inconsistency and %-ESRCH if the resource was doomed.
  */
 static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
-                                         struct vmw_resource *res)
+                                         struct vmw_resource *res,
+                                         u32 dirty)
 {
         struct vmw_private *dev_priv = res->dev_priv;
         int ret;
@@ -290,13 +310,17 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
 
         rcache = &sw_context->res_cache[res_type];
         if (likely(rcache->valid && rcache->res == res)) {
+                if (dirty)
+                        vmw_validation_res_set_dirty(sw_context->ctx,
+                                                     rcache->private, dirty);
                 vmw_user_resource_noref_release();
                 return 0;
         }
 
         priv_size = vmw_execbuf_res_size(dev_priv, res_type);
         ret = vmw_validation_add_resource(sw_context->ctx, res, priv_size,
-                                          (void **)&ctx_info, &first_usage);
+                                          dirty, (void **)&ctx_info,
+                                          &first_usage);
         vmw_user_resource_noref_release();
         if (ret)
                 return ret;
@@ -304,8 +328,10 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
         if (priv_size && first_usage) {
                 ret = vmw_cmd_ctx_first_setup(dev_priv, sw_context, res,
                                               ctx_info);
-                if (ret)
+                if (ret) {
+                        VMW_DEBUG_USER("Failed first usage context setup.\n");
                         return ret;
+                }
         }
 
         vmw_execbuf_rcache_update(rcache, res, ctx_info);
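The new @dirty argument threads a change-tracking hint through every path that can put a resource on the validation list: callers pass VMW_RES_DIRTY_SET when the command will write the resource and VMW_RES_DIRTY_NONE when it only reads it, and on a res_cache hit the flag is folded in through vmw_validation_res_set_dirty() instead of re-adding the resource. The copy-check conversions later in this diff show the intended calling pattern; excerpted:

    /* The source surface is only read, the destination will be written. */
    ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                            VMW_RES_DIRTY_NONE, user_surface_converter,
                            &cmd->body.src.sid, NULL);
    if (ret)
            return ret;

    return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
                             VMW_RES_DIRTY_SET, user_surface_converter,
                             &cmd->body.dest.sid, NULL);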
@@ -315,13 +341,16 @@ static int vmw_execbuf_res_noref_val_add(struct vmw_sw_context *sw_context,
 /**
  * vmw_execbuf_res_noctx_val_add - Add a non-context resource to the resource
  * validation list if it's not already on it
+ *
  * @sw_context: Pointer to the software context.
  * @res: Pointer to the resource.
+ * @dirty: Whether to change dirty status.
  *
  * Returns: Zero on success. Negative error code on failure.
  */
 static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
-                                         struct vmw_resource *res)
+                                         struct vmw_resource *res,
+                                         u32 dirty)
 {
         struct vmw_res_cache_entry *rcache;
         enum vmw_res_type res_type = vmw_res_type(res);
@@ -329,10 +358,15 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
         int ret;
 
         rcache = &sw_context->res_cache[res_type];
-        if (likely(rcache->valid && rcache->res == res))
+        if (likely(rcache->valid && rcache->res == res)) {
+                if (dirty)
+                        vmw_validation_res_set_dirty(sw_context->ctx,
+                                                     rcache->private, dirty);
                 return 0;
+        }
 
-        ret = vmw_validation_add_resource(sw_context->ctx, res, 0, &ptr, NULL);
+        ret = vmw_validation_add_resource(sw_context->ctx, res, 0, dirty,
+                                          &ptr, NULL);
         if (ret)
                 return ret;
@@ -342,8 +376,8 @@ static int vmw_execbuf_res_noctx_val_add(struct vmw_sw_context *sw_context,
 }
 
 /**
- * vmw_view_res_val_add - Add a view and the surface it's pointing to
- * to the validation list
+ * vmw_view_res_val_add - Add a view and the surface it's pointing to to the
+ * validation list
 *
 * @sw_context: The software context holding the validation list.
 * @view: Pointer to the view resource.
@@ -356,27 +390,29 @@ static int vmw_view_res_val_add(struct vmw_sw_context *sw_context,
         int ret;
 
         /*
-         * First add the resource the view is pointing to, otherwise
-         * it may be swapped out when the view is validated.
+         * First add the resource the view is pointing to, otherwise it may be
+         * swapped out when the view is validated.
          */
-        ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view));
+        ret = vmw_execbuf_res_noctx_val_add(sw_context, vmw_view_srf(view),
+                                            vmw_view_dirtying(view));
         if (ret)
                 return ret;
 
-        return vmw_execbuf_res_noctx_val_add(sw_context, view);
+        return vmw_execbuf_res_noctx_val_add(sw_context, view,
+                                             VMW_RES_DIRTY_NONE);
 }
 
 /**
- * vmw_view_id_val_add - Look up a view and add it and the surface it's
- * pointing to to the validation list.
+ * vmw_view_id_val_add - Look up a view and add it and the surface it's pointing
+ * to to the validation list.
 *
 * @sw_context: The software context holding the validation list.
 * @view_type: The view type to look up.
 * @id: view id of the view.
 *
- * The view is represented by a view id and the DX context it's created on,
- * or scheduled for creation on. If there is no DX context set, the function
- * will return an -EINVAL error pointer.
+ * The view is represented by a view id and the DX context it's created on, or
+ * scheduled for creation on. If there is no DX context set, the function will
+ * return an -EINVAL error pointer.
 *
 * Returns: Unreferenced pointer to the resource on success, negative error
 * pointer on failure.
@@ -389,10 +425,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
         struct vmw_resource *view;
         int ret;
 
-        if (!ctx_node) {
-                DRM_ERROR("DX Context not set.\n");
+        if (!ctx_node)
                 return ERR_PTR(-EINVAL);
-        }
 
         view = vmw_view_lookup(sw_context->man, view_type, id);
         if (IS_ERR(view))
@@ -413,8 +447,8 @@ vmw_view_id_val_add(struct vmw_sw_context *sw_context,
 * @sw_context: Pointer to a software context used for this command submission
 * @ctx: Pointer to the context resource
 *
- * This function puts all resources that were previously bound to @ctx on
- * the resource validation list. This is part of the context state reemission
+ * This function puts all resources that were previously bound to @ctx on the
+ * resource validation list. This is part of the context state reemission
 */
 static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
@@ -433,13 +467,13 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                 if (IS_ERR(res))
                         continue;
 
-                ret = vmw_execbuf_res_noctx_val_add(sw_context, res);
+                ret = vmw_execbuf_res_noctx_val_add(sw_context, res,
+                                                    VMW_RES_DIRTY_SET);
                 if (unlikely(ret != 0))
                         return ret;
         }
         }
 
-
         /* Add all resources bound to the context to the validation list */
         mutex_lock(&dev_priv->binding_mutex);
         binding_list = vmw_context_binding_list(ctx);
@@ -448,8 +482,9 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
                 if (vmw_res_type(entry->res) == vmw_res_view)
                         ret = vmw_view_res_val_add(sw_context, entry->res);
                 else
-                        ret = vmw_execbuf_res_noctx_val_add(sw_context,
-                                                            entry->res);
+                        ret = vmw_execbuf_res_noctx_val_add
+                                (sw_context, entry->res,
+                                 vmw_binding_dirtying(entry->bt));
                 if (unlikely(ret != 0))
                         break;
         }
@@ -472,8 +507,8 @@ static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
 *
 * @list: Pointer to head of relocation list.
 * @res: The resource.
- * @offset: Offset into the command buffer currently being parsed where the
- * id that needs fixup is located. Granularity is one byte.
+ * @offset: Offset into the command buffer currently being parsed where the id
+ * that needs fixup is located. Granularity is one byte.
 * @rel_type: Relocation type.
 */
 static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
@@ -486,7 +521,7 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
 
         rel = vmw_validation_mem_alloc(sw_context->ctx, sizeof(*rel));
         if (unlikely(!rel)) {
-                DRM_ERROR("Failed to allocate a resource relocation.\n");
+                VMW_DEBUG_USER("Failed to allocate a resource relocation.\n");
                 return -ENOMEM;
         }
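For orientation, vmw_resource_relocation_add() above only records where in the command buffer an id lives; the patching happens after validation. A simplified stand-in for what applying the list amounts to — not this patch's code, with field names taken from struct vmw_resource_relocation and the relocation types documented earlier, and error handling elided:

    static void sketch_relocations_apply(u32 *cb, struct list_head *list)
    {
            struct vmw_resource_relocation *rel;

            list_for_each_entry(rel, list, head) {
                    /* @offset has byte granularity within the command buffer. */
                    u32 *loc = (u32 *)((u8 *)cb + rel->offset);

                    switch (rel->rel_type) {
                    case vmw_res_rel_normal:
                            *loc = rel->res->id;    /* patch in the validated id */
                            break;
                    case vmw_res_rel_nop:
                            *loc = SVGA_3D_CMD_NOP; /* unconditionally NOP out */
                            break;
                    default: /* vmw_res_rel_cond_nop */
                            if (rel->res->id == -1)
                                    *loc = SVGA_3D_CMD_NOP;
                            break;
                    }
            }
    }

Because only ids are patched, the buffer being patched need not be the one that was parsed — exactly the property the vmw_resource_relocations_apply() kdoc below calls out.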
@@ -506,17 +541,15 @@ static int vmw_resource_relocation_add(struct vmw_sw_context *sw_context,
 static void vmw_resource_relocations_free(struct list_head *list)
 {
         /* Memory is validation context memory, so no need to free it */
-
         INIT_LIST_HEAD(list);
 }
 
 /**
 * vmw_resource_relocations_apply - Apply all relocations on a list
 *
- * @cb: Pointer to the start of the command buffer bein patch. This need
- * not be the same buffer as the one being parsed when the relocation
- * list was built, but the contents must be the same modulo the
- * resource ids.
+ * @cb: Pointer to the start of the command buffer bein patch. This need not be
+ * the same buffer as the one being parsed when the relocation list was built,
+ * but the contents must be the same modulo the resource ids.
 * @list: Pointer to the head of the relocation list.
 */
 static void vmw_resource_relocations_apply(uint32_t *cb,
@@ -560,14 +593,14 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_resources_reserve - Reserve all resources on the sw_context's
- * resource list.
+ * vmw_resources_reserve - Reserve all resources on the sw_context's resource
+ * list.
 *
 * @sw_context: Pointer to the software context.
 *
- * Note that since vmware's command submission currently is protected by
- * the cmdbuf mutex, no fancy deadlock avoidance is required for resources,
- * since only a single thread at once will attempt this.
+ * Note that since vmware's command submission currently is protected by the
+ * cmdbuf mutex, no fancy deadlock avoidance is required for resources, since
+ * only a single thread at once will attempt this.
 */
 static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 {
@@ -592,22 +625,24 @@ static int vmw_resources_reserve(struct vmw_sw_context *sw_context)
 }
 
 /**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
- * on the resource validate list unless it's already there.
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it on the
+ * resource validate list unless it's already there.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: Pointer to the software context.
 * @res_type: Resource type.
+ * @dirty: Whether to change dirty status.
 * @converter: User-space visisble type specific information.
- * @id_loc: Pointer to the location in the command buffer currently being
- * parsed from where the user-space resource id handle is located.
- * @p_val: Pointer to pointer to resource validalidation node. Populated
- * on exit.
+ * @id_loc: Pointer to the location in the command buffer currently being parsed
+ * from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validalidation node. Populated on
+ * exit.
 */
 static int
 vmw_cmd_res_check(struct vmw_private *dev_priv,
                   struct vmw_sw_context *sw_context,
                   enum vmw_res_type res_type,
+                  u32 dirty,
                   const struct vmw_user_resource_conv *converter,
                   uint32_t *id_loc,
                   struct vmw_resource **p_res)
@@ -621,7 +656,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 
         if (*id_loc == SVGA3D_INVALID_ID) {
                 if (res_type == vmw_res_context) {
-                        DRM_ERROR("Illegal context invalid id.\n");
+                        VMW_DEBUG_USER("Illegal context invalid id.\n");
                         return -EINVAL;
                 }
                 return 0;
@@ -629,6 +664,9 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
 
         if (likely(rcache->valid_handle && *id_loc == rcache->handle)) {
                 res = rcache->res;
+                if (dirty)
+                        vmw_validation_res_set_dirty(sw_context->ctx,
+                                                     rcache->private, dirty);
         } else {
                 unsigned int size = vmw_execbuf_res_size(dev_priv, res_type);
 
@@ -638,13 +676,13 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
                 res = vmw_user_resource_noref_lookup_handle
                         (dev_priv, sw_context->fp->tfile, *id_loc, converter);
-                if (unlikely(IS_ERR(res))) {
-                        DRM_ERROR("Could not find or use resource 0x%08x.\n",
-                                  (unsigned int) *id_loc);
+                if (IS_ERR(res)) {
+                        VMW_DEBUG_USER("Could not find/use resource 0x%08x.\n",
+                                       (unsigned int) *id_loc);
                         return PTR_ERR(res);
                 }
 
-                ret = vmw_execbuf_res_noref_val_add(sw_context, res);
+                ret = vmw_execbuf_res_noref_val_add(sw_context, res, dirty);
                 if (unlikely(ret != 0))
                         return ret;
 
@@ -675,23 +713,16 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 {
         struct vmw_private *dev_priv = ctx_res->dev_priv;
         struct vmw_buffer_object *dx_query_mob;
-        struct {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdDXBindAllQuery body;
-        } *cmd;
-
+        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBindAllQuery);
 
         dx_query_mob = vmw_context_get_dx_query_mob(ctx_res);
         if (!dx_query_mob || dx_query_mob->dx_query_ctx)
                 return 0;
 
-        cmd = vmw_fifo_reserve_dx(dev_priv, sizeof(*cmd), ctx_res->id);
-
-        if (cmd == NULL) {
-                DRM_ERROR("Failed to rebind queries.\n");
+        cmd = VMW_FIFO_RESERVE_DX(dev_priv, sizeof(*cmd), ctx_res->id);
+        if (cmd == NULL)
                 return -ENOMEM;
-        }
 
         cmd->header.id = SVGA_3D_CMD_DX_BIND_ALL_QUERY;
         cmd->header.size = sizeof(cmd->body);
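The VMW_FIFO_RESERVE_DX() call above replaces open-coded vmw_fifo_reserve_dx() plus a per-call-site DRM_ERROR(). The macro itself is not defined in the hunks shown here; judging only by how the call sites change, a plausible shape — an assumption, not the patch's actual definition — is a statement-expression wrapper that logs the failure once, centrally:

    /* Hypothetical reconstruction; the real definition lives outside this diff. */
    #define VMW_FIFO_RESERVE_DX(__priv, __bytes, __ctx_id) \
    ({ \
            void *__cmd = vmw_fifo_reserve_dx(__priv, __bytes, __ctx_id); \
            if (!__cmd) \
                    VMW_DEBUG_USER("FIFO reserve failed at %s for %u bytes\n", \
                                   __func__, (unsigned int) (__bytes)); \
            __cmd; \
    })

That keeps the user-visible diagnostics while letting every verifier shrink to a bare NULL check, as in vmw_rebind_all_dx_query() above.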
@@ -705,8 +736,8 @@ static int vmw_rebind_all_dx_query(struct vmw_resource *ctx_res)
 }
 
 /**
- * vmw_rebind_contexts - Rebind all resources previously bound to
- * referenced contexts.
+ * vmw_rebind_contexts - Rebind all resources previously bound to referenced
+ * contexts.
 *
 * @sw_context: Pointer to the software context.
 *
@@ -721,21 +752,23 @@ static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
                 ret = vmw_binding_rebind_all(val->cur);
                 if (unlikely(ret != 0)) {
                         if (ret != -ERESTARTSYS)
-                                DRM_ERROR("Failed to rebind context.\n");
+                                VMW_DEBUG_USER("Failed to rebind context.\n");
                         return ret;
                 }
 
                 ret = vmw_rebind_all_dx_query(val->ctx);
-                if (ret != 0)
+                if (ret != 0) {
+                        VMW_DEBUG_USER("Failed to rebind queries.\n");
                         return ret;
+                }
         }
 
         return 0;
 }
 
 /**
- * vmw_view_bindings_add - Add an array of view bindings to a context
- * binding state tracker.
+ * vmw_view_bindings_add - Add an array of view bindings to a context binding
+ * state tracker.
 *
 * @sw_context: The execbuf state used for this command.
 * @view_type: View type for the bindings.
@@ -752,13 +785,11 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
                                  uint32 view_ids[], u32 num_views,
                                  u32 first_slot)
 {
-        struct vmw_ctx_validation_info *ctx_node = sw_context->dx_ctx_node;
+        struct vmw_ctx_validation_info *ctx_node = VMW_GET_CTX_NODE(sw_context);
         u32 i;
 
-        if (!ctx_node) {
-                DRM_ERROR("DX Context not set.\n");
+        if (!ctx_node)
                 return -EINVAL;
-        }
 
         for (i = 0; i < num_views; ++i) {
                 struct vmw_ctx_bindinfo_view binding;
@@ -768,7 +799,7 @@ static int vmw_view_bindings_add(struct vmw_sw_context *sw_context,
 
                 view = vmw_view_id_val_add(sw_context, view_type, view_ids[i]);
                 if (IS_ERR(view)) {
-                        DRM_ERROR("View not found.\n");
+                        VMW_DEBUG_USER("View not found.\n");
                         return PTR_ERR(view);
                 }
         }
@@ -798,19 +829,18 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
 {
-        struct vmw_cid_cmd {
-                SVGA3dCmdHeader header;
-                uint32_t cid;
-        } *cmd;
+        VMW_DECLARE_CMD_VAR(*cmd, uint32_t) =
+                container_of(header, typeof(*cmd), header);
 
-        cmd = container_of(header, struct vmw_cid_cmd, header);
         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
-                                 user_context_converter, &cmd->cid, NULL);
+                                 VMW_RES_DIRTY_SET, user_context_converter,
+                                 &cmd->body, NULL);
 }
 
 /**
 * vmw_execbuf_info_from_res - Get the private validation metadata for a
 * recently validated resource
+ *
 * @sw_context: Pointer to the command submission context
 * @res: The resource
 *
 * The metadata is validation metadata, and the resource must be in the
 * context's resource cache and hence the last resource of that type to be
 * processed by the validation code.
 *
- * Return: a pointer to the private metadata of the resource, or NULL
- * if it wasn't found
+ * Return: a pointer to the private metadata of the resource, or NULL if it
+ * wasn't found
 */
 static struct vmw_ctx_validation_info *
 vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
@@ -835,36 +865,32 @@ vmw_execbuf_info_from_res(struct vmw_sw_context *sw_context,
 
         return NULL;
 }
 
-
 static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                            struct vmw_sw_context *sw_context,
                                            SVGA3dCmdHeader *header)
 {
-        struct vmw_sid_cmd {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdSetRenderTarget body;
-        } *cmd;
+        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSetRenderTarget);
         struct vmw_resource *ctx;
         struct vmw_resource *res;
         int ret;
 
-        cmd = container_of(header, struct vmw_sid_cmd, header);
+        cmd = container_of(header, typeof(*cmd), header);
 
         if (cmd->body.type >= SVGA3D_RT_MAX) {
-                DRM_ERROR("Illegal render target type %u.\n",
-                          (unsigned) cmd->body.type);
+                VMW_DEBUG_USER("Illegal render target type %u.\n",
+                               (unsigned int) cmd->body.type);
                 return -EINVAL;
         }
 
         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
-                                user_context_converter, &cmd->body.cid,
-                                &ctx);
+                                VMW_RES_DIRTY_SET, user_context_converter,
+                                &cmd->body.cid, &ctx);
         if (unlikely(ret != 0))
                 return ret;
 
         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                user_surface_converter, &cmd->body.target.sid,
-                                &res);
+                                VMW_RES_DIRTY_SET, user_surface_converter,
+                                &cmd->body.target.sid, &res);
         if (unlikely(ret))
                 return ret;
@@ -890,44 +916,38 @@ static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                       struct vmw_sw_context *sw_context,
                                       SVGA3dCmdHeader *header)
 {
-        struct vmw_sid_cmd {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdSurfaceCopy body;
-        } *cmd;
+        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceCopy);
         int ret;
 
-        cmd = container_of(header, struct vmw_sid_cmd, header);
+        cmd = container_of(header, typeof(*cmd), header);
 
         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                user_surface_converter,
-                                &cmd->body.src.sid, NULL);
+                                VMW_RES_DIRTY_NONE, user_surface_converter,
+                                &cmd->body.src.sid, NULL);
         if (ret)
                 return ret;
 
         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                 user_surface_converter,
+                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                  &cmd->body.dest.sid, NULL);
 }
 
 static int vmw_cmd_buffer_copy_check(struct vmw_private *dev_priv,
-                                      struct vmw_sw_context *sw_context,
-                                      SVGA3dCmdHeader *header)
+                                     struct vmw_sw_context *sw_context,
+                                     SVGA3dCmdHeader *header)
 {
-        struct {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdDXBufferCopy body;
-        } *cmd;
+        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXBufferCopy);
         int ret;
 
         cmd = container_of(header, typeof(*cmd), header);
         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                user_surface_converter,
+                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.src, NULL);
         if (ret != 0)
                 return ret;
 
         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                 user_surface_converter,
+                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                  &cmd->body.dest, NULL);
 }
 
@@ -935,21 +955,18 @@ static int vmw_cmd_pred_copy_check(struct vmw_private *dev_priv,
                                    struct vmw_sw_context *sw_context,
                                    SVGA3dCmdHeader *header)
 {
-        struct {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdDXPredCopyRegion body;
-        } *cmd;
+        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXPredCopyRegion);
         int ret;
 
         cmd = container_of(header, typeof(*cmd), header);
         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                user_surface_converter,
+                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.srcSid, NULL);
         if (ret != 0)
                 return ret;
 
         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                 user_surface_converter,
+                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                  &cmd->body.dstSid, NULL);
 }
 
@@ -957,20 +974,18 @@ static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
 {
-        struct vmw_sid_cmd {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdSurfaceStretchBlt body;
-        } *cmd;
+        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdSurfaceStretchBlt);
         int ret;
 
-        cmd = container_of(header, struct vmw_sid_cmd, header);
+        cmd = container_of(header, typeof(*cmd), header);
         ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                user_surface_converter,
+                                VMW_RES_DIRTY_NONE, user_surface_converter,
                                 &cmd->body.src.sid, NULL);
         if (unlikely(ret != 0))
                 return ret;
+
         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                 user_surface_converter,
+                                 VMW_RES_DIRTY_SET, user_surface_converter,
                                  &cmd->body.dest.sid, NULL);
 }
 
@@ -978,15 +993,11 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                          struct vmw_sw_context *sw_context,
                                          SVGA3dCmdHeader *header)
 {
-        struct vmw_sid_cmd {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdBlitSurfaceToScreen body;
-        } *cmd;
-
-        cmd = container_of(header, struct vmw_sid_cmd, header);
+        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdBlitSurfaceToScreen) =
+                container_of(header, typeof(*cmd), header);
 
         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                 user_surface_converter,
+                                 VMW_RES_DIRTY_NONE, user_surface_converter,
                                  &cmd->body.srcImage.sid, NULL);
 }
 
@@ -994,17 +1005,12 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
                                  SVGA3dCmdHeader *header)
 {
-        struct vmw_sid_cmd {
-                SVGA3dCmdHeader header;
-                SVGA3dCmdPresent body;
-        } *cmd;
-
-
-        cmd = container_of(header, struct vmw_sid_cmd, header);
+        VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdPresent) =
+                container_of(header, typeof(*cmd), header);
 
         return vmw_cmd_res_check(dev_priv, sw_context, vmw_res_surface,
-                                 user_surface_converter, &cmd->body.sid,
-                                 NULL);
+                                 VMW_RES_DIRTY_NONE, user_surface_converter,
+                                 &cmd->body.sid, NULL);
 }
 
 /**
@@ -1014,11 +1020,10 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 * @new_query_bo: The new buffer holding query results.
 * @sw_context: The software context used for this command submission.
 *
- * This function checks whether @new_query_bo is suitable for holding
- * query results, and if another buffer currently is pinned for query
- * results. If so, the function prepares the state of @sw_context for
- * switching pinned buffers after successful submission of the current
- * command batch.
+ * This function checks whether @new_query_bo is suitable for holding query
+ * results, and if another buffer currently is pinned for query results. If so,
+ * the function prepares the state of @sw_context for switching pinned buffers
+ * after successful submission of the current command batch.
 */
 static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                        struct vmw_buffer_object *new_query_bo,
@@ -1034,7 +1039,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 
         if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
                 if (unlikely(new_query_bo->base.num_pages > 4)) {
-                        DRM_ERROR("Query buffer too large.\n");
+                        VMW_DEBUG_USER("Query buffer too large.\n");
                         return -EINVAL;
                 }
 
@@ -1053,13 +1058,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
                                                   dev_priv->has_mob, false);
                 if (unlikely(ret != 0))
                         return ret;
-
         }
 
         return 0;
 }
 
-
 /**
 * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
 *
@@ -1068,11 +1071,11 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
 *
 * This function will check if we're switching query buffers, and will then,
 * issue a dummy occlusion query wait used as a query barrier. When the fence
- * object following that query wait has signaled, we are sure that all
- * preceding queries have finished, and the old query buffer can be unpinned.
- * However, since both the new query buffer and the old one are fenced with
- * that fence, we can do an asynchronus unpin now, and be sure that the
- * old query buffer won't be moved until the fence has signaled.
+ * object following that query wait has signaled, we are sure that all preceding
+ * queries have finished, and the old query buffer can be unpinned. However,
+ * since both the new query buffer and the old one are fenced with that fence,
+ * we can do an asynchronus unpin now, and be sure that the old query buffer
+ * won't be moved until the fence has signaled.
 *
 * As mentioned above, both the new - and old query buffers need to be fenced
 * using a sequence emitted *after* calling this function.
@@ -1084,7 +1087,6 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
          * The validate list should still hold references to all
          * contexts here.
          */
-
         if (sw_context->needs_post_query_barrier) {
                 struct vmw_res_cache_entry *ctx_entry =
                         &sw_context->res_cache[vmw_res_context];
@@ -1097,7 +1099,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 
                 ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
                 if (unlikely(ret != 0))
-                        DRM_ERROR("Out of fifo space for dummy query.\n");
+                        VMW_DEBUG_USER("Out of fifo space for dummy query.\n");
         }
 
         if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
@@ -1111,10 +1113,9 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 
                 /*
                  * We pin also the dummy_query_bo buffer so that we
-                 * don't need to validate it when emitting
-                 * dummy queries in context destroy paths.
+                 * don't need to validate it when emitting dummy queries
+                 * in context destroy paths.
                  */
-
                 if (!dev_priv->dummy_query_bo_pinned) {
                         vmw_bo_pin_reserved(dev_priv->dummy_query_bo,
                                             true);
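Distilled from the fragments above and the vmw_query_bo_switch_commit() documentation, the switch boils down to: emit a dummy query in the last context that had queries active (the barrier), then hand the pin over. A sketch under those assumptions — names as in the hunks above, but locking, fencing, reference counting and error paths are elided, so treat it as an illustration rather than the function body:

    /* Barrier: orders the buffer switch behind all in-flight queries. */
    if (sw_context->needs_post_query_barrier)
            (void) vmw_fifo_emit_dummy_query(dev_priv, ctx->id);

    if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
            if (dev_priv->pinned_bo)
                    /* Asynchronous unpin; the shared fence keeps it resident. */
                    vmw_bo_pin_reserved(dev_priv->pinned_bo, false);
            dev_priv->pinned_bo = sw_context->cur_query_bo;
    }

Both buffers must then be fenced by a sequence emitted after this point, which is why the kdoc insists the fence follow the call.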
@@ -1131,22 +1132,24 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_translate_mob_pointer - Prepare to translate a user-space buffer
- * handle to a MOB id.
+ * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
+ * to a MOB id.
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @id: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a non-reference-counted pointer to the buffer object identified by the
+ * @vmw_bo_p: Points to a location that, on successful return will carry a
+ * non-reference-counted pointer to the buffer object identified by the
 * user-space handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a MOB id. The translation does not take place immediately, but
- * during a call to vmw_apply_relocations(). This function builds a relocation
- * list and a list of buffers to validate. The former needs to be freed using
- * either vmw_apply_relocations() or vmw_free_relocations(). The latter
- * needs to be freed using vmw_clear_validations.
+ * during a call to vmw_apply_relocations().
+ *
+ * This function builds a relocation list and a list of buffers to validate. The
+ * former needs to be freed using either vmw_apply_relocations() or
+ * vmw_free_relocations(). The latter needs to be freed using
+ * vmw_clear_validations.
 */
 static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
                                  struct vmw_sw_context *sw_context,
@@ -1161,7 +1164,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
         vmw_validation_preload_bo(sw_context->ctx);
         vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
         if (IS_ERR(vmw_bo)) {
-                DRM_ERROR("Could not find or use MOB buffer.\n");
+                VMW_DEBUG_USER("Could not find or use MOB buffer.\n");
                 return PTR_ERR(vmw_bo);
         }
@@ -1184,19 +1187,20 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
 }
 
 /**
- * vmw_translate_guest_pointer - Prepare to translate a user-space buffer
- * handle to a valid SVGAGuestPtr
+ * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
+ * to a valid SVGAGuestPtr
 *
 * @dev_priv: Pointer to a device private structure.
 * @sw_context: The software context used for this command batch validation.
 * @ptr: Pointer to the user-space handle to be translated.
- * @vmw_bo_p: Points to a location that, on successful return will carry
- * a non-reference-counted pointer to the DMA buffer identified by the
- * user-space handle in @id.
+ * @vmw_bo_p: Points to a location that, on successful return will carry a
+ * non-reference-counted pointer to the DMA buffer identified by the user-space
+ * handle in @id.
 *
 * This function saves information needed to translate a user-space buffer
 * handle to a valid SVGAGuestPtr. The translation does not take place
 * immediately, but during a call to vmw_apply_relocations().
+ *
 * This function builds a relocation list and a list of buffers to validate.
 * The former needs to be freed using either vmw_apply_relocations() or
 * vmw_free_relocations(). The latter needs to be freed using
@@ -1215,7 +1219,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
         vmw_validation_preload_bo(sw_context->ctx);
         vmw_bo = vmw_user_bo_noref_lookup(sw_context->fp->tfile, handle);
         if (IS_ERR(vmw_bo)) {
-                DRM_ERROR("Could not find or use GMR region.\n");
+                VMW_DEBUG_USER("Could not find or use GMR region.\n");
                 return PTR_ERR(vmw_bo);
         }
@@ -1236,10 +1240,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
         return 0;
 }
 
-
-
 /**
- * vmw_cmd_dx_define_query - validate a SVGA_3D_CMD_DX_DEFINE_QUERY command.
+ * vmw_cmd_dx_define_query - validate SVGA_3D_CMD_DX_DEFINE_QUERY command.
