Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_execbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  2788
1 file changed, 0 insertions, 2788 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
deleted file mode 100644
index 699f3f180d8a..000000000000
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ /dev/null
@@ -1,2788 +0,0 @@
-/*
- * Copyright © 2008,2010 Intel Corporation
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- * Eric Anholt <eric@anholt.net>
- * Chris Wilson <chris@chris-wilson.co.uk>
- *
- */
-
-#include <linux/intel-iommu.h>
-#include <linux/reservation.h>
-#include <linux/sync_file.h>
-#include <linux/uaccess.h>
-
-#include <drm/drm_syncobj.h>
-#include <drm/i915_drm.h>
-
-#include "gem/i915_gem_ioctls.h"
-#include "gt/intel_gt_pm.h"
-
-#include "i915_drv.h"
-#include "i915_gem_clflush.h"
-#include "i915_trace.h"
-#include "intel_drv.h"
-#include "intel_frontbuffer.h"
-
-enum {
- FORCE_CPU_RELOC = 1,
- FORCE_GTT_RELOC,
- FORCE_GPU_RELOC,
-#define DBG_FORCE_RELOC 0 /* choose one of the above! */
-};
-
-#define __EXEC_OBJECT_HAS_REF BIT(31)
-#define __EXEC_OBJECT_HAS_PIN BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
-#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
-
-#define __EXEC_HAS_RELOC BIT(31)
-#define __EXEC_VALIDATED BIT(30)
-#define __EXEC_INTERNAL_FLAGS (~0u << 30)
-#define UPDATE PIN_OFFSET_FIXED
-
-#define BATCH_OFFSET_BIAS (256*1024)
-
-#define __I915_EXEC_ILLEGAL_FLAGS \
- (__I915_EXEC_UNKNOWN_FLAGS | \
- I915_EXEC_CONSTANTS_MASK | \
- I915_EXEC_RESOURCE_STREAMER)
-
-/* Catch emission of unexpected errors for CI! */
-#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
-#undef EINVAL
-#define EINVAL ({ \
- DRM_DEBUG_DRIVER("EINVAL at %s:%d\n", __func__, __LINE__); \
- 22; \
-})
-#endif
-
-/**
- * DOC: User command execution
- *
- * Userspace submits commands to be executed on the GPU as an instruction
- * stream within a GEM object we call a batchbuffer. These instructions may
- * refer to other GEM objects containing auxiliary state such as kernels,
- * samplers, render targets and even secondary batchbuffers. Userspace does
- * not know where in the GPU memory these objects reside and so before the
- * batchbuffer is passed to the GPU for execution, those addresses in the
- * batchbuffer and auxiliary objects are updated. This is known as relocation,
- * or patching. To try and avoid having to relocate each object on the next
- * execution, userspace is told the location of those objects in this pass,
- * but this remains just a hint as the kernel may choose a new location for
- * any object in the future.
- *
- * At the level of talking to the hardware, submitting a batchbuffer for the
- * GPU to execute amounts to adding content to a buffer from which the HW
- * command streamer is reading.
- *
- * 1. Add a command to load the HW context. For Logical Ring Contexts, i.e.
- * Execlists, this command is not placed on the same buffer as the
- * remaining items.
- *
- * 2. Add a command to invalidate caches to the buffer.
- *
- * 3. Add a batchbuffer start command to the buffer; the start command is
- * essentially a token together with the GPU address of the batchbuffer
- * to be executed.
- *
- * 4. Add a pipeline flush to the buffer.
- *
- * 5. Add a memory write command to the buffer to record when the GPU
- * is done executing the batchbuffer. This write records the
- * global sequence number of the request, ``i915_request::global_seqno``;
- * the i915 driver uses the current value in the register to determine
- * if the GPU has completed the batchbuffer.
- *
- * 6. Add a user interrupt command to the buffer. This command instructs
- * the GPU to issue an interrupt when the command, pipeline flush and
- * memory write are completed.
- *
- * 7. Inform the hardware of the additional commands added to the buffer
- * (by updating the tail pointer).
- *
- * Processing an execbuf ioctl is conceptually split up into a few phases.
- *
- * 1. Validation - Ensure all the pointers, handles and flags are valid.
- * 2. Reservation - Assign GPU address space for every object
- * 3. Relocation - Update any addresses to point to the final locations
- * 4. Serialisation - Order the request with respect to its dependencies
- * 5. Construction - Construct a request to execute the batchbuffer
- * 6. Submission (at some point in the future execution)
- *
- * Reserving resources for the execbuf is the most complicated phase. We
- * neither want to have to migrate the object in the address space, nor do
- * we want to have to update any relocations pointing to this object. Ideally,
- * we want to leave the object where it is and for all the existing relocations
- * to match. If the object is given a new address, or if userspace thinks the
- * object is elsewhere, we have to parse all the relocation entries and update
- * the addresses. Userspace can set the I915_EXEC_NO_RELOC flag to hint that
- * all the target addresses in all of its objects match the value in the
- * relocation entries and that they all match the presumed offsets given by the
- * list of execbuffer objects. Using this knowledge, we know that if we haven't
- * moved any buffers, all the relocation entries are valid and we can skip
- * the update. (If userspace is wrong, the likely outcome is an impromptu GPU
- * hang.) The requirements for using I915_EXEC_NO_RELOC are:
- *
- * The addresses written in the objects must match the corresponding
- * reloc.presumed_offset which in turn must match the corresponding
- * execobject.offset.
- *
- * Any render targets written to in the batch must be flagged with
- * EXEC_OBJECT_WRITE.
- *
- * To avoid stalling, execobject.offset should match the current
- * address of that object within the active context.
- *
- * The reservation is done in multiple phases. First we try to keep any
- * object already bound in its current location, so long as it meets the
- * constraints imposed by the new execbuffer. Any object left unbound after the
- * first pass is then fitted into any available idle space. If an object does
- * not fit, all objects are removed from the reservation and the process rerun
- * after sorting the objects into a priority order (more difficult to fit
- * objects are tried first). Failing that, the entire VM is cleared and we try
- * to fit the execbuf one last time before concluding that it simply will not
- * fit.
- *
- * A small complication to all of this is that we allow userspace not only to
- * specify an alignment and a size for the object in the address space, but
- * we also allow userspace to specify the exact offset. Such objects are
- * simpler to place (the location is known a priori); all we have to do is make
- * sure the space is available.
- *
- * Once all the objects are in place, patching up the buried pointers to point
- * to the final locations is a fairly simple job of walking over the relocation
- * entry arrays, looking up the right address and rewriting the value into
- * the object. Simple! ... The relocation entries are stored in user memory
- * and so to access them we have to copy them into a local buffer. That copy
- * has to avoid taking any pagefaults as they may lead back to a GEM object
- * requiring the struct_mutex (i.e. recursive deadlock). So once again we split
- * the relocation into multiple passes. First we try to do everything within an
- * atomic context (avoid the pagefaults) which requires that we never wait. If
- * we detect that we may wait, or if we need to fault, then we have to fallback
- * to a slower path. The slowpath has to drop the mutex. (Can you hear alarm
- * bells yet?) Dropping the mutex means that we lose all the state we have
- * built up so far for the execbuf and we must reset any global data. However,
- * we do leave the objects pinned in their final locations - which is a
- * potential issue for concurrent execbufs. Once we have left the mutex, we can
- * allocate and copy all the relocation entries into a large array at our
- * leisure, reacquire the mutex, reclaim all the objects and other state and
- * then proceed to update any incorrect addresses with the objects.
- *
- * As we process the relocation entries, we maintain a record of whether the
- * object is being written to. Using NO_RELOC, we expect userspace to provide
- * this information instead. We also check whether we can skip the relocation
- * by comparing the expected value inside the relocation entry with the target's
- * final address. If they differ, we have to map the current object and rewrite
- * the 4 or 8 byte pointer within.
- *
- * Serialising an execbuf is quite simple according to the rules of the GEM
- * ABI. Execution within each context is ordered by the order of submission.
- * Writes to any GEM object are in order of submission and are exclusive. Reads
- * from a GEM object are unordered with respect to other reads, but ordered by
- * writes. A write submitted after a read cannot occur before the read, and
- * similarly any read submitted after a write cannot occur before the write.
- * Writes are ordered between engines such that only one write occurs at any
- * time (completing any reads beforehand) - using semaphores where available
- * and CPU serialisation otherwise. Other GEM accesses obey the same rules: any
- * write (either via mmaps using set-domain, or via pwrite) must flush all GPU
- * reads before starting, and any read (either using set-domain or pread) must
- * flush all GPU writes before starting. (Note we only employ a barrier before;
- * we currently rely on userspace not concurrently starting a new execution
- * whilst reading or writing to an object. This may be an advantage or not
- * depending on how much you trust userspace not to shoot themselves in the
- * foot.) Serialisation may just result in the request being inserted into
- * a DAG awaiting its turn, but the simplest approach is to wait on the CPU until
- * all dependencies are resolved.
- *
- * After all of that, it is just a matter of closing the request and handing it to
- * the hardware (well, leaving it in a queue to be executed). However, we also
- * offer the ability for batchbuffers to be run with elevated privileges so
- * that they access otherwise hidden registers. (Used to adjust L3 cache etc.)
- * Before any batch is given extra privileges we first must check that it
- * contains no nefarious instructions: we check that each instruction is from
- * our whitelist and that all registers are also from an allowed list. We first
- * copy the user's batchbuffer to a shadow (so that the user doesn't have
- * access to it, either by the CPU or GPU as we scan it) and then parse each
- * instruction. If everything is ok, we set a flag telling the hardware to run
- * the batchbuffer in trusted mode, otherwise the ioctl is rejected.
- */
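
(For orientation, the sketch below shows roughly how userspace might drive this ioctl with I915_EXEC_NO_RELOC, using the uapi structures from <drm/i915_drm.h>. The GEM handle, context id, presumed offset and batch length are placeholders and all error handling is trimmed; treat it as an illustration of the contract described above, not as canonical libdrm usage.)

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Illustrative only: submit one batch, promising the kernel that every
     * address written inside the objects already matches execobject.offset. */
    static int submit_batch(int drm_fd, uint32_t batch_handle,
                            uint64_t presumed_offset, uint32_t ctx_id,
                            uint32_t batch_len)
    {
            struct drm_i915_gem_exec_object2 obj;
            struct drm_i915_gem_execbuffer2 execbuf;

            memset(&obj, 0, sizeof(obj));
            obj.handle = batch_handle;
            obj.offset = presumed_offset;   /* hint returned by a previous execbuf */

            memset(&execbuf, 0, sizeof(execbuf));
            execbuf.buffers_ptr = (uintptr_t)&obj;
            execbuf.buffer_count = 1;       /* the batch is the last (and only) object */
            execbuf.batch_len = batch_len;
            execbuf.flags = I915_EXEC_RENDER | I915_EXEC_NO_RELOC | I915_EXEC_HANDLE_LUT;
            execbuf.rsvd1 = ctx_id;         /* GEM context to execute in */

            return ioctl(drm_fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
    }

On success the kernel copies any updated GPU address back into obj.offset; a well-behaved client feeds that value back in as the presumed offset of its next submission so that relocations can continue to be skipped.
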
-
-struct i915_execbuffer {
- struct drm_i915_private *i915; /** i915 backpointer */
- struct drm_file *file; /** per-file lookup tables and limits */
- struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
- struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
- struct i915_vma **vma;
- unsigned int *flags;
-
- struct intel_engine_cs *engine; /** engine to queue the request to */
- struct intel_context *context; /* logical state for the request */
- struct i915_gem_context *gem_context; /** caller's context */
- struct i915_address_space *vm; /** GTT and vma for the request */
-
- struct i915_request *request; /** our request to build */
- struct i915_vma *batch; /** identity of the batch obj/vma */
-
- /** actual size of execobj[] as we may extend it for the cmdparser */
- unsigned int buffer_count;
-
- /** list of vma not yet bound during reservation phase */
- struct list_head unbound;
-
- /** list of vma that have execobj.relocation_count */
- struct list_head relocs;
-
- /**
- * Track the most recently used object for relocations, as we
- * frequently have to perform multiple relocations within the same
- * obj/page
- */
- struct reloc_cache {
- struct drm_mm_node node; /** temporary GTT binding */
- unsigned long vaddr; /** Current kmap address */
- unsigned long page; /** Currently mapped page index */
- unsigned int gen; /** Cached value of INTEL_GEN */
- bool use_64bit_reloc : 1;
- bool has_llc : 1;
- bool has_fence : 1;
- bool needs_unfenced : 1;
-
- struct i915_request *rq;
- u32 *rq_cmd;
- unsigned int rq_size;
- } reloc_cache;
-
- u64 invalid_flags; /** Set of execobj.flags that are invalid */
- u32 context_flags; /** Set of execobj.flags to insert from the ctx */
-
- u32 batch_start_offset; /** Location within object of batch */
- u32 batch_len; /** Length of batch within object */
- u32 batch_flags; /** Flags composed for emit_bb_start() */
-
- /**
- * Indicate either the size of the hashtable used to resolve
- * relocation handles, or if negative that we are using a direct
- * index into the execobj[].
- */
- int lut_size;
- struct hlist_head *buckets; /** ht for relocation handles */
-};
-
-#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-
-/*
- * Used to convert any address to canonical form.
- * Starting from gen8, some commands (e.g. STATE_BASE_ADDRESS,
- * MI_LOAD_REGISTER_MEM and others, see Broadwell PRM Vol2a) require the
- * addresses to be in a canonical form:
- * "GraphicsAddress[63:48] are ignored by the HW and assumed to be in correct
- * canonical form [63:48] == [47]."
- */
-#define GEN8_HIGH_ADDRESS_BIT 47
-static inline u64 gen8_canonical_addr(u64 address)
-{
- return sign_extend64(address, GEN8_HIGH_ADDRESS_BIT);
-}
-
-static inline u64 gen8_noncanonical_addr(u64 address)
-{
- return address & GENMASK_ULL(GEN8_HIGH_ADDRESS_BIT, 0);
-}
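
(A concrete illustration of the canonical-form rule quoted above, using made-up addresses: an offset whose bit 47 is set, say 0x0000fffffffff000, becomes 0xfffffffffffff000 after gen8_canonical_addr(), while gen8_noncanonical_addr() strips the sign extension again and returns 0x0000fffffffff000; an offset with bit 47 clear, such as 0x00007ffffffff000, passes through both helpers unchanged.)
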
-
-static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
-{
- return intel_engine_needs_cmd_parser(eb->engine) && eb->batch_len;
-}
-
-static int eb_create(struct i915_execbuffer *eb)
-{
- if (!(eb->args->flags & I915_EXEC_HANDLE_LUT)) {
- unsigned int size = 1 + ilog2(eb->buffer_count);
-
- /*
- * Without a 1:1 association between relocation handles and
- * the execobject[] index, we instead create a hashtable.
- * We size it dynamically based on available memory, starting
- * first with a 1:1 associative hash and scaling back until
- * the allocation succeeds.
- *
- * Later on we use a positive lut_size to indicate we are
- * using this hashtable, and a negative value to indicate a
- * direct lookup.
- */
- do {
- gfp_t flags;
-
- /* While we can still reduce the allocation size, don't
- * raise a warning and allow the allocation to fail.
- * On the last pass though, we want to try as hard
- * as possible to perform the allocation and warn
- * if it fails.
- */
- flags = GFP_KERNEL;
- if (size > 1)
- flags |= __GFP_NORETRY | __GFP_NOWARN;
-
- eb->buckets = kzalloc(sizeof(struct hlist_head) << size,
- flags);
- if (eb->buckets)
- break;
- } while (--size);
-
- if (unlikely(!size))
- return -ENOMEM;
-
- eb->lut_size = size;
- } else {
- eb->lut_size = -eb->buffer_count;
- }
-
- return 0;
-}
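
(To put numbers on the sizing above: for a hypothetical execbuf of 1024 objects without I915_EXEC_HANDLE_LUT, size starts at 1 + ilog2(1024) = 11, i.e. a 2048-bucket table costing 16 KiB of hlist_heads on a 64-bit kernel, and is halved repeatedly with __GFP_NORETRY | __GFP_NOWARN down to a final two-bucket attempt with plain GFP_KERNEL; only if even that kzalloc() fails does eb_create() return -ENOMEM.)
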
-
-static bool
-eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
- const struct i915_vma *vma,
- unsigned int flags)
-{
- if (vma->node.size < entry->pad_to_size)
- return true;
-
- if (entry->alignment && !IS_ALIGNED(vma->node.start, entry->alignment))
- return true;
-
- if (flags & EXEC_OBJECT_PINNED &&
- vma->node.start != entry->offset)
- return true;
-
- if (flags & __EXEC_OBJECT_NEEDS_BIAS &&
- vma->node.start < BATCH_OFFSET_BIAS)
- return true;
-
- if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) &&
- (vma->node.start + vma->node.size - 1) >> 32)
- return true;
-
- if (flags & __EXEC_OBJECT_NEEDS_MAP &&
- !i915_vma_is_map_and_fenceable(vma))
- return true;
-
- return false;
-}
-
-static inline bool
-eb_pin_vma(struct i915_execbuffer *eb,
- const struct drm_i915_gem_exec_object2 *entry,
- struct i915_vma *vma)
-{
- unsigned int exec_flags = *vma->exec_flags;
- u64 pin_flags;
-
- if (vma->node.size)
- pin_flags = vma->node.start;
- else
- pin_flags = entry->offset & PIN_OFFSET_MASK;
-
- pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
- pin_flags |= PIN_GLOBAL;
-
- if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
- return false;
-
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
- if (unlikely(i915_vma_pin_fence(vma))) {
- i915_vma_unpin(vma);
- return false;
- }
-
- if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
- }
-
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- return !eb_vma_misplaced(entry, vma, exec_flags);
-}
-
-static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
-{
- GEM_BUG_ON(!(flags & __EXEC_OBJECT_HAS_PIN));
-
- if (unlikely(flags & __EXEC_OBJECT_HAS_FENCE))
- __i915_vma_unpin_fence(vma);
-
- __i915_vma_unpin(vma);
-}
-
-static inline void
-eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
-{
- if (!(*flags & __EXEC_OBJECT_HAS_PIN))
- return;
-
- __eb_unreserve_vma(vma, *flags);
- *flags &= ~__EXEC_OBJECT_RESERVED;
-}
-
-static int
-eb_validate_vma(struct i915_execbuffer *eb,
- struct drm_i915_gem_exec_object2 *entry,
- struct i915_vma *vma)
-{
- if (unlikely(entry->flags & eb->invalid_flags))
- return -EINVAL;
-
- if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
- return -EINVAL;
-
- /*
- * Offset can be used as input (EXEC_OBJECT_PINNED), reject
- * any non-page-aligned or non-canonical addresses.
- */
- if (unlikely(entry->flags & EXEC_OBJECT_PINNED &&
- entry->offset != gen8_canonical_addr(entry->offset & I915_GTT_PAGE_MASK)))
- return -EINVAL;
-
- /* pad_to_size was once a reserved field, so sanitize it */
- if (entry->flags & EXEC_OBJECT_PAD_TO_SIZE) {
- if (unlikely(offset_in_page(entry->pad_to_size)))
- return -EINVAL;
- } else {
- entry->pad_to_size = 0;
- }
-
- if (unlikely(vma->exec_flags)) {
- DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
- entry->handle, (int)(entry - eb->exec));
- return -EINVAL;
- }
-
- /*
- * From the drm_mm perspective the address space is continuous,
- * so from this point on we always use the non-canonical
- * form internally.
- */
- entry->offset = gen8_noncanonical_addr(entry->offset);
-
- if (!eb->reloc_cache.has_fence) {
- entry->flags &= ~EXEC_OBJECT_NEEDS_FENCE;
- } else {
- if ((entry->flags & EXEC_OBJECT_NEEDS_FENCE ||
- eb->reloc_cache.needs_unfenced) &&
- i915_gem_object_is_tiled(vma->obj))
- entry->flags |= EXEC_OBJECT_NEEDS_GTT | __EXEC_OBJECT_NEEDS_MAP;
- }
-
- if (!(entry->flags & EXEC_OBJECT_PINNED))
- entry->flags |= eb->context_flags;
-
- return 0;
-}
-
-static int
-eb_add_vma(struct i915_execbuffer *eb,
- unsigned int i, unsigned batch_idx,
- struct i915_vma *vma)
-{
- struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- int err;
-
- GEM_BUG_ON(i915_vma_is_closed(vma));
-
- if (!(eb->args->flags & __EXEC_VALIDATED)) {
- err = eb_validate_vma(eb, entry, vma);
- if (unlikely(err))
- return err;
- }
-
- if (eb->lut_size > 0) {
- vma->exec_handle = entry->handle;
- hlist_add_head(&vma->exec_node,
- &eb->buckets[hash_32(entry->handle,
- eb->lut_size)]);
- }
-
- if (entry->relocation_count)
- list_add_tail(&vma->reloc_link, &eb->relocs);
-
- /*
- * Stash a pointer from the vma to execobj, so we can query its flags,
- * size, alignment etc as provided by the user. Also we stash a pointer
- * to the vma inside the execobj so that we can use a direct lookup
- * to find the right target VMA when doing relocations.
- */
- eb->vma[i] = vma;
- eb->flags[i] = entry->flags;
- vma->exec_flags = &eb->flags[i];
-
- /*
- * SNA is doing fancy tricks with compressing batch buffers, which leads
- * to negative relocation deltas. Usually that works out ok since the
- * relocate address is still positive, except when the batch is placed
- * very low in the GTT. Ensure this doesn't happen.
- *
- * Note that actual hangs have only been observed on gen7, but for
- * paranoia do it everywhere.
- */
- if (i == batch_idx) {
- if (entry->relocation_count &&
- !(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
- if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
- eb->batch = vma;
- }
-
- err = 0;
- if (eb_pin_vma(eb, entry, vma)) {
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
- eb->args->flags |= __EXEC_HAS_RELOC;
- }
- } else {
- eb_unreserve_vma(vma, vma->exec_flags);
-
- list_add_tail(&vma->exec_link, &eb->unbound);
- if (drm_mm_node_allocated(&vma->node))
- err = i915_vma_unbind(vma);
- if (unlikely(err))
- vma->exec_flags = NULL;
- }
- return err;
-}
-
-static inline int use_cpu_reloc(const struct reloc_cache *cache,
- const struct drm_i915_gem_object *obj)
-{
- if (!i915_gem_object_has_struct_page(obj))
- return false;
-
- if (DBG_FORCE_RELOC == FORCE_CPU_RELOC)
- return true;
-
- if (DBG_FORCE_RELOC == FORCE_GTT_RELOC)
- return false;
-
- return (cache->has_llc ||
- obj->cache_dirty ||
- obj->cache_level != I915_CACHE_NONE);
-}
-
-static int eb_reserve_vma(const struct i915_execbuffer *eb,
- struct i915_vma *vma)
-{
- struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- unsigned int exec_flags = *vma->exec_flags;
- u64 pin_flags;
- int err;
-
- pin_flags = PIN_USER | PIN_NONBLOCK;
- if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
- pin_flags |= PIN_GLOBAL;
-
- /*
- * Wa32bitGeneralStateOffset & Wa32bitInstructionBaseOffset,
- * limit address to the first 4GBs for unflagged objects.
- */
- if (!(exec_flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
- pin_flags |= PIN_ZONE_4G;
-
- if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
- pin_flags |= PIN_MAPPABLE;
-
- if (exec_flags & EXEC_OBJECT_PINNED) {
- pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
- } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
- pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
- }
-
- err = i915_vma_pin(vma,
- entry->pad_to_size, entry->alignment,
- pin_flags);
- if (err)
- return err;
-
- if (entry->offset != vma->node.start) {
- entry->offset = vma->node.start | UPDATE;
- eb->args->flags |= __EXEC_HAS_RELOC;
- }
-
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
- err = i915_vma_pin_fence(vma);
- if (unlikely(err)) {
- i915_vma_unpin(vma);
- return err;
- }
-
- if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
- }
-
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
-
- return 0;
-}
-
-static int eb_reserve(struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- struct list_head last;
- struct i915_vma *vma;
- unsigned int i, pass;
- int err;
-
- /*
- * Attempt to pin all of the buffers into the GTT.
- * This is done in 3 phases:
- *
- * 1a. Unbind all objects that do not match the GTT constraints for
- * the execbuffer (fenceable, mappable, alignment etc).
- * 1b. Increment pin count for already bound objects.
- * 2. Bind new objects.
- * 3. Decrement pin count.
- *
- * This avoids unnecessary unbinding of later objects in order to make
- * room for the earlier objects *unless* we need to defragment.
- */
-
- pass = 0;
- err = 0;
- do {
- list_for_each_entry(vma, &eb->unbound, exec_link) {
- err = eb_reserve_vma(eb, vma);
- if (err)
- break;
- }
- if (err != -ENOSPC)
- return err;
-
- /* Resort *all* the objects into priority order */
- INIT_LIST_HEAD(&eb->unbound);
- INIT_LIST_HEAD(&last);
- for (i = 0; i < count; i++) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
-
- if (flags & EXEC_OBJECT_PINNED &&
- flags & __EXEC_OBJECT_HAS_PIN)
- continue;
-
- eb_unreserve_vma(vma, &eb->flags[i]);
-
- if (flags & EXEC_OBJECT_PINNED)
- /* Pinned must have their slot */
- list_add(&vma->exec_link, &eb->unbound);
- else if (flags & __EXEC_OBJECT_NEEDS_MAP)
- /* Mappable objects require the lowest 256MiB (aperture) */
- list_add_tail(&vma->exec_link, &eb->unbound);
- else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
- /* Prioritise 4GiB region for restricted bo */
- list_add(&vma->exec_link, &last);
- else
- list_add_tail(&vma->exec_link, &last);
- }
- list_splice_tail(&last, &eb->unbound);
-
- switch (pass++) {
- case 0:
- break;
-
- case 1:
- /* Too fragmented, unbind everything and retry */
- err = i915_gem_evict_vm(eb->vm);
- if (err)
- return err;
- break;
-
- default:
- return -ENOSPC;
- }
- } while (1);
-}
-
-static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
-{
- if (eb->args->flags & I915_EXEC_BATCH_FIRST)
- return 0;
- else
- return eb->buffer_count - 1;
-}
-
-static int eb_select_context(struct i915_execbuffer *eb)
-{
- struct i915_gem_context *ctx;
-
- ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
- if (unlikely(!ctx))
- return -ENOENT;
-
- eb->gem_context = ctx;
- if (ctx->ppgtt) {
- eb->vm = &ctx->ppgtt->vm;
- eb->invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
- } else {
- eb->vm = &eb->i915->ggtt.vm;
- }
-
- eb->context_flags = 0;
- if (test_bit(UCONTEXT_NO_ZEROMAP, &ctx->user_flags))
- eb->context_flags |= __EXEC_OBJECT_NEEDS_BIAS;
-
- return 0;
-}
-
-static struct i915_request *__eb_wait_for_ring(struct intel_ring *ring)
-{
- struct i915_request *rq;
-
- /*
- * Completely unscientific finger-in-the-air estimates for suitable
- * maximum user request size (to avoid blocking) and then backoff.
- */
- if (intel_ring_update_space(ring) >= PAGE_SIZE)
- return NULL;
-
- /*
- * Find a request such that, after waiting upon it, there will be at least half
- * the ring available. The hysteresis allows us to compete for the
- * shared ring and should mean that we sleep less often prior to
- * claiming our resources, but not so long that the ring completely
- * drains before we can submit our next request.
- */
- list_for_each_entry(rq, &ring->request_list, ring_link) {
- if (__intel_ring_space(rq->postfix,
- ring->emit, ring->size) > ring->size / 2)
- break;
- }
- if (&rq->ring_link == &ring->request_list)
- return NULL; /* weird, we will check again later for real */
-
- return i915_request_get(rq);
-}
-
-static int eb_wait_for_ring(const struct i915_execbuffer *eb)
-{
- struct i915_request *rq;
- int ret = 0;
-
- /*
- * Apply a light amount of backpressure to prevent excessive hogs
- * from blocking waiting for space whilst holding struct_mutex and
- * keeping all of their resources pinned.
- */
-
- rq = __eb_wait_for_ring(eb->context->ring);
- if (rq) {
- mutex_unlock(&eb->i915->drm.struct_mutex);
-
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0)
- ret = -EINTR;
-
- i915_request_put(rq);
-
- mutex_lock(&eb->i915->drm.struct_mutex);
- }
-
- return ret;
-}
-
-static int eb_lookup_vmas(struct i915_execbuffer *eb)
-{
- struct radix_tree_root *handles_vma = &eb->gem_context->handles_vma;
- struct drm_i915_gem_object *obj;
- unsigned int i, batch;
- int err;
-
- if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
- return -ENOENT;
-
- if (unlikely(i915_gem_context_is_banned(eb->gem_context)))
- return -EIO;
-
- INIT_LIST_HEAD(&eb->relocs);
- INIT_LIST_HEAD(&eb->unbound);
-
- batch = eb_batch_index(eb);
-
- for (i = 0; i < eb->buffer_count; i++) {
- u32 handle = eb->exec[i].handle;
- struct i915_lut_handle *lut;
- struct i915_vma *vma;
-
- vma = radix_tree_lookup(handles_vma, handle);
- if (likely(vma))
- goto add_vma;
-
- obj = i915_gem_object_lookup(eb->file, handle);
- if (unlikely(!obj)) {
- err = -ENOENT;
- goto err_vma;
- }
-
- vma = i915_vma_instance(obj, eb->vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto err_obj;
- }
-
- lut = i915_lut_handle_alloc();
- if (unlikely(!lut)) {
- err = -ENOMEM;
- goto err_obj;
- }
-
- err = radix_tree_insert(handles_vma, handle, vma);
- if (unlikely(err)) {
- i915_lut_handle_free(lut);
- goto err_obj;
- }
-
- /* transfer ref to ctx */
- if (!vma->open_count++)
- i915_vma_reopen(vma);
- list_add(&lut->obj_link, &obj->lut_list);
- list_add(&lut->ctx_link, &eb->gem_context->handles_list);
- lut->ctx = eb->gem_context;
- lut->handle = handle;
-
-add_vma:
- err = eb_add_vma(eb, i, batch, vma);
- if (unlikely(err))
- goto err_vma;
-
- GEM_BUG_ON(vma != eb->vma[i]);
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
- eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
- }
-
- eb->args->flags |= __EXEC_VALIDATED;
- return eb_reserve(eb);
-
-err_obj:
- i915_gem_object_put(obj);
-err_vma:
- eb->vma[i] = NULL;
- return err;
-}
-
-static struct i915_vma *
-eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
-{
- if (eb->lut_size < 0) {
- if (handle >= -eb->lut_size)
- return NULL;
- return eb->vma[handle];
- } else {
- struct hlist_head *head;
- struct i915_vma *vma;
-
- head = &eb->buckets[hash_32(handle, eb->lut_size)];
- hlist_for_each_entry(vma, head, exec_node) {
- if (vma->exec_handle == handle)
- return vma;
- }
- return NULL;
- }
-}
-
-static void eb_release_vmas(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
- unsigned int flags = eb->flags[i];
-
- if (!vma)
- break;
-
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- vma->exec_flags = NULL;
- eb->vma[i] = NULL;
-
- if (flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, flags);
-
- if (flags & __EXEC_OBJECT_HAS_REF)
- i915_vma_put(vma);
- }
-}
-
-static void eb_reset_vmas(const struct i915_execbuffer *eb)
-{
- eb_release_vmas(eb);
- if (eb->lut_size > 0)
- memset(eb->buckets, 0,
- sizeof(struct hlist_head) << eb->lut_size);
-}
-
-static void eb_destroy(const struct i915_execbuffer *eb)
-{
- GEM_BUG_ON(eb->reloc_cache.rq);
-
- if (eb->lut_size > 0)
- kfree(eb->buckets);
-}
-
-static inline u64
-relocation_target(const struct drm_i915_gem_relocation_entry *reloc,
- const struct i915_vma *target)
-{
- return gen8_canonical_addr((int)reloc->delta + target->node.start);
-}
-
-static void reloc_cache_init(struct reloc_cache *cache,
- struct drm_i915_private *i915)
-{
- cache->page = -1;
- cache->vaddr = 0;
- /* Must be a variable in the struct to allow GCC to unroll. */
- cache->gen = INTEL_GEN(i915);
- cache->has_llc = HAS_LLC(i915);
- cache->use_64bit_reloc = HAS_64BIT_RELOC(i915);
- cache->has_fence = cache->gen < 4;
- cache->needs_unfenced = INTEL_INFO(i915)->unfenced_needs_alignment;
- cache->node.allocated = false;
- cache->rq = NULL;
- cache->rq_size = 0;
-}
-
-static inline void *unmask_page(unsigned long p)
-{
- return (void *)(uintptr_t)(p & PAGE_MASK);
-}
-
-static inline unsigned int unmask_flags(unsigned long p)
-{
- return p & ~PAGE_MASK;
-}
-
-#define KMAP 0x4 /* after CLFLUSH_FLAGS */
-
-static inline struct i915_ggtt *cache_to_ggtt(struct reloc_cache *cache)
-{
- struct drm_i915_private *i915 =
- container_of(cache, struct i915_execbuffer, reloc_cache)->i915;
- return &i915->ggtt;
-}
-
-static void reloc_gpu_flush(struct reloc_cache *cache)
-{
- GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
- cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
-
- __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
- i915_gem_object_unpin_map(cache->rq->batch->obj);
-
- i915_gem_chipset_flush(cache->rq->i915);
-
- i915_request_add(cache->rq);
- cache->rq = NULL;
-}
-
-static void reloc_cache_reset(struct reloc_cache *cache)
-{
- void *vaddr;
-
- if (cache->rq)
- reloc_gpu_flush(cache);
-
- if (!cache->vaddr)
- return;
-
- vaddr = unmask_page(cache->vaddr);
- if (cache->vaddr & KMAP) {
- if (cache->vaddr & CLFLUSH_AFTER)
- mb();
-
- kunmap_atomic(vaddr);
- i915_gem_object_finish_access((struct drm_i915_gem_object *)cache->node.mm);
- } else {
- wmb();
- io_mapping_unmap_atomic((void __iomem *)vaddr);
- if (cache->node.allocated) {
- struct i915_ggtt *ggtt = cache_to_ggtt(cache);
-
- ggtt->vm.clear_range(&ggtt->vm,
- cache->node.start,
- cache->node.size);
- drm_mm_remove_node(&cache->node);
- } else {
- i915_vma_unpin((struct i915_vma *)cache->node.mm);
- }
- }
-
- cache->vaddr = 0;
- cache->page = -1;
-}
-
-static void *reloc_kmap(struct drm_i915_gem_object *obj,
- struct reloc_cache *cache,
- unsigned long page)
-{
- void *vaddr;
-
- if (cache->vaddr) {