author    Dave Airlie <airlied@redhat.com>  2016-06-02 07:58:36 +1000
committer Dave Airlie <airlied@redhat.com>  2016-06-02 07:58:36 +1000
commit    66fd7a66e8b9e11e49f46ea77910f935c4dee5c3 (patch)
tree      cc9dd78568036c1d4d0313bcd74f017b69a106c4 /drivers/gpu/drm/i915/i915_gem.c
parent    65439b68bb10afd877af05463bbff5d25200fd06 (diff)
parent    e42aeef1237b7c969a77b7f726c50f6cb832185f (diff)
Merge branch 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel into drm-next
drm-intel-next-2016-05-22:
- cmd-parser support for direct reg->reg loads (Ken Graunke)
- better handle DP++ smart dongles (Ville)
- bxt guc fw loading support (Nick Hoathe)
- remove a bunch of struct typedefs from dpll code (Ander)
- tons of small work all over to avoid casting between drm_device and the i915
  dev struct (Tvrtko & Chris)
- untangle request retiring from other operations, also fixes reset stat corner
  cases (Chris)
- skl atomic watermark support from Matt Roper, yay!
- various wm handling bugfixes from Ville
- big pile of cdclk rework for bxt/skl (Ville)
- CABC (Content Adaptive Brightness Control) for dsi panels (Jani & Deepak M)
- nonblocking atomic commits for plane-only updates (Maarten Lankhorst)
- bunch of PSR fixes & improvements
- untangle our map/pin/sg_iter code a bit (Dave Gordon)

drm-intel-next-2016-05-08:
- refactor stolen quirks to share code between early quirks and i915 (Joonas)
- refactor gem BO/vma functions (Tvrtko & Dave)
- backlight over DPCD support (Yetunde Abedisi)
- more dsi panel sequence support (Jani)
- lots of refactoring around handling iomaps, vma, ring access and related
  topics, culminating in removing the duplicated request tracking in the
  execlist code (Chris & Tvrtko); includes a small patch for core iomapping
  code
- hw state readout for bxt dsi (Ramalingam C)
- cdclk cleanups (Ville)
- dedupe chv pll code a bit (Ander)
- enable semaphores on gen8+ for legacy submission, to be able to have a
  direct comparison against execlists on the same platform (Chris); not meant
  to be used for anything else but performance tuning
- lvds border bit hw state checker fix (Jani)
- rpm vs. shrinker/oom-notifier fixes (Praveen Paneri)
- l3 tuning (Imre)
- revert mst dp audio, it's totally non-functional and crash-y (Lyude)
- first official dmc for kbl (Rodrigo)
- and tons of small things all over as usual

* 'drm-intel-next' of git://anongit.freedesktop.org/drm-intel: (194 commits)
  drm/i915: Revert async unpin and nonblocking atomic commit
  drm/i915: Update DRIVER_DATE to 20160522
  drm/i915: Inline sg_next() for the optimised SGL iterator
  drm/i915: Introduce & use new lightweight SGL iterators
  drm/i915: optimise i915_gem_object_map() for small objects
  drm/i915: refactor i915_gem_object_pin_map()
  drm/i915/psr: Implement PSR2 w/a for gen9
  drm/i915/psr: Use ->get_aux_send_ctl functions
  drm/i915/psr: Order DP aux transactions correctly
  drm/i915/psr: Make idle_frames sensible again
  drm/i915/psr: Try to program link training times correctly
  drm/i915/userptr: Convert to drm_i915_private
  drm/i915: Allow nonblocking update of pageflips.
  drm/i915: Check for unpin correctness.
  Reapply "drm/i915: Avoid stalling on pending flips for legacy cursor updates"
  drm/i915: Make unpin async.
  drm/i915: Prepare connectors for nonblocking checks.
  drm/i915: Pass atomic states to fbc update functions.
  drm/i915: Remove reset_counter from intel_crtc.
  drm/i915: Remove queue_flip pointer.
  ...
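A large share of the churn below is mechanical: functions that used to take a
struct drm_device and immediately cast to the driver-private struct now take
struct drm_i915_private directly, with to_i915() applied once at the boundary.
A minimal sketch of the idiom (i915_engines_idle() is a hypothetical helper,
used only for illustration; for_each_engine(), to_i915() and intel_mark_idle()
all appear in the diff below):

/* Hypothetical helper: takes dev_priv directly, so callers convert
 * once at the boundary instead of casting at every use. */
static bool i915_engines_idle(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	for_each_engine(engine, dev_priv)
		if (!list_empty(&engine->request_list))
			return false;

	return true;
}

/* Call site that still holds a struct drm_device: */
if (i915_engines_idle(to_i915(dev)))
	intel_mark_idle(to_i915(dev));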
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 400
1 file changed, 169 insertions(+), 231 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aad26851cee3..12407bc70c71 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -177,7 +177,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
vaddr += PAGE_SIZE;
}
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
st = kmalloc(sizeof(*st), GFP_KERNEL);
if (st == NULL)
@@ -347,7 +347,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
}
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -381,9 +381,9 @@ i915_gem_create(struct drm_file *file,
return -EINVAL;
/* Allocate the new object */
- obj = i915_gem_alloc_object(dev, size);
- if (obj == NULL)
- return -ENOMEM;
+ obj = i915_gem_object_create(dev, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
@@ -1006,7 +1006,7 @@ out:
}
if (needs_clflush_after)
- i915_gem_chipset_flush(dev);
+ i915_gem_chipset_flush(to_i915(dev));
else
obj->cache_dirty = true;
@@ -1230,8 +1230,7 @@ int __i915_wait_request(struct drm_i915_gem_request *req,
struct intel_rps_client *rps)
{
struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_private *dev_priv = req->i915;
const bool irq_test_in_progress =
ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
@@ -1413,6 +1412,13 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
list_del_init(&request->list);
i915_gem_request_remove_from_client(request);
+ if (request->previous_context) {
+ if (i915.enable_execlists)
+ intel_lr_context_unpin(request->previous_context,
+ request->engine);
+ }
+
+ i915_gem_context_unreference(request->ctx);
i915_gem_request_unreference(request);
}
@@ -1422,7 +1428,7 @@ __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
struct intel_engine_cs *engine = req->engine;
struct drm_i915_gem_request *tmp;
- lockdep_assert_held(&engine->dev->struct_mutex);
+ lockdep_assert_held(&engine->i915->dev->struct_mutex);
if (list_empty(&req->list))
return;
@@ -1982,7 +1988,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
return size;
/* Previous chips need a power-of-two fence region when tiling */
- if (INTEL_INFO(dev)->gen == 3)
+ if (IS_GEN3(dev))
gtt_size = 1024*1024;
else
gtt_size = 512*1024;
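The hunk only shows the gen3 and pre-gen3 base sizes; for context, the
long-standing remainder of i915_gem_get_gtt_size() (unchanged here) then
rounds the object size up to the next power of two of at least that base:

/* Round up to the next power of two that covers the object */
while (gtt_size < size)
	gtt_size <<= 1;

return gtt_size;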
@@ -2162,7 +2168,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
static void
i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
+ struct page *page;
int ret;
BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2191,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
if (obj->madv == I915_MADV_DONTNEED)
obj->dirty = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
- struct page *page = sg_page_iter_page(&sg_iter);
-
+ for_each_sgt_page(page, sgt_iter, obj->pages) {
if (obj->dirty)
set_page_dirty(page);
@@ -2243,7 +2248,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
struct address_space *mapping;
struct sg_table *st;
struct scatterlist *sg;
- struct sg_page_iter sg_iter;
+ struct sgt_iter sgt_iter;
struct page *page;
unsigned long last_pfn = 0; /* suppress gcc warning */
int ret;
@@ -2340,8 +2345,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
err_pages:
sg_mark_end(sg);
- for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
- put_page(sg_page_iter_page(&sg_iter));
+ for_each_sgt_page(page, sgt_iter, st)
+ put_page(page);
sg_free_table(st);
kfree(st);
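Both hunks above convert to the lightweight SGL iterator introduced by this
series; usage needs only a struct sgt_iter cursor and a struct page pointer:

struct sgt_iter sgt_iter;
struct page *page;

/* Walk every page backing the object's sg_table */
for_each_sgt_page(page, sgt_iter, obj->pages)
	set_page_dirty(page);	/* example per-page operation */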
@@ -2395,6 +2400,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
return 0;
}
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+ unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+ struct sg_table *sgt = obj->pages;
+ struct sgt_iter sgt_iter;
+ struct page *page;
+ struct page *stack_pages[32];
+ struct page **pages = stack_pages;
+ unsigned long i = 0;
+ void *addr;
+
+ /* A single page can always be kmapped */
+ if (n_pages == 1)
+ return kmap(sg_page(sgt->sgl));
+
+ if (n_pages > ARRAY_SIZE(stack_pages)) {
+ /* Too big for stack -- allocate temporary array instead */
+ pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+ if (!pages)
+ return NULL;
+ }
+
+ for_each_sgt_page(page, sgt_iter, sgt)
+ pages[i++] = page;
+
+ /* Check that we have the expected number of pages */
+ GEM_BUG_ON(i != n_pages);
+
+ addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+ if (pages != stack_pages)
+ drm_free_large(pages);
+
+ return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
{
int ret;
@@ -2407,29 +2450,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
i915_gem_object_pin_pages(obj);
- if (obj->mapping == NULL) {
- struct page **pages;
-
- pages = NULL;
- if (obj->base.size == PAGE_SIZE)
- obj->mapping = kmap(sg_page(obj->pages->sgl));
- else
- pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
- sizeof(*pages),
- GFP_TEMPORARY);
- if (pages != NULL) {
- struct sg_page_iter sg_iter;
- int n;
-
- n = 0;
- for_each_sg_page(obj->pages->sgl, &sg_iter,
- obj->pages->nents, 0)
- pages[n++] = sg_page_iter_page(&sg_iter);
-
- obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
- drm_free_large(pages);
- }
- if (obj->mapping == NULL) {
+ if (!obj->mapping) {
+ obj->mapping = i915_gem_object_map(obj);
+ if (!obj->mapping) {
i915_gem_object_unpin_pages(obj);
return ERR_PTR(-ENOMEM);
}
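For callers, i915_gem_object_pin_map() hides the kmap-vs-vmap choice and
caches the result in obj->mapping. A hedged usage sketch, assuming
i915_gem_object_unpin_map() as the companion helper that drops the page pin
(it is not shown in this diff):

void *vaddr;

/* caller holds struct_mutex */
vaddr = i915_gem_object_pin_map(obj);	/* pins pages, maps whole object */
if (IS_ERR(vaddr))
	return PTR_ERR(vaddr);

memcpy(vaddr, data, len);		/* hypothetical use of the mapping */

i915_gem_object_unpin_map(obj);		/* assumed companion: unpin pages */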
@@ -2502,9 +2525,8 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
}
static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
+i915_gem_init_seqno(struct drm_i915_private *dev_priv, u32 seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
int ret;
@@ -2514,7 +2536,7 @@ i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
if (ret)
return ret;
}
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
/* Finally reset hw state */
for_each_engine(engine, dev_priv)
@@ -2534,7 +2556,7 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
/* HWS page needs to be set less than what we
* will inject to ring
*/
- ret = i915_gem_init_seqno(dev, seqno - 1);
+ ret = i915_gem_init_seqno(dev_priv, seqno - 1);
if (ret)
return ret;
@@ -2550,13 +2572,11 @@ int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
}
int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
+i915_gem_get_seqno(struct drm_i915_private *dev_priv, u32 *seqno)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
-
/* reserve 0 for non-seqno */
if (dev_priv->next_seqno == 0) {
- int ret = i915_gem_init_seqno(dev, 0);
+ int ret = i915_gem_init_seqno(dev_priv, 0);
if (ret)
return ret;
@@ -2580,6 +2600,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
struct drm_i915_private *dev_priv;
struct intel_ringbuffer *ringbuf;
u32 request_start;
+ u32 reserved_tail;
int ret;
if (WARN_ON(request == NULL))
@@ -2594,9 +2615,10 @@ void __i915_add_request(struct drm_i915_gem_request *request,
* should already have been reserved in the ring buffer. Let the ring
* know that it is time to use that space up.
*/
- intel_ring_reserved_space_use(ringbuf);
-
request_start = intel_ring_get_tail(ringbuf);
+ reserved_tail = request->reserved_space;
+ request->reserved_space = 0;
+
/*
* Emit any outstanding flushes - execbuf can fail to emit the flush
* after having emitted the batchbuffer command. Hence we need to fix
@@ -2652,15 +2674,21 @@ void __i915_add_request(struct drm_i915_gem_request *request,
/* Not allowed to fail! */
WARN(ret, "emit|add_request failed: %d!\n", ret);
- i915_queue_hangcheck(engine->dev);
+ i915_queue_hangcheck(engine->i915);
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work,
round_jiffies_up_relative(HZ));
- intel_mark_busy(dev_priv->dev);
+ intel_mark_busy(dev_priv);
/* Sanity check that the reserved size was large enough. */
- intel_ring_reserved_space_end(ringbuf);
+ ret = intel_ring_get_tail(ringbuf) - request_start;
+ if (ret < 0)
+ ret += ringbuf->size;
+ WARN_ONCE(ret > reserved_tail,
+ "Not enough space reserved (%d bytes) "
+ "for adding the request (%d bytes)\n",
+ reserved_tail, ret);
}
static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
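The replacement for intel_ring_reserved_space_end() measures the bytes the
request actually emitted, with the subtraction wrapping modulo the ring size;
a worked example of the arithmetic used above:

/* Example: size = 4096, request_start = 4000, new tail = 128:
 *   128 - 4000 = -3872;  -3872 + 4096 = 224 bytes emitted,
 * which must stay within the reserved_tail snapshot taken earlier. */
ret = intel_ring_get_tail(ringbuf) - request_start;
if (ret < 0)
	ret += ringbuf->size;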
@@ -2712,18 +2740,6 @@ void i915_gem_request_free(struct kref *req_ref)
{
struct drm_i915_gem_request *req = container_of(req_ref,
typeof(*req), ref);
- struct intel_context *ctx = req->ctx;
-
- if (req->file_priv)
- i915_gem_request_remove_from_client(req);
-
- if (ctx) {
- if (i915.enable_execlists && ctx != req->i915->kernel_context)
- intel_lr_context_unpin(ctx, req->engine);
-
- i915_gem_context_unreference(ctx);
- }
-
kmem_cache_free(req->i915->requests, req);
}
@@ -2732,7 +2748,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
struct intel_context *ctx,
struct drm_i915_gem_request **req_out)
{
- struct drm_i915_private *dev_priv = to_i915(engine->dev);
+ struct drm_i915_private *dev_priv = engine->i915;
unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
struct drm_i915_gem_request *req;
int ret;
@@ -2754,7 +2770,7 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
if (req == NULL)
return -ENOMEM;
- ret = i915_gem_get_seqno(engine->dev, &req->seqno);
+ ret = i915_gem_get_seqno(engine->i915, &req->seqno);
if (ret)
goto err;
@@ -2765,15 +2781,6 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
req->ctx = ctx;
i915_gem_context_reference(req->ctx);
- if (i915.enable_execlists)
- ret = intel_logical_ring_alloc_request_extras(req);
- else
- ret = intel_ring_alloc_request_extras(req);
- if (ret) {
- i915_gem_context_unreference(req->ctx);
- goto err;
- }
-
/*
* Reserve space in the ring buffer for all the commands required to
* eventually emit this request. This is to guarantee that the
@@ -2781,24 +2788,20 @@ __i915_gem_request_alloc(struct intel_engine_cs *engine,
* to be redone if the request is not actually submitted straight
* away, e.g. because a GPU scheduler has deferred it.
*/
+ req->reserved_space = MIN_SPACE_FOR_ADD_REQUEST;
+
if (i915.enable_execlists)
- ret = intel_logical_ring_reserve_space(req);
+ ret = intel_logical_ring_alloc_request_extras(req);
else
- ret = intel_ring_reserve_space(req);
- if (ret) {
- /*
- * At this point, the request is fully allocated even if not
- * fully prepared. Thus it can be cleaned up using the proper
- * free code.
- */
- intel_ring_reserved_space_cancel(req->ringbuf);
- i915_gem_request_unreference(req);
- return ret;
- }
+ ret = intel_ring_alloc_request_extras(req);
+ if (ret)
+ goto err_ctx;
*req_out = req;
return 0;
+err_ctx:
+ i915_gem_context_unreference(ctx);
err:
kmem_cache_free(dev_priv->requests, req);
return ret;
@@ -2824,7 +2827,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
int err;
if (ctx == NULL)
- ctx = to_i915(engine->dev)->kernel_context;
+ ctx = engine->i915->kernel_context;
err = __i915_gem_request_alloc(engine, ctx, &req);
return err ? ERR_PTR(err) : req;
}
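With the ERR_PTR convention, callers no longer need separate NULL checks; the
pattern (matching the per-engine loop removed later in this diff):

struct drm_i915_gem_request *req;

req = i915_gem_request_alloc(engine, NULL);	/* NULL: kernel_context */
if (IS_ERR(req))
	return PTR_ERR(req);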
@@ -2888,13 +2891,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
/* Ensure irq handler finishes or is cancelled. */
tasklet_kill(&engine->irq_tasklet);
- spin_lock_bh(&engine->execlist_lock);
- /* list_splice_tail_init checks for empty lists */
- list_splice_tail_init(&engine->execlist_queue,
- &engine->execlist_retired_req_list);
- spin_unlock_bh(&engine->execlist_lock);
-
- intel_execlists_retire_requests(engine);
+ intel_execlists_cancel_requests(engine);
}
/*
@@ -3005,9 +3002,8 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
}
bool
-i915_gem_retire_requests(struct drm_device *dev)
+i915_gem_retire_requests(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
bool idle = true;
@@ -3018,8 +3014,6 @@ i915_gem_retire_requests(struct drm_device *dev)
spin_lock_bh(&engine->execlist_lock);
idle &= list_empty(&engine->execlist_queue);
spin_unlock_bh(&engine->execlist_lock);
-
- intel_execlists_retire_requests(engine);
}
}
@@ -3042,7 +3036,7 @@ i915_gem_retire_work_handler(struct work_struct *work)
/* Come back later if the device is busy... */
idle = false;
if (mutex_trylock(&dev->struct_mutex)) {
- idle = i915_gem_retire_requests(dev);
+ idle = i915_gem_retire_requests(dev_priv);
mutex_unlock(&dev->struct_mutex);
}
if (!idle)
@@ -3066,7 +3060,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
* Also locking seems to be fubar here, engine->request_list is protected
* by dev->struct_mutex. */
- intel_mark_idle(dev);
+ intel_mark_idle(dev_priv);
if (mutex_trylock(&dev->struct_mutex)) {
for_each_engine(engine, dev_priv)
@@ -3096,14 +3090,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
if (req == NULL)
continue;
- if (list_empty(&req->list))
- goto retire;
-
- if (i915_gem_request_completed(req, true)) {
- __i915_gem_request_retire__upto(req);
-retire:
+ if (i915_gem_request_completed(req, true))
i915_gem_object_retire__read(obj, i);
- }
}
return 0;
@@ -3185,7 +3173,7 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
ret = __i915_wait_request(req[i], true,
args->timeout_ns > 0 ? &args->timeout_ns : NULL,
to_rps_client(file));
- i915_gem_request_unreference__unlocked(req[i]);
+ i915_gem_request_unreference(req[i]);
}
return ret;
@@ -3211,7 +3199,7 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
if (i915_gem_request_completed(from_req, true))
return 0;
- if (!i915_semaphore_is_enabled(obj->base.dev)) {
+ if (!i915_semaphore_is_enabled(to_i915(obj->base.dev))) {
struct drm_i915_private *i915 = to_i915(obj->base.dev);
ret = __i915_wait_request(from_req,
i915->mm.interruptible,
@@ -3345,6 +3333,17 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
old_write_domain);
}
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+ GEM_BUG_ON(vma->pin_count);
+
+ if (vma->iomap == NULL)
+ return;
+
+ io_mapping_unmap(vma->iomap);
+ vma->iomap = NULL;
+}
+
static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
{
struct drm_i915_gem_object *obj = vma->obj;
@@ -3377,6 +3376,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
ret = i915_gem_object_put_fence(obj);
if (ret)
return ret;
+
+ __i915_vma_iounmap(vma);
}
trace_i915_vma_unbind(vma);
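The new __i915_vma_iounmap() is the release half of a lazily created
write-combining map of the vma's aperture range. A sketch of the map half,
under the assumption of the 4.7-era two-argument io_mapping_map_wc() and a
ggtt->mappable io_mapping (both belong to surrounding code not shown here):

void __iomem *ptr = vma->iomap;

if (ptr == NULL) {
	/* Map the vma's GTT range through the mappable aperture;
	 * __i915_vma_iounmap() above is the matching teardown. */
	ptr = io_mapping_map_wc(ggtt->mappable, vma->node.start);
	if (ptr == NULL)
		return ERR_PTR(-ENOMEM);

	vma->iomap = ptr;
}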
@@ -3731,7 +3732,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
return;
if (i915_gem_clflush_object(obj, obj->pin_display))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
old_write_domain = obj->base.write_domain;
obj->base.write_domain = 0;
@@ -3929,7 +3930,7 @@ out:
obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
cpu_write_needs_clflush(obj)) {
if (i915_gem_clflush_object(obj, true))
- i915_gem_chipset_flush(obj->base.dev);
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
}
return 0;
@@ -4198,7 +4199,7 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
if (ret == 0)
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
- i915_gem_request_unreference__unlocked(target);
+ i915_gem_request_unreference(target);
return ret;
}
@@ -4499,21 +4500,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
.put_pages = i915_gem_object_put_pages_gtt,
};
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
size_t size)
{
struct drm_i915_gem_object *obj;
struct address_space *mapping;
gfp_t mask;
+ int ret;
obj = i915_gem_object_alloc(dev);
if (obj == NULL)
- return NULL;
+ return ERR_PTR(-ENOMEM);
- if (drm_gem_object_init(dev, &obj->base, size) != 0) {
- i915_gem_object_free(obj);
- return NULL;
- }
+ ret = drm_gem_object_init(dev, &obj->base, size);
+ if (ret)
+ goto fail;
mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4551,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
trace_i915_gem_object_create(obj);
return obj;
+
+fail:
+ i915_gem_object_free(obj);
+
+ return ERR_PTR(ret);
}
static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@ -4655,16 +4661,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
const struct i915_ggtt_view *view)
{
- struct drm_device *dev = obj->base.dev;
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
- BUG_ON(!view);
+ GEM_BUG_ON(!view);
list_for_each_entry(vma, &obj->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma;
return NULL;
}
@@ -4706,9 +4708,10 @@ i915_gem_suspend(struct drm_device *dev)
if (ret)
goto err;
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev_priv);
i915_gem_stop_engines(dev);
+ i915_gem_context_lost(dev_priv);
mutex_unlock(&dev->struct_mutex);
cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@ -4727,37 +4730,6 @@ err:
return ret;
}
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
-{
- struct intel_engine_cs *engine = req->engine;
- struct drm_device *dev = engine->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
- int i, ret;
-
- if (!HAS_L3_DPF(dev) || !remap_info)
- return 0;
-
- ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
- if (ret)
- return ret;
-
- /*
- * Note: We do not worry about the concurrent register cacheline hang
- * here because no other code should access these registers other than
- * at initialization time.
- */
- for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
- intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
- intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
- intel_ring_emit(engine, remap_info[i]);
- }
-
- intel_ring_advance(engine);
-
- return ret;
-}
-
void i915_gem_init_swizzling(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -4862,7 +4834,7 @@ i915_gem_init_hw(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_engine_cs *engine;
- int ret, j;
+ int ret;
/* Double layer security blanket, see i915_gem_init() */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4928,44 +4900,6 @@ i915_gem_init_hw(struct drm_device *dev)
* on re-initialisation
*/
ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
- if (ret)
- goto out;
-
- /* Now it is safe to go back round and do everything else: */
- for_each_engine(engine, dev_priv) {
- struct drm_i915_gem_request *req;
-
- req = i915_gem_request_alloc(engine, NULL);
- if (IS_ERR(req)) {
- ret = PTR_ERR(req);
- break;
- }
-
- if (engine->id == RCS) {
- for (j = 0; j < NUM_L3_SLICES(dev); j++) {
- ret = i915_gem_l3_remap(req, j);
- if (ret)
- goto err_request;
- }
- }
-
- ret = i915_ppgtt_init_ring(req);
- if (ret)
- goto err_request;
-
- ret = i915_gem_context_enable(req);
- if (ret)
- goto err_request;
-
-err_request:
- i915_add_request_no_flush(req);
- if (ret) {
- DRM_ERROR("Failed to enable %s, error=%d\n",
- engine->name, ret);
- i915_gem_cleanup_engines(dev);
- break;
- }
- }
out:
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
@@ -4977,9 +4911,6 @@ int i915_gem_init(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
- i915.enable_execlists = intel_sanitize_enable_execlists(dev,
- i915.enable_execlists);
-
mutex_lock(&dev->struct_mutex);
if (!i915.enable_execlists) {
@@ -5002,10 +4933,7 @@ int i915_gem_init(struct drm_device *dev)
*/
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
- ret = i915_gem_init_userptr(dev);
- if (ret)
- goto out_unlock;
-
+ i915_gem_init_userptr(dev_priv);
i915_gem_init_ggtt(dev);
ret = i915_gem_context_init(dev);
@@ -5042,14 +4970,6 @@ i915_gem_cleanup_engines(struct drm_device *dev)
for_each_engine(engine, dev_priv)
dev_priv->gt.cleanup_engine(engine);
-
- if (i915.enable_execlists)
- /*
- * Neither the BIOS, ourselves or any other kernel
- * expects the system to be in execlists mode on startup,
- * so we need to reset the GPU back to legacy mode.
- */
- intel_gpu_reset(dev, ALL_ENGINES);
}
static void
@@ -5073,7 +4993,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
else
dev_priv->num_fence_regs = 8;
- if (intel_vgpu_active(dev))
+ if (intel_vgpu_active(dev_priv))
dev_priv->num_fence_regs =
I915_READ(vgtif_reg(avail_rs.fence_num));
@@ -5148,6 +5068,34 @@ void i915_gem_load_cleanup(struct drm_device *dev)
kmem_cache_destroy(dev_priv->objects);
}
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+ struct drm_i915_gem_object *obj;
+
+ /* Called just before we write the hibernation image.
+ *
+ * We need to update the domain tracking to reflect that the CPU
+ * will be accessing all the pages to create and restore from the
+ * hibernation, and so upon restoration those pages will be in the
+ * CPU domain.
+ *
+ * To make sure the hibernation image contains the latest state,
+ * we update that state just before writing out the image.
+ */
+
+ list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+ }
+
+ return 0;
+}
+
void i915_gem_release(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
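i915_gem_freeze_late() above is meant to run from the driver's hibernation
path just before the image is written. A hedged sketch of the wiring (the
dev_pm_ops callback name and the drvdata retrieval are assumptions, not part
of this diff):

/* Sketch: a freeze_late dev_pm_ops hook (name assumed for illustration) */
static int i915_pm_freeze_late(struct device *kdev)
{
	struct pci_dev *pdev = to_pci_dev(kdev);
	struct drm_device *dev = pci_get_drvdata(pdev);

	/* Mark all pages CPU-domain so the hibernation image is coherent */
	return i915_gem_freeze_late(to_i915(dev));
}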
@@ -5254,13 +5202,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
- i915_ggtt_view_equal(&vma->ggtt_view, view))
+ if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
return vma->node.start;
WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +5231,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
const struct i915_ggtt_view *view)
{
- struct drm_i915_private *dev_priv = to_i915(o->base.dev);
- struct i915_ggtt *ggtt = &dev_priv->ggtt;
struct i915_vma *vma;
list_for_each_entry(vma, &o->vma_list, obj_link)
- if (vma->vm == &ggtt->base &&
+ if (vma->is_ggtt &&
i915_ggtt_view_equal(&vma->ggtt_view, view) &&
drm_mm_node_allocated(&vma->node))
return true;
@@ -5310,23 +5253,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
return false;
}
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
- struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
{
- struct drm_i915_private *dev_priv = o->base.dev->dev_private;
struct i915_vma *vma;
- WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
- BUG_ON(list_empty(&o->vma_list));
+ GEM_BUG_ON(list_empty(&o->vma_list));
list_for_each_entry(vma, &o->vma_list, obj_link) {
if (vma->is_ggtt &&
- vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
- continue;
- if (vma->vm == vm)
+ vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
return vma->node.size;
}
+
return 0;
}
@@ -5365,8 +5303,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
size_t bytes;
int ret;
- obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
- if (IS_ERR_OR_NULL(obj))
+ obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+ if (IS_ERR(obj))
return obj;
ret = i915_gem_object_set_to_cpu_domain(obj, true);