]> git.openfabrics.org - ~emulex/infiniband.git/commitdiff
drm/i915/bdw: Skeleton for the new logical rings submission path
authorOscar Mateo <oscar.mateo@intel.com>
Thu, 24 Jul 2014 16:04:22 +0000 (17:04 +0100)
committerDaniel Vetter <daniel.vetter@ffwll.ch>
Mon, 11 Aug 2014 14:40:57 +0000 (16:40 +0200)
Execlists are indeed a brave new world with respect to workload
submission to the GPU.

In previous versions of this series, I tried to impact the
legacy ringbuffer submission path as little as possible (mostly,
passing the context around and using the correct ringbuffer when I
needed one), but Daniel is afraid (probably with reason) that
these changes and, especially, future ones, will end up breaking
older gens.

This commit and some others coming next will try to limit the
damage by creating an alternative path for workload submission.
The first step is here: laying out a new ring init/fini.

Signed-off-by: Oscar Mateo <oscar.mateo@intel.com>
Reviewed-by: Damien Lespiau <damien.lespiau@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lrc.h

index 33a54cbf9a2e24354254eed07dd1f4863ce4b102..9acb2469116a5f5b630888fcfbe3c4969ee98e91 100644 (file)
@@ -4741,6 +4741,11 @@ int i915_gem_init(struct drm_device *dev)
                dev_priv->gt.init_rings = i915_gem_init_rings;
                dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
                dev_priv->gt.stop_ring = intel_stop_ring_buffer;
+       } else {
+               dev_priv->gt.do_execbuf = intel_execlists_submission;
+               dev_priv->gt.init_rings = intel_logical_rings_init;
+               dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
+               dev_priv->gt.stop_ring = intel_logical_ring_stop;
        }
 
        i915_gem_init_userptr(dev);
index a7a08a85edb36b1aa4e28f78851a385ded8ac0d1..9c2ff8f11c9041b1f6ab6e03af019bfebc04a326 100644 (file)
@@ -91,6 +91,157 @@ int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists
        return 0;
 }
 
+/*
+ * intel_execlists_submission() - execbuffer submission entry point used when
+ * Execlists are enabled (installed as dev_priv->gt.do_execbuf).
+ *
+ * Skeleton only: performs no submission yet and always reports success.
+ */
+int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
+                              struct intel_engine_cs *ring,
+                              struct intel_context *ctx,
+                              struct drm_i915_gem_execbuffer2 *args,
+                              struct list_head *vmas,
+                              struct drm_i915_gem_object *batch_obj,
+                              u64 exec_start, u32 flags)
+{
+       /* TODO */
+       return 0;
+}
+
+/*
+ * intel_logical_ring_stop() - stop a logical ring (dev_priv->gt.stop_ring
+ * counterpart of intel_stop_ring_buffer).  Skeleton only: no-op for now.
+ */
+void intel_logical_ring_stop(struct intel_engine_cs *ring)
+{
+       /* TODO */
+}
+
+/*
+ * intel_logical_ring_cleanup() - tear down a logical ring
+ * (dev_priv->gt.cleanup_ring counterpart of intel_cleanup_ring_buffer).
+ * Skeleton only: no-op for now; also used on the error-unwind path of
+ * intel_logical_rings_init().
+ */
+void intel_logical_ring_cleanup(struct intel_engine_cs *ring)
+{
+       /* TODO */
+}
+
+/*
+ * logical_ring_init() - common initialization shared by all per-engine
+ * logical_*_ring_init() helpers, called after each one has filled in the
+ * engine's name/id/mmio_base/irq mask.  Skeleton only: returns success.
+ */
+static int logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
+{
+       /* TODO */
+       return 0;
+}
+
+/* Set up the render engine (RCS) for Execlists and run common ring init. */
+static int logical_render_ring_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring = &dev_priv->ring[RCS];
+
+       ring->name = "render ring";
+       ring->id = RCS;
+       ring->mmio_base = RENDER_RING_BASE;
+       /* Gen8 routes the user interrupt per engine via the *_IRQ_SHIFTs */
+       ring->irq_enable_mask =
+               GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT;
+
+       return logical_ring_init(dev, ring);
+}
+
+/* Set up the first video/BSD engine (VCS) for Execlists. */
+static int logical_bsd_ring_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring = &dev_priv->ring[VCS];
+
+       ring->name = "bsd ring";
+       ring->id = VCS;
+       ring->mmio_base = GEN6_BSD_RING_BASE;
+       ring->irq_enable_mask =
+               GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+
+       return logical_ring_init(dev, ring);
+}
+
+/* Set up the second video/BSD engine (VCS2) for Execlists. */
+static int logical_bsd2_ring_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+
+       /* Fixed typo: was "bds2 ring"; name must match the BSD2 engine. */
+       ring->name = "bsd2 ring";
+       ring->id = VCS2;
+       ring->mmio_base = GEN8_BSD2_RING_BASE;
+       ring->irq_enable_mask =
+               GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+
+       return logical_ring_init(dev, ring);
+}
+
+/* Set up the blitter engine (BCS) for Execlists. */
+static int logical_blt_ring_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring = &dev_priv->ring[BCS];
+
+       ring->name = "blitter ring";
+       ring->id = BCS;
+       ring->mmio_base = BLT_RING_BASE;
+       ring->irq_enable_mask =
+               GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+
+       return logical_ring_init(dev, ring);
+}
+
+/* Set up the video enhancement engine (VECS) for Execlists. */
+static int logical_vebox_ring_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct intel_engine_cs *ring = &dev_priv->ring[VECS];
+
+       ring->name = "video enhancement ring";
+       ring->id = VECS;
+       ring->mmio_base = VEBOX_RING_BASE;
+       ring->irq_enable_mask =
+               GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+
+       return logical_ring_init(dev, ring);
+}
+
+/*
+ * intel_logical_rings_init() - initialize every logical (Execlists) ring the
+ * hardware has (installed as dev_priv->gt.init_rings).
+ *
+ * The render ring is always initialized; BSD, BLT, VEBOX and BSD2 only when
+ * the corresponding HAS_*() feature check passes.  On any failure, rings
+ * already initialized are torn down in reverse order through the goto chain
+ * at the bottom, then the error code is returned.
+ */
+int intel_logical_rings_init(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       int ret;
+
+       ret = logical_render_ring_init(dev);
+       if (ret)
+               return ret;
+
+       if (HAS_BSD(dev)) {
+               ret = logical_bsd_ring_init(dev);
+               if (ret)
+                       goto cleanup_render_ring;
+       }
+
+       if (HAS_BLT(dev)) {
+               ret = logical_blt_ring_init(dev);
+               if (ret)
+                       goto cleanup_bsd_ring;
+       }
+
+       if (HAS_VEBOX(dev)) {
+               ret = logical_vebox_ring_init(dev);
+               if (ret)
+                       goto cleanup_blt_ring;
+       }
+
+       if (HAS_BSD2(dev)) {
+               ret = logical_bsd2_ring_init(dev);
+               if (ret)
+                       goto cleanup_vebox_ring;
+       }
+
+       /*
+        * Start near the top of the seqno space so wraparound is hit early.
+        * NOTE(review): presumably mirrors the legacy ring init path -- confirm.
+        */
+       ret = i915_gem_set_seqno(dev, ((u32)~0 - 0x1000));
+       if (ret)
+               goto cleanup_bsd2_ring;
+
+       return 0;
+
+cleanup_bsd2_ring:
+       intel_logical_ring_cleanup(&dev_priv->ring[VCS2]);
+cleanup_vebox_ring:
+       intel_logical_ring_cleanup(&dev_priv->ring[VECS]);
+cleanup_blt_ring:
+       intel_logical_ring_cleanup(&dev_priv->ring[BCS]);
+cleanup_bsd_ring:
+       intel_logical_ring_cleanup(&dev_priv->ring[VCS]);
+cleanup_render_ring:
+       intel_logical_ring_cleanup(&dev_priv->ring[RCS]);
+
+       return ret;
+}
+
 static int
 populate_lr_context(struct intel_context *ctx, struct drm_i915_gem_object *ctx_obj,
                    struct intel_engine_cs *ring, struct intel_ringbuffer *ringbuf)
index 3b93572431e3ed58ab839f325f8270d6c8b92cf5..bf0eff4e9f088edfae395230f89b5b79979fffef 100644 (file)
 #ifndef _INTEL_LRC_H_
 #define _INTEL_LRC_H_
 
+/* Logical Rings */
+void intel_logical_ring_stop(struct intel_engine_cs *ring);
+void intel_logical_ring_cleanup(struct intel_engine_cs *ring);
+int intel_logical_rings_init(struct drm_device *dev);
+
 /* Logical Ring Contexts */
 void intel_lr_context_free(struct intel_context *ctx);
 int intel_lr_context_deferred_create(struct intel_context *ctx,
@@ -31,5 +36,12 @@ int intel_lr_context_deferred_create(struct intel_context *ctx,
 
 /* Execlists */
 int intel_sanitize_enable_execlists(struct drm_device *dev, int enable_execlists);
+int intel_execlists_submission(struct drm_device *dev, struct drm_file *file,
+                              struct intel_engine_cs *ring,
+                              struct intel_context *ctx,
+                              struct drm_i915_gem_execbuffer2 *args,
+                              struct list_head *vmas,
+                              struct drm_i915_gem_object *batch_obj,
+                              u64 exec_start, u32 flags);
 
 #endif /* _INTEL_LRC_H_ */