Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 1255
1 file changed, 818 insertions, 437 deletions
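The central refactor in this patch pulls the ring free-space rule out into a small helper, __ring_space(head, tail, size), so the same computation can be applied both to the live ring registers and to recorded request tails. Below is a minimal standalone sketch of that wrap-around computation; the reserved-slack value and the sample head/tail numbers are illustrative, not the driver's actual constants.

/*
 * Sketch of the circular-buffer free-space rule factored out as
 * __ring_space() in the patch below.  "slack" stands in for
 * I915_RING_FREE_SPACE, the gap the driver always keeps between
 * tail and head; 32 here is just an example value.
 */
#include <stdio.h>

static int ring_space_sketch(int head, int tail, int size, int slack)
{
	int space = head - (tail + slack);

	if (space < 0)		/* tail has wrapped past head */
		space += size;
	return space;
}

int main(void)
{
	/* 16 KiB ring, tail near the end, head near the start */
	printf("%d\n", ring_space_sketch(256, 16128, 16384, 32)); /* prints 480 */
	return 0;
}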
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 460ee1026fc..279488addf3 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -33,16 +33,44 @@  #include "i915_trace.h"  #include "intel_drv.h" -static inline int ring_space(struct intel_ring_buffer *ring) +/* Early gen2 devices have a cacheline of just 32 bytes, using 64 is overkill, + * but keeps the logic simple. Indeed, the whole purpose of this macro is just + * to give some inclination as to some of the magic values used in the various + * workarounds! + */ +#define CACHELINE_BYTES 64 + +static inline int __ring_space(int head, int tail, int size)  { -	int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE); +	int space = head - (tail + I915_RING_FREE_SPACE);  	if (space < 0) -		space += ring->size; +		space += size;  	return space;  } +static inline int ring_space(struct intel_engine_cs *ring) +{ +	struct intel_ringbuffer *ringbuf = ring->buffer; +	return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); +} + +static bool intel_ring_stopped(struct intel_engine_cs *ring) +{ +	struct drm_i915_private *dev_priv = ring->dev->dev_private; +	return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); +} + +void __intel_ring_advance(struct intel_engine_cs *ring) +{ +	struct intel_ringbuffer *ringbuf = ring->buffer; +	ringbuf->tail &= ringbuf->size - 1; +	if (intel_ring_stopped(ring)) +		return; +	ring->write_tail(ring, ringbuf->tail); +} +  static int -gen2_render_ring_flush(struct intel_ring_buffer *ring, +gen2_render_ring_flush(struct intel_engine_cs *ring,  		       u32	invalidate_domains,  		       u32	flush_domains)  { @@ -68,7 +96,7 @@ gen2_render_ring_flush(struct intel_ring_buffer *ring,  }  static int -gen4_render_ring_flush(struct intel_ring_buffer *ring, +gen4_render_ring_flush(struct intel_engine_cs *ring,  		       u32	invalidate_domains,  		       u32	flush_domains)  { @@ -163,9 +191,9 @@ gen4_render_ring_flush(struct intel_ring_buffer *ring,   * really our business.  That leaves only stall at scoreboard.   
*/  static int -intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) +intel_emit_post_sync_nonzero_flush(struct intel_engine_cs *ring)  { -	u32 scratch_addr = ring->scratch.gtt_offset + 128; +	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;  	int ret; @@ -198,11 +226,11 @@ intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)  }  static int -gen6_render_ring_flush(struct intel_ring_buffer *ring, +gen6_render_ring_flush(struct intel_engine_cs *ring,                           u32 invalidate_domains, u32 flush_domains)  {  	u32 flags = 0; -	u32 scratch_addr = ring->scratch.gtt_offset + 128; +	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;  	int ret;  	/* Force SNB workarounds for PIPE_CONTROL flushes */ @@ -250,7 +278,7 @@ gen6_render_ring_flush(struct intel_ring_buffer *ring,  }  static int -gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring) +gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring)  {  	int ret; @@ -268,21 +296,23 @@ gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)  	return 0;  } -static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value) +static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value)  {  	int ret;  	if (!ring->fbc_dirty)  		return 0; -	ret = intel_ring_begin(ring, 4); +	ret = intel_ring_begin(ring, 6);  	if (ret)  		return ret; -	intel_ring_emit(ring, MI_NOOP);  	/* WaFbcNukeOn3DBlt:ivb/hsw */  	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));  	intel_ring_emit(ring, MSG_FBC_REND_STATE);  	intel_ring_emit(ring, value); +	intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT); +	intel_ring_emit(ring, MSG_FBC_REND_STATE); +	intel_ring_emit(ring, ring->scratch.gtt_offset + 256);  	intel_ring_advance(ring);  	ring->fbc_dirty = false; @@ -290,11 +320,11 @@ static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)  }  static int -gen7_render_ring_flush(struct intel_ring_buffer *ring, +gen7_render_ring_flush(struct intel_engine_cs *ring,  		       u32 invalidate_domains, u32 flush_domains)  {  	u32 flags = 0; -	u32 scratch_addr = ring->scratch.gtt_offset + 128; +	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;  	int ret;  	/* @@ -344,29 +374,77 @@ gen7_render_ring_flush(struct intel_ring_buffer *ring,  	intel_ring_emit(ring, 0);  	intel_ring_advance(ring); -	if (flush_domains) +	if (!invalidate_domains && flush_domains)  		return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);  	return 0;  } -static void ring_write_tail(struct intel_ring_buffer *ring, +static int +gen8_render_ring_flush(struct intel_engine_cs *ring, +		       u32 invalidate_domains, u32 flush_domains) +{ +	u32 flags = 0; +	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES; +	int ret; + +	flags |= PIPE_CONTROL_CS_STALL; + +	if (flush_domains) { +		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; +		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; +	} +	if (invalidate_domains) { +		flags |= PIPE_CONTROL_TLB_INVALIDATE; +		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; +		flags |= PIPE_CONTROL_QW_WRITE; +		flags |= PIPE_CONTROL_GLOBAL_GTT_IVB; +	} + +	ret = intel_ring_begin(ring, 6); +	if (ret) +		return ret; + +	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6)); +	intel_ring_emit(ring, flags); +	intel_ring_emit(ring, scratch_addr); +	
intel_ring_emit(ring, 0); +	intel_ring_emit(ring, 0); +	intel_ring_emit(ring, 0); +	intel_ring_advance(ring); + +	return 0; + +} + +static void ring_write_tail(struct intel_engine_cs *ring,  			    u32 value)  { -	drm_i915_private_t *dev_priv = ring->dev->dev_private; +	struct drm_i915_private *dev_priv = ring->dev->dev_private;  	I915_WRITE_TAIL(ring, value);  } -u32 intel_ring_get_active_head(struct intel_ring_buffer *ring) +u64 intel_ring_get_active_head(struct intel_engine_cs *ring)  { -	drm_i915_private_t *dev_priv = ring->dev->dev_private; -	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ? -			RING_ACTHD(ring->mmio_base) : ACTHD; +	struct drm_i915_private *dev_priv = ring->dev->dev_private; +	u64 acthd; + +	if (INTEL_INFO(ring->dev)->gen >= 8) +		acthd = I915_READ64_2x32(RING_ACTHD(ring->mmio_base), +					 RING_ACTHD_UDW(ring->mmio_base)); +	else if (INTEL_INFO(ring->dev)->gen >= 4) +		acthd = I915_READ(RING_ACTHD(ring->mmio_base)); +	else +		acthd = I915_READ(ACTHD); -	return I915_READ(acthd_reg); +	return acthd;  } -static void ring_setup_phys_status_page(struct intel_ring_buffer *ring) +static void ring_setup_phys_status_page(struct intel_engine_cs *ring)  {  	struct drm_i915_private *dev_priv = ring->dev->dev_private;  	u32 addr; @@ -377,31 +455,42 @@ static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)  	I915_WRITE(HWS_PGA, addr);  } -static int init_ring_common(struct intel_ring_buffer *ring) +static bool stop_ring(struct intel_engine_cs *ring)  { -	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; -	struct drm_i915_gem_object *obj = ring->obj; -	int ret = 0; -	u32 head; - -	if (HAS_FORCE_WAKE(dev)) -		gen6_gt_force_wake_get(dev_priv); +	struct drm_i915_private *dev_priv = to_i915(ring->dev); -	if (I915_NEED_GFX_HWS(dev)) -		intel_ring_setup_status_page(ring); -	else -		ring_setup_phys_status_page(ring); +	if (!IS_GEN2(ring->dev)) { +		I915_WRITE_MODE(ring, _MASKED_BIT_ENABLE(STOP_RING)); +		if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000)) { +			DRM_ERROR("%s :timed out trying to stop ring\n", ring->name); +			return false; +		} +	} -	/* Stop the ring if it's running. 
*/  	I915_WRITE_CTL(ring, 0);  	I915_WRITE_HEAD(ring, 0);  	ring->write_tail(ring, 0); -	head = I915_READ_HEAD(ring) & HEAD_ADDR; +	if (!IS_GEN2(ring->dev)) { +		(void)I915_READ_CTL(ring); +		I915_WRITE_MODE(ring, _MASKED_BIT_DISABLE(STOP_RING)); +	} + +	return (I915_READ_HEAD(ring) & HEAD_ADDR) == 0; +} + +static int init_ring_common(struct intel_engine_cs *ring) +{ +	struct drm_device *dev = ring->dev; +	struct drm_i915_private *dev_priv = dev->dev_private; +	struct intel_ringbuffer *ringbuf = ring->buffer; +	struct drm_i915_gem_object *obj = ringbuf->obj; +	int ret = 0; + +	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); -	/* G45 ring initialization fails to reset head to zero */ -	if (head != 0) { +	if (!stop_ring(ring)) { +		/* G45 ring initialization often fails to reset head to zero */  		DRM_DEBUG_KMS("%s head not reset to zero "  			      "ctl %08x head %08x tail %08x start %08x\n",  			      ring->name, @@ -410,9 +499,7 @@ static int init_ring_common(struct intel_ring_buffer *ring)  			      I915_READ_TAIL(ring),  			      I915_READ_START(ring)); -		I915_WRITE_HEAD(ring, 0); - -		if (I915_READ_HEAD(ring) & HEAD_ADDR) { +		if (!stop_ring(ring)) {  			DRM_ERROR("failed to set %s head to zero "  				  "ctl %08x head %08x tail %08x start %08x\n",  				  ring->name, @@ -420,16 +507,23 @@ static int init_ring_common(struct intel_ring_buffer *ring)  				  I915_READ_HEAD(ring),  				  I915_READ_TAIL(ring),  				  I915_READ_START(ring)); +			ret = -EIO; +			goto out;  		}  	} +	if (I915_NEED_GFX_HWS(dev)) +		intel_ring_setup_status_page(ring); +	else +		ring_setup_phys_status_page(ring); +  	/* Initialize the ring. This must happen _after_ we've cleared the ring  	 * registers with the above sequence (the readback of the HEAD registers  	 * also enforces ordering), otherwise the hw might lose the new ring  	 * register values. */  	I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));  	I915_WRITE_CTL(ring, -			((ring->size - PAGE_SIZE) & RING_NR_PAGES) +			((ringbuf->size - PAGE_SIZE) & RING_NR_PAGES)  			| RING_VALID);  	/* If the head is still not zero, the ring is dead */ @@ -437,12 +531,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)  		     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&  		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {  		DRM_ERROR("%s initialization failed " -				"ctl %08x head %08x tail %08x start %08x\n", -				ring->name, -				I915_READ_CTL(ring), -				I915_READ_HEAD(ring), -				I915_READ_TAIL(ring), -				I915_READ_START(ring)); +			  "ctl %08x (valid? 
%d) head %08x tail %08x start %08x [expected %08lx]\n", +			  ring->name, +			  I915_READ_CTL(ring), I915_READ_CTL(ring) & RING_VALID, +			  I915_READ_HEAD(ring), I915_READ_TAIL(ring), +			  I915_READ_START(ring), (unsigned long)i915_gem_obj_ggtt_offset(obj));  		ret = -EIO;  		goto out;  	} @@ -450,23 +543,22 @@ static int init_ring_common(struct intel_ring_buffer *ring)  	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))  		i915_kernel_lost_context(ring->dev);  	else { -		ring->head = I915_READ_HEAD(ring); -		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; -		ring->space = ring_space(ring); -		ring->last_retired_head = -1; +		ringbuf->head = I915_READ_HEAD(ring); +		ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; +		ringbuf->space = ring_space(ring); +		ringbuf->last_retired_head = -1;  	}  	memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));  out: -	if (HAS_FORCE_WAKE(dev)) -		gen6_gt_force_wake_put(dev_priv); +	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);  	return ret;  }  static int -init_pipe_control(struct intel_ring_buffer *ring) +init_pipe_control(struct intel_engine_cs *ring)  {  	int ret; @@ -480,9 +572,11 @@ init_pipe_control(struct intel_ring_buffer *ring)  		goto err;  	} -	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); +	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC); +	if (ret) +		goto err_unref; -	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false); +	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);  	if (ret)  		goto err_unref; @@ -498,39 +592,42 @@ init_pipe_control(struct intel_ring_buffer *ring)  	return 0;  err_unpin: -	i915_gem_object_unpin(ring->scratch.obj); +	i915_gem_object_ggtt_unpin(ring->scratch.obj);  err_unref:  	drm_gem_object_unreference(&ring->scratch.obj->base);  err:  	return ret;  } -static int init_render_ring(struct intel_ring_buffer *ring) +static int init_render_ring(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev;  	struct drm_i915_private *dev_priv = dev->dev_private;  	int ret = init_ring_common(ring); -	if (INTEL_INFO(dev)->gen > 3) +	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */ +	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)  		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));  	/* We need to disable the AsyncFlip performance optimisations in order  	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be  	 * programmed to '1' on all products.  	 * -	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv +	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv  	 */  	if (INTEL_INFO(dev)->gen >= 6)  		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));  	/* Required for the hardware to program scanline values for waiting */ +	/* WaEnableFlushTlbInvalidationMode:snb */  	if (INTEL_INFO(dev)->gen == 6)  		I915_WRITE(GFX_MODE, -			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS)); +			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT)); +	/* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */  	if (IS_GEN7(dev))  		I915_WRITE(GFX_MODE_GEN7, -			   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) | +			   _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |  			   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));  	if (INTEL_INFO(dev)->gen >= 5) { @@ -547,25 +644,18 @@ static int init_render_ring(struct intel_ring_buffer *ring)  		 */  		I915_WRITE(CACHE_MODE_0,  			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB)); - -		/* This is not explicitly set for GEN6, so read the register. 
-		 * see intel_ring_mi_set_context() for why we care. -		 * TODO: consider explicitly setting the bit for GEN5 -		 */ -		ring->itlb_before_ctx_switch = -			!!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);  	}  	if (INTEL_INFO(dev)->gen >= 6)  		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); -	if (HAS_L3_GPU_CACHE(dev)) -		I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); +	if (HAS_L3_DPF(dev)) +		I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));  	return ret;  } -static void render_ring_cleanup(struct intel_ring_buffer *ring) +static void render_ring_cleanup(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; @@ -574,27 +664,53 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)  	if (INTEL_INFO(dev)->gen >= 5) {  		kunmap(sg_page(ring->scratch.obj->pages->sgl)); -		i915_gem_object_unpin(ring->scratch.obj); +		i915_gem_object_ggtt_unpin(ring->scratch.obj);  	}  	drm_gem_object_unreference(&ring->scratch.obj->base);  	ring->scratch.obj = NULL;  } -static void -update_mboxes(struct intel_ring_buffer *ring, -	      u32 mmio_offset) +static int gen6_signal(struct intel_engine_cs *signaller, +		       unsigned int num_dwords)  { -/* NB: In order to be able to do semaphore MBOX updates for varying number - * of rings, it's easiest if we round up each individual update to a - * multiple of 2 (since ring updates must always be a multiple of 2) - * even though the actual update only requires 3 dwords. - */ +	struct drm_device *dev = signaller->dev; +	struct drm_i915_private *dev_priv = dev->dev_private; +	struct intel_engine_cs *useless; +	int i, ret; + +	/* NB: In order to be able to do semaphore MBOX updates for varying +	 * number of rings, it's easiest if we round up each individual update +	 * to a multiple of 2 (since ring updates must always be a multiple of +	 * 2) even though the actual update only requires 3 dwords. +	 */  #define MBOX_UPDATE_DWORDS 4 -	intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); -	intel_ring_emit(ring, mmio_offset); -	intel_ring_emit(ring, ring->outstanding_lazy_request); -	intel_ring_emit(ring, MI_NOOP); +	if (i915_semaphore_is_enabled(dev)) +		num_dwords += ((I915_NUM_RINGS-1) * MBOX_UPDATE_DWORDS); +	else +		return intel_ring_begin(signaller, num_dwords); + +	ret = intel_ring_begin(signaller, num_dwords); +	if (ret) +		return ret; +#undef MBOX_UPDATE_DWORDS + +	for_each_ring(useless, dev_priv, i) { +		u32 mbox_reg = signaller->semaphore.mbox.signal[i]; +		if (mbox_reg != GEN6_NOSYNC) { +			intel_ring_emit(signaller, MI_LOAD_REGISTER_IMM(1)); +			intel_ring_emit(signaller, mbox_reg); +			intel_ring_emit(signaller, signaller->outstanding_lazy_seqno); +			intel_ring_emit(signaller, MI_NOOP); +		} else { +			intel_ring_emit(signaller, MI_NOOP); +			intel_ring_emit(signaller, MI_NOOP); +			intel_ring_emit(signaller, MI_NOOP); +			intel_ring_emit(signaller, MI_NOOP); +		} +	} + +	return 0;  }  /** @@ -607,31 +723,19 @@ update_mboxes(struct intel_ring_buffer *ring,   * This acts like a signal in the canonical semaphore.   
*/  static int -gen6_add_request(struct intel_ring_buffer *ring) +gen6_add_request(struct intel_engine_cs *ring)  { -	struct drm_device *dev = ring->dev; -	struct drm_i915_private *dev_priv = dev->dev_private; -	struct intel_ring_buffer *useless; -	int i, ret; +	int ret; -	ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) * -				      MBOX_UPDATE_DWORDS) + -				      4); +	ret = ring->semaphore.signal(ring, 4);  	if (ret)  		return ret; -#undef MBOX_UPDATE_DWORDS - -	for_each_ring(useless, dev_priv, i) { -		u32 mbox_reg = ring->signal_mbox[i]; -		if (mbox_reg != GEN6_NOSYNC) -			update_mboxes(ring, mbox_reg); -	}  	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);  	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); -	intel_ring_emit(ring, ring->outstanding_lazy_request); +	intel_ring_emit(ring, ring->outstanding_lazy_seqno);  	intel_ring_emit(ring, MI_USER_INTERRUPT); -	intel_ring_advance(ring); +	__intel_ring_advance(ring);  	return 0;  } @@ -651,14 +755,15 @@ static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,   * @seqno - seqno which the waiter will block on   */  static int -gen6_ring_sync(struct intel_ring_buffer *waiter, -	       struct intel_ring_buffer *signaller, +gen6_ring_sync(struct intel_engine_cs *waiter, +	       struct intel_engine_cs *signaller,  	       u32 seqno)  { -	int ret;  	u32 dw1 = MI_SEMAPHORE_MBOX |  		  MI_SEMAPHORE_COMPARE |  		  MI_SEMAPHORE_REGISTER; +	u32 wait_mbox = signaller->semaphore.mbox.wait[waiter->id]; +	int ret;  	/* Throughout all of the GEM code, seqno passed implies our current  	 * seqno is >= the last seqno executed. However for hardware the @@ -666,8 +771,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,  	 */  	seqno -= 1; -	WARN_ON(signaller->semaphore_register[waiter->id] == -		MI_SEMAPHORE_SYNC_INVALID); +	WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);  	ret = intel_ring_begin(waiter, 4);  	if (ret) @@ -675,9 +779,7 @@ gen6_ring_sync(struct intel_ring_buffer *waiter,  	/* If seqno wrap happened, omit the wait with no-ops */  	if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) { -		intel_ring_emit(waiter, -				dw1 | -				signaller->semaphore_register[waiter->id]); +		intel_ring_emit(waiter, dw1 | wait_mbox);  		intel_ring_emit(waiter, seqno);  		intel_ring_emit(waiter, 0);  		intel_ring_emit(waiter, MI_NOOP); @@ -702,9 +804,9 @@ do {									\  } while (0)  static int -pc_render_add_request(struct intel_ring_buffer *ring) +pc_render_add_request(struct intel_engine_cs *ring)  { -	u32 scratch_addr = ring->scratch.gtt_offset + 128; +	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;  	int ret;  	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently @@ -723,18 +825,18 @@ pc_render_add_request(struct intel_ring_buffer *ring)  			PIPE_CONTROL_WRITE_FLUSH |  			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);  	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); -	intel_ring_emit(ring, ring->outstanding_lazy_request); +	intel_ring_emit(ring, ring->outstanding_lazy_seqno);  	intel_ring_emit(ring, 0);  	PIPE_CONTROL_FLUSH(ring, scratch_addr); -	scratch_addr += 128; /* write to separate cachelines */ +	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */  	PIPE_CONTROL_FLUSH(ring, scratch_addr); -	scratch_addr += 128; +	scratch_addr += 2 * CACHELINE_BYTES;  	PIPE_CONTROL_FLUSH(ring, scratch_addr); -	scratch_addr += 128; +	scratch_addr += 2 * CACHELINE_BYTES;  	PIPE_CONTROL_FLUSH(ring, scratch_addr); -	scratch_addr += 128; +	scratch_addr += 
2 * CACHELINE_BYTES;  	PIPE_CONTROL_FLUSH(ring, scratch_addr); -	scratch_addr += 128; +	scratch_addr += 2 * CACHELINE_BYTES;  	PIPE_CONTROL_FLUSH(ring, scratch_addr);  	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | @@ -742,53 +844,56 @@ pc_render_add_request(struct intel_ring_buffer *ring)  			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |  			PIPE_CONTROL_NOTIFY);  	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT); -	intel_ring_emit(ring, ring->outstanding_lazy_request); +	intel_ring_emit(ring, ring->outstanding_lazy_seqno);  	intel_ring_emit(ring, 0); -	intel_ring_advance(ring); +	__intel_ring_advance(ring);  	return 0;  }  static u32 -gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +gen6_ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)  {  	/* Workaround to force correct ordering between irq and seqno writes on  	 * ivb (and maybe also on snb) by reading from a CS register (like  	 * ACTHD) before reading the status page. */ -	if (!lazy_coherency) -		intel_ring_get_active_head(ring); +	if (!lazy_coherency) { +		struct drm_i915_private *dev_priv = ring->dev->dev_private; +		POSTING_READ(RING_ACTHD(ring->mmio_base)); +	} +  	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);  }  static u32 -ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +ring_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)  {  	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);  }  static void -ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno) +ring_set_seqno(struct intel_engine_cs *ring, u32 seqno)  {  	intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);  }  static u32 -pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency) +pc_render_get_seqno(struct intel_engine_cs *ring, bool lazy_coherency)  {  	return ring->scratch.cpu_page[0];  }  static void -pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno) +pc_render_set_seqno(struct intel_engine_cs *ring, u32 seqno)  {  	ring->scratch.cpu_page[0] = seqno;  }  static bool -gen5_ring_get_irq(struct intel_ring_buffer *ring) +gen5_ring_get_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; +	struct drm_i915_private *dev_priv = dev->dev_private;  	unsigned long flags;  	if (!dev->irq_enabled) @@ -803,10 +908,10 @@ gen5_ring_get_irq(struct intel_ring_buffer *ring)  }  static void -gen5_ring_put_irq(struct intel_ring_buffer *ring) +gen5_ring_put_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; +	struct drm_i915_private *dev_priv = dev->dev_private;  	unsigned long flags;  	spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -816,10 +921,10 @@ gen5_ring_put_irq(struct intel_ring_buffer *ring)  }  static bool -i9xx_ring_get_irq(struct intel_ring_buffer *ring) +i9xx_ring_get_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; +	struct drm_i915_private *dev_priv = dev->dev_private;  	unsigned long flags;  	if (!dev->irq_enabled) @@ -837,10 +942,10 @@ i9xx_ring_get_irq(struct intel_ring_buffer *ring)  }  static void -i9xx_ring_put_irq(struct intel_ring_buffer *ring) +i9xx_ring_put_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; +	struct drm_i915_private *dev_priv = dev->dev_private;  	unsigned long flags;  	
spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -853,10 +958,10 @@ i9xx_ring_put_irq(struct intel_ring_buffer *ring)  }  static bool -i8xx_ring_get_irq(struct intel_ring_buffer *ring) +i8xx_ring_get_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; +	struct drm_i915_private *dev_priv = dev->dev_private;  	unsigned long flags;  	if (!dev->irq_enabled) @@ -874,10 +979,10 @@ i8xx_ring_get_irq(struct intel_ring_buffer *ring)  }  static void -i8xx_ring_put_irq(struct intel_ring_buffer *ring) +i8xx_ring_put_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; +	struct drm_i915_private *dev_priv = dev->dev_private;  	unsigned long flags;  	spin_lock_irqsave(&dev_priv->irq_lock, flags); @@ -889,10 +994,10 @@ i8xx_ring_put_irq(struct intel_ring_buffer *ring)  	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);  } -void intel_ring_setup_status_page(struct intel_ring_buffer *ring) +void intel_ring_setup_status_page(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = ring->dev->dev_private; +	struct drm_i915_private *dev_priv = ring->dev->dev_private;  	u32 mmio = 0;  	/* The ring status page addresses are no longer next to the rest of @@ -906,6 +1011,11 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)  		case BCS:  			mmio = BLT_HWS_PGA_GEN7;  			break; +		/* +		 * VCS2 actually doesn't exist on Gen7. Only shut up +		 * gcc switch check warning +		 */ +		case VCS2:  		case VCS:  			mmio = BSD_HWS_PGA_GEN7;  			break; @@ -916,15 +1026,26 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)  	} else if (IS_GEN6(ring->dev)) {  		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);  	} else { +		/* XXX: gen8 returns to sanity */  		mmio = RING_HWS_PGA(ring->mmio_base);  	}  	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);  	POSTING_READ(mmio); -	/* Flush the TLB for this page */ -	if (INTEL_INFO(dev)->gen >= 6) { +	/* +	 * Flush the TLB for this page +	 * +	 * FIXME: These two bits have disappeared on gen8, so a question +	 * arises: do we still need this and if so how should we go about +	 * invalidating the TLB? 
+	 */ +	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {  		u32 reg = RING_INSTPM(ring->mmio_base); + +		/* ring should be idle before issuing a sync flush*/ +		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0); +  		I915_WRITE(reg,  			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |  					      INSTPM_SYNC_FLUSH)); @@ -936,7 +1057,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)  }  static int -bsd_ring_flush(struct intel_ring_buffer *ring, +bsd_ring_flush(struct intel_engine_cs *ring,  	       u32     invalidate_domains,  	       u32     flush_domains)  { @@ -953,7 +1074,7 @@ bsd_ring_flush(struct intel_ring_buffer *ring,  }  static int -i9xx_add_request(struct intel_ring_buffer *ring) +i9xx_add_request(struct intel_engine_cs *ring)  {  	int ret; @@ -963,34 +1084,29 @@ i9xx_add_request(struct intel_ring_buffer *ring)  	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);  	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); -	intel_ring_emit(ring, ring->outstanding_lazy_request); +	intel_ring_emit(ring, ring->outstanding_lazy_seqno);  	intel_ring_emit(ring, MI_USER_INTERRUPT); -	intel_ring_advance(ring); +	__intel_ring_advance(ring);  	return 0;  }  static bool -gen6_ring_get_irq(struct intel_ring_buffer *ring) +gen6_ring_get_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; +	struct drm_i915_private *dev_priv = dev->dev_private;  	unsigned long flags;  	if (!dev->irq_enabled)  	       return false; -	/* It looks like we need to prevent the gt from suspending while waiting -	 * for an notifiy irq, otherwise irqs seem to get lost on at least the -	 * blt/bsd rings on ivb. */ -	gen6_gt_force_wake_get(dev_priv); -  	spin_lock_irqsave(&dev_priv->irq_lock, flags);  	if (ring->irq_refcount++ == 0) { -		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) +		if (HAS_L3_DPF(dev) && ring->id == RCS)  			I915_WRITE_IMR(ring,  				       ~(ring->irq_enable_mask | -					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); +					 GT_PARITY_ERROR(dev)));  		else  			I915_WRITE_IMR(ring, ~ring->irq_enable_mask);  		ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask); @@ -1001,28 +1117,25 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring)  }  static void -gen6_ring_put_irq(struct intel_ring_buffer *ring) +gen6_ring_put_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev; -	drm_i915_private_t *dev_priv = dev->dev_private; +	struct drm_i915_private *dev_priv = dev->dev_private;  	unsigned long flags;  	spin_lock_irqsave(&dev_priv->irq_lock, flags);  	if (--ring->irq_refcount == 0) { -		if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS) -			I915_WRITE_IMR(ring, -				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); +		if (HAS_L3_DPF(dev) && ring->id == RCS) +			I915_WRITE_IMR(ring, ~GT_PARITY_ERROR(dev));  		else  			I915_WRITE_IMR(ring, ~0);  		ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);  	}  	spin_unlock_irqrestore(&dev_priv->irq_lock, flags); - -	gen6_gt_force_wake_put(dev_priv);  }  static bool -hsw_vebox_get_irq(struct intel_ring_buffer *ring) +hsw_vebox_get_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev;  	struct drm_i915_private *dev_priv = dev->dev_private; @@ -1042,7 +1155,7 @@ hsw_vebox_get_irq(struct intel_ring_buffer *ring)  }  static void -hsw_vebox_put_irq(struct intel_ring_buffer *ring) +hsw_vebox_put_irq(struct intel_engine_cs *ring)  {  	struct drm_device *dev = ring->dev;  	struct drm_i915_private *dev_priv = dev->dev_private; @@ -1059,9 
+1172,55 @@ hsw_vebox_put_irq(struct intel_ring_buffer *ring)  	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);  } +static bool +gen8_ring_get_irq(struct intel_engine_cs *ring) +{ +	struct drm_device *dev = ring->dev; +	struct drm_i915_private *dev_priv = dev->dev_private; +	unsigned long flags; + +	if (!dev->irq_enabled) +		return false; + +	spin_lock_irqsave(&dev_priv->irq_lock, flags); +	if (ring->irq_refcount++ == 0) { +		if (HAS_L3_DPF(dev) && ring->id == RCS) { +			I915_WRITE_IMR(ring, +				       ~(ring->irq_enable_mask | +					 GT_RENDER_L3_PARITY_ERROR_INTERRUPT)); +		} else { +			I915_WRITE_IMR(ring, ~ring->irq_enable_mask); +		} +		POSTING_READ(RING_IMR(ring->mmio_base)); +	} +	spin_unlock_irqrestore(&dev_priv->irq_lock, flags); + +	return true; +} + +static void +gen8_ring_put_irq(struct intel_engine_cs *ring) +{ +	struct drm_device *dev = ring->dev; +	struct drm_i915_private *dev_priv = dev->dev_private; +	unsigned long flags; + +	spin_lock_irqsave(&dev_priv->irq_lock, flags); +	if (--ring->irq_refcount == 0) { +		if (HAS_L3_DPF(dev) && ring->id == RCS) { +			I915_WRITE_IMR(ring, +				       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT); +		} else { +			I915_WRITE_IMR(ring, ~0); +		} +		POSTING_READ(RING_IMR(ring->mmio_base)); +	} +	spin_unlock_irqrestore(&dev_priv->irq_lock, flags); +} +  static int -i965_dispatch_execbuffer(struct intel_ring_buffer *ring, -			 u32 offset, u32 length, +i965_dispatch_execbuffer(struct intel_engine_cs *ring, +			 u64 offset, u32 length,  			 unsigned flags)  {  	int ret; @@ -1083,8 +1242,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,  /* Just userspace ABI convention to limit the wa batch bo to a resonable size */  #define I830_BATCH_LIMIT (256*1024)  static int -i830_dispatch_execbuffer(struct intel_ring_buffer *ring, -				u32 offset, u32 len, +i830_dispatch_execbuffer(struct intel_engine_cs *ring, +				u64 offset, u32 len,  				unsigned flags)  {  	int ret; @@ -1134,8 +1293,8 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,  }  static int -i915_dispatch_execbuffer(struct intel_ring_buffer *ring, -			 u32 offset, u32 len, +i915_dispatch_execbuffer(struct intel_engine_cs *ring, +			 u64 offset, u32 len,  			 unsigned flags)  {  	int ret; @@ -1151,7 +1310,7 @@ i915_dispatch_execbuffer(struct intel_ring_buffer *ring,  	return 0;  } -static void cleanup_status_page(struct intel_ring_buffer *ring) +static void cleanup_status_page(struct intel_engine_cs *ring)  {  	struct drm_i915_gem_object *obj; @@ -1160,54 +1319,49 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)  		return;  	kunmap(sg_page(obj->pages->sgl)); -	i915_gem_object_unpin(obj); +	i915_gem_object_ggtt_unpin(obj);  	drm_gem_object_unreference(&obj->base);  	ring->status_page.obj = NULL;  } -static int init_status_page(struct intel_ring_buffer *ring) +static int init_status_page(struct intel_engine_cs *ring)  { -	struct drm_device *dev = ring->dev;  	struct drm_i915_gem_object *obj; -	int ret; -	obj = i915_gem_alloc_object(dev, 4096); -	if (obj == NULL) { -		DRM_ERROR("Failed to allocate status page\n"); -		ret = -ENOMEM; -		goto err; -	} +	if ((obj = ring->status_page.obj) == NULL) { +		int ret; -	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); +		obj = i915_gem_alloc_object(ring->dev, 4096); +		if (obj == NULL) { +			DRM_ERROR("Failed to allocate status page\n"); +			return -ENOMEM; +		} -	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false); -	if (ret != 0) { -		goto err_unref; +		ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC); +	
	if (ret) +			goto err_unref; + +		ret = i915_gem_obj_ggtt_pin(obj, 4096, 0); +		if (ret) { +err_unref: +			drm_gem_object_unreference(&obj->base); +			return ret; +		} + +		ring->status_page.obj = obj;  	}  	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);  	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl)); -	if (ring->status_page.page_addr == NULL) { -		ret = -ENOMEM; -		goto err_unpin; -	} -	ring->status_page.obj = obj;  	memset(ring->status_page.page_addr, 0, PAGE_SIZE);  	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",  			ring->name, ring->status_page.gfx_addr);  	return 0; - -err_unpin: -	i915_gem_object_unpin(obj); -err_unref: -	drm_gem_object_unreference(&obj->base); -err: -	return ret;  } -static int init_phys_status_page(struct intel_ring_buffer *ring) +static int init_phys_status_page(struct intel_engine_cs *ring)  {  	struct drm_i915_private *dev_priv = ring->dev->dev_private; @@ -1224,46 +1378,26 @@ static int init_phys_status_page(struct intel_ring_buffer *ring)  	return 0;  } -static int intel_init_ring_buffer(struct drm_device *dev, -				  struct intel_ring_buffer *ring) +static int allocate_ring_buffer(struct intel_engine_cs *ring)  { +	struct drm_device *dev = ring->dev; +	struct drm_i915_private *dev_priv = to_i915(dev); +	struct intel_ringbuffer *ringbuf = ring->buffer;  	struct drm_i915_gem_object *obj; -	struct drm_i915_private *dev_priv = dev->dev_private;  	int ret; -	ring->dev = dev; -	INIT_LIST_HEAD(&ring->active_list); -	INIT_LIST_HEAD(&ring->request_list); -	ring->size = 32 * PAGE_SIZE; -	memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno)); - -	init_waitqueue_head(&ring->irq_queue); - -	if (I915_NEED_GFX_HWS(dev)) { -		ret = init_status_page(ring); -		if (ret) -			return ret; -	} else { -		BUG_ON(ring->id != RCS); -		ret = init_phys_status_page(ring); -		if (ret) -			return ret; -	} +	if (intel_ring_initialized(ring)) +		return 0;  	obj = NULL;  	if (!HAS_LLC(dev)) -		obj = i915_gem_object_create_stolen(dev, ring->size); +		obj = i915_gem_object_create_stolen(dev, ringbuf->size);  	if (obj == NULL) -		obj = i915_gem_alloc_object(dev, ring->size); -	if (obj == NULL) { -		DRM_ERROR("Failed to allocate ringbuffer\n"); -		ret = -ENOMEM; -		goto err_hws; -	} - -	ring->obj = obj; +		obj = i915_gem_alloc_object(dev, ringbuf->size); +	if (obj == NULL) +		return -ENOMEM; -	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false); +	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);  	if (ret)  		goto err_unref; @@ -1271,142 +1405,159 @@ static int intel_init_ring_buffer(struct drm_device *dev,  	if (ret)  		goto err_unpin; -	ring->virtual_start = +	ringbuf->virtual_start =  		ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj), -			   ring->size); -	if (ring->virtual_start == NULL) { -		DRM_ERROR("Failed to map ringbuffer.\n"); +				ringbuf->size); +	if (ringbuf->virtual_start == NULL) {  		ret = -EINVAL;  		goto err_unpin;  	} -	ret = ring->init(ring); -	if (ret) -		goto err_unmap; +	ringbuf->obj = obj; +	return 0; + +err_unpin: +	i915_gem_object_ggtt_unpin(obj); +err_unref: +	drm_gem_object_unreference(&obj->base); +	return ret; +} + +static int intel_init_ring_buffer(struct drm_device *dev, +				  struct intel_engine_cs *ring) +{ +	struct intel_ringbuffer *ringbuf = ring->buffer; +	int ret; + +	if (ringbuf == NULL) { +		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); +		if (!ringbuf) +			return -ENOMEM; +		ring->buffer = ringbuf; +	} + +	ring->dev = dev; +	INIT_LIST_HEAD(&ring->active_list); +	
INIT_LIST_HEAD(&ring->request_list); +	ringbuf->size = 32 * PAGE_SIZE; +	memset(ring->semaphore.sync_seqno, 0, sizeof(ring->semaphore.sync_seqno)); + +	init_waitqueue_head(&ring->irq_queue); + +	if (I915_NEED_GFX_HWS(dev)) { +		ret = init_status_page(ring); +		if (ret) +			goto error; +	} else { +		BUG_ON(ring->id != RCS); +		ret = init_phys_status_page(ring); +		if (ret) +			goto error; +	} + +	ret = allocate_ring_buffer(ring); +	if (ret) { +		DRM_ERROR("Failed to allocate ringbuffer %s: %d\n", ring->name, ret); +		goto error; +	}  	/* Workaround an erratum on the i830 which causes a hang if  	 * the TAIL pointer points to within the last 2 cachelines  	 * of the buffer.  	 */ -	ring->effective_size = ring->size; -	if (IS_I830(ring->dev) || IS_845G(ring->dev)) -		ring->effective_size -= 128; +	ringbuf->effective_size = ringbuf->size; +	if (IS_I830(dev) || IS_845G(dev)) +		ringbuf->effective_size -= 2 * CACHELINE_BYTES; + +	ret = i915_cmd_parser_init_ring(ring); +	if (ret) +		goto error; + +	ret = ring->init(ring); +	if (ret) +		goto error;  	return 0; -err_unmap: -	iounmap(ring->virtual_start); -err_unpin: -	i915_gem_object_unpin(obj); -err_unref: -	drm_gem_object_unreference(&obj->base); -	ring->obj = NULL; -err_hws: -	cleanup_status_page(ring); +error: +	kfree(ringbuf); +	ring->buffer = NULL;  	return ret;  } -void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring) +void intel_cleanup_ring_buffer(struct intel_engine_cs *ring)  { -	struct drm_i915_private *dev_priv; -	int ret; +	struct drm_i915_private *dev_priv = to_i915(ring->dev); +	struct intel_ringbuffer *ringbuf = ring->buffer; -	if (ring->obj == NULL) +	if (!intel_ring_initialized(ring))  		return; -	/* Disable the ring buffer. The ring must be idle at this point */ -	dev_priv = ring->dev->dev_private; -	ret = intel_ring_idle(ring); -	if (ret) -		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n", -			  ring->name, ret); - -	I915_WRITE_CTL(ring, 0); +	intel_stop_ring_buffer(ring); +	WARN_ON(!IS_GEN2(ring->dev) && (I915_READ_MODE(ring) & MODE_IDLE) == 0); -	iounmap(ring->virtual_start); +	iounmap(ringbuf->virtual_start); -	i915_gem_object_unpin(ring->obj); -	drm_gem_object_unreference(&ring->obj->base); -	ring->obj = NULL; +	i915_gem_object_ggtt_unpin(ringbuf->obj); +	drm_gem_object_unreference(&ringbuf->obj->base); +	ringbuf->obj = NULL; +	ring->preallocated_lazy_request = NULL; +	ring->outstanding_lazy_seqno = 0;  	if (ring->cleanup)  		ring->cleanup(ring);  	cleanup_status_page(ring); -} -static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno) -{ -	int ret; - -	ret = i915_wait_seqno(ring, seqno); -	if (!ret) -		i915_gem_retire_requests_ring(ring); +	i915_cmd_parser_fini_ring(ring); -	return ret; +	kfree(ringbuf); +	ring->buffer = NULL;  } -static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) +static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)  { +	struct intel_ringbuffer *ringbuf = ring->buffer;  	struct drm_i915_gem_request *request;  	u32 seqno = 0;  	int ret; -	i915_gem_retire_requests_ring(ring); +	if (ringbuf->last_retired_head != -1) { +		ringbuf->head = ringbuf->last_retired_head; +		ringbuf->last_retired_head = -1; -	if (ring->last_retired_head != -1) { -		ring->head = ring->last_retired_head; -		ring->last_retired_head = -1; -		ring->space = ring_space(ring); -		if (ring->space >= n) +		ringbuf->space = ring_space(ring); +		if (ringbuf->space >= n)  			return 0;  	}  	list_for_each_entry(request, &ring->request_list, list) { -		int space; - -		
if (request->tail == -1) -			continue; - -		space = request->tail - (ring->tail + I915_RING_FREE_SPACE); -		if (space < 0) -			space += ring->size; -		if (space >= n) { +		if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) {  			seqno = request->seqno;  			break;  		} - -		/* Consume this request in case we need more space than -		 * is available and so need to prevent a race between -		 * updating last_retired_head and direct reads of -		 * I915_RING_HEAD. It also provides a nice sanity check. -		 */ -		request->tail = -1;  	}  	if (seqno == 0)  		return -ENOSPC; -	ret = intel_ring_wait_seqno(ring, seqno); +	ret = i915_wait_seqno(ring, seqno);  	if (ret)  		return ret; -	if (WARN_ON(ring->last_retired_head == -1)) -		return -ENOSPC; - -	ring->head = ring->last_retired_head; -	ring->last_retired_head = -1; -	ring->space = ring_space(ring); -	if (WARN_ON(ring->space < n)) -		return -ENOSPC; +	i915_gem_retire_requests_ring(ring); +	ringbuf->head = ringbuf->last_retired_head; +	ringbuf->last_retired_head = -1; +	ringbuf->space = ring_space(ring);  	return 0;  } -static int ring_wait_for_space(struct intel_ring_buffer *ring, int n) +static int ring_wait_for_space(struct intel_engine_cs *ring, int n)  {  	struct drm_device *dev = ring->dev;  	struct drm_i915_private *dev_priv = dev->dev_private; +	struct intel_ringbuffer *ringbuf = ring->buffer;  	unsigned long end;  	int ret; @@ -1414,7 +1565,9 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)  	if (ret != -ENOSPC)  		return ret; -	trace_i915_ring_wait_begin(ring); +	/* force the tail write in case we have been skipping them */ +	__intel_ring_advance(ring); +  	/* With GEM the hangcheck timer should kick us out of the loop,  	 * leaving it early runs the risk of corrupting GEM state (due  	 * to running on almost untested codepaths). But on resume @@ -1422,15 +1575,17 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)  	 * case by choosing an insanely large timeout. 
*/  	end = jiffies + 60 * HZ; +	trace_i915_ring_wait_begin(ring);  	do { -		ring->head = I915_READ_HEAD(ring); -		ring->space = ring_space(ring); -		if (ring->space >= n) { -			trace_i915_ring_wait_end(ring); -			return 0; +		ringbuf->head = I915_READ_HEAD(ring); +		ringbuf->space = ring_space(ring); +		if (ringbuf->space >= n) { +			ret = 0; +			break;  		} -		if (dev->primary->master) { +		if (!drm_core_check_feature(dev, DRIVER_MODESET) && +		    dev->primary->master) {  			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;  			if (master_priv->sarea_priv)  				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT; @@ -1438,44 +1593,55 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)  		msleep(1); +		if (dev_priv->mm.interruptible && signal_pending(current)) { +			ret = -ERESTARTSYS; +			break; +		} +  		ret = i915_gem_check_wedge(&dev_priv->gpu_error,  					   dev_priv->mm.interruptible);  		if (ret) -			return ret; -	} while (!time_after(jiffies, end)); +			break; + +		if (time_after(jiffies, end)) { +			ret = -EBUSY; +			break; +		} +	} while (1);  	trace_i915_ring_wait_end(ring); -	return -EBUSY; +	return ret;  } -static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring) +static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)  {  	uint32_t __iomem *virt; -	int rem = ring->size - ring->tail; +	struct intel_ringbuffer *ringbuf = ring->buffer; +	int rem = ringbuf->size - ringbuf->tail; -	if (ring->space < rem) { +	if (ringbuf->space < rem) {  		int ret = ring_wait_for_space(ring, rem);  		if (ret)  			return ret;  	} -	virt = ring->virtual_start + ring->tail; +	virt = ringbuf->virtual_start + ringbuf->tail;  	rem /= 4;  	while (rem--)  		iowrite32(MI_NOOP, virt++); -	ring->tail = 0; -	ring->space = ring_space(ring); +	ringbuf->tail = 0; +	ringbuf->space = ring_space(ring);  	return 0;  } -int intel_ring_idle(struct intel_ring_buffer *ring) +int intel_ring_idle(struct intel_engine_cs *ring)  {  	u32 seqno;  	int ret;  	/* We need to add any requests required to flush the objects and ring */ -	if (ring->outstanding_lazy_request) { +	if (ring->outstanding_lazy_seqno) {  		ret = i915_add_request(ring, NULL);  		if (ret)  			return ret; @@ -1493,39 +1659,49 @@ int intel_ring_idle(struct intel_ring_buffer *ring)  }  static int -intel_ring_alloc_seqno(struct intel_ring_buffer *ring) +intel_ring_alloc_seqno(struct intel_engine_cs *ring)  { -	if (ring->outstanding_lazy_request) +	if (ring->outstanding_lazy_seqno)  		return 0; -	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request); +	if (ring->preallocated_lazy_request == NULL) { +		struct drm_i915_gem_request *request; + +		request = kmalloc(sizeof(*request), GFP_KERNEL); +		if (request == NULL) +			return -ENOMEM; + +		ring->preallocated_lazy_request = request; +	} + +	return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);  } -static int __intel_ring_begin(struct intel_ring_buffer *ring, -			      int bytes) +static int __intel_ring_prepare(struct intel_engine_cs *ring, +				int bytes)  { +	struct intel_ringbuffer *ringbuf = ring->buffer;  	int ret; -	if (unlikely(ring->tail + bytes > ring->effective_size)) { +	if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) {  		ret = intel_wrap_ring_buffer(ring);  		if (unlikely(ret))  			return ret;  	} -	if (unlikely(ring->space < bytes)) { +	if (unlikely(ringbuf->space < bytes)) {  		ret = ring_wait_for_space(ring, bytes);  		if (unlikely(ret))  			return ret;  	} -	ring->space -= bytes;  	
return 0;  } -int intel_ring_begin(struct intel_ring_buffer *ring, +int intel_ring_begin(struct intel_engine_cs *ring,  		     int num_dwords)  { -	drm_i915_private_t *dev_priv = ring->dev->dev_private; +	struct drm_i915_private *dev_priv = ring->dev->dev_private;  	int ret;  	ret = i915_gem_check_wedge(&dev_priv->gpu_error, @@ -1533,19 +1709,46 @@ int intel_ring_begin(struct intel_ring_buffer *ring,  	if (ret)  		return ret; +	ret = __intel_ring_prepare(ring, num_dwords * sizeof(uint32_t)); +	if (ret) +		return ret; +  	/* Preallocate the olr before touching the ring */  	ret = intel_ring_alloc_seqno(ring);  	if (ret)  		return ret; -	return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t)); +	ring->buffer->space -= num_dwords * sizeof(uint32_t); +	return 0; +} + +/* Align the ring tail to a cacheline boundary */ +int intel_ring_cacheline_align(struct intel_engine_cs *ring) +{ +	int num_dwords = (ring->buffer->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t); +	int ret; + +	if (num_dwords == 0) +		return 0; + +	num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords; +	ret = intel_ring_begin(ring, num_dwords); +	if (ret) +		return ret; + +	while (num_dwords--) +		intel_ring_emit(ring, MI_NOOP); + +	intel_ring_advance(ring); + +	return 0;  } -void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno) +void intel_ring_init_seqno(struct intel_engine_cs *ring, u32 seqno)  {  	struct drm_i915_private *dev_priv = ring->dev->dev_private; -	BUG_ON(ring->outstanding_lazy_request); +	BUG_ON(ring->outstanding_lazy_seqno);  	if (INTEL_INFO(ring->dev)->gen >= 6) {  		I915_WRITE(RING_SYNC_0(ring->mmio_base), 0); @@ -1558,21 +1761,10 @@ void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)  	ring->hangcheck.seqno = seqno;  } -void intel_ring_advance(struct intel_ring_buffer *ring) -{ -	struct drm_i915_private *dev_priv = ring->dev->dev_private; - -	ring->tail &= ring->size - 1; -	if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring)) -		return; -	ring->write_tail(ring, ring->tail); -} - - -static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring, +static void gen6_bsd_ring_write_tail(struct intel_engine_cs *ring,  				     u32 value)  { -	drm_i915_private_t *dev_priv = ring->dev->dev_private; +	struct drm_i915_private *dev_priv = ring->dev->dev_private;         /* Every tail move must follow the sequence below */ @@ -1602,7 +1794,7 @@ static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,  		   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));  } -static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring, +static int gen6_bsd_ring_flush(struct intel_engine_cs *ring,  			       u32 invalidate, u32 flush)  {  	uint32_t cmd; @@ -1613,6 +1805,8 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,  		return ret;  	cmd = MI_FLUSH_DW; +	if (INTEL_INFO(ring->dev)->gen >= 8) +		cmd += 1;  	/*  	 * Bspec vol 1c.5 - video engine command streamer:  	 * "If ENABLED, all TLBs will be invalidated once the flush @@ -1624,15 +1818,44 @@ static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,  			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;  	intel_ring_emit(ring, cmd);  	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); -	intel_ring_emit(ring, 0); +	if (INTEL_INFO(ring->dev)->gen >= 8) { +		intel_ring_emit(ring, 0); /* upper addr */ +		intel_ring_emit(ring, 0); /* value */ +	} else  { +		intel_ring_emit(ring, 0); +		intel_ring_emit(ring, MI_NOOP); +	} +	intel_ring_advance(ring); +	return 0; +} + +static 
int +gen8_ring_dispatch_execbuffer(struct intel_engine_cs *ring, +			      u64 offset, u32 len, +			      unsigned flags) +{ +	struct drm_i915_private *dev_priv = ring->dev->dev_private; +	bool ppgtt = dev_priv->mm.aliasing_ppgtt != NULL && +		!(flags & I915_DISPATCH_SECURE); +	int ret; + +	ret = intel_ring_begin(ring, 4); +	if (ret) +		return ret; + +	/* FIXME(BDW): Address space and security selectors. */ +	intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8)); +	intel_ring_emit(ring, lower_32_bits(offset)); +	intel_ring_emit(ring, upper_32_bits(offset));  	intel_ring_emit(ring, MI_NOOP);  	intel_ring_advance(ring); +  	return 0;  }  static int -hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, -			      u32 offset, u32 len, +hsw_ring_dispatch_execbuffer(struct intel_engine_cs *ring, +			      u64 offset, u32 len,  			      unsigned flags)  {  	int ret; @@ -1652,8 +1875,8 @@ hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,  }  static int -gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring, -			      u32 offset, u32 len, +gen6_ring_dispatch_execbuffer(struct intel_engine_cs *ring, +			      u64 offset, u32 len,  			      unsigned flags)  {  	int ret; @@ -1674,7 +1897,7 @@ gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,  /* Blitter support (SandyBridge+) */ -static int gen6_ring_flush(struct intel_ring_buffer *ring, +static int gen6_ring_flush(struct intel_engine_cs *ring,  			   u32 invalidate, u32 flush)  {  	struct drm_device *dev = ring->dev; @@ -1686,6 +1909,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,  		return ret;  	cmd = MI_FLUSH_DW; +	if (INTEL_INFO(ring->dev)->gen >= 8) +		cmd += 1;  	/*  	 * Bspec vol 1c.3 - blitter engine command streamer:  	 * "If ENABLED, all TLBs will be invalidated once the flush @@ -1697,11 +1922,16 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,  			MI_FLUSH_DW_OP_STOREDW;  	intel_ring_emit(ring, cmd);  	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); -	intel_ring_emit(ring, 0); -	intel_ring_emit(ring, MI_NOOP); +	if (INTEL_INFO(ring->dev)->gen >= 8) { +		intel_ring_emit(ring, 0); /* upper addr */ +		intel_ring_emit(ring, 0); /* value */ +	} else  { +		intel_ring_emit(ring, 0); +		intel_ring_emit(ring, MI_NOOP); +	}  	intel_ring_advance(ring); -	if (IS_GEN7(dev) && flush) +	if (IS_GEN7(dev) && !invalidate && flush)  		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);  	return 0; @@ -1709,8 +1939,8 @@ static int gen6_ring_flush(struct intel_ring_buffer *ring,  int intel_init_render_ring_buffer(struct drm_device *dev)  { -	drm_i915_private_t *dev_priv = dev->dev_private; -	struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; +	struct drm_i915_private *dev_priv = dev->dev_private; +	struct intel_engine_cs *ring = &dev_priv->ring[RCS];  	ring->name = "render ring";  	ring->id = RCS; @@ -1721,20 +1951,35 @@ int intel_init_render_ring_buffer(struct drm_device *dev)  		ring->flush = gen7_render_ring_flush;  		if (INTEL_INFO(dev)->gen == 6)  			ring->flush = gen6_render_ring_flush; -		ring->irq_get = gen6_ring_get_irq; -		ring->irq_put = gen6_ring_put_irq; +		if (INTEL_INFO(dev)->gen >= 8) { +			ring->flush = gen8_render_ring_flush; +			ring->irq_get = gen8_ring_get_irq; +			ring->irq_put = gen8_ring_put_irq; +		} else { +			ring->irq_get = gen6_ring_get_irq; +			ring->irq_put = gen6_ring_put_irq; +		}  		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;  		ring->get_seqno = gen6_ring_get_seqno;  		ring->set_seqno = ring_set_seqno; -		
ring->sync_to = gen6_ring_sync; -		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID; -		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV; -		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB; -		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE; -		ring->signal_mbox[RCS] = GEN6_NOSYNC; -		ring->signal_mbox[VCS] = GEN6_VRSYNC; -		ring->signal_mbox[BCS] = GEN6_BRSYNC; -		ring->signal_mbox[VECS] = GEN6_VERSYNC; +		ring->semaphore.sync_to = gen6_ring_sync; +		ring->semaphore.signal = gen6_signal; +		/* +		 * The current semaphore is only applied on pre-gen8 platform. +		 * And there is no VCS2 ring on the pre-gen8 platform. So the +		 * semaphore between RCS and VCS2 is initialized as INVALID. +		 * Gen8 will initialize the sema between VCS2 and RCS later. +		 */ +		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID; +		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_RV; +		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_RB; +		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_RVE; +		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID; +		ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC; +		ring->semaphore.mbox.signal[VCS] = GEN6_VRSYNC; +		ring->semaphore.mbox.signal[BCS] = GEN6_BRSYNC; +		ring->semaphore.mbox.signal[VECS] = GEN6_VERSYNC; +		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;  	} else if (IS_GEN5(dev)) {  		ring->add_request = pc_render_add_request;  		ring->flush = gen4_render_ring_flush; @@ -1764,6 +2009,8 @@ int intel_init_render_ring_buffer(struct drm_device *dev)  	ring->write_tail = ring_write_tail;  	if (IS_HASWELL(dev))  		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer; +	else if (IS_GEN8(dev)) +		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;  	else if (INTEL_INFO(dev)->gen >= 6)  		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;  	else if (INTEL_INFO(dev)->gen >= 4) @@ -1786,7 +2033,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)  			return -ENOMEM;  		} -		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false); +		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);  		if (ret != 0) {  			drm_gem_object_unreference(&obj->base);  			DRM_ERROR("Failed to ping batch bo\n"); @@ -1802,17 +2049,26 @@ int intel_init_render_ring_buffer(struct drm_device *dev)  int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)  { -	drm_i915_private_t *dev_priv = dev->dev_private; -	struct intel_ring_buffer *ring = &dev_priv->ring[RCS]; +	struct drm_i915_private *dev_priv = dev->dev_private; +	struct intel_engine_cs *ring = &dev_priv->ring[RCS]; +	struct intel_ringbuffer *ringbuf = ring->buffer;  	int ret; +	if (ringbuf == NULL) { +		ringbuf = kzalloc(sizeof(*ringbuf), GFP_KERNEL); +		if (!ringbuf) +			return -ENOMEM; +		ring->buffer = ringbuf; +	} +  	ring->name = "render ring";  	ring->id = RCS;  	ring->mmio_base = RENDER_RING_BASE;  	if (INTEL_INFO(dev)->gen >= 6) {  		/* non-kms not supported on gen6+ */ -		return -ENODEV; +		ret = -ENODEV; +		goto err_ringbuf;  	}  	/* Note: gem is not supported on gen5/ilk without kms (the corresponding @@ -1847,37 +2103,45 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)  	INIT_LIST_HEAD(&ring->active_list);  	INIT_LIST_HEAD(&ring->request_list); -	ring->size = size; -	ring->effective_size = ring->size; +	ringbuf->size = size; +	ringbuf->effective_size = ringbuf->size;  	if (IS_I830(ring->dev) || IS_845G(ring->dev)) -		ring->effective_size -= 128; +		ringbuf->effective_size -= 2 * CACHELINE_BYTES; -	ring->virtual_start = 
-	if (ring->virtual_start == NULL) {
+	ringbuf->virtual_start = ioremap_wc(start, size);
+	if (ringbuf->virtual_start == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
-		return -ENOMEM;
+		ret = -ENOMEM;
+		goto err_ringbuf;
 	}
 
 	if (!I915_NEED_GFX_HWS(dev)) {
 		ret = init_phys_status_page(ring);
 		if (ret)
-			return ret;
+			goto err_vstart;
 	}
 
 	return 0;
+
+err_vstart:
+	iounmap(ringbuf->virtual_start);
+err_ringbuf:
+	kfree(ringbuf);
+	ring->buffer = NULL;
+	return ret;
 }
 
 int intel_init_bsd_ring_buffer(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VCS];
 
 	ring->name = "bsd ring";
 	ring->id = VCS;
 
 	ring->write_tail = ring_write_tail;
-	if (IS_GEN6(dev) || IS_GEN7(dev)) {
+	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->mmio_base = GEN6_BSD_RING_BASE;
 		/* gen6 bsd needs a special wa for tail updates */
 		if (IS_GEN6(dev))
@@ -1886,19 +2150,38 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 		ring->add_request = gen6_add_request;
 		ring->get_seqno = gen6_ring_get_seqno;
 		ring->set_seqno = ring_set_seqno;
-		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
-		ring->irq_get = gen6_ring_get_irq;
-		ring->irq_put = gen6_ring_put_irq;
-		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-		ring->sync_to = gen6_ring_sync;
-		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
-		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
-		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
-		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
-		ring->signal_mbox[RCS] = GEN6_RVSYNC;
-		ring->signal_mbox[VCS] = GEN6_NOSYNC;
-		ring->signal_mbox[BCS] = GEN6_BVSYNC;
-		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
+		if (INTEL_INFO(dev)->gen >= 8) {
+			ring->irq_enable_mask =
+				GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT;
+			ring->irq_get = gen8_ring_get_irq;
+			ring->irq_put = gen8_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen8_ring_dispatch_execbuffer;
+		} else {
+			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
+			ring->irq_get = gen6_ring_get_irq;
+			ring->irq_put = gen6_ring_put_irq;
+			ring->dispatch_execbuffer =
+				gen6_ring_dispatch_execbuffer;
+		}
+		ring->semaphore.sync_to = gen6_ring_sync;
+		ring->semaphore.signal = gen6_signal;
+		/*
+		 * The current semaphore is only applied on pre-gen8 platform.
+		 * And there is no VCS2 ring on the pre-gen8 platform. So the
+		 * semaphore between VCS and VCS2 is initialized as INVALID.
+		 * Gen8 will initialize the sema between VCS2 and VCS later.
+		 */
+		ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VR;
+		ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VB;
+		ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_VVE;
+		ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+		ring->semaphore.mbox.signal[RCS] = GEN6_RVSYNC;
+		ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+		ring->semaphore.mbox.signal[BCS] = GEN6_BVSYNC;
+		ring->semaphore.mbox.signal[VECS] = GEN6_VEVSYNC;
+		ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	} else {
 		ring->mmio_base = BSD_RING_BASE;
 		ring->flush = bsd_ring_flush;
@@ -1921,10 +2204,63 @@ int intel_init_bsd_ring_buffer(struct drm_device *dev)
 	return intel_init_ring_buffer(dev, ring);
 }
 
+/**
+ * Initialize the second BSD ring for Broadwell GT3.
+ * It is noted that this only exists on Broadwell GT3.
+ */
+int intel_init_bsd2_ring_buffer(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VCS2];
+
+	if ((INTEL_INFO(dev)->gen != 8)) {
+		DRM_ERROR("No dual-BSD ring on non-BDW machine\n");
+		return -EINVAL;
+	}
+
+	ring->name = "bsd2 ring";
+	ring->id = VCS2;
+
+	ring->write_tail = ring_write_tail;
+	ring->mmio_base = GEN8_BSD2_RING_BASE;
+	ring->flush = gen6_bsd_ring_flush;
+	ring->add_request = gen6_add_request;
+	ring->get_seqno = gen6_ring_get_seqno;
+	ring->set_seqno = ring_set_seqno;
+	ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VCS2_IRQ_SHIFT;
+	ring->irq_get = gen8_ring_get_irq;
+	ring->irq_put = gen8_ring_put_irq;
+	ring->dispatch_execbuffer =
+			gen8_ring_dispatch_execbuffer;
+	ring->semaphore.sync_to = gen6_ring_sync;
+	ring->semaphore.signal = gen6_signal;
+	/*
+	 * The current semaphore is only applied on the pre-gen8. And there
+	 * is no bsd2 ring on the pre-gen8. So now the semaphore_register
+	 * between VCS2 and other ring is initialized as invalid.
+	 * Gen8 will initialize the sema between VCS2 and other ring later.
+	 */
+	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.signal[RCS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VCS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
+
+	ring->init = init_ring_common;
+
+	return intel_init_ring_buffer(dev, ring);
+}
+
 int intel_init_blt_ring_buffer(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[BCS];
 
 	ring->name = "blitter ring";
 	ring->id = BCS;
@@ -1935,19 +2271,36 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
-	ring->irq_get = gen6_ring_get_irq;
-	ring->irq_put = gen6_ring_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-	ring->sync_to = gen6_ring_sync;
-	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
-	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
-	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
-	ring->signal_mbox[RCS] = GEN6_RBSYNC;
-	ring->signal_mbox[VCS] = GEN6_VBSYNC;
-	ring->signal_mbox[BCS] = GEN6_NOSYNC;
-	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
+		ring->irq_get = gen6_ring_get_irq;
+		ring->irq_put = gen6_ring_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
+	ring->semaphore.sync_to = gen6_ring_sync;
+	ring->semaphore.signal = gen6_signal;
+	/*
+	 * The current semaphore is only applied on pre-gen8 platform. And
+	 * there is no VCS2 ring on the pre-gen8 platform. So the semaphore
+	 * between BCS and VCS2 is initialized as INVALID.
+	 * Gen8 will initialize the sema between BCS and VCS2 later.
+	 */
+	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_BR;
+	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_BV;
+	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_BVE;
+	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.signal[RCS] = GEN6_RBSYNC;
+	ring->semaphore.mbox.signal[VCS] = GEN6_VBSYNC;
+	ring->semaphore.mbox.signal[BCS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VECS] = GEN6_VEBSYNC;
+	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
@@ -1955,8 +2308,8 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
 int intel_init_vebox_ring_buffer(struct drm_device *dev)
 {
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_engine_cs *ring = &dev_priv->ring[VECS];
 
 	ring->name = "video enhancement ring";
 	ring->id = VECS;
@@ -1967,26 +2320,38 @@ int intel_init_vebox_ring_buffer(struct drm_device *dev)
 	ring->add_request = gen6_add_request;
 	ring->get_seqno = gen6_ring_get_seqno;
 	ring->set_seqno = ring_set_seqno;
-	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
-	ring->irq_get = hsw_vebox_get_irq;
-	ring->irq_put = hsw_vebox_put_irq;
-	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
-	ring->sync_to = gen6_ring_sync;
-	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
-	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
-	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
-	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
-	ring->signal_mbox[RCS] = GEN6_RVESYNC;
-	ring->signal_mbox[VCS] = GEN6_VVESYNC;
-	ring->signal_mbox[BCS] = GEN6_BVESYNC;
-	ring->signal_mbox[VECS] = GEN6_NOSYNC;
+
+	if (INTEL_INFO(dev)->gen >= 8) {
+		ring->irq_enable_mask =
+			GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT;
+		ring->irq_get = gen8_ring_get_irq;
+		ring->irq_put = gen8_ring_put_irq;
+		ring->dispatch_execbuffer = gen8_ring_dispatch_execbuffer;
+	} else {
+		ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
+		ring->irq_get = hsw_vebox_get_irq;
+		ring->irq_put = hsw_vebox_put_irq;
+		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
+	}
+	ring->semaphore.sync_to = gen6_ring_sync;
+	ring->semaphore.signal = gen6_signal;
+	ring->semaphore.mbox.wait[RCS] = MI_SEMAPHORE_SYNC_VER;
+	ring->semaphore.mbox.wait[VCS] = MI_SEMAPHORE_SYNC_VEV;
+	ring->semaphore.mbox.wait[BCS] = MI_SEMAPHORE_SYNC_VEB;
+	ring->semaphore.mbox.wait[VECS] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.wait[VCS2] = MI_SEMAPHORE_SYNC_INVALID;
+	ring->semaphore.mbox.signal[RCS] = GEN6_RVESYNC;
+	ring->semaphore.mbox.signal[VCS] = GEN6_VVESYNC;
+	ring->semaphore.mbox.signal[BCS] = GEN6_BVESYNC;
+	ring->semaphore.mbox.signal[VECS] = GEN6_NOSYNC;
+	ring->semaphore.mbox.signal[VCS2] = GEN6_NOSYNC;
 	ring->init = init_ring_common;
 
 	return intel_init_ring_buffer(dev, ring);
 }
 
 int
-intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
+intel_ring_flush_all_caches(struct intel_engine_cs *ring)
 {
 	int ret;
 
@@ -2004,7 +2369,7 @@ intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
 }
 
 int
-intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
+intel_ring_invalidate_all_caches(struct intel_engine_cs *ring)
 {
 	uint32_t flush_domains;
 	int ret;
@@ -2022,3 +2387,19 @@ intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
 	ring->gpu_caches_dirty = false;
 	return 0;
 }
+
+void
+intel_stop_ring_buffer(struct intel_engine_cs *ring)
+{
+	int ret;
+
+	if (!intel_ring_initialized(ring))
+		return;
+
+	ret = intel_ring_idle(ring);
+	if (ret && !i915_reset_in_progress(&to_i915(ring->dev)->gpu_error))
+		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
+			  ring->name, ret);
+
+	stop_ring(ring);
+}
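The gen8_ring_dispatch_execbuffer() added above is the first dispatch path that carries a full 64-bit batch address: MI_BATCH_BUFFER_START grows to two address dwords, and bit 8 of the command selects the PPGTT address space for non-secure batches. A minimal standalone sketch of that dword layout; the EX_* constants and the dword buffer are illustrative stand-ins (not the i915_reg.h definitions), and the aliasing-PPGTT check from the patch is reduced to the secure-batch test.

#include <stdint.h>
#include <stdio.h>

/* EX_* values are illustrative stand-ins, not the definitions from i915_reg.h. */
#define EX_MI_BATCH_BUFFER_START_GEN8	(0x31u << 23 | 1)
#define EX_DISPATCH_SECURE		(1u << 0)

/* Lay out the four dwords the gen8 dispatch occupies: the command (with the
 * PPGTT selector in bit 8), the low address, the high address and a NOOP.
 * The real driver additionally checks that an aliasing PPGTT exists before
 * setting the PPGTT bit; that check is omitted here. */
void emit_gen8_batch_start(uint32_t *cs, uint64_t offset, unsigned flags)
{
	uint32_t ppgtt = !(flags & EX_DISPATCH_SECURE);

	cs[0] = EX_MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8);
	cs[1] = (uint32_t)(offset & 0xffffffffu);	/* lower_32_bits(offset) */
	cs[2] = (uint32_t)(offset >> 32);		/* upper_32_bits(offset) */
	cs[3] = 0;					/* MI_NOOP */
}

int main(void)
{
	uint32_t cs[4];
	int i;

	emit_gen8_batch_start(cs, 0x1ffff1000ull, 0);
	for (i = 0; i < 4; i++)
		printf("dw%d: 0x%08x\n", i, (unsigned)cs[i]);
	return 0;
}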
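The same move to 64-bit addressing is why gen6_ring_flush() bumps the MI_FLUSH_DW length field on gen8 ("cmd += 1"): the post-sync write target now takes two address dwords, so the slot after the low address carries the upper address bits and the final slot carries the write value instead of MI_NOOP padding. A sketch of the two layouts, again with illustrative EX_* placeholders rather than the real register bits.

#include <stdint.h>

/* EX_* values are illustrative placeholders, not the real i915_reg.h bits. */
#define EX_MI_FLUSH_DW		(0x26u << 23 | 1)
#define EX_SCRATCH_ADDR		0x2100u
#define EX_USE_GTT		(1u << 2)

/* Fill the four dwords of the blitter flush.  On gen8 the post-sync address
 * is 64 bits wide, so the command's length field grows by one dword (the
 * "cmd += 1" in gen6_ring_flush) and the dword after the low address holds
 * the upper address instead of immediately holding the write value. */
void emit_flush_dw(uint32_t *cs, int is_gen8)
{
	uint32_t cmd = EX_MI_FLUSH_DW;

	if (is_gen8)
		cmd += 1;			/* one more dword of payload */

	cs[0] = cmd;
	cs[1] = EX_SCRATCH_ADDR | EX_USE_GTT;	/* low post-sync address */
	if (is_gen8) {
		cs[2] = 0;			/* upper post-sync address */
		cs[3] = 0;			/* value to write */
	} else {
		cs[2] = 0;			/* value to write */
		cs[3] = 0;			/* MI_NOOP padding (MI_NOOP == 0) */
	}
}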
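The per-engine semaphore.mbox.wait[]/signal[] tables that every init function above fills in are indexed by the other engine's id, and MI_SEMAPHORE_SYNC_INVALID marks pairs with no hardware mailbox (an engine with itself, and anything involving VCS2 on pre-gen8). A simplified model of how a waiter consults that table before deciding whether it can use a hardware semaphore at all; the struct and constants below are stand-ins, not the driver's intel_engine_cs.

#include <stdint.h>
#include <stdio.h>

/* Simplified model of the semaphore mailbox tables; the driver's real
 * structure is intel_engine_cs with semaphore.mbox.wait[]/signal[]. */
enum ex_ring_id { EX_RCS, EX_VCS, EX_BCS, EX_VECS, EX_VCS2, EX_NUM_RINGS };

#define EX_SYNC_INVALID	0xffffffffu	/* stands in for MI_SEMAPHORE_SYNC_INVALID */

struct ex_engine {
	const char *name;
	uint32_t wait[EX_NUM_RINGS];	/* register the waiter polls, per signaller */
};

/* A waiter with no mailbox wired up for a given signaller (itself, or VCS2
 * on pre-gen8) cannot emit a hardware semaphore wait and must fall back to
 * a CPU-side wait. */
int ex_can_use_semaphore(const struct ex_engine *waiter, enum ex_ring_id signaller)
{
	return waiter->wait[signaller] != EX_SYNC_INVALID;
}

int main(void)
{
	struct ex_engine render = {
		.name = "render",
		.wait = {
			[EX_RCS]  = EX_SYNC_INVALID,	/* never wait on yourself */
			[EX_VCS]  = 0x1,		/* placeholder for MI_SEMAPHORE_SYNC_RV */
			[EX_BCS]  = 0x2,		/* placeholder for MI_SEMAPHORE_SYNC_RB */
			[EX_VECS] = 0x3,		/* placeholder for MI_SEMAPHORE_SYNC_RVE */
			[EX_VCS2] = EX_SYNC_INVALID,	/* no VCS2 mailbox on pre-gen8 */
		},
	};

	printf("render -> vcs  via mbox: %d\n", ex_can_use_semaphore(&render, EX_VCS));
	printf("render -> vcs2 via mbox: %d\n", ex_can_use_semaphore(&render, EX_VCS2));
	return 0;
}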
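intel_render_ring_init_dri() above also switches to the usual kernel unwind idiom: each acquired resource gets an error label, and later failures jump to the label that releases everything acquired so far, in reverse order. A user-space sketch of the same pattern, with plain allocations standing in for the kzalloc'd ringbuffer and the ioremap'd mapping.

#include <errno.h>
#include <stdlib.h>

/* Plain allocations stand in for the kzalloc'd ringbuffer and the ioremap'd
 * mapping; the point is the unwind order, not the resources themselves. */
struct example_ring {
	void *buffer;		/* like ring->buffer */
	void *virtual_start;	/* like ringbuf->virtual_start */
};

int example_ring_init(struct example_ring *ring, size_t size)
{
	int ret;

	ring->buffer = calloc(1, 64);		/* like kzalloc(sizeof(*ringbuf)) */
	if (!ring->buffer)
		return -ENOMEM;

	ring->virtual_start = malloc(size);	/* like ioremap_wc(start, size) */
	if (!ring->virtual_start) {
		ret = -ENOMEM;
		goto err_buffer;
	}

	/* Further setup would go here; any failure below this point jumps to
	 * the label that releases everything acquired so far. */
	return 0;

err_buffer:
	free(ring->buffer);
	ring->buffer = NULL;
	return ret;
}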
