Diffstat (limited to 'drivers/gpu/drm/i915/i915_drv.h')
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h | 1513
1 file changed, 968 insertions, 545 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 35874b3a86d..374f964323a 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -35,11 +35,13 @@  #include "i915_reg.h"  #include "intel_bios.h"  #include "intel_ringbuffer.h" +#include "i915_gem_gtt.h"  #include <linux/io-mapping.h>  #include <linux/i2c.h>  #include <linux/i2c-algo-bit.h>  #include <drm/intel-gtt.h>  #include <linux/backlight.h> +#include <linux/hashtable.h>  #include <linux/intel-iommu.h>  #include <linux/kref.h>  #include <linux/pm_qos.h> @@ -54,10 +56,12 @@  #define DRIVER_DATE		"20080730"  enum pipe { +	INVALID_PIPE = -1,  	PIPE_A = 0,  	PIPE_B,  	PIPE_C, -	I915_MAX_PIPES +	_PIPE_EDP, +	I915_MAX_PIPES = _PIPE_EDP  };  #define pipe_name(p) ((p) + 'A') @@ -65,7 +69,8 @@ enum transcoder {  	TRANSCODER_A = 0,  	TRANSCODER_B,  	TRANSCODER_C, -	TRANSCODER_EDP = 0xF, +	TRANSCODER_EDP, +	I915_MAX_TRANSCODERS  };  #define transcoder_name(t) ((t) + 'A') @@ -76,7 +81,7 @@ enum plane {  };  #define plane_name(p) ((p) + 'A') -#define sprite_name(p, s) ((p) * dev_priv->num_plane + (s) + 'A') +#define sprite_name(p, s) ((p) * INTEL_INFO(dev)->num_sprites[(p)] + (s) + 'A')  enum port {  	PORT_A = 0, @@ -88,6 +93,18 @@ enum port {  };  #define port_name(p) ((p) + 'A') +#define I915_NUM_PHYS_VLV 2 + +enum dpio_channel { +	DPIO_CH0, +	DPIO_CH1 +}; + +enum dpio_phy { +	DPIO_PHY0, +	DPIO_PHY1 +}; +  enum intel_display_power_domain {  	POWER_DOMAIN_PIPE_A,  	POWER_DOMAIN_PIPE_B, @@ -98,13 +115,31 @@ enum intel_display_power_domain {  	POWER_DOMAIN_TRANSCODER_A,  	POWER_DOMAIN_TRANSCODER_B,  	POWER_DOMAIN_TRANSCODER_C, -	POWER_DOMAIN_TRANSCODER_EDP = POWER_DOMAIN_TRANSCODER_A + 0xF, +	POWER_DOMAIN_TRANSCODER_EDP, +	POWER_DOMAIN_PORT_DDI_A_2_LANES, +	POWER_DOMAIN_PORT_DDI_A_4_LANES, +	POWER_DOMAIN_PORT_DDI_B_2_LANES, +	POWER_DOMAIN_PORT_DDI_B_4_LANES, +	POWER_DOMAIN_PORT_DDI_C_2_LANES, +	POWER_DOMAIN_PORT_DDI_C_4_LANES, +	POWER_DOMAIN_PORT_DDI_D_2_LANES, +	POWER_DOMAIN_PORT_DDI_D_4_LANES, +	POWER_DOMAIN_PORT_DSI, +	POWER_DOMAIN_PORT_CRT, +	POWER_DOMAIN_PORT_OTHER, +	POWER_DOMAIN_VGA, +	POWER_DOMAIN_AUDIO, +	POWER_DOMAIN_INIT, + +	POWER_DOMAIN_NUM,  };  #define POWER_DOMAIN_PIPE(pipe) ((pipe) + POWER_DOMAIN_PIPE_A)  #define POWER_DOMAIN_PIPE_PANEL_FITTER(pipe) \  		((pipe) + POWER_DOMAIN_PIPE_A_PANEL_FITTER) -#define POWER_DOMAIN_TRANSCODER(tran) ((tran) + POWER_DOMAIN_TRANSCODER_A) +#define POWER_DOMAIN_TRANSCODER(tran) \ +	((tran) == TRANSCODER_EDP ? 
POWER_DOMAIN_TRANSCODER_EDP : \ +	 (tran) + POWER_DOMAIN_TRANSCODER_A)  enum hpd_pin {  	HPD_NONE = 0, @@ -127,12 +162,24 @@ enum hpd_pin {  	 I915_GEM_DOMAIN_VERTEX)  #define for_each_pipe(p) for ((p) = 0; (p) < INTEL_INFO(dev)->num_pipes; (p)++) +#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) + +#define for_each_crtc(dev, crtc) \ +	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) + +#define for_each_intel_crtc(dev, intel_crtc) \ +	list_for_each_entry(intel_crtc, &dev->mode_config.crtc_list, base.head)  #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \  	list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \  		if ((intel_encoder)->base.crtc == (__crtc)) +#define for_each_connector_on_encoder(dev, __encoder, intel_connector) \ +	list_for_each_entry((intel_connector), &(dev)->mode_config.connector_list, base.head) \ +		if ((intel_connector)->base.encoder == (__encoder)) +  struct drm_i915_private; +struct i915_mmu_object;  enum intel_dpll_id {  	DPLL_ID_PRIVATE = -1, /* non-shared dpll in use */ @@ -204,18 +251,6 @@ struct intel_ddi_plls {  #define WATCH_LISTS	0  #define WATCH_GTT	0 -#define I915_GEM_PHYS_CURSOR_0 1 -#define I915_GEM_PHYS_CURSOR_1 2 -#define I915_GEM_PHYS_OVERLAY_REGS 3 -#define I915_MAX_PHYS_OBJECT (I915_GEM_PHYS_OVERLAY_REGS) - -struct drm_i915_gem_phys_object { -	int id; -	struct page **page_list; -	drm_dma_handle_t *handle; -	struct drm_i915_gem_object *cur_obj; -}; -  struct opregion_header;  struct opregion_acpi;  struct opregion_swsci; @@ -225,9 +260,12 @@ struct intel_opregion {  	struct opregion_header __iomem *header;  	struct opregion_acpi __iomem *acpi;  	struct opregion_swsci __iomem *swsci; +	u32 swsci_gbda_sub_functions; +	u32 swsci_sbcb_sub_functions;  	struct opregion_asle __iomem *asle;  	void __iomem *vbt;  	u32 __iomem *lid_state; +	struct work_struct asle_work;  };  #define OPREGION_SIZE            (8*1024) @@ -262,51 +300,86 @@ struct intel_display_error_state;  struct drm_i915_error_state {  	struct kref ref; +	struct timeval time; + +	char error_msg[128]; +	u32 reset_count; +	u32 suspend_count; + +	/* Generic register state */  	u32 eir;  	u32 pgtbl_er;  	u32 ier;  	u32 ccid;  	u32 derrmr;  	u32 forcewake; -	bool waiting[I915_NUM_RINGS]; -	u32 pipestat[I915_MAX_PIPES]; -	u32 tail[I915_NUM_RINGS]; -	u32 head[I915_NUM_RINGS]; -	u32 ctl[I915_NUM_RINGS]; -	u32 ipeir[I915_NUM_RINGS]; -	u32 ipehr[I915_NUM_RINGS]; -	u32 instdone[I915_NUM_RINGS]; -	u32 acthd[I915_NUM_RINGS]; -	u32 semaphore_mboxes[I915_NUM_RINGS][I915_NUM_RINGS - 1]; -	u32 semaphore_seqno[I915_NUM_RINGS][I915_NUM_RINGS - 1]; -	u32 rc_psmi[I915_NUM_RINGS]; /* sleep state */ -	/* our own tracking of ring head and tail */ -	u32 cpu_ring_head[I915_NUM_RINGS]; -	u32 cpu_ring_tail[I915_NUM_RINGS];  	u32 error; /* gen6+ */  	u32 err_int; /* gen7 */ -	u32 instpm[I915_NUM_RINGS]; -	u32 instps[I915_NUM_RINGS]; -	u32 extra_instdone[I915_NUM_INSTDONE_REG]; -	u32 seqno[I915_NUM_RINGS]; -	u64 bbaddr; -	u32 fault_reg[I915_NUM_RINGS];  	u32 done_reg; -	u32 faddr[I915_NUM_RINGS]; +	u32 gac_eco; +	u32 gam_ecochk; +	u32 gab_ctl; +	u32 gfx_mode; +	u32 extra_instdone[I915_NUM_INSTDONE_REG];  	u64 fence[I915_MAX_NUM_FENCES]; -	struct timeval time; +	struct intel_overlay_error_state *overlay; +	struct intel_display_error_state *display; +  	struct drm_i915_error_ring { +		bool valid; +		/* Software tracked state */ +		bool waiting; +		int hangcheck_score; +		enum intel_ring_hangcheck_action hangcheck_action; +		int 
num_requests; + +		/* our own tracking of ring head and tail */ +		u32 cpu_ring_head; +		u32 cpu_ring_tail; + +		u32 semaphore_seqno[I915_NUM_RINGS - 1]; + +		/* Register state */ +		u32 tail; +		u32 head; +		u32 ctl; +		u32 hws; +		u32 ipeir; +		u32 ipehr; +		u32 instdone; +		u32 bbstate; +		u32 instpm; +		u32 instps; +		u32 seqno; +		u64 bbaddr; +		u64 acthd; +		u32 fault_reg; +		u64 faddr; +		u32 rc_psmi; /* sleep state */ +		u32 semaphore_mboxes[I915_NUM_RINGS - 1]; +  		struct drm_i915_error_object {  			int page_count;  			u32 gtt_offset;  			u32 *pages[0]; -		} *ringbuffer, *batchbuffer, *ctx; +		} *ringbuffer, *batchbuffer, *wa_batchbuffer, *ctx, *hws_page; +  		struct drm_i915_error_request {  			long jiffies;  			u32 seqno;  			u32 tail;  		} *requests; -		int num_requests; + +		struct { +			u32 gfx_mode; +			union { +				u64 pdp[4]; +				u32 pp_dir_base; +			}; +		} vm_info; + +		pid_t pid; +		char comm[TASK_COMM_LEN];  	} ring[I915_NUM_RINGS];  	struct drm_i915_error_buffer {  		u32 size; @@ -320,22 +393,24 @@ struct drm_i915_error_state {  		u32 tiling:2;  		u32 dirty:1;  		u32 purgeable:1; +		u32 userptr:1;  		s32 ring:4; -		u32 cache_level:2; +		u32 cache_level:3;  	} **active_bo, **pinned_bo; +  	u32 *active_bo_count, *pinned_bo_count; -	struct intel_overlay_error_state *overlay; -	struct intel_display_error_state *display;  }; +struct intel_connector;  struct intel_crtc_config; +struct intel_plane_config;  struct intel_crtc;  struct intel_limit;  struct dpll;  struct drm_i915_display_funcs {  	bool (*fbc_enabled)(struct drm_device *dev); -	void (*enable_fbc)(struct drm_crtc *crtc, unsigned long interval); +	void (*enable_fbc)(struct drm_crtc *crtc);  	void (*disable_fbc)(struct drm_device *dev);  	int (*get_display_clock_speed)(struct drm_device *dev);  	int (*get_fifo_size)(struct drm_device *dev, int plane); @@ -357,7 +432,7 @@ struct drm_i915_display_funcs {  			  int target, int refclk,  			  struct dpll *match_clock,  			  struct dpll *best_clock); -	void (*update_wm)(struct drm_device *dev); +	void (*update_wm)(struct drm_crtc *crtc);  	void (*update_sprite_wm)(struct drm_plane *plane,  				 struct drm_crtc *crtc,  				 uint32_t sprite_width, int pixel_size, @@ -367,7 +442,8 @@ struct drm_i915_display_funcs {  	 * fills out the pipe-config with the hw state. 
*/  	bool (*get_pipe_config)(struct intel_crtc *,  				struct intel_crtc_config *); -	void (*get_clock)(struct intel_crtc *, struct intel_crtc_config *); +	void (*get_plane_config)(struct intel_crtc *, +				 struct intel_plane_config *);  	int (*crtc_mode_set)(struct drm_crtc *crtc,  			     int x, int y,  			     struct drm_framebuffer *old_fb); @@ -375,26 +451,52 @@ struct drm_i915_display_funcs {  	void (*crtc_disable)(struct drm_crtc *crtc);  	void (*off)(struct drm_crtc *crtc);  	void (*write_eld)(struct drm_connector *connector, -			  struct drm_crtc *crtc); +			  struct drm_crtc *crtc, +			  struct drm_display_mode *mode);  	void (*fdi_link_train)(struct drm_crtc *crtc);  	void (*init_clock_gating)(struct drm_device *dev);  	int (*queue_flip)(struct drm_device *dev, struct drm_crtc *crtc,  			  struct drm_framebuffer *fb,  			  struct drm_i915_gem_object *obj, +			  struct intel_engine_cs *ring,  			  uint32_t flags); -	int (*update_plane)(struct drm_crtc *crtc, struct drm_framebuffer *fb, -			    int x, int y); +	void (*update_primary_plane)(struct drm_crtc *crtc, +				     struct drm_framebuffer *fb, +				     int x, int y);  	void (*hpd_irq_setup)(struct drm_device *dev);  	/* clock updates for mode set */  	/* cursor updates */  	/* render clock increase/decrease */  	/* display clock increase/decrease */  	/* pll clock increase/decrease */ + +	int (*setup_backlight)(struct intel_connector *connector); +	uint32_t (*get_backlight)(struct intel_connector *connector); +	void (*set_backlight)(struct intel_connector *connector, +			      uint32_t level); +	void (*disable_backlight)(struct intel_connector *connector); +	void (*enable_backlight)(struct intel_connector *connector);  };  struct intel_uncore_funcs { -	void (*force_wake_get)(struct drm_i915_private *dev_priv); -	void (*force_wake_put)(struct drm_i915_private *dev_priv); +	void (*force_wake_get)(struct drm_i915_private *dev_priv, +							int fw_engine); +	void (*force_wake_put)(struct drm_i915_private *dev_priv, +							int fw_engine); + +	uint8_t  (*mmio_readb)(struct drm_i915_private *dev_priv, off_t offset, bool trace); +	uint16_t (*mmio_readw)(struct drm_i915_private *dev_priv, off_t offset, bool trace); +	uint32_t (*mmio_readl)(struct drm_i915_private *dev_priv, off_t offset, bool trace); +	uint64_t (*mmio_readq)(struct drm_i915_private *dev_priv, off_t offset, bool trace); + +	void (*mmio_writeb)(struct drm_i915_private *dev_priv, off_t offset, +				uint8_t val, bool trace); +	void (*mmio_writew)(struct drm_i915_private *dev_priv, off_t offset, +				uint16_t val, bool trace); +	void (*mmio_writel)(struct drm_i915_private *dev_priv, off_t offset, +				uint32_t val, bool trace); +	void (*mmio_writeq)(struct drm_i915_private *dev_priv, off_t offset, +				uint64_t val, bool trace);  };  struct intel_uncore { @@ -404,6 +506,11 @@ struct intel_uncore {  	unsigned fifo_count;  	unsigned forcewake_count; + +	unsigned fw_rendercount; +	unsigned fw_mediacount; + +	struct timer_list force_wake_timer;  };  #define DEV_INFO_FOR_EACH_FLAG(func, sep) \ @@ -420,7 +527,7 @@ struct intel_uncore {  	func(is_ivybridge) sep \  	func(is_valleyview) sep \  	func(is_haswell) sep \ -	func(has_force_wake) sep \ +	func(is_preliminary) sep \  	func(has_fbc) sep \  	func(has_pipe_cxsr) sep \  	func(has_hotplug) sep \ @@ -428,9 +535,6 @@ struct intel_uncore {  	func(has_overlay) sep \  	func(overlay_needs_physical) sep \  	func(supports_tv) sep \ -	func(has_bsd_ring) sep \ -	func(has_blt_ring) sep \ -	func(has_vebox_ring) sep \  	func(has_llc) sep 
\  	func(has_ddi) sep \  	func(has_fpga_dbg) @@ -441,8 +545,17 @@ struct intel_uncore {  struct intel_device_info {  	u32 display_mmio_offset;  	u8 num_pipes:3; +	u8 num_sprites[I915_MAX_PIPES];  	u8 gen; +	u8 ring_mask; /* Rings supported by the HW */  	DEV_INFO_FOR_EACH_FLAG(DEFINE_FLAG, SEP_SEMICOLON); +	/* Register offsets for the various display pipes and transcoders */ +	int pipe_offsets[I915_MAX_TRANSCODERS]; +	int trans_offsets[I915_MAX_TRANSCODERS]; +	int dpll_offsets[I915_MAX_PIPES]; +	int dpll_md_offsets[I915_MAX_PIPES]; +	int palette_offsets[I915_MAX_PIPES]; +	int cursor_offsets[I915_MAX_PIPES];  };  #undef DEFINE_FLAG @@ -458,136 +571,34 @@ enum i915_cache_level {  	I915_CACHE_WT, /* hsw:gt3e WriteThrough for scanouts */  }; -typedef uint32_t gen6_gtt_pte_t; - -struct i915_address_space { -	struct drm_mm mm; -	struct drm_device *dev; -	struct list_head global_link; -	unsigned long start;		/* Start offset always 0 for dri2 */ -	size_t total;		/* size addr space maps (ex. 2GB for ggtt) */ - -	struct { -		dma_addr_t addr; -		struct page *page; -	} scratch; - -	/** -	 * List of objects currently involved in rendering. -	 * -	 * Includes buffers having the contents of their GPU caches -	 * flushed, not necessarily primitives.  last_rendering_seqno -	 * represents when the rendering involved will be completed. -	 * -	 * A reference is held on the buffer while on this list. -	 */ -	struct list_head active_list; - -	/** -	 * LRU list of objects which are not in the ringbuffer and -	 * are ready to unbind, but are still in the GTT. -	 * -	 * last_rendering_seqno is 0 while an object is in this list. -	 * -	 * A reference is not held on the buffer while on this list, -	 * as merely being GTT-bound shouldn't prevent its being -	 * freed, and we'll pull it off the list in the free path. -	 */ -	struct list_head inactive_list; - -	/* FIXME: Need a more generic return type */ -	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr, -				     enum i915_cache_level level); -	void (*clear_range)(struct i915_address_space *vm, -			    unsigned int first_entry, -			    unsigned int num_entries); -	void (*insert_entries)(struct i915_address_space *vm, -			       struct sg_table *st, -			       unsigned int first_entry, -			       enum i915_cache_level cache_level); -	void (*cleanup)(struct i915_address_space *vm); -}; - -/* The Graphics Translation Table is the way in which GEN hardware translates a - * Graphics Virtual Address into a Physical Address. In addition to the normal - * collateral associated with any va->pa translations GEN hardware also has a - * portion of the GTT which can be mapped by the CPU and remain both coherent - * and correct (in cases like swizzling). That region is referred to as GMADR in - * the spec. 
- */ -struct i915_gtt { -	struct i915_address_space base; -	size_t stolen_size;		/* Total size of stolen memory */ - -	unsigned long mappable_end;	/* End offset that we can CPU map */ -	struct io_mapping *mappable;	/* Mapping to our CPU mappable region */ -	phys_addr_t mappable_base;	/* PA of our GMADR */ - -	/** "Graphics Stolen Memory" holds the global PTEs */ -	void __iomem *gsm; - -	bool do_idle_maps; - -	int mtrr; - -	/* global gtt ops */ -	int (*gtt_probe)(struct drm_device *dev, size_t *gtt_total, -			  size_t *stolen, phys_addr_t *mappable_base, -			  unsigned long *mappable_end); -}; -#define gtt_total_entries(gtt) ((gtt).base.total >> PAGE_SHIFT) - -struct i915_hw_ppgtt { -	struct i915_address_space base; -	unsigned num_pd_entries; -	struct page **pt_pages; -	uint32_t pd_offset; -	dma_addr_t *pt_dma_addr; - -	int (*enable)(struct drm_device *dev); -}; - -/** - * A VMA represents a GEM BO that is bound into an address space. Therefore, a - * VMA's presence cannot be guaranteed before binding, or after unbinding the - * object into/from the address space. - * - * To make things as simple as possible (ie. no refcounting), a VMA's lifetime - * will always be <= an objects lifetime. So object refcounting should cover us. - */ -struct i915_vma { -	struct drm_mm_node node; -	struct drm_i915_gem_object *obj; -	struct i915_address_space *vm; - -	/** This object's place on the active/inactive lists */ -	struct list_head mm_list; - -	struct list_head vma_link; /* Link in the object's VMA list */ - -	/** This vma's place in the batchbuffer or on the eviction list */ -	struct list_head exec_list; - -}; -  struct i915_ctx_hang_stats {  	/* This context had batch pending when hang was declared */  	unsigned batch_pending;  	/* This context had batch active when hang was declared */  	unsigned batch_active; + +	/* Time when this context was last blamed for a GPU reset */ +	unsigned long guilty_ts; + +	/* This context is banned to submit more work */ +	bool banned;  };  /* This must match up with the value previously used for execbuf2.rsvd1. 
*/  #define DEFAULT_CONTEXT_ID 0 -struct i915_hw_context { +struct intel_context {  	struct kref ref;  	int id;  	bool is_initialized; +	uint8_t remap_slice;  	struct drm_i915_file_private *file_priv; -	struct intel_ring_buffer *ring; +	struct intel_engine_cs *last_ring;  	struct drm_i915_gem_object *obj;  	struct i915_ctx_hang_stats hang_stats; +	struct i915_address_space *vm; + +	struct list_head link;  };  struct i915_fbc { @@ -603,7 +614,6 @@ struct i915_fbc {  		struct delayed_work work;  		struct drm_crtc *crtc;  		struct drm_framebuffer *fb; -		int interval;  	} *fbc_work;  	enum no_fbc_reason { @@ -621,17 +631,13 @@ struct i915_fbc {  	} no_fbc_reason;  }; -enum no_psr_reason { -	PSR_NO_SOURCE, /* Not supported on platform */ -	PSR_NO_SINK, /* Not supported by panel */ -	PSR_MODULE_PARAM, -	PSR_CRTC_NOT_ACTIVE, -	PSR_PWR_WELL_ENABLED, -	PSR_NOT_TILED, -	PSR_SPRITE_ENABLED, -	PSR_S3D_ENABLED, -	PSR_INTERLACED_ENABLED, -	PSR_HSW_NOT_DDIA, +struct i915_drrs { +	struct intel_connector *connector; +}; + +struct i915_psr { +	bool sink_support; +	bool source_ok;  };  enum intel_pch { @@ -650,7 +656,7 @@ enum intel_sbi_destination {  #define QUIRK_PIPEA_FORCE (1<<0)  #define QUIRK_LVDS_SSC_DISABLE (1<<1)  #define QUIRK_INVERT_BRIGHTNESS (1<<2) -#define QUIRK_NO_PCH_PWM_ENABLE (1<<3) +#define QUIRK_BACKLIGHT_PRESENT (1<<3)  struct intel_fbdev;  struct intel_fbc_work; @@ -702,6 +708,7 @@ struct i915_suspend_saved_registers {  	u32 saveBLC_HIST_CTL;  	u32 saveBLC_PWM_CTL;  	u32 saveBLC_PWM_CTL2; +	u32 saveBLC_HIST_CTL_B;  	u32 saveBLC_CPU_PWM_CTL;  	u32 saveBLC_CPU_PWM_CTL2;  	u32 saveFPB0; @@ -747,11 +754,7 @@ struct i915_suspend_saved_registers {  	u32 savePFIT_CONTROL;  	u32 save_palette_a[256];  	u32 save_palette_b[256]; -	u32 saveDPFC_CB_BASE; -	u32 saveFBC_CFB_BASE; -	u32 saveFBC_LL_BASE;  	u32 saveFBC_CONTROL; -	u32 saveFBC_CONTROL2;  	u32 saveIER;  	u32 saveIIR;  	u32 saveIMR; @@ -816,22 +819,95 @@ struct i915_suspend_saved_registers {  	u32 savePCH_PORT_HOTPLUG;  }; +struct vlv_s0ix_state { +	/* GAM */ +	u32 wr_watermark; +	u32 gfx_prio_ctrl; +	u32 arb_mode; +	u32 gfx_pend_tlb0; +	u32 gfx_pend_tlb1; +	u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM]; +	u32 media_max_req_count; +	u32 gfx_max_req_count; +	u32 render_hwsp; +	u32 ecochk; +	u32 bsd_hwsp; +	u32 blt_hwsp; +	u32 tlb_rd_addr; + +	/* MBC */ +	u32 g3dctl; +	u32 gsckgctl; +	u32 mbctl; + +	/* GCP */ +	u32 ucgctl1; +	u32 ucgctl3; +	u32 rcgctl1; +	u32 rcgctl2; +	u32 rstctl; +	u32 misccpctl; + +	/* GPM */ +	u32 gfxpause; +	u32 rpdeuhwtc; +	u32 rpdeuc; +	u32 ecobus; +	u32 pwrdwnupctl; +	u32 rp_down_timeout; +	u32 rp_deucsw; +	u32 rcubmabdtmr; +	u32 rcedata; +	u32 spare2gh; + +	/* Display 1 CZ domain */ +	u32 gt_imr; +	u32 gt_ier; +	u32 pm_imr; +	u32 pm_ier; +	u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM]; + +	/* GT SA CZ domain */ +	u32 tilectl; +	u32 gt_fifoctl; +	u32 gtlc_wake_ctrl; +	u32 gtlc_survive; +	u32 pmwgicz; + +	/* Display 2 CZ domain */ +	u32 gu_ctl0; +	u32 gu_ctl1; +	u32 clock_gate_dis2; +}; +  struct intel_gen6_power_mgmt {  	/* work and pm_iir are protected by dev_priv->irq_lock */  	struct work_struct work;  	u32 pm_iir; -	/* On vlv we need to manually drop to Vmin with a delayed work. */ -	struct delayed_work vlv_work; - -	/* The below variables an all the rps hw state are protected by -	 * dev->struct mutext. */ -	u8 cur_delay; -	u8 min_delay; -	u8 max_delay; -	u8 rpe_delay; -	u8 hw_max; +	/* Frequencies are stored in potentially platform dependent multiples. 
+	 * In other words, *_freq needs to be multiplied by X to be interesting. +	 * Soft limits are those which are used for the dynamic reclocking done +	 * by the driver (raise frequencies under heavy loads, and lower for +	 * lighter loads). Hard limits are those imposed by the hardware. +	 * +	 * A distinction is made for overclocking, which is never enabled by +	 * default, and is considered to be above the hard limit if it's +	 * possible at all. +	 */ +	u8 cur_freq;		/* Current frequency (cached, may not == HW) */ +	u8 min_freq_softlimit;	/* Minimum frequency permitted by the driver */ +	u8 max_freq_softlimit;	/* Max frequency permitted by the driver */ +	u8 max_freq;		/* Maximum frequency, RP0 if not overclocking */ +	u8 min_freq;		/* AKA RPn. Minimum frequency */ +	u8 efficient_freq;	/* AKA RPe. Pre-determined balanced frequency */ +	u8 rp1_freq;		/* "less than" RP0 power/freqency */ +	u8 rp0_freq;		/* Non-overclocked max frequency. */ + +	int last_adj; +	enum { LOW_POWER, BETWEEN, HIGH_POWER } power; +	bool enabled;  	struct delayed_work delayed_resume_work;  	/* @@ -866,13 +942,61 @@ struct intel_ilk_power_mgmt {  	struct drm_i915_gem_object *renderctx;  }; +struct drm_i915_private; +struct i915_power_well; + +struct i915_power_well_ops { +	/* +	 * Synchronize the well's hw state to match the current sw state, for +	 * example enable/disable it based on the current refcount. Called +	 * during driver init and resume time, possibly after first calling +	 * the enable/disable handlers. +	 */ +	void (*sync_hw)(struct drm_i915_private *dev_priv, +			struct i915_power_well *power_well); +	/* +	 * Enable the well and resources that depend on it (for example +	 * interrupts located on the well). Called after the 0->1 refcount +	 * transition. +	 */ +	void (*enable)(struct drm_i915_private *dev_priv, +		       struct i915_power_well *power_well); +	/* +	 * Disable the well and resources that depend on it. Called after +	 * the 1->0 refcount transition. +	 */ +	void (*disable)(struct drm_i915_private *dev_priv, +			struct i915_power_well *power_well); +	/* Returns the hw enabled state. */ +	bool (*is_enabled)(struct drm_i915_private *dev_priv, +			   struct i915_power_well *power_well); +}; +  /* Power well structure for haswell */  struct i915_power_well { -	struct drm_device *device; -	spinlock_t lock; +	const char *name; +	bool always_on;  	/* power well enable/disable usage count */  	int count; -	int i915_request; +	/* cached hw enabled state */ +	bool hw_enabled; +	unsigned long domains; +	unsigned long data; +	const struct i915_power_well_ops *ops; +}; + +struct i915_power_domains { +	/* +	 * Power wells needed for initialization at driver init and suspend +	 * time are on. They are kept on until after the first modeset. 
+	 */ +	bool init_power_on; +	bool initializing; +	int power_well_count; + +	struct mutex lock; +	int domain_use_count[POWER_DOMAIN_NUM]; +	struct i915_power_well *power_wells;  };  struct i915_dri1_state { @@ -900,9 +1024,11 @@ struct i915_ums_state {  	int mm_suspended;  }; +#define MAX_L3_SLICES 2  struct intel_l3_parity { -	u32 *remap_info; +	u32 *remap_info[MAX_L3_SLICES];  	struct work_struct error_work; +	int which_slice;  };  struct i915_gem_mm { @@ -924,7 +1050,8 @@ struct i915_gem_mm {  	/** PPGTT used for aliasing the PPGTT with the GTT */  	struct i915_hw_ppgtt *aliasing_ppgtt; -	struct shrinker inactive_shrinker; +	struct notifier_block oom_notifier; +	struct shrinker shrinker;  	bool shrinker_no_lock_stealing;  	/** LRU list of objects with fence regs on them. */ @@ -940,19 +1067,36 @@ struct i915_gem_mm {  	struct delayed_work retire_work;  	/** +	 * When we detect an idle GPU, we want to turn on +	 * powersaving features. So once we see that there +	 * are no more requests outstanding and no more +	 * arrive within a small period of time, we fire +	 * off the idle_work. +	 */ +	struct delayed_work idle_work; + +	/**  	 * Are we in a non-interruptible section of code like  	 * modesetting?  	 */  	bool interruptible; +	/** +	 * Is the GPU currently considered idle, or busy executing userspace +	 * requests?  Whilst idle, we attempt to power down the hardware and +	 * display clocks. In order to reduce the effect on performance, there +	 * is a slight delay before we do so. +	 */ +	bool busy; + +	/* the indicator for dispatch video commands on two BSD rings */ +	int bsd_ring_dispatch_index; +  	/** Bit 6 swizzling required for X tiling */  	uint32_t bit_6_swizzle_x;  	/** Bit 6 swizzling required for Y tiling */  	uint32_t bit_6_swizzle_y; -	/* storage for physical objects */ -	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT]; -  	/* accounting, useful for userland debugging */  	spinlock_t object_stat_lock;  	size_t object_memory; @@ -977,6 +1121,9 @@ struct i915_gpu_error {  	/* For hangcheck timer */  #define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */  #define DRM_I915_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD) +	/* Hang gpu twice in this window and your context gets banned */ +#define DRM_I915_CTX_BAN_PERIOD DIV_ROUND_UP(8*DRM_I915_HANGCHECK_PERIOD, 1000) +  	struct timer_list hangcheck_timer;  	/* For reset and error_state handling. */ @@ -985,37 +1132,34 @@ struct i915_gpu_error {  	struct drm_i915_error_state *first_error;  	struct work_struct work; -	unsigned long last_reset; + +	unsigned long missed_irq_rings;  	/** -	 * State variable and reset counter controlling the reset flow +	 * State variable controlling the reset flow and count +	 * +	 * This is a counter which gets incremented when reset is triggered, +	 * and again when reset has been handled. So odd values (lowest bit set) +	 * means that reset is in progress and even values that +	 * (reset_counter >> 1):th reset was successfully completed. +	 * +	 * If reset is not completed succesfully, the I915_WEDGE bit is +	 * set meaning that hardware is terminally sour and there is no +	 * recovery. All waiters on the reset_queue will be woken when +	 * that happens.  	 * -	 * Upper bits are for the reset counter.  This counter is used by the -	 * wait_seqno code to race-free noticed that a reset event happened and -	 * that it needs to restart the entire ioctl (since most likely the -	 * seqno it waited for won't ever signal anytime soon). 
+	 * This counter is used by the wait_seqno code to notice that reset +	 * event happened and it needs to restart the entire ioctl (since most +	 * likely the seqno it waited for won't ever signal anytime soon).  	 *  	 * This is important for lock-free wait paths, where no contended lock  	 * naturally enforces the correct ordering between the bail-out of the  	 * waiter and the gpu reset work code. -	 * -	 * Lowest bit controls the reset state machine: Set means a reset is in -	 * progress. This state will (presuming we don't have any bugs) decay -	 * into either unset (successful reset) or the special WEDGED value (hw -	 * terminally sour). All waiters on the reset_queue will be woken when -	 * that happens.  	 */  	atomic_t reset_counter; -	/** -	 * Special values/flags for reset_counter -	 * -	 * Note that the code relies on -	 * 	I915_WEDGED & I915_RESET_IN_PROGRESS_FLAG -	 * being true. -	 */  #define I915_RESET_IN_PROGRESS_FLAG	1 -#define I915_WEDGED			0xffffffff +#define I915_WEDGED			(1 << 31)  	/**  	 * Waitqueue to signal when the reset has completed. Used by clients @@ -1023,8 +1167,15 @@ struct i915_gpu_error {  	 */  	wait_queue_head_t reset_queue; -	/* For gpu hang simulation. */ -	unsigned int stop_rings; +	/* Userspace knobs for gpu hang simulation; +	 * combines both a ring mask, and extra flags +	 */ +	u32 stop_rings; +#define I915_STOP_RING_ALLOW_BAN       (1 << 31) +#define I915_STOP_RING_ALLOW_WARN      (1 << 30) + +	/* For missed irq/seqno simulation. */ +	unsigned int test_irq_rings;  };  enum modeset_restore { @@ -1033,6 +1184,20 @@ enum modeset_restore {  	MODESET_SUSPENDED,  }; +struct ddi_vbt_port_info { +	uint8_t hdmi_level_shift; + +	uint8_t supports_dvi:1; +	uint8_t supports_hdmi:1; +	uint8_t supports_dp:1; +}; + +enum drrs_support_type { +	DRRS_NOT_SUPPORTED = 0, +	STATIC_DRRS_SUPPORT = 1, +	SEAMLESS_DRRS_SUPPORT = 2 +}; +  struct intel_vbt_data {  	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */  	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ @@ -1045,9 +1210,12 @@ struct intel_vbt_data {  	unsigned int lvds_use_ssc:1;  	unsigned int display_clock_mode:1;  	unsigned int fdi_rx_polarity_inverted:1; +	unsigned int has_mipi:1;  	int lvds_ssc_freq;  	unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ +	enum drrs_support_type drrs_type; +  	/* eDP */  	int edp_rate;  	int edp_lanes; @@ -1058,10 +1226,30 @@ struct intel_vbt_data {  	int edp_bpp;  	struct edp_power_seq edp_pps; +	struct { +		u16 pwm_freq_hz; +		bool present; +		bool active_low_pwm; +	} backlight; + +	/* MIPI DSI */ +	struct { +		u16 port; +		u16 panel_id; +		struct mipi_config *config; +		struct mipi_pps_data *pps; +		u8 seq_version; +		u32 size; +		u8 *data; +		u8 *sequence[MIPI_SEQ_MAX]; +	} dsi; +  	int crt_ddc_pin;  	int child_dev_num; -	struct child_device_config *child_dev; +	union child_device_config *child_dev; + +	struct ddi_vbt_port_info ddi_port_info[I915_MAX_PORTS];  };  enum intel_ddb_partitioning { @@ -1077,80 +1265,78 @@ struct intel_wm_level {  	uint32_t fbc_val;  }; +struct ilk_wm_values { +	uint32_t wm_pipe[3]; +	uint32_t wm_lp[3]; +	uint32_t wm_lp_spr[3]; +	uint32_t wm_linetime[3]; +	bool enable_fbc_wm; +	enum intel_ddb_partitioning partitioning; +}; +  /* - * This struct tracks the state needed for the Package C8+ feature. 
- * - * Package states C8 and deeper are really deep PC states that can only be - * reached when all the devices on the system allow it, so even if the graphics - * device allows PC8+, it doesn't mean the system will actually get to these - * states. - * - * Our driver only allows PC8+ when all the outputs are disabled, the power well - * is disabled and the GPU is idle. When these conditions are met, we manually - * do the other conditions: disable the interrupts, clocks and switch LCPLL - * refclk to Fclk. - * - * When we really reach PC8 or deeper states (not just when we allow it) we lose - * the state of some registers, so when we come back from PC8+ we need to - * restore this state. We don't get into PC8+ if we're not in RC6, so we don't - * need to take care of the registers kept by RC6. - * - * The interrupt disabling is part of the requirements. We can only leave the - * PCH HPD interrupts enabled. If we're in PC8+ and we get another interrupt we - * can lock the machine. + * This struct helps tracking the state needed for runtime PM, which puts the + * device in PCI D3 state. Notice that when this happens, nothing on the + * graphics device works, even register access, so we don't get interrupts nor + * anything else.   * - * Ideally every piece of our code that needs PC8+ disabled would call - * hsw_disable_package_c8, which would increment disable_count and prevent the - * system from reaching PC8+. But we don't have a symmetric way to do this for - * everything, so we have the requirements_met and gpu_idle variables. When we - * switch requirements_met or gpu_idle to true we decrease disable_count, and - * increase it in the opposite case. The requirements_met variable is true when - * all the CRTCs, encoders and the power well are disabled. The gpu_idle - * variable is true when the GPU is idle. + * Every piece of our code that needs to actually touch the hardware needs to + * either call intel_runtime_pm_get or call intel_display_power_get with the + * appropriate power domain.   * - * In addition to everything, we only actually enable PC8+ if disable_count - * stays at zero for at least some seconds. This is implemented with the - * enable_work variable. We do this so we don't enable/disable PC8 dozens of - * consecutive times when all screens are disabled and some background app - * queries the state of our connectors, or we have some application constantly - * waking up to use the GPU. Only after the enable_work function actually - * enables PC8+ the "enable" variable will become true, which means that it can - * be false even if disable_count is 0. + * Our driver uses the autosuspend delay feature, which means we'll only really + * suspend if we stay with zero refcount for a certain amount of time. The + * default value is currently very conservative (see intel_init_runtime_pm), but + * it can be changed with the standard runtime PM files from sysfs.   *   * The irqs_disabled variable becomes true exactly after we disable the IRQs and   * goes back to false exactly before we reenable the IRQs. We use this variable   * to check if someone is trying to enable/disable IRQs while they're supposed   * to be disabled. This shouldn't happen and we'll print some error messages in - * case it happens, but if it actually happens we'll also update the variables - * inside struct regsave so when we restore the IRQs they will contain the - * latest expected values. + * case it happens.   * - * For more, read "Display Sequences for Package C8" on our documentation. 
+ * For more, read the Documentation/power/runtime_pm.txt.   */ -struct i915_package_c8 { -	bool requirements_met; -	bool gpu_idle; +struct i915_runtime_pm { +	bool suspended;  	bool irqs_disabled; -	/* Only true after the delayed work task actually enables it. */ -	bool enabled; -	int disable_count; -	struct mutex lock; -	struct delayed_work enable_work; +}; -	struct { -		uint32_t deimr; -		uint32_t sdeimr; -		uint32_t gtimr; -		uint32_t gtier; -		uint32_t gen6_pmimr; -	} regsave; +enum intel_pipe_crc_source { +	INTEL_PIPE_CRC_SOURCE_NONE, +	INTEL_PIPE_CRC_SOURCE_PLANE1, +	INTEL_PIPE_CRC_SOURCE_PLANE2, +	INTEL_PIPE_CRC_SOURCE_PF, +	INTEL_PIPE_CRC_SOURCE_PIPE, +	/* TV/DP on pre-gen5/vlv can't use the pipe source. */ +	INTEL_PIPE_CRC_SOURCE_TV, +	INTEL_PIPE_CRC_SOURCE_DP_B, +	INTEL_PIPE_CRC_SOURCE_DP_C, +	INTEL_PIPE_CRC_SOURCE_DP_D, +	INTEL_PIPE_CRC_SOURCE_AUTO, +	INTEL_PIPE_CRC_SOURCE_MAX, +}; + +struct intel_pipe_crc_entry { +	uint32_t frame; +	uint32_t crc[5]; +}; + +#define INTEL_PIPE_CRC_ENTRIES_NR	128 +struct intel_pipe_crc { +	spinlock_t lock; +	bool opened;		/* exclusive access to the result file */ +	struct intel_pipe_crc_entry *entries; +	enum intel_pipe_crc_source source; +	int head, tail; +	wait_queue_head_t wq;  }; -typedef struct drm_i915_private { +struct drm_i915_private {  	struct drm_device *dev;  	struct kmem_cache *slab; -	const struct intel_device_info *info; +	const struct intel_device_info info;  	int relative_constants_mode; @@ -1170,20 +1356,23 @@ typedef struct drm_i915_private {  	 */  	uint32_t gpio_mmio_base; +	/* MMIO base address for MIPI regs */ +	uint32_t mipi_mmio_base; +  	wait_queue_head_t gmbus_wait_queue;  	struct pci_dev *bridge_dev; -	struct intel_ring_buffer ring[I915_NUM_RINGS]; +	struct intel_engine_cs ring[I915_NUM_RINGS];  	uint32_t last_seqno, next_seqno;  	drm_dma_handle_t *status_page_dmah;  	struct resource mch_res; -	atomic_t irq_received; -  	/* protects the irq masks */  	spinlock_t irq_lock; +	bool display_irqs_enabled; +  	/* To control wakeup latency, e.g. for irq-driven dp aux transfers. */  	struct pm_qos_request pm_qos; @@ -1191,9 +1380,14 @@ typedef struct drm_i915_private {  	struct mutex dpio_lock;  	/** Cached value of IMR to avoid reads in updating the bitfield */ -	u32 irq_mask; +	union { +		u32 irq_mask; +		u32 de_irq_mask[I915_MAX_PIPES]; +	};  	u32 gt_irq_mask;  	u32 pm_irq_mask; +	u32 pm_rps_events; +	u32 pipestat_irq_mask[I915_MAX_PIPES];  	struct work_struct hotplug_work;  	bool enable_hotplug_processing; @@ -1209,23 +1403,16 @@ typedef struct drm_i915_private {  	u32 hpd_event_bits;  	struct timer_list hotplug_reenable_timer; -	int num_plane; -  	struct i915_fbc fbc; +	struct i915_drrs drrs;  	struct intel_opregion opregion;  	struct intel_vbt_data vbt;  	/* overlay */  	struct intel_overlay *overlay; -	unsigned int sprite_scaling_enabled; -	/* backlight */ -	struct { -		int level; -		bool enabled; -		spinlock_t lock; /* bl registers and the above bl fields */ -		struct backlight_device *device; -	} backlight; +	/* backlight registers and fields in struct intel_panel */ +	spinlock_t backlight_lock;  	/* LVDS info */  	bool no_aux_handshake; @@ -1235,6 +1422,7 @@ typedef struct drm_i915_private {  	int num_fence_regs; /* 8 on pre-965, 16 otherwise */  	unsigned int fsb_freq, mem_freq, is_ddr3; +	unsigned int vlv_cdclk_freq;  	/**  	 * wq - Driver workqueue for GEM. 
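The runtime PM comment above gives the contract: any code path that actually touches the hardware must hold a reference, via intel_runtime_pm_get() or intel_display_power_get() with the appropriate domain. A minimal sketch of a register access bracketed that way (SOME_REG and the function name are placeholders for illustration, not real driver code):

	/* Illustrative only: hold a runtime PM reference across register
	 * access so the device cannot drop into D3 underneath us. */
	static u32 example_read_reg(struct drm_i915_private *dev_priv)
	{
		u32 val;

		intel_runtime_pm_get(dev_priv);	/* wake the device if suspended */
		val = I915_READ(SOME_REG);	/* SOME_REG: placeholder register */
		intel_runtime_pm_put(dev_priv);	/* re-arm autosuspend */

		return val;
	}
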
@@ -1258,21 +1446,29 @@ typedef struct drm_i915_private {  	struct mutex modeset_restore_lock;  	struct list_head vm_list; /* Global list of all address spaces */ -	struct i915_gtt gtt; /* VMA representing the global address space */ +	struct i915_gtt gtt; /* VM representing the global address space */  	struct i915_gem_mm mm; +#if defined(CONFIG_MMU_NOTIFIER) +	DECLARE_HASHTABLE(mmu_notifiers, 7); +#endif  	/* Kernel Modesetting */  	struct sdvo_device_mapping sdvo_mappings[2]; -	struct drm_crtc *plane_to_crtc_mapping[3]; -	struct drm_crtc *pipe_to_crtc_mapping[3]; +	struct drm_crtc *plane_to_crtc_mapping[I915_MAX_PIPES]; +	struct drm_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];  	wait_queue_head_t pending_flip_queue; +#ifdef CONFIG_DEBUG_FS +	struct intel_pipe_crc pipe_crc[I915_MAX_PIPES]; +#endif +  	int num_shared_dpll;  	struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];  	struct intel_ddi_plls ddi_plls; +	int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];  	/* Reclocking support */  	bool render_reclock_avail; @@ -1295,17 +1491,18 @@ typedef struct drm_i915_private {  	 * mchdev_lock in intel_pm.c */  	struct intel_ilk_power_mgmt ips; -	/* Haswell power well */ -	struct i915_power_well power_well; +	struct i915_power_domains power_domains; -	enum no_psr_reason no_psr_reason; +	struct i915_psr psr;  	struct i915_gpu_error gpu_error;  	struct drm_i915_gem_object *vlv_pctx; +#ifdef CONFIG_DRM_I915_FBDEV  	/* list of fbdev register on this device */  	struct intel_fbdev *fbdev; +#endif  	/*  	 * The console may be contended at resume, but we don't @@ -1316,12 +1513,14 @@ typedef struct drm_i915_private {  	struct drm_property *broadcast_rgb_property;  	struct drm_property *force_audio_property; -	bool hw_contexts_disabled;  	uint32_t hw_context_size; +	struct list_head context_list;  	u32 fdi_rx_config; +	u32 suspend_count;  	struct i915_suspend_saved_registers regfile; +	struct vlv_s0ix_state vlv_s0ix_state;  	struct {  		/* @@ -1335,16 +1534,24 @@ typedef struct drm_i915_private {  		uint16_t spr_latency[5];  		/* cursor */  		uint16_t cur_latency[5]; + +		/* current hardware state */ +		struct ilk_wm_values hw;  	} wm; -	struct i915_package_c8 pc8; +	struct i915_runtime_pm pm;  	/* Old dri1 support infrastructure, beware the dragons ya fools entering  	 * here! */  	struct i915_dri1_state dri1;  	/* Old ums support infrastructure, same warning applies. */  	struct i915_ums_state ums; -} drm_i915_private_t; + +	/* +	 * NOTE: This is the dri1/ums dungeon, don't add stuff here. Your patch +	 * will be rejected. Instead look for a better place. +	 */ +};  static inline struct drm_i915_private *to_i915(const struct drm_device *dev)  { @@ -1381,6 +1588,8 @@ struct drm_i915_gem_object_ops {  	 */  	int (*get_pages)(struct drm_i915_gem_object *);  	void (*put_pages)(struct drm_i915_gem_object *); +	int (*dmabuf_export)(struct drm_i915_gem_object *); +	void (*release)(struct drm_i915_gem_object *);  };  struct drm_i915_gem_object { @@ -1398,8 +1607,6 @@ struct drm_i915_gem_object {  	struct list_head ring_list;  	/** Used in execbuf to temporarily hold a ref */  	struct list_head obj_exec_link; -	/** This object's place in the batchbuffer or on the eviction list */ -	struct list_head exec_list;  	/**  	 * This is set if the object is on the active lists (has pending @@ -1439,18 +1646,6 @@ struct drm_i915_gem_object {  	 */  	unsigned int fence_dirty:1; -	/** How many users have pinned this object in GTT space. 
The following -	 * users can each hold at most one reference: pwrite/pread, pin_ioctl -	 * (via user_pin_count), execbuffer (objects are not allowed multiple -	 * times for the same batchbuffer), and the framebuffer code. When -	 * switching/pageflipping, the framebuffer code has at most two buffers -	 * pinned per crtc. -	 * -	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3 -	 * bits with absolutely no headroom. So use 4 bits. */ -	unsigned int pin_count:4; -#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf -  	/**  	 * Is the object at the current location in the gtt mappable and  	 * fenceable? Used to avoid costly recalculations. @@ -1485,14 +1680,7 @@ struct drm_i915_gem_object {  	void *dma_buf_vmapping;  	int vmapping_count; -	/** -	 * Used for performing relocations during execbuffer insertion. -	 */ -	struct hlist_node exec_node; -	unsigned long exec_handle; -	struct drm_i915_gem_exec_object2 *exec_entry; - -	struct intel_ring_buffer *ring; +	struct intel_engine_cs *ring;  	/** Breadcrumb of last rendering to the buffer. */  	uint32_t last_read_seqno; @@ -1503,18 +1691,32 @@ struct drm_i915_gem_object {  	/** Current tiling stride for the object, if it's tiled. */  	uint32_t stride; +	/** References from framebuffers, locks out tiling changes. */ +	unsigned long framebuffer_references; +  	/** Record of address bit 17 of each page at last unbind. */  	unsigned long *bit_17;  	/** User space pin count and filp owning the pin */ -	uint32_t user_pin_count; +	unsigned long user_pin_count;  	struct drm_file *pin_filp;  	/** for phy allocated objects */ -	struct drm_i915_gem_phys_object *phys_obj; +	drm_dma_handle_t *phys_handle; + +	union { +		struct i915_gem_userptr { +			uintptr_t ptr; +			unsigned read_only :1; +			unsigned workers :4; +#define I915_GEM_USERPTR_MAX_WORKERS 15 + +			struct mm_struct *mm; +			struct i915_mmu_object *mn; +			struct work_struct *work; +		} userptr; +	};  }; -#define to_gem_object(obj) (&((struct drm_i915_gem_object *)(obj))->base) -  #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base)  /** @@ -1529,7 +1731,7 @@ struct drm_i915_gem_object {   */  struct drm_i915_gem_request {  	/** On Which ring this request was generated */ -	struct intel_ring_buffer *ring; +	struct intel_engine_cs *ring;  	/** GEM sequence number associated with this request. */  	uint32_t seqno; @@ -1541,7 +1743,7 @@ struct drm_i915_gem_request {  	u32 tail;  	/** Context related to this request */ -	struct i915_hw_context *ctx; +	struct intel_context *ctx;  	/** Batch buffer related to this request if any */  	struct drm_i915_gem_object *batch_obj; @@ -1558,48 +1760,156 @@ struct drm_i915_gem_request {  };  struct drm_i915_file_private { +	struct drm_i915_private *dev_priv; +	struct drm_file *file; +  	struct {  		spinlock_t lock;  		struct list_head request_list; +		struct delayed_work idle_work;  	} mm;  	struct idr context_idr; -	struct i915_ctx_hang_stats hang_stats; +	atomic_t rps_wait_boost; +	struct  intel_engine_cs *bsd_ring; +}; + +/* + * A command that requires special handling by the command parser. + */ +struct drm_i915_cmd_descriptor { +	/* +	 * Flags describing how the command parser processes the command. 
+	 * +	 * CMD_DESC_FIXED: The command has a fixed length if this is set, +	 *                 a length mask if not set +	 * CMD_DESC_SKIP: The command is allowed but does not follow the +	 *                standard length encoding for the opcode range in +	 *                which it falls +	 * CMD_DESC_REJECT: The command is never allowed +	 * CMD_DESC_REGISTER: The command should be checked against the +	 *                    register whitelist for the appropriate ring +	 * CMD_DESC_MASTER: The command is allowed if the submitting process +	 *                  is the DRM master +	 */ +	u32 flags; +#define CMD_DESC_FIXED    (1<<0) +#define CMD_DESC_SKIP     (1<<1) +#define CMD_DESC_REJECT   (1<<2) +#define CMD_DESC_REGISTER (1<<3) +#define CMD_DESC_BITMASK  (1<<4) +#define CMD_DESC_MASTER   (1<<5) + +	/* +	 * The command's unique identification bits and the bitmask to get them. +	 * This isn't strictly the opcode field as defined in the spec and may +	 * also include type, subtype, and/or subop fields. +	 */ +	struct { +		u32 value; +		u32 mask; +	} cmd; + +	/* +	 * The command's length. The command is either fixed length (i.e. does +	 * not include a length field) or has a length field mask. The flag +	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has +	 * a length mask. All command entries in a command table must include +	 * length information. +	 */ +	union { +		u32 fixed; +		u32 mask; +	} length; + +	/* +	 * Describes where to find a register address in the command to check +	 * against the ring's register whitelist. Only valid if flags has the +	 * CMD_DESC_REGISTER bit set. +	 */ +	struct { +		u32 offset; +		u32 mask; +	} reg; + +#define MAX_CMD_DESC_BITMASKS 3 +	/* +	 * Describes command checks where a particular dword is masked and +	 * compared against an expected value. If the command does not match +	 * the expected value, the parser rejects it. Only valid if flags has +	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero +	 * are valid. +	 * +	 * If the check specifies a non-zero condition_mask then the parser +	 * only performs the check when the bits specified by condition_mask +	 * are non-zero. +	 */ +	struct { +		u32 offset; +		u32 mask; +		u32 expected; +		u32 condition_offset; +		u32 condition_mask; +	} bits[MAX_CMD_DESC_BITMASKS]; +}; + +/* + * A table of commands requiring special handling by the command parser. + * + * Each ring has an array of tables. Each table consists of an array of command + * descriptors, which must be sorted with command opcodes in ascending order. 
+ */ +struct drm_i915_cmd_table { +	const struct drm_i915_cmd_descriptor *table; +	int count;  }; -#define INTEL_INFO(dev)	(to_i915(dev)->info) +#define INTEL_INFO(dev)	(&to_i915(dev)->info) -#define IS_I830(dev)		((dev)->pci_device == 0x3577) -#define IS_845G(dev)		((dev)->pci_device == 0x2562) +#define IS_I830(dev)		((dev)->pdev->device == 0x3577) +#define IS_845G(dev)		((dev)->pdev->device == 0x2562)  #define IS_I85X(dev)		(INTEL_INFO(dev)->is_i85x) -#define IS_I865G(dev)		((dev)->pci_device == 0x2572) +#define IS_I865G(dev)		((dev)->pdev->device == 0x2572)  #define IS_I915G(dev)		(INTEL_INFO(dev)->is_i915g) -#define IS_I915GM(dev)		((dev)->pci_device == 0x2592) -#define IS_I945G(dev)		((dev)->pci_device == 0x2772) +#define IS_I915GM(dev)		((dev)->pdev->device == 0x2592) +#define IS_I945G(dev)		((dev)->pdev->device == 0x2772)  #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)  #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)  #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline) -#define IS_GM45(dev)		((dev)->pci_device == 0x2A42) +#define IS_GM45(dev)		((dev)->pdev->device == 0x2A42)  #define IS_G4X(dev)		(INTEL_INFO(dev)->is_g4x) -#define IS_PINEVIEW_G(dev)	((dev)->pci_device == 0xa001) -#define IS_PINEVIEW_M(dev)	((dev)->pci_device == 0xa011) +#define IS_PINEVIEW_G(dev)	((dev)->pdev->device == 0xa001) +#define IS_PINEVIEW_M(dev)	((dev)->pdev->device == 0xa011)  #define IS_PINEVIEW(dev)	(INTEL_INFO(dev)->is_pineview)  #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33) -#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046) +#define IS_IRONLAKE_M(dev)	((dev)->pdev->device == 0x0046)  #define IS_IVYBRIDGE(dev)	(INTEL_INFO(dev)->is_ivybridge) -#define IS_IVB_GT1(dev)		((dev)->pci_device == 0x0156 || \ -				 (dev)->pci_device == 0x0152 ||	\ -				 (dev)->pci_device == 0x015a) -#define IS_SNB_GT1(dev)		((dev)->pci_device == 0x0102 || \ -				 (dev)->pci_device == 0x0106 ||	\ -				 (dev)->pci_device == 0x010A) +#define IS_IVB_GT1(dev)		((dev)->pdev->device == 0x0156 || \ +				 (dev)->pdev->device == 0x0152 || \ +				 (dev)->pdev->device == 0x015a) +#define IS_SNB_GT1(dev)		((dev)->pdev->device == 0x0102 || \ +				 (dev)->pdev->device == 0x0106 || \ +				 (dev)->pdev->device == 0x010A)  #define IS_VALLEYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview) +#define IS_CHERRYVIEW(dev)	(INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))  #define IS_HASWELL(dev)	(INTEL_INFO(dev)->is_haswell) +#define IS_BROADWELL(dev)	(!INTEL_INFO(dev)->is_valleyview && IS_GEN8(dev))  #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)  #define IS_HSW_EARLY_SDV(dev)	(IS_HASWELL(dev) && \ -				 ((dev)->pci_device & 0xFF00) == 0x0C00) -#define IS_ULT(dev)		(IS_HASWELL(dev) && \ -				 ((dev)->pci_device & 0xFF00) == 0x0A00) +				 ((dev)->pdev->device & 0xFF00) == 0x0C00) +#define IS_BDW_ULT(dev)		(IS_BROADWELL(dev) && \ +				 (((dev)->pdev->device & 0xf) == 0x2  || \ +				 ((dev)->pdev->device & 0xf) == 0x6 || \ +				 ((dev)->pdev->device & 0xf) == 0xe)) +#define IS_HSW_ULT(dev)		(IS_HASWELL(dev) && \ +				 ((dev)->pdev->device & 0xFF00) == 0x0A00) +#define IS_ULT(dev)		(IS_HSW_ULT(dev) || IS_BDW_ULT(dev)) +#define IS_HSW_GT3(dev)		(IS_HASWELL(dev) && \ +				 ((dev)->pdev->device & 0x00F0) == 0x0020) +/* ULX machines are also considered ULT. 
*/ +#define IS_HSW_ULX(dev)		((dev)->pdev->device == 0x0A0E || \ +				 (dev)->pdev->device == 0x0A1E) +#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)  /*   * The genX designation typically refers to the render engine, so render @@ -1613,22 +1923,43 @@ struct drm_i915_file_private {  #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)  #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)  #define IS_GEN7(dev)	(INTEL_INFO(dev)->gen == 7) - -#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring) -#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring) -#define HAS_VEBOX(dev)          (INTEL_INFO(dev)->has_vebox_ring) -#define HAS_LLC(dev)            (INTEL_INFO(dev)->has_llc) -#define HAS_WT(dev)            (IS_HASWELL(dev) && to_i915(dev)->ellc_size) +#define IS_GEN8(dev)	(INTEL_INFO(dev)->gen == 8) + +#define RENDER_RING		(1<<RCS) +#define BSD_RING		(1<<VCS) +#define BLT_RING		(1<<BCS) +#define VEBOX_RING		(1<<VECS) +#define BSD2_RING		(1<<VCS2) +#define HAS_BSD(dev)		(INTEL_INFO(dev)->ring_mask & BSD_RING) +#define HAS_BSD2(dev)		(INTEL_INFO(dev)->ring_mask & BSD2_RING) +#define HAS_BLT(dev)		(INTEL_INFO(dev)->ring_mask & BLT_RING) +#define HAS_VEBOX(dev)		(INTEL_INFO(dev)->ring_mask & VEBOX_RING) +#define HAS_LLC(dev)		(INTEL_INFO(dev)->has_llc) +#define HAS_WT(dev)		((IS_HASWELL(dev) || IS_BROADWELL(dev)) && \ +				 to_i915(dev)->ellc_size)  #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)  #define HAS_HW_CONTEXTS(dev)	(INTEL_INFO(dev)->gen >= 6) -#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >=6 && !IS_VALLEYVIEW(dev)) +#define HAS_ALIASING_PPGTT(dev)	(INTEL_INFO(dev)->gen >= 6 && \ +				 (!IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))) +#define HAS_PPGTT(dev)		(INTEL_INFO(dev)->gen >= 7 \ +				 && !IS_GEN8(dev)) +#define USES_PPGTT(dev)		intel_enable_ppgtt(dev, false) +#define USES_FULL_PPGTT(dev)	intel_enable_ppgtt(dev, true)  #define HAS_OVERLAY(dev)		(INTEL_INFO(dev)->has_overlay)  #define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)  /* Early gen2 have a totally busted CS tlb and require pinned batches. */  #define HAS_BROKEN_CS_TLB(dev)		(IS_I830(dev) || IS_845G(dev)) +/* + * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts + * even when in MSI mode. This results in spurious interrupt warnings if the + * legacy irq no. is shared with another device. The kernel then disables that + * interrupt source and so prevents the other device from working properly. + */ +#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5) +#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)  /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte   * rows, which changed the alignment requirements and fence programming. 
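The HAS_BSD()/HAS_BLT()/HAS_VEBOX() macros above all reduce to a bit test against the new intel_device_info.ring_mask field, so a device description declares its engines in a single byte. A small sketch of the idea (the composed mask value is illustrative, not taken from a real device table):

	/* Illustrative: compose a ring mask the way a device_info entry
	 * would, then test it exactly as the HAS_*() macros do. */
	#define EXAMPLE_RING_MASK	(RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING)

	static bool example_has_vebox(const struct intel_device_info *info)
	{
		return (info->ring_mask & VEBOX_RING) != 0;
	}
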
@@ -1638,19 +1969,20 @@ struct drm_i915_file_private {  #define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))  #define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))  #define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev)) -#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))  #define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)  #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)  #define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)  #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr) -#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) +#define HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc) -#define HAS_IPS(dev)		(IS_ULT(dev)) +#define HAS_IPS(dev)		(IS_ULT(dev) || IS_BROADWELL(dev))  #define HAS_DDI(dev)		(INTEL_INFO(dev)->has_ddi) -#define HAS_POWER_WELL(dev)	(IS_HASWELL(dev))  #define HAS_FPGA_DBG_UNCLAIMED(dev)	(INTEL_INFO(dev)->has_fpga_dbg) +#define HAS_PSR(dev)		(IS_HASWELL(dev) || IS_BROADWELL(dev)) +#define HAS_RUNTIME_PM(dev)	(IS_GEN6(dev) || IS_HASWELL(dev) || \ +				 IS_BROADWELL(dev) || IS_VALLEYVIEW(dev))  #define INTEL_PCH_DEVICE_ID_MASK		0xff00  #define INTEL_PCH_IBX_DEVICE_ID_TYPE		0x3b00 @@ -1666,63 +1998,51 @@ struct drm_i915_file_private {  #define HAS_PCH_NOP(dev) (INTEL_PCH_TYPE(dev) == PCH_NOP)  #define HAS_PCH_SPLIT(dev) (INTEL_PCH_TYPE(dev) != PCH_NONE) -#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake) - -#define HAS_L3_GPU_CACHE(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) +/* DPF == dynamic parity feature */ +#define HAS_L3_DPF(dev) (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) +#define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev))  #define GT_FREQUENCY_MULTIPLIER 50  #include "i915_trace.h" -/** - * RC6 is a special power stage which allows the GPU to enter an very - * low-voltage mode when idle, using down to 0V while at this stage.  This - * stage is entered automatically when the GPU is idle when RC6 support is - * enabled, and as soon as new workload arises GPU wakes up automatically as well. - * - * There are different RC6 modes available in Intel GPU, which differentiate - * among each other with the latency required to enter and leave RC6 and - * voltage consumed by the GPU in different states. - * - * The combination of the following flags define which states GPU is allowed - * to enter, while RC6 is the normal RC6 state, RC6p is the deep RC6, and - * RC6pp is deepest RC6. Their support by hardware varies according to the - * GPU, BIOS, chipset and platform. RC6 is usually the safest one and the one - * which brings the most power savings; deeper states save more power, but - * require higher latency to switch to and wake up. 
- */ -#define INTEL_RC6_ENABLE			(1<<0) -#define INTEL_RC6p_ENABLE			(1<<1) -#define INTEL_RC6pp_ENABLE			(1<<2) -  extern const struct drm_ioctl_desc i915_ioctls[];  extern int i915_max_ioctl; -extern unsigned int i915_fbpercrtc __always_unused; -extern int i915_panel_ignore_lid __read_mostly; -extern unsigned int i915_powersave __read_mostly; -extern int i915_semaphores __read_mostly; -extern unsigned int i915_lvds_downclock __read_mostly; -extern int i915_lvds_channel_mode __read_mostly; -extern int i915_panel_use_ssc __read_mostly; -extern int i915_vbt_sdvo_panel_type __read_mostly; -extern int i915_enable_rc6 __read_mostly; -extern int i915_enable_fbc __read_mostly; -extern bool i915_enable_hangcheck __read_mostly; -extern int i915_enable_ppgtt __read_mostly; -extern int i915_enable_psr __read_mostly; -extern unsigned int i915_preliminary_hw_support __read_mostly; -extern int i915_disable_power_well __read_mostly; -extern int i915_enable_ips __read_mostly; -extern bool i915_fastboot __read_mostly; -extern int i915_enable_pc8 __read_mostly; -extern int i915_pc8_timeout __read_mostly; -extern bool i915_prefault_disable __read_mostly;  extern int i915_suspend(struct drm_device *dev, pm_message_t state);  extern int i915_resume(struct drm_device *dev);  extern int i915_master_create(struct drm_device *dev, struct drm_master *master);  extern void i915_master_destroy(struct drm_device *dev, struct drm_master *master); +/* i915_params.c */ +struct i915_params { +	int modeset; +	int panel_ignore_lid; +	unsigned int powersave; +	int semaphores; +	unsigned int lvds_downclock; +	int lvds_channel_mode; +	int panel_use_ssc; +	int vbt_sdvo_panel_type; +	int enable_rc6; +	int enable_fbc; +	int enable_ppgtt; +	int enable_psr; +	unsigned int preliminary_hw_support; +	int disable_power_well; +	int enable_ips; +	int invert_brightness; +	int enable_cmd_parser; +	/* leave bools at the end to not create holes */ +	bool enable_hangcheck; +	bool fastboot; +	bool prefault_disable; +	bool reset; +	bool disable_display; +	bool disable_vtd_wa; +}; +extern struct i915_params i915 __read_mostly; +  				/* i915_dma.c */  void i915_update_dri1_breadcrumb(struct drm_device *dev);  extern void i915_kernel_lost_context(struct drm_device * dev); @@ -1748,29 +2068,37 @@ extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);  extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);  extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);  extern void i915_update_gfx_val(struct drm_i915_private *dev_priv); +int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);  extern void intel_console_resume(struct work_struct *work);  /* i915_irq.c */  void i915_queue_hangcheck(struct drm_device *dev); -void i915_handle_error(struct drm_device *dev, bool wedged); +__printf(3, 4) +void i915_handle_error(struct drm_device *dev, bool wedged, +		       const char *fmt, ...); +void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir, +							int new_delay);  extern void intel_irq_init(struct drm_device *dev); -extern void intel_pm_init(struct drm_device *dev);  extern void intel_hpd_init(struct drm_device *dev); -extern void intel_pm_init(struct drm_device *dev);  extern void intel_uncore_sanitize(struct drm_device *dev);  extern void intel_uncore_early_sanitize(struct drm_device *dev);  extern void intel_uncore_init(struct drm_device *dev); -extern void intel_uncore_clear_errors(struct drm_device *dev);  extern void intel_uncore_check_errors(struct drm_device *dev); 
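i915_handle_error() now carries a printf-style reason string (hence the __printf(3, 4) annotation above), which is recorded with the captured error state (see the char error_msg[128] field earlier in this header). A hypothetical call site, with ring->name and seqno standing in for real locals:

	/* Hypothetical call site, for illustration only: */
	i915_handle_error(dev, true, "%s hung at seqno %u", ring->name, seqno);
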
 				/* i915_dma.c */
 void i915_update_dri1_breadcrumb(struct drm_device *dev);
 extern void i915_kernel_lost_context(struct drm_device * dev);
@@ -1748,29 +2068,37 @@ extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
+int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
 extern void intel_console_resume(struct work_struct *work);
 
 /* i915_irq.c */
 void i915_queue_hangcheck(struct drm_device *dev);
-void i915_handle_error(struct drm_device *dev, bool wedged);
+__printf(3, 4)
+void i915_handle_error(struct drm_device *dev, bool wedged,
+		       const char *fmt, ...);
+void gen6_set_pm_mask(struct drm_i915_private *dev_priv, u32 pm_iir,
+							int new_delay);
 
 extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
 
 extern void intel_uncore_sanitize(struct drm_device *dev);
 extern void intel_uncore_early_sanitize(struct drm_device *dev);
 extern void intel_uncore_init(struct drm_device *dev);
-extern void intel_uncore_clear_errors(struct drm_device *dev);
 extern void intel_uncore_check_errors(struct drm_device *dev);
+extern void intel_uncore_fini(struct drm_device *dev);
 
 void
-i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_enable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		     u32 status_mask);
 void
-i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
+i915_disable_pipestat(struct drm_i915_private *dev_priv, enum pipe pipe,
+		      u32 status_mask);
+
+void valleyview_enable_display_irqs(struct drm_i915_private *dev_priv);
+void valleyview_disable_display_irqs(struct drm_i915_private *dev_priv);
 
 /* i915_gem.c */
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
@@ -1815,6 +2143,9 @@ int i915_gem_set_tiling(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 int i915_gem_get_tiling(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
+int i915_gem_init_userptr(struct drm_device *dev);
+int i915_gem_userptr_ioctl(struct drm_device *dev, void *data,
+			   struct drm_file *file);
 int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
@@ -1822,28 +2153,33 @@ int i915_gem_wait_ioctl(struct drm_device *dev, void *data,
 void i915_gem_load(struct drm_device *dev);
 void *i915_gem_object_alloc(struct drm_device *dev);
 void i915_gem_object_free(struct drm_i915_gem_object *obj);
-int i915_gem_init_object(struct drm_gem_object *obj);
 void i915_gem_object_init(struct drm_i915_gem_object *obj,
 			 const struct drm_i915_gem_object_ops *ops);
 struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 						  size_t size);
+void i915_init_vm(struct drm_i915_private *dev_priv,
+		  struct i915_address_space *vm);
 void i915_gem_free_object(struct drm_gem_object *obj);
-struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj,
-				     struct i915_address_space *vm);
 void i915_gem_vma_destroy(struct i915_vma *vma);
+#define PIN_MAPPABLE 0x1
+#define PIN_NONBLOCK 0x2
+#define PIN_GLOBAL 0x4
+#define PIN_OFFSET_BIAS 0x8
+#define PIN_OFFSET_MASK (~4095)
 int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
 				     struct i915_address_space *vm,
 				     uint32_t alignment,
-				     bool map_and_fenceable,
-				     bool nonblocking);
-void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+				     uint64_t flags);
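/*
 * An illustrative caller of the new flags-based pin interface above; the
 * function name and choice of flags are hypothetical. PIN_MAPPABLE requests
 * placement in the CPU-visible aperture, PIN_NONBLOCK refuses to wait for
 * the GPU to free space, and PIN_GLOBAL forces a global-GTT binding.
 */
static int example_pin_for_scanout(struct drm_i915_gem_object *obj,
				   struct i915_address_space *vm)
{
	return i915_gem_object_pin(obj, vm, 4096,
				   PIN_MAPPABLE | PIN_NONBLOCK | PIN_GLOBAL);
}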
 int __must_check i915_vma_unbind(struct i915_vma *vma);
-int __must_check i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj);
 int i915_gem_object_put_pages(struct drm_i915_gem_object *obj);
+void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv);
 void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 
+int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
+				    int *needs_clflush);
+
 int __must_check i915_gem_object_get_pages(struct drm_i915_gem_object *obj);
 static inline struct page *i915_gem_object_get_page(struct drm_i915_gem_object *obj, int n)
 {
@@ -1867,10 +2203,9 @@ static inline void i915_gem_object_unpin_pages(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 int i915_gem_object_sync(struct drm_i915_gem_object *obj,
-			 struct intel_ring_buffer *to);
-void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
-				    struct intel_ring_buffer *ring);
-
+			 struct intel_engine_cs *to);
+void i915_vma_move_to_active(struct i915_vma *vma,
+			     struct intel_engine_cs *ring);
 int i915_gem_dumb_create(struct drm_file *file_priv,
 			 struct drm_device *dev,
 			 struct drm_mode_create_dumb *args);
@@ -1890,40 +2225,42 @@ int __must_check i915_gem_set_seqno(struct drm_device *dev, u32 seqno);
 int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
 
-static inline bool
-i915_gem_object_pin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		dev_priv->fence_regs[obj->fence_reg].pin_count++;
-		return true;
-	} else
-		return false;
-}
+bool i915_gem_object_pin_fence(struct drm_i915_gem_object *obj);
+void i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj);
 
-static inline void
-i915_gem_object_unpin_fence(struct drm_i915_gem_object *obj)
-{
-	if (obj->fence_reg != I915_FENCE_REG_NONE) {
-		struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
-		WARN_ON(dev_priv->fence_regs[obj->fence_reg].pin_count <= 0);
-		dev_priv->fence_regs[obj->fence_reg].pin_count--;
-	}
-}
+struct drm_i915_gem_request *
+i915_gem_find_active_request(struct intel_engine_cs *ring);
 
-void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_requests_ring(struct intel_ring_buffer *ring);
+bool i915_gem_retire_requests(struct drm_device *dev);
+void i915_gem_retire_requests_ring(struct intel_engine_cs *ring);
 int __must_check i915_gem_check_wedge(struct i915_gpu_error *error,
 				      bool interruptible);
 
 static inline bool i915_reset_in_progress(struct i915_gpu_error *error)
 {
 	return unlikely(atomic_read(&error->reset_counter)
-			& I915_RESET_IN_PROGRESS_FLAG);
+			& (I915_RESET_IN_PROGRESS_FLAG | I915_WEDGED));
 }
 
 static inline bool i915_terminally_wedged(struct i915_gpu_error *error)
 {
-	return atomic_read(&error->reset_counter) == I915_WEDGED;
+	return atomic_read(&error->reset_counter) & I915_WEDGED;
+}
+
+static inline u32 i915_reset_count(struct i915_gpu_error *error)
+{
+	return ((atomic_read(&error->reset_counter) & ~I915_WEDGED) + 1) / 2;
+}
+
+static inline bool i915_stop_ring_allow_ban(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gpu_error.stop_rings == 0 ||
+		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_BAN;
+}
+
+static inline bool i915_stop_ring_allow_warn(struct drm_i915_private *dev_priv)
+{
+	return dev_priv->gpu_error.stop_rings == 0 ||
+		dev_priv->gpu_error.stop_rings & I915_STOP_RING_ALLOW_WARN;
 }
 
 void i915_gem_reset(struct drm_device *dev);
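/*
 * A worked example of the reset_counter encoding implied by the helpers
 * above (assuming I915_RESET_IN_PROGRESS_FLAG is the low bit and I915_WEDGED
 * the top bit): the counter is bumped once when a reset starts (odd value)
 * and once more when it completes (even value), so:
 *
 *	counter == 0: no reset yet,        i915_reset_count() == 0
 *	counter == 1: reset in progress,   i915_reset_count() == 1
 *	counter == 2: one reset done,      i915_reset_count() == 1
 *	counter == 3: second in progress,  i915_reset_count() == 2
 *
 * Odd values make i915_reset_in_progress() true, while the I915_WEDGED bit
 * flags an unrecoverable GPU without disturbing the arithmetic (it is
 * masked off in i915_reset_count()).
 */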
@@ -1931,18 +2268,18 @@ bool i915_gem_clflush_object(struct drm_i915_gem_object *obj, bool force);
 int __must_check i915_gem_object_finish_gpu(struct drm_i915_gem_object *obj);
 int __must_check i915_gem_init(struct drm_device *dev);
 int __must_check i915_gem_init_hw(struct drm_device *dev);
-void i915_gem_l3_remap(struct drm_device *dev);
+int i915_gem_l3_remap(struct intel_engine_cs *ring, int slice);
 void i915_gem_init_swizzling(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int __must_check i915_gpu_idle(struct drm_device *dev);
-int __must_check i915_gem_idle(struct drm_device *dev);
-int __i915_add_request(struct intel_ring_buffer *ring,
+int __must_check i915_gem_suspend(struct drm_device *dev);
+int __i915_add_request(struct intel_engine_cs *ring,
 		       struct drm_file *file,
 		       struct drm_i915_gem_object *batch_obj,
 		       u32 *seqno);
 #define i915_add_request(ring, seqno) \
 	__i915_add_request(ring, NULL, NULL, seqno)
-int __must_check i915_wait_seqno(struct intel_ring_buffer *ring,
+int __must_check i915_wait_seqno(struct intel_engine_cs *ring,
 				 uint32_t seqno);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int __must_check
@@ -1953,15 +2290,11 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write);
 int __must_check
 i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 				     u32 alignment,
-				     struct intel_ring_buffer *pipelined);
+				     struct intel_engine_cs *pipelined);
 void i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj);
-int i915_gem_attach_phys_object(struct drm_device *dev,
-				struct drm_i915_gem_object *obj,
-				int id,
+int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj,
 				int align);
-void i915_gem_detach_phys_object(struct drm_device *dev,
-				 struct drm_i915_gem_object *obj);
-void i915_gem_free_all_phys_object(struct drm_device *dev);
+int i915_gem_open(struct drm_device *dev, struct drm_file *file);
 void i915_gem_release(struct drm_device *dev, struct drm_file *file);
 
 uint32_t
@@ -1993,6 +2326,16 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *
 i915_gem_obj_lookup_or_create_vma(struct drm_i915_gem_object *obj,
 				  struct i915_address_space *vm);
+
+struct i915_vma *i915_gem_obj_to_ggtt(struct drm_i915_gem_object *obj);
+static inline bool i915_gem_obj_is_pinned(struct drm_i915_gem_object *obj) {
+	struct i915_vma *vma;
+	list_for_each_entry(vma, &obj->vma_list, vma_link)
+		if (vma->pin_count > 0)
+			return true;
+	return false;
+}
+
 /* Some GGTT VM helpers */
 #define obj_to_ggtt(obj) \
 	(&((struct drm_i915_private *)(obj)->base.dev->dev_private)->gtt.base)
@@ -2023,75 +2366,73 @@ i915_gem_obj_ggtt_size(struct drm_i915_gem_object *obj)
 static inline int __must_check
 i915_gem_obj_ggtt_pin(struct drm_i915_gem_object *obj,
 		      uint32_t alignment,
-		      bool map_and_fenceable,
-		      bool nonblocking)
+		      unsigned flags)
+{
+	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment, flags | PIN_GLOBAL);
+}
+
+static inline int
+i915_gem_object_ggtt_unbind(struct drm_i915_gem_object *obj)
 {
-	return i915_gem_object_pin(obj, obj_to_ggtt(obj), alignment,
-				   map_and_fenceable, nonblocking);
+	return i915_vma_unbind(i915_gem_obj_to_ggtt(obj));
 }
-#undef obj_to_ggtt
+
+void i915_gem_object_ggtt_unpin(struct drm_i915_gem_object *obj);
 
 /* i915_gem_context.c */
-void i915_gem_context_init(struct drm_device *dev);
+#define ctx_to_ppgtt(ctx) container_of((ctx)->vm, struct i915_hw_ppgtt, base)
+int __must_check i915_gem_context_init(struct drm_device *dev);
 void i915_gem_context_fini(struct drm_device *dev);
+void i915_gem_context_reset(struct drm_device *dev);
+int i915_gem_context_open(struct drm_device *dev, struct drm_file *file);
+int i915_gem_context_enable(struct drm_i915_private *dev_priv);
 void i915_gem_context_close(struct drm_device *dev, struct drm_file *file);
-int i915_switch_context(struct intel_ring_buffer *ring,
-			struct drm_file *file, int to_id);
+int i915_switch_context(struct intel_engine_cs *ring,
+			struct intel_context *to);
+struct intel_context *
+i915_gem_context_get(struct drm_i915_file_private *file_priv, u32 id);
 void i915_gem_context_free(struct kref *ctx_ref);
-static inline void i915_gem_context_reference(struct i915_hw_context *ctx)
+static inline void i915_gem_context_reference(struct intel_context *ctx)
 {
 	kref_get(&ctx->ref);
 }
 
-static inline void i915_gem_context_unreference(struct i915_hw_context *ctx)
+static inline void i915_gem_context_unreference(struct intel_context *ctx)
 {
 	kref_put(&ctx->ref, i915_gem_context_free);
 }
 
-struct i915_ctx_hang_stats * __must_check
-i915_gem_context_get_hang_stats(struct drm_device *dev,
-				struct drm_file *file,
-				u32 id);
+static inline bool i915_gem_context_is_default(const struct intel_context *c)
+{
+	return c->id == DEFAULT_CONTEXT_ID;
+}
+
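/*
 * A hypothetical caller showing the intended refcounting discipline around
 * the kref helpers above: hold a reference for as long as the context is in
 * use, then drop it (the final put frees via i915_gem_context_free()).
 */
static void example_use_context(struct intel_context *ctx)
{
	i915_gem_context_reference(ctx);	/* kref_get() */
	/* ... submit work that relies on ctx staying alive ... */
	i915_gem_context_unreference(ctx);	/* kref_put() */
}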
 int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file);
 int i915_gem_context_destroy_ioctl(struct drm_device *dev, void *data,
 				   struct drm_file *file);
 
-/* i915_gem_gtt.c */
-void i915_gem_cleanup_aliasing_ppgtt(struct drm_device *dev);
-void i915_ppgtt_bind_object(struct i915_hw_ppgtt *ppgtt,
-			    struct drm_i915_gem_object *obj,
-			    enum i915_cache_level cache_level);
-void i915_ppgtt_unbind_object(struct i915_hw_ppgtt *ppgtt,
-			      struct drm_i915_gem_object *obj);
-
-void i915_gem_restore_gtt_mappings(struct drm_device *dev);
-int __must_check i915_gem_gtt_prepare_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj,
-				enum i915_cache_level cache_level);
-void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
-void i915_gem_gtt_finish_object(struct drm_i915_gem_object *obj);
-void i915_gem_init_global_gtt(struct drm_device *dev);
-void i915_gem_setup_global_gtt(struct drm_device *dev, unsigned long start,
-			       unsigned long mappable_end, unsigned long end);
-int i915_gem_gtt_init(struct drm_device *dev);
-static inline void i915_gem_chipset_flush(struct drm_device *dev)
-{
-	if (INTEL_INFO(dev)->gen < 6)
-		intel_gtt_chipset_flush();
-}
-
-
+/* i915_gem_render_state.c */
+int i915_gem_render_state_init(struct intel_engine_cs *ring);
 /* i915_gem_evict.c */
 int __must_check i915_gem_evict_something(struct drm_device *dev,
 					  struct i915_address_space *vm,
 					  int min_size,
 					  unsigned alignment,
 					  unsigned cache_level,
-					  bool mappable,
-					  bool nonblock);
+					  unsigned long start,
+					  unsigned long end,
+					  unsigned flags);
+int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 int i915_gem_evict_everything(struct drm_device *dev);
 
+/* belongs in i915_gem_gtt.h */
+static inline void i915_gem_chipset_flush(struct drm_device *dev)
+{
+	if (INTEL_INFO(dev)->gen < 6)
+		intel_gtt_chipset_flush();
+}
+
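/*
 * A schematic caller, for illustration: on pre-gen6 parts the GPU does not
 * snoop the CPU caches, so CPU writes to command buffers must be flushed out
 * of the chipset before the GPU consumes them. The function name here is
 * hypothetical.
 */
static void example_finish_cpu_writes(struct drm_device *dev)
{
	/* ... CPU writes to a batch or ring buffer ... */
	i915_gem_chipset_flush(dev);	/* a no-op at runtime on gen6+ */
}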
 /* i915_gem_stolen.c */
 int i915_gem_init_stolen(struct drm_device *dev);
 int i915_gem_stolen_setup_compression(struct drm_device *dev, int size);
@@ -2109,7 +2450,7 @@ void i915_gem_object_release_stolen(struct drm_i915_gem_object *obj);
 /* i915_gem_tiling.c */
 static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
 {
-	drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
+	struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
 
 	return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
 		obj->tiling_mode != I915_TILING_NONE;
@@ -2129,6 +2470,11 @@ int i915_verify_lists(struct drm_device *dev);
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 void i915_debugfs_cleanup(struct drm_minor *minor);
+#ifdef CONFIG_DEBUG_FS
+void intel_display_crc_init(struct drm_device *dev);
+#else
+static inline void intel_display_crc_init(struct drm_device *dev) {}
+#endif
 
 /* i915_gpu_error.c */
 __printf(2, 3)
@@ -2142,7 +2488,8 @@ static inline void i915_error_state_buf_release(
 {
 	kfree(eb->buf);
 }
-void i915_capture_error_state(struct drm_device *dev);
+void i915_capture_error_state(struct drm_device *dev, bool wedge,
+			      const char *error_msg);
 void i915_error_state_get(struct drm_device *dev,
 			  struct i915_error_state_file_priv *error_priv);
 void i915_error_state_put(struct i915_error_state_file_priv *error_priv);
@@ -2151,6 +2498,16 @@ void i915_destroy_error_state(struct drm_device *dev);
 
 void i915_get_extra_instdone(struct drm_device *dev, uint32_t *instdone);
 const char *i915_cache_level_str(int type);
 
+/* i915_cmd_parser.c */
+int i915_cmd_parser_get_version(void);
+int i915_cmd_parser_init_ring(struct intel_engine_cs *ring);
+void i915_cmd_parser_fini_ring(struct intel_engine_cs *ring);
+bool i915_needs_cmd_parser(struct intel_engine_cs *ring);
+int i915_parse_cmds(struct intel_engine_cs *ring,
+		    struct drm_i915_gem_object *batch_obj,
+		    u32 batch_start_offset,
+		    bool is_master);
+
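/*
 * A hedged sketch of the intended call flow at execbuffer time, inferred
 * from the declarations above; the real integration point and error
 * handling live in the execbuffer code, not in this header.
 */
static int example_check_batch(struct intel_engine_cs *ring,
			       struct drm_i915_gem_object *batch_obj,
			       bool is_master)
{
	if (!i915_needs_cmd_parser(ring))
		return 0;

	/* Scan the batch from offset 0, rejecting privileged commands
	 * issued by unprivileged clients. */
	return i915_parse_cmds(ring, batch_obj, 0, is_master);
}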
 /* i915_suspend.c */
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
@@ -2182,15 +2539,31 @@ static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
 extern void intel_i2c_reset(struct drm_device *dev);
 
 /* intel_opregion.c */
-extern int intel_opregion_setup(struct drm_device *dev);
+struct intel_encoder;
 #ifdef CONFIG_ACPI
+extern int intel_opregion_setup(struct drm_device *dev);
 extern void intel_opregion_init(struct drm_device *dev);
 extern void intel_opregion_fini(struct drm_device *dev);
 extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
+					 bool enable);
+extern int intel_opregion_notify_adapter(struct drm_device *dev,
+					 pci_power_t state);
 #else
+static inline int intel_opregion_setup(struct drm_device *dev) { return 0; }
 static inline void intel_opregion_init(struct drm_device *dev) { return; }
 static inline void intel_opregion_fini(struct drm_device *dev) { return; }
 static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline int
+intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, bool enable)
+{
+	return 0;
+}
+static inline int
+intel_opregion_notify_adapter(struct drm_device *dev, pci_power_t state)
+{
+	return 0;
+}
 #endif
 
 /* intel_acpi.c */
@@ -2208,10 +2581,12 @@ extern void intel_modeset_suspend_hw(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_gem_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
+extern void intel_connector_unregister(struct intel_connector *);
 extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern void intel_modeset_setup_hw_state(struct drm_device *dev,
 					 bool force_restore);
 extern void i915_redisable_vga(struct drm_device *dev);
+extern void i915_redisable_vga_power_on(struct drm_device *dev);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
@@ -2227,6 +2602,8 @@ extern int intel_enable_rc6(const struct drm_device *dev);
 extern bool i915_semaphore_is_enabled(struct drm_device *dev);
 int i915_reg_read_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file);
+int i915_get_reset_stats_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file);
 
 /* overlay */
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
@@ -2242,8 +2619,9 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
  * must be set to prevent GT core from power down and stale values being
  * returned.
  */
-void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv);
-void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv);
+void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv, int fw_engine);
+void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv, int fw_engine);
+void assert_force_wake_inactive(struct drm_i915_private *dev_priv);
 
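/*
 * An illustrative bracket around a raw register read, per the comment
 * above; FORCEWAKE_ALL is defined further down in this header, and real
 * callers normally rely on the I915_READ/WRITE machinery doing this
 * implicitly. The function name is hypothetical.
 */
static u32 example_read_gt_reg(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val;

	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);	/* keep GT awake */
	val = I915_READ(reg);
	gen6_gt_force_wake_put(dev_priv, FORCEWAKE_ALL);	/* allow power-down */

	return val;
}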
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u8 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val);
@@ -2252,47 +2630,65 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u8 mbox, u32 val)
 
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u8 addr);
 void vlv_punit_write(struct drm_i915_private *dev_priv, u8 addr, u32 val);
 u32 vlv_nc_read(struct drm_i915_private *dev_priv, u8 addr);
-u32 vlv_dpio_read(struct drm_i915_private *dev_priv, int reg);
-void vlv_dpio_write(struct drm_i915_private *dev_priv, int reg, u32 val);
+u32 vlv_gpio_nc_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gpio_nc_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_cck_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_cck_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_ccu_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_ccu_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_bunit_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_bunit_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_gps_core_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_gps_core_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+u32 vlv_dpio_read(struct drm_i915_private *dev_priv, enum pipe pipe, int reg);
+void vlv_dpio_write(struct drm_i915_private *dev_priv, enum pipe pipe, int reg, u32 val);
 u32 intel_sbi_read(struct drm_i915_private *dev_priv, u16 reg,
 		   enum intel_sbi_destination destination);
 void intel_sbi_write(struct drm_i915_private *dev_priv, u16 reg, u32 value,
 		     enum intel_sbi_destination destination);
+u32 vlv_flisdsi_read(struct drm_i915_private *dev_priv, u32 reg);
+void vlv_flisdsi_write(struct drm_i915_private *dev_priv, u32 reg, u32 val);
+
+int vlv_gpu_freq(struct drm_i915_private *dev_priv, int val);
+int vlv_freq_opcode(struct drm_i915_private *dev_priv, int val);
+
+#define FORCEWAKE_RENDER	(1 << 0)
+#define FORCEWAKE_MEDIA		(1 << 1)
+#define FORCEWAKE_ALL		(FORCEWAKE_RENDER | FORCEWAKE_MEDIA)
+
+
+#define I915_READ8(reg)		dev_priv->uncore.funcs.mmio_readb(dev_priv, (reg), true)
+#define I915_WRITE8(reg, val)	dev_priv->uncore.funcs.mmio_writeb(dev_priv, (reg), (val), true)
 
-int vlv_gpu_freq(int ddr_freq, int val);
-int vlv_freq_opcode(int ddr_freq, int val);
-
-#define __i915_read(x) \
-	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg, bool trace);
-__i915_read(8)
-__i915_read(16)
-__i915_read(32)
-__i915_read(64)
-#undef __i915_read
-
-#define __i915_write(x) \
-	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val, bool trace);
-__i915_write(8)
-__i915_write(16)
-__i915_write(32)
-__i915_write(64)
-#undef __i915_write
-
-#define I915_READ8(reg)		i915_read8(dev_priv, (reg), true)
-#define I915_WRITE8(reg, val)	i915_write8(dev_priv, (reg), (val), true)
-
-#define I915_READ16(reg)	i915_read16(dev_priv, (reg), true)
-#define I915_WRITE16(reg, val)	i915_write16(dev_priv, (reg), (val), true)
-#define I915_READ16_NOTRACE(reg)	i915_read16(dev_priv, (reg), false)
-#define I915_WRITE16_NOTRACE(reg, val)	i915_write16(dev_priv, (reg), (val), false)
-
-#define I915_READ(reg)		i915_read32(dev_priv, (reg), true)
-#define I915_WRITE(reg, val)	i915_write32(dev_priv, (reg), (val), true)
-#define I915_READ_NOTRACE(reg)		i915_read32(dev_priv, (reg), false)
-#define I915_WRITE_NOTRACE(reg, val)	i915_write32(dev_priv, (reg), (val), false)
-
-#define I915_WRITE64(reg, val)	i915_write64(dev_priv, (reg), (val), true)
-#define I915_READ64(reg)	i915_read64(dev_priv, (reg), true)
+#define I915_READ16(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), true)
+#define I915_WRITE16(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), true)
+#define I915_READ16_NOTRACE(reg)	dev_priv->uncore.funcs.mmio_readw(dev_priv, (reg), false)
+#define I915_WRITE16_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writew(dev_priv, (reg), (val), false)
+
+#define I915_READ(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), true)
+#define I915_WRITE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), true)
+#define I915_READ_NOTRACE(reg)		dev_priv->uncore.funcs.mmio_readl(dev_priv, (reg), false)
+#define I915_WRITE_NOTRACE(reg, val)	dev_priv->uncore.funcs.mmio_writel(dev_priv, (reg), (val), false)
+
+/* Be very careful with read/write 64-bit values. On 32-bit machines, they
+ * will be implemented using 2 32-bit writes in an arbitrary order with
+ * an arbitrary delay between them. This can cause the hardware to
+ * act upon the intermediate value, possibly leading to corruption and
+ * machine death. You have been warned.
+ */
+#define I915_WRITE64(reg, val)	dev_priv->uncore.funcs.mmio_writeq(dev_priv, (reg), (val), true)
+#define I915_READ64(reg)	dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
+
+#define I915_READ64_2x32(lower_reg, upper_reg) ({			\
+		u32 upper = I915_READ(upper_reg);			\
+		u32 lower = I915_READ(lower_reg);			\
+		u32 tmp = I915_READ(upper_reg);				\
+		if (upper != tmp) {					\
+			upper = tmp;					\
+			lower = I915_READ(lower_reg);			\
+			WARN_ON(I915_READ(upper_reg) != upper);		\
+		}							\
+		(u64)upper << 32 | lower; })
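/*
 * A worked illustration of the retry above: a 64-bit counter split across
 * two 32-bit registers can carry between the two reads (e.g. 0x1_ffffffff
 * advancing to 0x2_00000000). Re-reading the upper half detects the carry,
 * after which the lower half is sampled again. The register offsets below
 * are placeholders, not real i915 registers.
 */
#define EXAMPLE_COUNTER_LOW	0x140000	/* hypothetical */
#define EXAMPLE_COUNTER_HIGH	0x140004	/* hypothetical */

static u64 example_read_64bit_counter(struct drm_i915_private *dev_priv)
{
	return I915_READ64_2x32(EXAMPLE_COUNTER_LOW, EXAMPLE_COUNTER_HIGH);
}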
 #define POSTING_READ(reg)	(void)I915_READ_NOTRACE(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16_NOTRACE(reg)
@@ -2332,4 +2728,31 @@ timespec_to_jiffies_timeout(const struct timespec *value)
 	return min_t(unsigned long, MAX_JIFFY_OFFSET, j + 1);
 }
 
+/*
+ * If you need to wait X milliseconds between events A and B, but event B
+ * doesn't happen exactly after event A, you record the timestamp (jiffies) of
+ * when event A happened, then just before event B you call this function and
+ * pass the timestamp as the first argument, and X as the second argument.
+ */
+static inline void
+wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
+{
+	unsigned long target_jiffies, tmp_jiffies, remaining_jiffies;
+
+	/*
+	 * Don't re-read the value of "jiffies" every time since it may change
+	 * behind our back and break the math.
+	 */
+	tmp_jiffies = jiffies;
+	target_jiffies = timestamp_jiffies +
+			 msecs_to_jiffies_timeout(to_wait_ms);
+
+	if (time_after(target_jiffies, tmp_jiffies)) {
+		remaining_jiffies = target_jiffies - tmp_jiffies;
+		while (remaining_jiffies)
+			remaining_jiffies =
+			    schedule_timeout_uninterruptible(remaining_jiffies);
+	}
+}
+
 #endif
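/*
 * Usage of wait_remaining_ms_from_jiffies(), per its own comment: record
 * jiffies at event A, then sleep off only the remainder just before event
 * B. The panel power-cycle scenario and the 300ms figure are illustrative,
 * not taken from this header.
 */
static inline void example_wait_panel_power_cycle(unsigned long power_off_timestamp)
{
	/* Event A, recorded earlier:  power_off_timestamp = jiffies;  */
	/* Event B, about to happen -- enforce the required 300ms gap: */
	wait_remaining_ms_from_jiffies(power_off_timestamp, 300);
}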
