Diffstat (limited to 'drivers/char/agp/intel-gtt.c')
-rw-r--r--	drivers/char/agp/intel-gtt.c	1612
1 file changed, 771 insertions(+), 841 deletions(-)
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index 75e0a349788..0c8ff6d8824 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -15,6 +15,18 @@
* /fairy-tale-mode off
*/
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/pagemap.h>
+#include <linux/agp_backend.h>
+#include <asm/smp.h>
+#include "agp.h"
+#include "intel-agp.h"
+#include <linux/intel-gtt.h>
+#include <drm/intel-gtt.h>
+
/*
* If we have Intel graphics, we're not going to have anything other than
* an Intel IOMMU. So make the correct use of the PCI DMA API contingent
@@ -23,11 +35,12 @@
*/
#ifdef CONFIG_DMAR
#define USE_PCI_DMA_API 1
+#else
+#define USE_PCI_DMA_API 0
#endif
/* Max amount of stolen space, anything above will be returned to Linux */
int intel_max_stolen = 32 * 1024 * 1024;
-EXPORT_SYMBOL(intel_max_stolen);
static const struct aper_size_info_fixed intel_i810_sizes[] =
{
@@ -55,32 +68,36 @@ static struct gatt_mask intel_i810_masks[] =
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC 3
#define INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT 4
-static struct gatt_mask intel_gen6_masks[] =
-{
- {.mask = I810_PTE_VALID | GEN6_PTE_UNCACHED,
- .type = INTEL_AGP_UNCACHED_MEMORY },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC,
- .type = INTEL_AGP_CACHED_MEMORY_LLC },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC | GEN6_PTE_GFDT,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_GFDT },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC },
- {.mask = I810_PTE_VALID | GEN6_PTE_LLC_MLC | GEN6_PTE_GFDT,
- .type = INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT },
+struct intel_gtt_driver {
+ unsigned int gen : 8;
+ unsigned int is_g33 : 1;
+ unsigned int is_pineview : 1;
+ unsigned int is_ironlake : 1;
+ unsigned int dma_mask_size : 8;
+ /* Chipset specific GTT setup */
+ int (*setup)(void);
+ /* This should undo anything done in ->setup(), except for unmapping
+ * the mmio register file; that is done in the generic code. */
+ void (*cleanup)(void);
+ void (*write_entry)(dma_addr_t addr, unsigned int entry, unsigned int flags);
+ /* Flags is a more or less chipset specific opaque value.
+ * For chipsets that need to support old ums (non-gem) code, this
+ * needs to be identical to the various supported agp memory types! */
+ bool (*check_flags)(unsigned int flags);
+ void (*chipset_flush)(void);
};
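As an illustrative sketch (not part of this patch; the variable name and the dma_mask_size value are assumptions), a chipset hooks into the generic code by providing an intel_gtt_driver instance built from the per-chipset helpers introduced further down in this diff:

static const struct intel_gtt_driver i830_gtt_driver_sketch = {
	.gen = 2,			/* i830-class (gen2) chipset */
	.dma_mask_size = 32,		/* assumed 32-bit DMA mask */
	.setup = i830_setup,
	.cleanup = i830_cleanup,
	.write_entry = i830_write_entry,
	.check_flags = i830_check_flags,
	.chipset_flush = i830_chipset_flush,
};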
static struct _intel_private {
+ struct intel_gtt base;
+ const struct intel_gtt_driver *driver;
struct pci_dev *pcidev; /* device one */
+ struct pci_dev *bridge_dev;
u8 __iomem *registers;
+ phys_addr_t gtt_bus_addr;
+ phys_addr_t gma_bus_addr;
+ phys_addr_t pte_bus_addr;
u32 __iomem *gtt; /* I915G */
int num_dcache_entries;
- /* gtt_entries is the number of gtt entries that are already mapped
- * to stolen memory. Stolen memory is larger than the memory mapped
- * through gtt_entries, as it includes some reserved space for the BIOS
- * popup and for the GTT.
- */
- int gtt_entries; /* i830+ */
- int gtt_total_size;
union {
void __iomem *i9xx_flush_page;
void *i8xx_flush_page;
@@ -88,23 +105,14 @@ static struct _intel_private {
struct page *i8xx_page;
struct resource ifp_resource;
int resource_valid;
+ struct page *scratch_page;
+ dma_addr_t scratch_page_dma;
} intel_private;
-#ifdef USE_PCI_DMA_API
-static int intel_agp_map_page(struct page *page, dma_addr_t *ret)
-{
- *ret = pci_map_page(intel_private.pcidev, page, 0,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(intel_private.pcidev, *ret))
- return -EINVAL;
- return 0;
-}
-
-static void intel_agp_unmap_page(struct page *page, dma_addr_t dma)
-{
- pci_unmap_page(intel_private.pcidev, dma,
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-}
+#define INTEL_GTT_GEN intel_private.driver->gen
+#define IS_G33 intel_private.driver->is_g33
+#define IS_PINEVIEW intel_private.driver->is_pineview
+#define IS_IRONLAKE intel_private.driver->is_ironlake
static void intel_agp_free_sglist(struct agp_memory *mem)
{
@@ -125,6 +133,9 @@ static int intel_agp_map_memory(struct agp_memory *mem)
struct scatterlist *sg;
int i;
+ if (mem->sg_list)
+ return 0; /* already mapped (e.g. for resume) */
+
DBG("try mapping %lu pages\n", (unsigned long)mem->page_count);
if (sg_alloc_table(&st, mem->page_count, GFP_KERNEL))
@@ -156,70 +167,17 @@ static void intel_agp_unmap_memory(struct agp_memory *mem)
intel_agp_free_sglist(mem);
}
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- struct scatterlist *sg;
- int i, j;
-
- j = pg_start;
-
- WARN_ON(!mem->num_sg);
-
- if (mem->num_sg == mem->page_count) {
- for_each_sg(mem->sg_list, sg, mem->page_count, i) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg), mask_type),
- intel_private.gtt+j);
- j++;
- }
- } else {
- /* sg may merge pages, but we have to separate
- * per-page addr for GTT */
- unsigned int len, m;
-
- for_each_sg(mem->sg_list, sg, mem->num_sg, i) {
- len = sg_dma_len(sg) / PAGE_SIZE;
- for (m = 0; m < len; m++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- sg_dma_address(sg) + m * PAGE_SIZE,
- mask_type),
- intel_private.gtt+j);
- j++;
- }
- }
- }
- readl(intel_private.gtt+j-1);
-}
-
-#else
-
-static void intel_agp_insert_sg_entries(struct agp_memory *mem,
- off_t pg_start, int mask_type)
-{
- int i, j;
-
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.gtt+j);
- }
-
- readl(intel_private.gtt+j-1);
-}
-
-#endif
-
static int intel_i810_fetch_size(void)
{
u32 smram_miscc;
struct aper_size_info_fixed *values;
- pci_read_config_dword(agp_bridge->dev, I810_SMRAM_MISCC, &smram_miscc);
+ pci_read_config_dword(intel_private.bridge_dev,
+ I810_SMRAM_MISCC, &smram_miscc);
values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
if ((smram_miscc & I810_GMS) == I810_GMS_DISABLE) {
- dev_warn(&agp_bridge->dev->dev, "i810 is disabled\n");
+ dev_warn(&intel_private.bridge_dev->dev, "i810 is disabled\n");
return 0;
}
if ((smram_miscc & I810_GFX_MEM_WIN_SIZE) == I810_GFX_MEM_WIN_32M) {
@@ -284,7 +242,7 @@ static void intel_i810_cleanup(void)
iounmap(intel_private.registers);
}
-static void intel_i810_agp_enable(struct agp_bridge_data *bridge, u32 mode)
+static void intel_fake_agp_enable(struct agp_bridge_data *bridge, u32 mode)
{
return;
}
@@ -319,34 +277,6 @@ static void i8xx_destroy_pages(struct page *page)
atomic_dec(&agp_bridge->current_memory_agp);
}
-static int intel_i830_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- if (type < AGP_USER_TYPES)
- return type;
- else if (type == AGP_USER_CACHED_MEMORY)
- return INTEL_AGP_CACHED_MEMORY;
- else
- return 0;
-}
-
-static int intel_gen6_type_to_mask_type(struct agp_bridge_data *bridge,
- int type)
-{
- unsigned int type_mask = type & ~AGP_USER_CACHED_MEMORY_GFDT;
- unsigned int gfdt = type & AGP_USER_CACHED_MEMORY_GFDT;
-
- if (type_mask == AGP_USER_UNCACHED_MEMORY)
- return INTEL_AGP_UNCACHED_MEMORY;
- else if (type_mask == AGP_USER_CACHED_MEMORY_LLC_MLC)
- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_MLC_GFDT :
- INTEL_AGP_CACHED_MEMORY_LLC_MLC;
- else /* set 'normal'/'cached' to LLC by default */
- return gfdt ? INTEL_AGP_CACHED_MEMORY_LLC_GFDT :
- INTEL_AGP_CACHED_MEMORY_LLC;
-}
-
-
static int intel_i810_insert_entries(struct agp_memory *mem, off_t pg_start,
int type)
{
@@ -514,8 +444,33 @@ static unsigned long intel_i810_mask_memory(struct agp_bridge_data *bridge,
return addr | bridge->driver->masks[type].mask;
}
-static struct aper_size_info_fixed intel_i830_sizes[] =
+static int intel_gtt_setup_scratch_page(void)
{
+ struct page *page;
+ dma_addr_t dma_addr;
+
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+ if (page == NULL)
+ return -ENOMEM;
+ get_page(page);
+ set_pages_uc(page, 1);
+
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+ dma_addr = pci_map_page(intel_private.pcidev, page, 0,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ if (pci_dma_mapping_error(intel_private.pcidev, dma_addr))
+ return -EINVAL;
+
+ intel_private.scratch_page_dma = dma_addr;
+ } else
+ intel_private.scratch_page_dma = page_to_phys(page);
+
+ intel_private.scratch_page = page;
+
+ return 0;
+}
+
+static const struct aper_size_info_fixed intel_fake_agp_sizes[] = {
{128, 32768, 5},
/* The 64M mode still requires a 128k gatt */
{64, 16384, 5},
@@ -523,102 +478,49 @@ static struct aper_size_info_fixed intel_i830_sizes[] =
{512, 131072, 7},
};
-static void intel_i830_init_gtt_entries(void)
+static unsigned int intel_gtt_stolen_entries(void)
{
u16 gmch_ctrl;
- int gtt_entries = 0;
u8 rdct;
int local = 0;
static const int ddt[4] = { 0, 16, 32, 64 };
- int size; /* reserved space (in kb) at the top of stolen memory */
+ unsigned int overhead_entries, stolen_entries;
+ unsigned int stolen_size = 0;
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
- if (IS_I965) {
- u32 pgetbl_ctl;
- pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+ if (INTEL_GTT_GEN > 4 || IS_PINEVIEW)
+ overhead_entries = 0;
+ else
+ overhead_entries = intel_private.base.gtt_mappable_entries
+ / 1024;
- /* The 965 has a field telling us the size of the GTT,
- * which may be larger than what is necessary to map the
- * aperture.
- */
- switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
- case I965_PGETBL_SIZE_128KB:
- size = 128;
- break;
- case I965_PGETBL_SIZE_256KB:
- size = 256;
- break;
- case I965_PGETBL_SIZE_512KB:
- size = 512;
- break;
- case I965_PGETBL_SIZE_1MB:
- size = 1024;
- break;
- case I965_PGETBL_SIZE_2MB:
- size = 2048;
- break;
- case I965_PGETBL_SIZE_1_5MB:
- size = 1024 + 512;
- break;
- default:
- dev_info(&intel_private.pcidev->dev,
- "unknown page table size, assuming 512KB\n");
- size = 512;
- }
- size += 4; /* add in BIOS popup space */
- } else if (IS_G33 && !IS_PINEVIEW) {
- /* G33's GTT size defined in gmch_ctrl */
- switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
- case G33_PGETBL_SIZE_1M:
- size = 1024;
- break;
- case G33_PGETBL_SIZE_2M:
- size = 2048;
- break;
- default:
- dev_info(&agp_bridge->dev->dev,
- "unknown page table size 0x%x, assuming 512KB\n",
- (gmch_ctrl & G33_PGETBL_SIZE_MASK));
- size = 512;
- }
- size += 4;
- } else if (IS_G4X || IS_PINEVIEW) {
- /* On 4 series hardware, GTT stolen is separate from graphics
- * stolen, ignore it in stolen gtt entries counting. However,
- * 4KB of the stolen memory doesn't get mapped to the GTT.
- */
- size = 4;
- } else {
- /* On previous hardware, the GTT size was just what was
- * required to map the aperture.
- */
- size = agp_bridge->driver->fetch_size() + 4;
- }
+ overhead_entries += 1; /* BIOS popup */
- if (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
- agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
+ if (intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82830_HB ||
+ intel_private.bridge_dev->device == PCI_DEVICE_ID_INTEL_82845G_HB) {
switch (gmch_ctrl & I830_GMCH_GMS_MASK) {
case I830_GMCH_GMS_STOLEN_512:
- gtt_entries = KB(512) - KB(size);
+ stolen_size = KB(512);
break;
case I830_GMCH_GMS_STOLEN_1024:
- gtt_entries = MB(1) - KB(size);
+ stolen_size = MB(1);
break;
case I830_GMCH_GMS_STOLEN_8192:
- gtt_entries = MB(8) - KB(size);
+ stolen_size = MB(8);
break;
case I830_GMCH_GMS_LOCAL:
rdct = readb(intel_private.registers+I830_RDRAM_CHANNEL_TYPE);
- gtt_entries = (I830_RDRAM_ND(rdct) + 1) *
+ stolen_size = (I830_RDRAM_ND(rdct) + 1) *
MB(ddt[I830_RDRAM_DDT(rdct)]);
local = 1;
break;
default:
- gtt_entries = 0;
+ stolen_size = 0;
break;
}
- } else if (IS_SNB) {
+ } else if (INTEL_GTT_GEN == 6) {
/*
* SandyBridge has new memory control reg at 0x50.w
*/
@@ -626,149 +528,292 @@ static void intel_i830_init_gtt_entries(void)
pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
switch (snb_gmch_ctl & SNB_GMCH_GMS_STOLEN_MASK) {
case SNB_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
+ stolen_size = MB(32);
break;
case SNB_GMCH_GMS_STOLEN_64M:
- gtt_entries = MB(64) - KB(size);
+ stolen_size = MB(64);
break;
case SNB_GMCH_GMS_STOLEN_96M:
- gtt_entries = MB(96) - KB(size);
+ stolen_size = MB(96);
break;
case SNB_GMCH_GMS_STOLEN_128M:
- gtt_entries = MB(128) - KB(size);
+ stolen_size = MB(128);
break;
case SNB_GMCH_GMS_STOLEN_160M:
- gtt_entries = MB(160) - KB(size);
+ stolen_size = MB(160);
break;
case SNB_GMCH_GMS_STOLEN_192M:
- gtt_entries = MB(192) - KB(size);
+ stolen_size = MB(192);
break;
case SNB_GMCH_GMS_STOLEN_224M:
- gtt_entries = MB(224) - KB(size);
+ stolen_size = MB(224);
break;
case SNB_GMCH_GMS_STOLEN_256M:
- gtt_entries = MB(256) - KB(size);
+ stolen_size = MB(256);
break;
case SNB_GMCH_GMS_STOLEN_288M:
- gtt_entries = MB(288) - KB(size);
+ stolen_size = MB(288);
break;
case SNB_GMCH_GMS_STOLEN_320M:
- gtt_entries = MB(320) - KB(size);
+ stolen_size = MB(320);
break;
case SNB_GMCH_GMS_STOLEN_352M:
- gtt_entries = MB(352) - KB(size);
+ stolen_size = MB(352);
break;
case SNB_GMCH_GMS_STOLEN_384M:
- gtt_entries = MB(384) - KB(size);
+ stolen_size = MB(384);
break;
case SNB_GMCH_GMS_STOLEN_416M:
- gtt_entries = MB(416) - KB(size);
+ stolen_size = MB(416);
break;
case SNB_GMCH_GMS_STOLEN_448M:
- gtt_entries = MB(448) - KB(size);
+ stolen_size = MB(448);
break;
case SNB_GMCH_GMS_STOLEN_480M:
- gtt_entries = MB(480) - KB(size);
+ stolen_size = MB(480);
break;
case SNB_GMCH_GMS_STOLEN_512M:
- gtt_entries = MB(512) - KB(size);
+ stolen_size = MB(512);
break;
}
} else {
switch (gmch_ctrl & I855_GMCH_GMS_MASK) {
case I855_GMCH_GMS_STOLEN_1M:
- gtt_entries = MB(1) - KB(size);
+ stolen_size = MB(1);
break;
case I855_GMCH_GMS_STOLEN_4M:
- gtt_entries = MB(4) - KB(size);
+ stolen_size = MB(4);
break;
case I855_GMCH_GMS_STOLEN_8M:
- gtt_entries = MB(8) - KB(size);
+ stolen_size = MB(8);
break;
case I855_GMCH_GMS_STOLEN_16M:
- gtt_entries = MB(16) - KB(size);
+ stolen_size = MB(16);
break;
case I855_GMCH_GMS_STOLEN_32M:
- gtt_entries = MB(32) - KB(size);
+ stolen_size = MB(32);
break;
case I915_GMCH_GMS_STOLEN_48M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(48) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(48);
break;
case I915_GMCH_GMS_STOLEN_64M:
- /* Check it's really I915G */
- if (IS_I915 || IS_I965 || IS_G33 || IS_G4X)
- gtt_entries = MB(64) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(64);
break;
case G33_GMCH_GMS_STOLEN_128M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(128) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(128);
break;
case G33_GMCH_GMS_STOLEN_256M:
- if (IS_G33 || IS_I965 || IS_G4X)
- gtt_entries = MB(256) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(256);
break;
case INTEL_GMCH_GMS_STOLEN_96M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(96) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(96);
break;
case INTEL_GMCH_GMS_STOLEN_160M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(160) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(160);
break;
case INTEL_GMCH_GMS_STOLEN_224M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(224) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(224);
break;
case INTEL_GMCH_GMS_STOLEN_352M:
- if (IS_I965 || IS_G4X)
- gtt_entries = MB(352) - KB(size);
- else
- gtt_entries = 0;
+ stolen_size = MB(352);
break;
default:
- gtt_entries = 0;
+ stolen_size = 0;
break;
}
}
- if (!local && gtt_entries > intel_max_stolen) {
- dev_info(&agp_bridge->dev->dev,
+
+ if (!local && stolen_size > intel_max_stolen) {
+ dev_info(&intel_private.bridge_dev->dev,
"detected %dK stolen memory, trimming to %dK\n",
- gtt_entries / KB(1), intel_max_stolen / KB(1));
- gtt_entries = intel_max_stolen / KB(4);
- } else if (gtt_entries > 0) {
- dev_info(&agp_bridge->dev->dev, "detected %dK %s memory\n",
- gtt_entries / KB(1), local ? "local" : "stolen");
- gtt_entries /= KB(4);
+ stolen_size / KB(1), intel_max_stolen / KB(1));
+ stolen_size = intel_max_stolen;
+ } else if (stolen_size > 0) {
+ dev_info(&intel_private.bridge_dev->dev, "detected %dK %s memory\n",
+ stolen_size / KB(1), local ? "local" : "stolen");
} else {
- dev_info(&agp_bridge->dev->dev,
+ dev_info(&intel_private.bridge_dev->dev,
"no pre-allocated video memory detected\n");
- gtt_entries = 0;
+ stolen_size = 0;
+ }
+
+ stolen_entries = stolen_size/KB(4) - overhead_entries;
+
+ return stolen_entries;
+}
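A rough worked example (hypothetical numbers): on a gen3 part with a 128MB aperture and 64MB of stolen memory, gtt_mappable_entries is 32768, so overhead_entries is 32768/1024 + 1 = 33 (one stolen page per 1024 GTT entries, plus the BIOS popup page), and stolen_entries comes out to MB(64)/KB(4) - 33 = 16384 - 33 = 16351 usable stolen entries.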
+
+static unsigned int intel_gtt_total_entries(void)
+{
+ int size;
+
+ if (IS_G33 || INTEL_GTT_GEN == 4 || INTEL_GTT_GEN == 5) {
+ u32 pgetbl_ctl;
+ pgetbl_ctl = readl(intel_private.registers+I810_PGETBL_CTL);
+
+ switch (pgetbl_ctl & I965_PGETBL_SIZE_MASK) {
+ case I965_PGETBL_SIZE_128KB:
+ size = KB(128);
+ break;
+ case I965_PGETBL_SIZE_256KB:
+ size = KB(256);
+ break;
+ case I965_PGETBL_SIZE_512KB:
+ size = KB(512);
+ break;
+ case I965_PGETBL_SIZE_1MB:
+ size = KB(1024);
+ break;
+ case I965_PGETBL_SIZE_2MB:
+ size = KB(2048);
+ break;
+ case I965_PGETBL_SIZE_1_5MB:
+ size = KB(1024 + 512);
+ break;
+ default:
+ dev_info(&intel_private.pcidev->dev,
+ "unknown page table size, assuming 512KB\n");
+ size = KB(512);
+ }
+
+ return size/4;
+ } else if (INTEL_GTT_GEN == 6) {
+ u16 snb_gmch_ctl;
+
+ pci_read_config_word(intel_private.pcidev, SNB_GMCH_CTRL, &snb_gmch_ctl);
+ switch (snb_gmch_ctl & SNB_GTT_SIZE_MASK) {
+ default:
+ case SNB_GTT_SIZE_0M:
+ printk(KERN_ERR "Bad GTT size mask: 0x%04x.\n", snb_gmch_ctl);
+ size = MB(0);
+ break;
+ case SNB_GTT_SIZE_1M:
+ size = MB(1);
+ break;
+ case SNB_GTT_SIZE_2M:
+ size = MB(2);
+ break;
+ }
+ return size/4;
+ } else {
+ /* On previous hardware, the GTT size was just what was
+ * required to map the aperture.
+ */
+ return intel_private.base.gtt_mappable_entries;
+ }
+}
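For example, assuming a gen4 part whose PGETBL_CTL reports I965_PGETBL_SIZE_512KB, this returns KB(512)/4 = 131072 entries, i.e. 512MB of GTT-addressable space at 4KB per entry.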
+
+static unsigned int intel_gtt_mappable_entries(void)
+{
+ unsigned int aperture_size;
+
+ if (INTEL_GTT_GEN == 2) {
+ u16 gmch_ctrl;
+
+ pci_read_config_word(intel_private.bridge_dev,
+ I830_GMCH_CTRL, &gmch_ctrl);
+
+ if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_64M)
+ aperture_size = MB(64);
+ else
+ aperture_size = MB(128);
+ } else {
+ /* 9xx supports large sizes, just look at the length */
+ aperture_size = pci_resource_len(intel_private.pcidev, 2);
}
- intel_private.gtt_entries = gtt_entries;
+ return aperture_size >> PAGE_SHIFT;
}
-static void intel_i830_fini_flush(void)
+static void intel_gtt_teardown_scratch_page(void)
+{
+ set_pages_wb(intel_private.scratch_page, 1);
+ pci_unmap_page(intel_private.pcidev, intel_private.scratch_page_dma,
+ PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ put_page(intel_private.scratch_page);
+ __free_page(intel_private.scratch_page);
+}
+
+static void intel_gtt_cleanup(void)
+{
+ intel_private.driver->cleanup();
+
+ iounmap(intel_private.gtt);
+ iounmap(intel_private.registers);
+
+ intel_gtt_teardown_scratch_page();
+}
+
+static int intel_gtt_init(void)
+{
+ u32 gtt_map_size;
+ int ret;
+
+ ret = intel_private.driver->setup();
+ if (ret != 0)
+ return ret;
+
+ intel_private.base.gtt_mappable_entries = intel_gtt_mappable_entries();
+ intel_private.base.gtt_total_entries = intel_gtt_total_entries();
+
+ dev_info(&intel_private.bridge_dev->dev,
+ "detected gtt size: %dK total, %dK mappable\n",
+ intel_private.base.gtt_total_entries * 4,
+ intel_private.base.gtt_mappable_entries * 4);
+
+ gtt_map_size = intel_private.base.gtt_total_entries * 4;
+
+ intel_private.gtt = ioremap(intel_private.gtt_bus_addr,
+ gtt_map_size);
+ if (!intel_private.gtt) {
+ intel_private.driver->cleanup();
+ iounmap(intel_private.registers);
+ return -ENOMEM;
+ }
+
+ global_cache_flush(); /* FIXME: ? */
+
+ /* we have to call this as early as possible after the MMIO base address is known */
+ intel_private.base.gtt_stolen_entries = intel_gtt_stolen_entries();
+ if (intel_private.base.gtt_stolen_entries == 0) {
+ intel_private.driver->cleanup();
+ iounmap(intel_private.registers);
+ iounmap(intel_private.gtt);
+ return -ENOMEM;
+ }
+
+ ret = intel_gtt_setup_scratch_page();
+ if (ret != 0) {
+ intel_gtt_cleanup();
+ return ret;
+ }
+
+ return 0;
+}
+
+static int intel_fake_agp_fetch_size(void)
+{
+ int num_sizes = ARRAY_SIZE(intel_fake_agp_sizes);
+ unsigned int aper_size;
+ int i;
+
+ aper_size = (intel_private.base.gtt_mappable_entries << PAGE_SHIFT)
+ / MB(1);
+
+ for (i = 0; i < num_sizes; i++) {
+ if (aper_size == intel_fake_agp_sizes[i].size) {
+ agp_bridge->current_size =
+ (void *) (intel_fake_agp_sizes + i);
+ return aper_size;
+ }
+ }
+
+ return 0;
+}
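For example, a 64MB aperture gives gtt_mappable_entries = 16384, so aper_size = (16384 << PAGE_SHIFT) / MB(1) = 64 and the lookup selects the {64, 16384, 5} entry of intel_fake_agp_sizes above.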
+
+static void i830_cleanup(void)
{
kunmap(intel_private.i8xx_page);
intel_private.i8xx_flush_page = NULL;
- unmap_page_from_agp(intel_private.i8xx_page);
__free_page(intel_private.i8xx_page);
intel_private.i8xx_page = NULL;
@@ -780,13 +825,13 @@ static void intel_i830_setup_flush(void)
if (intel_private.i8xx_page)
return;
- intel_private.i8xx_page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA32);
+ intel_private.i8xx_page = alloc_page(GFP_KERNEL);
if (!intel_private.i8xx_page)
return;
intel_private.i8xx_flush_page = kmap(intel_private.i8xx_page);
if (!intel_private.i8xx_flush_page)
- intel_i830_fini_flush();
+ i830_cleanup();
}
/* The chipset_flush interface needs to get data that has already been
@@ -799,7 +844,7 @@ static void intel_i830_setup_flush(void)
* that buffer out, we just fill 1KB and clflush it out, on the assumption
* that it'll push whatever was in there out. It appears to work.
*/
-static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
+static void i830_chipset_flush(void)
{
unsigned int *pg = intel_private.i8xx_flush_page;
@@ -811,169 +856,184 @@ static void intel_i830_chipset_flush(struct agp_bridge_data *bridge)
printk(KERN_ERR "Timed out waiting for cache flush.\n");
}
-/* The intel i830 automatically initializes the agp aperture during POST.
- * Use the memory already set aside for in the GTT.
- */
-static int intel_i830_create_gatt_table(struct agp_bridge_data *bridge)
+static void i830_write_entry(dma_addr_t addr, unsigned int entry,
+ unsigned int flags)
{
- int page_order;
- struct aper_size_info_fixed *size;
- int num_entries;
- u32 temp;
+ u32 pte_flags = I810_PTE_VALID;
+
+ switch (flags) {
+ case AGP_DCACHE_MEMORY:
+ pte_flags |= I810_PTE_LOCAL;
+ break;
+ case AGP_USER_CACHED_MEMORY:
+ pte_flags |= I830_PTE_SYSTEM_CACHED;
+ break;
+ }
- size = agp_bridge->current_size;
- page_order = size->page_order;
- num_entries = size->num_entries;
- agp_bridge->gatt_table_real = NULL;
+ writel(addr | pte_flags, intel_private.gtt + entry);
+}
- pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &temp);
- temp &= 0xfff80000;
+static void intel_enable_gtt(void)
+{
+ u32 gma_addr;
+ u16 gmch_ctrl;
- intel_private.registers = ioremap(temp, 128 * 4096);
- if (!intel_private.registers)
- return -ENOMEM;
+ if (INTEL_GTT_GEN == 2)
+ pci_read_config_dword(intel_private.pcidev, I810_GMADDR,
+ &gma_addr);
+ else
+ pci_read_config_dword(intel_private.pcidev, I915_GMADDR,
+ &gma_addr);
- temp = readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- global_cache_flush(); /* FIXME: ?? */
+ intel_private.gma_bus_addr = (gma_addr & PCI_BASE_ADDRESS_MEM_MASK);
- /* we have to call this as early as possible after the MMIO base address is known */
- intel_i830_init_gtt_entries();
- if (intel_private.gtt_entries == 0) {
- iounmap(intel_private.registers);
+ pci_read_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, &gmch_ctrl);
+ gmch_ctrl |= I830_GMCH_ENABLED;
+ pci_write_config_word(intel_private.bridge_dev, I830_GMCH_CTRL, gmch_ctrl);
+
+ writel(intel_private.pte_bus_addr|I810_PGETBL_ENABLED,
+ intel_private.registers+I810_PGETBL_CTL);
+ readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+}
+
+static int i830_setup(void)
+{
+ u32 reg_addr;
+
+ pci_read_config_dword(intel_private.pcidev, I810_MMADDR, &reg_addr);
+ reg_addr &= 0xfff80000;
+
+ intel_private.registers = ioremap(reg_addr, KB(64));
+ if (!intel_private.registers)
return -ENOMEM;
- }
- agp_bridge->gatt_table = NULL;
+ intel_private.gtt_bus_addr = reg_addr + I810_PTE_BASE;
+ intel_private.pte_bus_addr =
+ readl(intel_private.registers+I810_PGETBL_CTL) & 0xfffff000;
- agp_bridge->gatt_bus_addr = temp;
+ intel_i830_setup_flush();
return 0;
}
-/* Return the gatt table to a sane state. Use the top of stolen
- * memory for the GTT.
- */
-static int intel_i830_free_gatt_table(struct agp_bridge_data *bridge)
+static int intel_fake_agp_create_gatt_table(struct agp_bridge_data *bridge)
{
+ agp_bridge->gatt_table_real = NULL;
+ agp_bridge->gatt_table = NULL;
+ agp_bridge->gatt_bus_addr = 0;
+
return 0;
}
-static int intel_i830_fetch_size(void)
+static int intel_fake_agp_free_gatt_table(struct agp_bridge_data *bridge)
{
- u16 gmch_ctrl;
- struct aper_size_info_fixed *values;
-
- values = A_SIZE_FIX(agp_bridge->driver->aperture_sizes);
-
- if (agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82830_HB &&
- agp_bridge->dev->device != PCI_DEVICE_ID_INTEL_82845G_HB) {
- /* 855GM/852GM/865G has 128MB aperture size */
- agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- }
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
-
- if ((gmch_ctrl & I830_GMCH_MEM_MASK) == I830_GMCH_MEM_128M) {
- agp_bridge->current_size = (void *) values;
- agp_bridge->aperture_size_idx = 0;
- return values[0].size;
- } else {
- agp_bridge->current_size = (void *) (values + 1);
- agp_bridge->aperture_size_idx = 1;
- return values[1].size;
- }
-
return 0;
}
-static int intel_i830_configure(void)
+static int intel_fake_agp_configure(void)
{
- struct aper_size_info_fixed *current_size;
- u32 temp;
- u16 gmch_ctrl;
int i;
- current_size = A_SIZE_FIX(agp_bridge->current_size);
-
- pci_read_config_dword(intel_private.pcidev, I810_GMADDR, &temp);
- agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
-
- pci_read_config_word(agp_bridge->dev, I830_GMCH_CTRL, &gmch_ctrl);
- gmch_ctrl |= I830_GMCH_ENABLED;
- pci_write_config_word(agp_bridge->dev, I830_GMCH_CTRL, gmch_ctrl);
+ intel_enable_gtt();
- writel(agp_bridge->gatt_bus_addr|I810_PGETBL_ENABLED, intel_private.registers+I810_PGETBL_CTL);
- readl(intel_private.registers+I810_PGETBL_CTL); /* PCI Posting. */
+ agp_bridge->gart_bus_addr = intel_private.gma_bus_addr;
- if (agp_bridge->driver->needs_scratch_page) {
- for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
- }
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4)); /* PCI Posting. */
+ for (i = intel_private.base.gtt_stolen_entries;
+ i < intel_private.base.gtt_total_entries; i++) {
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
}
+ readl(intel_private.gtt+i-1); /* PCI Posting. */
global_cache_flush();
- intel_i830_setup_flush();
return 0;
}
-static void intel_i830_cleanup(void)
+static bool i830_check_flags(unsigned int flags)
{
- iounmap(intel_private.registers);
+ switch (flags) {
+ case 0:
+ case AGP_PHYS_MEMORY:
+ case AGP_USER_CACHED_MEMORY:
+ case AGP_USER_MEMORY:
+ return true;
+ }
+
+ return false;
}
-static int intel_i830_insert_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static void intel_gtt_insert_sg_entries(struct scatterlist *sg_list,
+ unsigned int sg_len,
+ unsigned int pg_start,
+ unsigned int flags)
{
- int i, j, num_entries;
- void *temp;
+ struct scatterlist *sg;
+ unsigned int len, m;
+ int i, j;
+
+ j = pg_start;
+
+ /* sg may merge pages, but we have to separate
+ * per-page addr for GTT */
+ for_each_sg(sg_list, sg, sg_len, i) {
+ len = sg_dma_len(sg) >> PAGE_SHIFT;
+ for (m = 0; m < len; m++) {
+ dma_addr_t addr = sg_dma_address(sg) + (m << PAGE_SHIFT);
+ intel_private.driver->write_entry(addr,
+ j, flags);
+ j++;
+ }
+ }
+ readl(intel_private.gtt+j-1);
+}
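For example, if the DMA API merged four contiguous pages into a single scatterlist segment with sg_dma_len(sg) == 16384, the inner loop still emits four separate PTE writes at GTT indices j through j+3, each address offset by a further PAGE_SIZE.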
+
+static int intel_fake_agp_insert_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
+{
+ int i, j;
int ret = -EINVAL;
- int mask_type;
if (mem->page_count == 0)
goto out;
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_FIX(temp)->num_entries;
-
- if (pg_start < intel_private.gtt_entries) {
+ if (pg_start < intel_private.base.gtt_stolen_entries) {
dev_printk(KERN_DEBUG, &intel_private.pcidev->dev,
- "pg_start == 0x%.8lx, intel_private.gtt_entries == 0x%.8x\n",
- pg_start, intel_private.gtt_entries);
+ "pg_start == 0x%.8lx, gtt_stolen_entries == 0x%.8x\n",
+ pg_start, intel_private.base.gtt_stolen_entries);
dev_info(&intel_private.pcidev->dev,
"trying to insert into local/stolen memory\n");
goto out_err;
}
- if ((pg_start + mem->page_count) > num_entries)
+ if ((pg_start + mem->page_count) > intel_private.base.gtt_total_entries)
goto out_err;
- /* The i830 can't check the GTT for entries since its read only,
- * depend on the caller to make the correct offset decisions.
- */
-
if (type != mem->type)
goto out_err;
- mask_type = agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type);
-
- if (mask_type != 0 && mask_type != AGP_PHYS_MEMORY &&
- mask_type != INTEL_AGP_CACHED_MEMORY)
+ if (!intel_private.driver->check_flags(type))
goto out_err;
if (!mem->is_flushed)
global_cache_flush();
- for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
- writel(agp_bridge->driver->mask_memory(agp_bridge,
- page_to_phys(mem->pages[i]), mask_type),
- intel_private.registers+I810_PTE_BASE+(j*4));
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2) {
+ ret = intel_agp_map_memory(mem);
+ if (ret != 0)
+ return ret;
+
+ intel_gtt_insert_sg_entries(mem->sg_list, mem->num_sg,
+ pg_start, type);
+ } else {
+ for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
+ dma_addr_t addr = page_to_phys(mem->pages[i]);
+ intel_private.driver->write_entry(addr,
+ j, type);
+ }
+ readl(intel_private.gtt+j-1);
}
- readl(intel_private.registers+I810_PTE_BASE+((j-1)*4));
out:
ret = 0;
@@ -982,29 +1042,39 @@ out_err:
return ret;
}
-static int intel_i830_remove_entries(struct agp_memory *mem, off_t pg_start,
- int type)
+static int intel_fake_agp_remove_entries(struct agp_memory *mem,
+ off_t pg_start, int type)
{
int i;
if (mem->page_count == 0)
return 0;
- if (pg_start < intel_private.gtt_entries) {
+ if (pg_start < intel_private.base.gtt_stolen_entries) {
dev_info(&intel_private.pcidev->dev,
"trying to disable local/stolen memory\n");
return -EINVAL;
}
+ if (USE_PCI_DMA_API && INTEL_GTT_GEN > 2)
+ intel_agp_unmap_memory(mem);
+
for (i = pg_start; i < (mem->page_count + pg_start); i++) {
- writel(agp_bridge->scratch_page, intel_private.registers+I810_PTE_BASE+(i*4));
+ intel_private.driver->write_entry(intel_private.scratch_page_dma,
+ i, 0);
}
- readl(intel_private.registers+I810_PTE_BASE+((i-1)*4));
+ readl(intel_private.gtt+i-1);
return 0;
}
-static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
+static void intel_fake_agp_chipset_flush(struct agp_bridge_data *bridge)
+{
+ intel_private.driver->chipset_flush();
+}
+
+static struct agp_memory *intel_fake_agp_alloc_by_type(size_t pg_count,
+ int type)
{
if (type == AGP_PHYS_MEMORY)
return alloc_agpphysmem_i8xx(pg_count, type);
@@ -1015,9 +1085,9 @@ static struct agp_memory *intel_i830_alloc_by_type(size_t pg_count, int type)
static int intel_alloc_chipset_flush_resource(void)
{
int ret;
- ret = pci_bus_alloc_resource(agp_bridge->dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
+ ret = pci_bus_alloc_resource(intel_private.bridge_dev->bus, &intel_private.ifp_resource, PAGE_SIZE,
PAGE_SIZE, PCIBIOS_MIN_MEM, 0,
- pcibios_align_resource, agp_bridge->dev);
+ pcibios_align_resource, intel_private.bridge_dev);
return ret;
}
@@ -1027,11 +1097,11 @@ static void intel_i915_setup_chipset_flush(void)
int ret;
u32 temp;
- pci_read_config_dword(agp_bridge->dev, I915_IFPADDR, &temp);
+ pci_read_config_dword(intel_private.bridge_dev, I915_IFPADDR, &temp);
if (!(temp & 0x1)) {
intel_allo