Diffstat (limited to 'drivers/char/agp/generic.c')
-rw-r--r--    drivers/char/agp/generic.c    228
1 file changed, 154 insertions, 74 deletions
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 7fc0c99a3a5..0fbccce1cee 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -29,7 +29,6 @@
  */
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/miscdevice.h>
 #include <linux/pm.h>
@@ -38,6 +37,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
@@ -80,13 +80,6 @@ static int agp_get_key(void)
 	return -1;
 }
 
-void agp_flush_chipset(struct agp_bridge_data *bridge)
-{
-	if (bridge->driver->chipset_flush)
-		bridge->driver->chipset_flush(bridge);
-}
-EXPORT_SYMBOL(agp_flush_chipset);
-
 /*
  * Use kmalloc if possible for the page list. Otherwise fall back to
  * vmalloc. This speeds things up and also saves memory for small AGP
@@ -95,24 +88,22 @@ EXPORT_SYMBOL(agp_flush_chipset);
 
 void agp_alloc_page_array(size_t size, struct agp_memory *mem)
 {
-	mem->memory = NULL;
-	mem->vmalloc_flag = 0;
+	mem->pages = NULL;
 
 	if (size <= 2*PAGE_SIZE)
-		mem->memory = kmalloc(size, GFP_KERNEL | __GFP_NORETRY);
-	if (mem->memory == NULL) {
-		mem->memory = vmalloc(size);
-		mem->vmalloc_flag = 1;
+		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (mem->pages == NULL) {
+		mem->pages = vmalloc(size);
 	}
 }
 EXPORT_SYMBOL(agp_alloc_page_array);
 
 void agp_free_page_array(struct agp_memory *mem)
 {
-	if (mem->vmalloc_flag) {
-		vfree(mem->memory);
+	if (is_vmalloc_addr(mem->pages)) {
+		vfree(mem->pages);
 	} else {
-		kfree(mem->memory);
+		kfree(mem->pages);
 	}
 }
 EXPORT_SYMBOL(agp_free_page_array);
@@ -123,6 +114,9 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
 	struct agp_memory *new;
 	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
 
+	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+		return NULL;
+
 	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
 	if (new == NULL)
 		return NULL;
@@ -136,7 +130,7 @@ static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
 
 	agp_alloc_page_array(alloc_size, new);
 
-	if (new->memory == NULL) {
+	if (new->pages == NULL) {
 		agp_free_key(new->key);
 		kfree(new);
 		return NULL;
@@ -162,7 +156,7 @@ struct agp_memory *agp_create_memory(int scratch_pages)
 
 	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
 
-	if (new->memory == NULL) {
+	if (new->pages == NULL) {
 		agp_free_key(new->key);
 		kfree(new);
 		return NULL;
@@ -188,7 +182,7 @@ void agp_free_memory(struct agp_memory *curr)
 	if (curr == NULL)
 		return;
 
-	if (curr->is_bound == TRUE)
+	if (curr->is_bound)
 		agp_unbind_memory(curr);
 
 	if (curr->type >= AGP_USER_TYPES) {
@@ -201,11 +195,20 @@ void agp_free_memory(struct agp_memory *curr)
 		return;
 	}
 	if (curr->page_count != 0) {
-		for (i = 0; i < curr->page_count; i++) {
-			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_UNMAP);
-		}
-		for (i = 0; i < curr->page_count; i++) {
-			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]), AGP_PAGE_DESTROY_FREE);
+		if (curr->bridge->driver->agp_destroy_pages) {
+			curr->bridge->driver->agp_destroy_pages(curr);
+		} else {
+
+			for (i = 0; i < curr->page_count; i++) {
+				curr->bridge->driver->agp_destroy_page(
+					curr->pages[i],
+					AGP_PAGE_DESTROY_UNMAP);
+			}
+			for (i = 0; i < curr->page_count; i++) {
+				curr->bridge->driver->agp_destroy_page(
+					curr->pages[i],
+					AGP_PAGE_DESTROY_FREE);
+			}
 		}
 	}
 	agp_free_key(curr->key);
@@ -233,11 +236,14 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 	int scratch_pages;
 	struct agp_memory *new;
 	size_t i;
+	int cur_memory;
 
 	if (!bridge)
 		return NULL;
 
-	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
+	cur_memory = atomic_read(&bridge->current_memory_agp);
+	if ((cur_memory + page_count > bridge->max_memory_agp) ||
+	    (cur_memory + page_count < page_count))
 		return NULL;
 
 	if (type >= AGP_USER_TYPES) {
@@ -261,14 +267,23 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 	if (new == NULL)
 		return NULL;
 
+	if (bridge->driver->agp_alloc_pages) {
+		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->bridge = bridge;
+		return new;
+	}
+
 	for (i = 0; i < page_count; i++) {
-		void *addr = bridge->driver->agp_alloc_page(bridge);
+		struct page *page = bridge->driver->agp_alloc_page(bridge);
 
-		if (addr == NULL) {
+		if (page == NULL) {
 			agp_free_memory(new);
 			return NULL;
 		}
-		new->memory[i] = virt_to_gart(addr);
+		new->pages[i] = page;
 		new->page_count++;
 	}
 	new->bridge = bridge;
@@ -411,21 +426,26 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 	if (curr == NULL)
 		return -EINVAL;
 
-	if (curr->is_bound == TRUE) {
+	if (curr->is_bound) {
 		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
 		return -EINVAL;
 	}
-	if (curr->is_flushed == FALSE) {
+	if (!curr->is_flushed) {
 		curr->bridge->driver->cache_flush();
-		curr->is_flushed = TRUE;
+		curr->is_flushed = true;
 	}
+
 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
 	if (ret_val != 0)
 		return ret_val;
 
-	curr->is_bound = TRUE;
+	curr->is_bound = true;
 	curr->pg_start = pg_start;
+	spin_lock(&agp_bridge->mapped_lock);
+	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
+	spin_unlock(&agp_bridge->mapped_lock);
+
 	return 0;
 }
 EXPORT_SYMBOL(agp_bind_memory);
@@ -446,7 +466,7 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (curr == NULL)
 		return -EINVAL;
 
-	if (curr->is_bound != TRUE) {
+	if (!curr->is_bound) {
 		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
 		return -EINVAL;
 	}
@@ -456,12 +476,16 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (ret_val != 0)
 		return ret_val;
 
-	curr->is_bound = FALSE;
+	curr->is_bound = false;
 	curr->pg_start = 0;
+	spin_lock(&curr->bridge->mapped_lock);
+	list_del(&curr->mapped_list);
+	spin_unlock(&curr->bridge->mapped_lock);
 	return 0;
 }
 EXPORT_SYMBOL(agp_unbind_memory);
 
+
 /* End - Routines for handling swapping of agp_memory into the GATT */
 
@@ -489,12 +513,12 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
 		switch (*bridge_agpstat & 7) {
 		case 4:
 			*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
-			printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate"
+			printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
 				"Fixing up support for x2 & x1\n");
 			break;
 		case 2:
 			*bridge_agpstat |= AGPSTAT2_1X;
-			printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate"
+			printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
 				"Fixing up support for x1\n");
 			break;
 		default:
@@ -668,7 +692,7 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
 		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 		*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 	} else {
-		printk(KERN_INFO PFX "Fell back to AGPx4 mode because");
+		printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
 		if (!(*bridge_agpstat & AGPSTAT3_8X)) {
 			printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
 				*bridge_agpstat, origbridge);
@@ -754,7 +778,7 @@ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode
 EXPORT_SYMBOL(agp_collect_device_status);
 
 
-void agp_device_command(u32 bridge_agpstat, int agp_v3)
+void agp_device_command(u32 bridge_agpstat, bool agp_v3)
 {
 	struct pci_dev *device = NULL;
 	int mode;
@@ -768,8 +792,8 @@ void agp_device_command(u32 bridge_agpstat, int agp_v3)
 		if (!agp)
 			continue;
 
-		printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
-				agp_v3 ? 3 : 2, pci_name(device), mode);
+		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
+			 agp_v3 ? 3 : 2, mode);
 		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
 	}
 }
@@ -797,10 +821,8 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 
 	get_agp_version(agp_bridge);
 
-	printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
-				agp_bridge->major_version,
-				agp_bridge->minor_version,
-				pci_name(agp_bridge->dev));
+	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+		 agp_bridge->major_version, agp_bridge->minor_version);
 
 	pci_read_config_dword(agp_bridge->dev,
 			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
@@ -818,7 +840,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 		/* If we have 3.5, we can do the isoch stuff. */
 		if (bridge->minor_version >= 5)
 			agp_3_5_enable(bridge);
-		agp_device_command(bridge_agpstat, TRUE);
+		agp_device_command(bridge_agpstat, true);
 		return;
 	} else {
 		/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
@@ -829,13 +851,12 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 		pci_write_config_dword(bridge->dev,
 					bridge->capndx+AGPCTRL, temp);
 
-		printk(KERN_INFO PFX "Device is in legacy mode,"
-				" falling back to 2.x\n");
+		dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
 		}
 	}
 
 	/* AGP v<3 */
-	agp_device_command(bridge_agpstat, FALSE);
+	agp_device_command(bridge_agpstat, false);
 }
 EXPORT_SYMBOL(agp_generic_enable);
@@ -933,10 +954,12 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 
 	bridge->driver->cache_flush();
 #ifdef CONFIG_X86
-	set_memory_uc((unsigned long)table, 1 << page_order);
-	bridge->gatt_table = (void *)table;
+	if (set_memory_uc((unsigned long)table, 1 << page_order))
+		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
+
+	bridge->gatt_table = (u32 __iomem *)table;
 #else
-	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
+	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 					(PAGE_SIZE * (1 << page_order)));
 	bridge->driver->cache_flush();
 #endif
@@ -949,7 +972,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 		return -ENOMEM;
 	}
 
-	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
+	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
 
 	/* AK: bogus, should encode addresses > 4GB */
 	for (i = 0; i < num_entries; i++) {
@@ -986,7 +1009,6 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
 	case LVL2_APER_SIZE:
 		/* The generic routines can't deal with 2 level gatt's */
 		return -EINVAL;
-		break;
 	default:
 		page_order = 0;
 		break;
@@ -1053,7 +1075,6 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	case LVL2_APER_SIZE:
 		/* The generic routines can't deal with 2 level gatt's */
 		return -EINVAL;
-		break;
 	default:
 		num_entries = 0;
 		break;
@@ -1071,8 +1092,8 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 		return -EINVAL;
 	}
 
-	/* AK: could wrap */
-	if ((pg_start + mem->page_count) > num_entries)
+	if (((pg_start + mem->page_count) > num_entries) ||
+	    ((pg_start + mem->page_count) < pg_start))
 		return -EINVAL;
 
 	j = pg_start;
@@ -1083,13 +1104,15 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		bridge->driver->cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mask_type),
+		writel(bridge->driver->mask_memory(bridge,
+						   page_to_phys(mem->pages[i]),
+						   mask_type),
 		       bridge->gatt_table+j);
 	}
 	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
@@ -1104,7 +1127,7 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 {
 	size_t i;
 	struct agp_bridge_data *bridge;
-	int mask_type;
+	int mask_type, num_entries;
 	bridge = mem->bridge;
 	if (!bridge)
 		return -EINVAL;
@@ -1116,6 +1139,11 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	if (type != mem->type)
 		return -EINVAL;
 
+	num_entries = agp_num_entries();
+	if (((pg_start + mem->page_count) > num_entries) ||
+	    ((pg_start + mem->page_count) < pg_start))
+		return -EINVAL;
+
 	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
 	if (mask_type != 0) {
 		/* The generic routines know nothing of memory types */
@@ -1159,7 +1187,7 @@ struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
 		return NULL;
 
 	for (i = 0; i < page_count; i++)
-		new->memory[i] = 0;
+		new->pages[i] = NULL;
 	new->page_count = 0;
 	new->type = type;
 	new->num_scratch_pages = pages;
@@ -1175,11 +1203,41 @@ EXPORT_SYMBOL(agp_generic_alloc_user);
  * against a maximum value.
  */
 
-void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
+{
+	struct page * page;
+	int i, ret = -ENOMEM;
+
+	for (i = 0; i < num_pages; i++) {
+		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+		/* agp_free_memory() needs gart address */
+		if (page == NULL)
+			goto out;
+
+#ifndef CONFIG_X86
+		map_page_into_agp(page);
+#endif
+		get_page(page);
+		atomic_inc(&agp_bridge->current_memory_agp);
+
+		mem->pages[i] = page;
+		mem->page_count++;
+	}
+
+#ifdef CONFIG_X86
+	set_pages_array_uc(mem->pages, num_pages);
+#endif
+	ret = 0;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(agp_generic_alloc_pages);
+
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 {
 	struct page * page;
 
-	page = alloc_page(GFP_KERNEL | GFP_DMA32);
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 	if (page == NULL)
 		return NULL;
 
@@ -1187,25 +1245,47 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 
 	get_page(page);
 	atomic_inc(&agp_bridge->current_memory_agp);
-	return page_address(page);
+	return page;
 }
 EXPORT_SYMBOL(agp_generic_alloc_page);
 
-
-void agp_generic_destroy_page(void *addr, int flags)
+void agp_generic_destroy_pages(struct agp_memory *mem)
 {
+	int i;
 	struct page *page;
 
-	if (addr == NULL)
+	if (!mem)
+		return;
+
+#ifdef CONFIG_X86
+	set_pages_array_wb(mem->pages, mem->page_count);
+#endif
+
+	for (i = 0; i < mem->page_count; i++) {
+		page = mem->pages[i];
+
+#ifndef CONFIG_X86
+		unmap_page_from_agp(page);
+#endif
+		put_page(page);
+		__free_page(page);
+		atomic_dec(&agp_bridge->current_memory_agp);
+		mem->pages[i] = NULL;
+	}
+}
+EXPORT_SYMBOL(agp_generic_destroy_pages);
+
+void agp_generic_destroy_page(struct page *page, int flags)
+{
+	if (page == NULL)
 		return;
 
-	page = virt_to_page(addr);
 	if (flags & AGP_PAGE_DESTROY_UNMAP)
 		unmap_page_from_agp(page);
 
 	if (flags & AGP_PAGE_DESTROY_FREE) {
 		put_page(page);
-		free_page((unsigned long)addr);
+		__free_page(page);
 		atomic_dec(&agp_bridge->current_memory_agp);
 	}
 }
@@ -1246,13 +1326,13 @@ static void ipi_handler(void *null)
 
 void global_cache_flush(void)
 {
-	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
+	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
 		panic(PFX "timed out waiting for the other CPUs!\n");
 }
 EXPORT_SYMBOL(global_cache_flush);
 
 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
-	unsigned long addr, int type)
+				      dma_addr_t addr, int type)
 {
 	/* memory type is ignored in the generic routine */
 	if (bridge->driver->masks)
@@ -1315,8 +1395,8 @@ int agp3_generic_configure(void)
 
 	current_size = A_SIZE_16(agp_bridge->current_size);
 
-	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
 
 	/* set aperture size */
 	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
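The conversion above replaces the old mem->memory array and vmalloc_flag with a struct page **pages array, and agp_free_page_array() now relies on is_vmalloc_addr() to pick the matching free routine. The standalone sketch below illustrates that kmalloc-with-vmalloc-fallback pattern outside the AGP code; the helper names alloc_page_array() and free_page_array() are illustrative and not part of the patch.

#include <linux/gfp.h>      /* GFP_KERNEL, __GFP_NOWARN */
#include <linux/mm.h>       /* is_vmalloc_addr(), PAGE_SIZE */
#include <linux/slab.h>     /* kmalloc(), kfree() */
#include <linux/vmalloc.h>  /* vmalloc(), vfree() */

/* Illustrative helper mirroring agp_alloc_page_array(): try kmalloc() for
 * small arrays (faster, less memory overhead), quietly fall back to
 * vmalloc() when that fails or the array is large. */
static struct page **alloc_page_array(size_t count)
{
	size_t size = count * sizeof(struct page *);
	struct page **pages = NULL;

	if (size <= 2 * PAGE_SIZE)
		pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	if (pages == NULL)
		pages = vmalloc(size);
	return pages;
}

/* Matching free: is_vmalloc_addr() identifies which allocator produced the
 * pointer, so no vmalloc_flag has to be carried in the owning structure. */
static void free_page_array(struct page **pages)
{
	if (is_vmalloc_addr(pages))
		vfree(pages);
	else
		kfree(pages);
}

Later kernels wrap this same pattern in kvmalloc()/kvfree(), which is the usual choice for new code.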

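The new bounds checks in agp_allocate_memory(), agp_generic_insert_memory() and agp_generic_remove_memory() all use the same idiom: after an unsigned addition, a sum smaller than one of its operands means the addition wrapped around. A minimal illustration of that check in isolation (the helper name range_fits() is made up for this sketch):

#include <stdbool.h>
#include <stddef.h>

/* Hypothetical helper echoing the "(pg_start + mem->page_count) < pg_start"
 * checks: reject a range whose unsigned end wraps around or exceeds the
 * number of GATT entries. */
static bool range_fits(size_t start, size_t count, size_t num_entries)
{
	size_t end = start + count;

	if (end < start)		/* unsigned overflow: start + count wrapped */
		return false;
	return end <= num_entries;	/* range stays inside the table */
}

A caller would treat a false return the way the patch does, bailing out with -EINVAL before touching the GATT.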