Diffstat (limited to 'drivers/char/agp/generic.c')
 -rw-r--r--  drivers/char/agp/generic.c | 441
 1 file changed, 333 insertions(+), 108 deletions(-)
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 5567ce8d72b..0fbccce1cee 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -27,10 +27,8 @@
  * TODO:
  * - Allocate more than order 0 pages to avoid too much linear map splitting.
  */
-#include <linux/config.h>
 #include <linux/module.h>
 #include <linux/pci.h>
-#include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/miscdevice.h>
 #include <linux/pm.h>
@@ -38,6 +36,8 @@
 #include <linux/vmalloc.h>
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
 #include <asm/io.h>
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
@@ -52,28 +52,6 @@ int agp_memory_reserved;
  */
 EXPORT_SYMBOL_GPL(agp_memory_reserved);
 
-#if defined(CONFIG_X86)
-int map_page_into_agp(struct page *page)
-{
-	int i;
-	i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
-	/* Caller's responsibility to call global_flush_tlb() for
-	 * performance reasons */
-	return i;
-}
-EXPORT_SYMBOL_GPL(map_page_into_agp);
-
-int unmap_page_from_agp(struct page *page)
-{
-	int i;
-	i = change_page_attr(page, 1, PAGE_KERNEL);
-	/* Caller's responsibility to call global_flush_tlb() for
-	 * performance reasons */
-	return i;
-}
-EXPORT_SYMBOL_GPL(unmap_page_from_agp);
-#endif
-
 /*
  * Generic routines for handling agp_memory structures -
  * They use the basic page allocation routines to do the brunt of the work.
@@ -102,6 +80,64 @@ static int agp_get_key(void)
 	return -1;
 }
 
+/*
+ * Use kmalloc if possible for the page list. Otherwise fall back to
+ * vmalloc. This speeds things up and also saves memory for small AGP
+ * regions.
+ */
+
+void agp_alloc_page_array(size_t size, struct agp_memory *mem)
+{
+	mem->pages = NULL;
+
+	if (size <= 2*PAGE_SIZE)
+		mem->pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (mem->pages == NULL) {
+		mem->pages = vmalloc(size);
+	}
+}
+EXPORT_SYMBOL(agp_alloc_page_array);
+
+void agp_free_page_array(struct agp_memory *mem)
+{
+	if (is_vmalloc_addr(mem->pages)) {
+		vfree(mem->pages);
+	} else {
+		kfree(mem->pages);
+	}
+}
+EXPORT_SYMBOL(agp_free_page_array);
+
+
+static struct agp_memory *agp_create_user_memory(unsigned long num_agp_pages)
+{
+	struct agp_memory *new;
+	unsigned long alloc_size = num_agp_pages*sizeof(struct page *);
+
+	if (INT_MAX/sizeof(struct page *) < num_agp_pages)
+		return NULL;
+
+	new = kzalloc(sizeof(struct agp_memory), GFP_KERNEL);
+	if (new == NULL)
+		return NULL;
+
+	new->key = agp_get_key();
+
+	if (new->key < 0) {
+		kfree(new);
+		return NULL;
+	}
+
+	agp_alloc_page_array(alloc_size, new);
+
+	if (new->pages == NULL) {
+		agp_free_key(new->key);
+		kfree(new);
+		return NULL;
+	}
+	new->num_scratch_pages = 0;
+	return new;
+}
 
 struct agp_memory *agp_create_memory(int scratch_pages)
 {
@@ -117,14 +153,16 @@ struct agp_memory *agp_create_memory(int scratch_pages)
 		kfree(new);
 		return NULL;
 	}
-	new->memory = vmalloc(PAGE_SIZE * scratch_pages);
-	if (new->memory == NULL) {
+	agp_alloc_page_array(PAGE_SIZE * scratch_pages, new);
+
+	if (new->pages == NULL) {
 		agp_free_key(new->key);
 		kfree(new);
 		return NULL;
 	}
 	new->num_scratch_pages = scratch_pages;
+	new->type = AGP_NORMAL_MEMORY;
 	return new;
 }
 EXPORT_SYMBOL(agp_create_memory);
@@ -144,21 +182,37 @@ void agp_free_memory(struct agp_memory *curr)
 	if (curr == NULL)
 		return;
 
-	if (curr->is_bound == TRUE)
+	if (curr->is_bound)
 		agp_unbind_memory(curr);
 
+	if (curr->type >= AGP_USER_TYPES) {
+		agp_generic_free_by_type(curr);
+		return;
+	}
+
 	if (curr->type != 0) {
 		curr->bridge->driver->free_by_type(curr);
 		return;
 	}
 	if (curr->page_count != 0) {
-		for (i = 0; i < curr->page_count; i++) {
-			curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
+		if (curr->bridge->driver->agp_destroy_pages) {
+			curr->bridge->driver->agp_destroy_pages(curr);
+		} else {
+
+			for (i = 0; i < curr->page_count; i++) {
+				curr->bridge->driver->agp_destroy_page(
+					curr->pages[i],
+					AGP_PAGE_DESTROY_UNMAP);
+			}
+			for (i = 0; i < curr->page_count; i++) {
+				curr->bridge->driver->agp_destroy_page(
+					curr->pages[i],
+					AGP_PAGE_DESTROY_FREE);
+			}
 		}
-		flush_agp_mappings();
 	}
 	agp_free_key(curr->key);
-	vfree(curr->memory);
+	agp_free_page_array(curr);
 	kfree(curr);
 }
 EXPORT_SYMBOL(agp_free_memory);
@@ -182,13 +236,23 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 	int scratch_pages;
 	struct agp_memory *new;
 	size_t i;
+	int cur_memory;
 
 	if (!bridge)
 		return NULL;
 
-	if ((atomic_read(&bridge->current_memory_agp) + page_count) > bridge->max_memory_agp)
+	cur_memory = atomic_read(&bridge->current_memory_agp);
+	if ((cur_memory + page_count > bridge->max_memory_agp) ||
+	    (cur_memory + page_count < page_count))
 		return NULL;
 
+	if (type >= AGP_USER_TYPES) {
+		new = agp_generic_alloc_user(page_count, type);
+		if (new)
+			new->bridge = bridge;
+		return new;
+	}
+
 	if (type != 0) {
 		new = bridge->driver->alloc_by_type(page_count, type);
 		if (new)
@@ -203,20 +267,27 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
 	if (new == NULL)
 		return NULL;
 
+	if (bridge->driver->agp_alloc_pages) {
+		if (bridge->driver->agp_alloc_pages(bridge, new, page_count)) {
+			agp_free_memory(new);
+			return NULL;
+		}
+		new->bridge = bridge;
+		return new;
+	}
+
 	for (i = 0; i < page_count; i++) {
-		void *addr = bridge->driver->agp_alloc_page(bridge);
+		struct page *page = bridge->driver->agp_alloc_page(bridge);
 
-		if (addr == NULL) {
+		if (page == NULL) {
 			agp_free_memory(new);
 			return NULL;
 		}
-		new->memory[i] = virt_to_gart(addr);
+		new->pages[i] = page;
 		new->page_count++;
 	}
 	new->bridge = bridge;
 
-	flush_agp_mappings();
-
 	return new;
 }
 EXPORT_SYMBOL(agp_allocate_memory);
@@ -299,7 +370,7 @@ EXPORT_SYMBOL_GPL(agp_num_entries);
 /**
  *	agp_copy_info  -  copy bridge state information
  *
- *	@info:	agp_kern_info pointer. The caller should insure that this pointer is valid.
+ *	@info:	agp_kern_info pointer. The caller should insure that this pointer is valid.
 *
  *	This function copies information about the agp bridge device and the state of
  *	the agp backend into an agp_kern_info pointer.
@@ -355,21 +426,26 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 	if (curr == NULL)
 		return -EINVAL;
 
-	if (curr->is_bound == TRUE) {
+	if (curr->is_bound) {
 		printk(KERN_INFO PFX "memory %p is already bound!\n", curr);
 		return -EINVAL;
 	}
-	if (curr->is_flushed == FALSE) {
+	if (!curr->is_flushed) {
 		curr->bridge->driver->cache_flush();
-		curr->is_flushed = TRUE;
+		curr->is_flushed = true;
 	}
+
 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
 	if (ret_val != 0)
 		return ret_val;
 
-	curr->is_bound = TRUE;
+	curr->is_bound = true;
 	curr->pg_start = pg_start;
+	spin_lock(&agp_bridge->mapped_lock);
+	list_add(&curr->mapped_list, &agp_bridge->mapped_list);
+	spin_unlock(&agp_bridge->mapped_lock);
+
 	return 0;
 }
 EXPORT_SYMBOL(agp_bind_memory);
@@ -390,7 +466,7 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (curr == NULL)
 		return -EINVAL;
 
-	if (curr->is_bound != TRUE) {
+	if (!curr->is_bound) {
 		printk(KERN_INFO PFX "memory %p was not bound!\n", curr);
 		return -EINVAL;
 	}
@@ -400,12 +476,16 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (ret_val != 0)
 		return ret_val;
 
-	curr->is_bound = FALSE;
+	curr->is_bound = false;
 	curr->pg_start = 0;
+	spin_lock(&curr->bridge->mapped_lock);
+	list_del(&curr->mapped_list);
+	spin_unlock(&curr->bridge->mapped_lock);
 	return 0;
 }
 EXPORT_SYMBOL(agp_unbind_memory);
 
+
 /* End - Routines for handling swapping of agp_memory into the GATT */
 
@@ -420,6 +500,31 @@ static void agp_v2_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
 		*requested_mode &= ~AGP2_RESERVED_MASK;
 	}
 
+	/*
+	 * Some dumb bridges are programmed to disobey the AGP2 spec.
+	 * This is likely a BIOS misprogramming rather than poweron default, or
+	 * it would be a lot more common.
+	 * https://bugs.freedesktop.org/show_bug.cgi?id=8816
+	 * AGPv2 spec 6.1.9 states:
+	 *   The RATE field indicates the data transfer rates supported by this
+	 *   device. A.G.P. devices must report all that apply.
+	 * Fix them up as best we can.
+	 */
+	switch (*bridge_agpstat & 7) {
+	case 4:
+		*bridge_agpstat |= (AGPSTAT2_2X | AGPSTAT2_1X);
+		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x4 rate. "
+			"Fixing up support for x2 & x1\n");
+		break;
+	case 2:
+		*bridge_agpstat |= AGPSTAT2_1X;
+		printk(KERN_INFO PFX "BIOS bug. AGP bridge claims to only support x2 rate. "
+			"Fixing up support for x1\n");
+		break;
+	default:
+		break;
+	}
+
 	/* Check the speed bits make sense. Only one should be set. */
 	tmp = *requested_mode & 7;
 	switch (tmp) {
@@ -569,25 +674,37 @@ static void agp_v3_parse_one(u32 *requested_mode, u32 *bridge_agpstat, u32 *vga_
 		*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
 		goto done;
 
+	} else if (*requested_mode & AGPSTAT3_4X) {
+		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+		*bridge_agpstat |= AGPSTAT3_4X;
+		goto done;
+
 	} else {
 
 		/*
-		 * If we didn't specify AGPx8, we can only do x4.
-		 * If the hardware can't do x4, we're up shit creek, and never
-		 *  should have got this far.
+		 * If we didn't specify an AGP mode, we see if both
+		 * the graphics card, and the bridge can do x8, and use if so.
+		 * If not, we fall back to x4 mode.
		 */
-		*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
-		if ((*bridge_agpstat & AGPSTAT3_4X) && (*vga_agpstat & AGPSTAT3_4X))
-			*bridge_agpstat |= AGPSTAT3_4X;
-		else {
-			printk(KERN_INFO PFX "Badness. Don't know which AGP mode to set. "
-					"[bridge_agpstat:%x vga_agpstat:%x fell back to:- bridge_agpstat:%x vga_agpstat:%x]\n",
-					origbridge, origvga, *bridge_agpstat, *vga_agpstat);
-			if (!(*bridge_agpstat & AGPSTAT3_4X))
-				printk(KERN_INFO PFX "Bridge couldn't do AGP x4.\n");
-			if (!(*vga_agpstat & AGPSTAT3_4X))
-				printk(KERN_INFO PFX "Graphic card couldn't do AGP x4.\n");
-			return;
+		if ((*bridge_agpstat & AGPSTAT3_8X) && (*vga_agpstat & AGPSTAT3_8X)) {
+			printk(KERN_INFO PFX "No AGP mode specified. Setting to highest mode "
+			       "supported by bridge & card (x8).\n");
+			*bridge_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+			*vga_agpstat &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
+		} else {
+			printk(KERN_INFO PFX "Fell back to AGPx4 mode because ");
+			if (!(*bridge_agpstat & AGPSTAT3_8X)) {
+				printk(KERN_INFO PFX "bridge couldn't do x8. bridge_agpstat:%x (orig=%x)\n",
+				       *bridge_agpstat, origbridge);
+				*bridge_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+				*bridge_agpstat |= AGPSTAT3_4X;
+			}
+			if (!(*vga_agpstat & AGPSTAT3_8X)) {
+				printk(KERN_INFO PFX "graphics card couldn't do x8. vga_agpstat:%x (orig=%x)\n",
+				       *vga_agpstat, origvga);
+				*vga_agpstat &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
+				*vga_agpstat |= AGPSTAT3_4X;
+			}
 		}
 	}
 
@@ -661,7 +778,7 @@ u32 agp_collect_device_status(struct agp_bridge_data *bridge, u32 requested_mode
 
 EXPORT_SYMBOL(agp_collect_device_status);
 
-void agp_device_command(u32 bridge_agpstat, int agp_v3)
+void agp_device_command(u32 bridge_agpstat, bool agp_v3)
 {
 	struct pci_dev *device = NULL;
 	int mode;
@@ -675,8 +792,8 @@ void agp_device_command(u32 bridge_agpstat, int agp_v3)
 		if (!agp)
 			continue;
 
-		printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
-				agp_v3 ? 3 : 2, pci_name(device), mode);
+		dev_info(&device->dev, "putting AGP V%d device into %dx mode\n",
+			 agp_v3 ? 3 : 2, mode);
 		pci_write_config_dword(device, agp + PCI_AGP_COMMAND, bridge_agpstat);
 	}
 }
@@ -704,10 +821,8 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 
 	get_agp_version(agp_bridge);
 
-	printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
-		agp_bridge->major_version,
-		agp_bridge->minor_version,
-		pci_name(agp_bridge->dev));
+	dev_info(&agp_bridge->dev->dev, "AGP %d.%d bridge\n",
+		 agp_bridge->major_version, agp_bridge->minor_version);
 
 	pci_read_config_dword(agp_bridge->dev,
 			      agp_bridge->capndx + PCI_AGP_STATUS, &bridge_agpstat);
@@ -725,7 +840,7 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 		/* If we have 3.5, we can do the isoch stuff. */
 		if (bridge->minor_version >= 5)
 			agp_3_5_enable(bridge);
-		agp_device_command(bridge_agpstat, TRUE);
+		agp_device_command(bridge_agpstat, true);
 		return;
 	} else {
 		/* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
@@ -736,13 +851,12 @@ void agp_generic_enable(struct agp_bridge_data *bridge, u32 requested_mode)
 
 		pci_write_config_dword(bridge->dev, bridge->capndx+AGPCTRL, temp);
 
-		printk(KERN_INFO PFX "Device is in legacy mode,"
-				" falling back to 2.x\n");
+		dev_info(&bridge->dev->dev, "bridge is in legacy mode, falling back to 2.x\n");
		}
	}
 
 	/* AGP v<3 */
-	agp_device_command(bridge_agpstat, FALSE);
+	agp_device_command(bridge_agpstat, false);
 }
 EXPORT_SYMBOL(agp_generic_enable);
 
@@ -809,12 +923,10 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 		case U32_APER_SIZE:
 			bridge->current_size = A_IDX32(bridge);
 			break;
-			/* This case will never really happen. */
+			/* These cases will never really happen. */
 		case FIXED_APER_SIZE:
 		case LVL2_APER_SIZE:
 		default:
-			bridge->current_size =
-				bridge->current_size;
 			break;
 		}
 		temp = bridge->current_size;
@@ -841,9 +953,16 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 	agp_gatt_table = (void *)table;
 
 	bridge->driver->cache_flush();
-	bridge->gatt_table = ioremap_nocache(virt_to_gart(table),
+#ifdef CONFIG_X86
+	if (set_memory_uc((unsigned long)table, 1 << page_order))
+		printk(KERN_WARNING "Could not set GATT table memory to UC!\n");
+
+	bridge->gatt_table = (u32 __iomem *)table;
+#else
+	bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
 					(PAGE_SIZE * (1 << page_order)));
 	bridge->driver->cache_flush();
+#endif
 
 	if (bridge->gatt_table == NULL) {
 		for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
@@ -853,7 +972,7 @@ int agp_generic_create_gatt_table(struct agp_bridge_data *bridge)
 		return -ENOMEM;
 	}
 
-	bridge->gatt_bus_addr = virt_to_gart(bridge->gatt_table_real);
+	bridge->gatt_bus_addr = virt_to_phys(bridge->gatt_table_real);
 
 	/* AK: bogus, should encode addresses > 4GB */
 	for (i = 0; i < num_entries; i++) {
@@ -890,7 +1009,6 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
 	case LVL2_APER_SIZE:
 		/* The generic routines can't deal with 2 level gatt's */
 		return -EINVAL;
-		break;
 	default:
 		page_order = 0;
 		break;
@@ -900,7 +1018,11 @@ int agp_generic_free_gatt_table(struct agp_bridge_data *bridge)
 	 * called, then all agp memory is deallocated and removed
 	 * from the table. */
 
+#ifdef CONFIG_X86
+	set_memory_wb((unsigned long)bridge->gatt_table, 1 << page_order);
+#else
 	iounmap(bridge->gatt_table);
+#endif
 	table = (char *) bridge->gatt_table_real;
 	table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
 
@@ -926,11 +1048,15 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	off_t j;
 	void *temp;
 	struct agp_bridge_data *bridge;
+	int mask_type;
 
 	bridge = mem->bridge;
 	if (!bridge)
 		return -EINVAL;
 
+	if (mem->page_count == 0)
+		return 0;
+
 	temp = bridge->current_size;
 
 	switch (bridge->driver->size_type) {
@@ -949,7 +1075,6 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	case LVL2_APER_SIZE:
 		/* The generic routines can't deal with 2 level gatt's */
 		return -EINVAL;
-		break;
 	default:
 		num_entries = 0;
 		break;
@@ -958,13 +1083,17 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 	num_entries -= agp_memory_reserved/PAGE_SIZE;
 	if (num_entries < 0) num_entries = 0;
 
-	if (type != 0 || mem->type != 0) {
+	if (type != mem->type)
+		return -EINVAL;
+
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0) {
 		/* The generic routines know nothing of memory types */
 		return -EINVAL;
 	}
 
-	/* AK: could wrap */
-	if ((pg_start + mem->page_count) > num_entries)
+	if (((pg_start + mem->page_count) > num_entries) ||
+	    ((pg_start + mem->page_count) < pg_start))
 		return -EINVAL;
 
 	j = pg_start;
@@ -975,15 +1104,18 @@ int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
 		j++;
 	}
 
-	if (mem->is_flushed == FALSE) {
+	if (!mem->is_flushed) {
 		bridge->driver->cache_flush();
-		mem->is_flushed = TRUE;
+		mem->is_flushed = true;
 	}
 
 	for (i = 0, j = pg_start; i < mem->page_count; i++, j++) {
-		writel(bridge->driver->mask_memory(bridge, mem->memory[i], mem->type), bridge->gatt_table+j);
-		readl(bridge->gatt_table+j);	/* PCI Posting. */
+		writel(bridge->driver->mask_memory(bridge,
+						   page_to_phys(mem->pages[i]),
+						   mask_type),
+		       bridge->gatt_table+j);
 	}
+	readl(bridge->gatt_table+j-1);	/* PCI Posting. */
 
 	bridge->driver->tlb_flush(mem);
 	return 0;
@@ -995,12 +1127,25 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 {
 	size_t i;
 	struct agp_bridge_data *bridge;
+	int mask_type, num_entries;
 
 	bridge = mem->bridge;
 	if (!bridge)
 		return -EINVAL;
 
-	if (type != 0 || mem->type != 0) {
+	if (mem->page_count == 0)
+		return 0;
+
+	if (type != mem->type)
+		return -EINVAL;
+
+	num_entries = agp_num_entries();
+	if (((pg_start + mem->page_count) > num_entries) ||
+	    ((pg_start + mem->page_count) < pg_start))
+		return -EINVAL;
+
+	mask_type = bridge->driver->agp_type_to_mask_type(bridge, type);
+	if (mask_type != 0) {
 		/* The generic routines know nothing of memory types */
 		return -EINVAL;
 	}
@@ -1008,31 +1153,48 @@ int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	/* AK: bogus, should encode addresses > 4GB */
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {
 		writel(bridge->scratch_page, bridge->gatt_table+i);
-		readl(bridge->gatt_table+i);	/* PCI Posting. */
 	}
+	readl(bridge->gatt_table+i-1);	/* PCI Posting. */
 
-	global_cache_flush();
 	bridge->driver->tlb_flush(mem);
 	return 0;
 }
 EXPORT_SYMBOL(agp_generic_remove_memory);
 
-
 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
 {
 	return NULL;
 }
 EXPORT_SYMBOL(agp_generic_alloc_by_type);
 
-
 void agp_generic_free_by_type(struct agp_memory *curr)
 {
-	vfree(curr->memory);
+	agp_free_page_array(curr);
 	agp_free_key(curr->key);
 	kfree(curr);
 }
 EXPORT_SYMBOL(agp_generic_free_by_type);
 
+struct agp_memory *agp_generic_alloc_user(size_t page_count, int type)
+{
+	struct agp_memory *new;
+	int i;
+	int pages;
+
+	pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
+	new = agp_create_user_memory(page_count);
+	if (new == NULL)
+		return NULL;
+
+	for (i = 0; i < page_count; i++)
+		new->pages[i] = NULL;
+	new->page_count = 0;
+	new->type = type;
+	new->num_scratch_pages = pages;
+
+	return new;
+}
+EXPORT_SYMBOL(agp_generic_alloc_user);
 
 /*
  * Basic Page Allocation Routines -
@@ -1041,37 +1203,91 @@ EXPORT_SYMBOL(agp_generic_free_by_type);
  * against a maximum value.
  */
 
-void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
+int agp_generic_alloc_pages(struct agp_bridge_data *bridge, struct agp_memory *mem, size_t num_pages)
+{
+	struct page * page;
+	int i, ret = -ENOMEM;
+
+	for (i = 0; i < num_pages; i++) {
+		page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
+		/* agp_free_memory() needs gart address */
+		if (page == NULL)
+			goto out;
+
+#ifndef CONFIG_X86
+		map_page_into_agp(page);
+#endif
+		get_page(page);
+		atomic_inc(&agp_bridge->current_memory_agp);
+
+		mem->pages[i] = page;
+		mem->page_count++;
+	}
+
+#ifdef CONFIG_X86
+	set_pages_array_uc(mem->pages, num_pages);
+#endif
+	ret = 0;
+out:
+	return ret;
+}
+EXPORT_SYMBOL(agp_generic_alloc_pages);
+
+struct page *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 {
 	struct page * page;
 
-	page = alloc_page(GFP_KERNEL);
+	page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 	if (page == NULL)
 		return NULL;
 
 	map_page_into_agp(page);
 
 	get_page(page);
-	SetPageLocked(page);
 	atomic_inc(&agp_bridge->current_memory_agp);
-	return page_address(page);
+	return page;
 }
 EXPORT_SYMBOL(agp_generic_alloc_page);
 
-
-void agp_generic_destroy_page(void *addr)
+void agp_generic_destroy_pages(struct agp_memory *mem)
 {
+	int i;
 	struct page *page;
 
-	if (addr == NULL)
+	if (!mem)
 		return;
 
-	page = virt_to_page(addr);
-	unmap_page_from_agp(page);
-	put_page(page);
-	unlock_page(page);
-	free_page((unsigned long)addr);
-	atomic_dec(&agp_bridge->current_memory_agp);
+#ifdef CONFIG_X86
+	set_pages_array_wb(mem->pages, mem->page_count);
+#endif
+
+	for (i = 0; i < mem->page_count; i++) {
+		page = mem->pages[i];
+
+#ifndef CONFIG_X86
+		unmap_page_from_agp(page);
+#endif
+		put_page(page);
+		__free_page(page);
+		atomic_dec(&agp_bridge->current_memory_agp);
+		mem->pages[i] = NULL;
+	}
+}
+EXPORT_SYMBOL(agp_generic_destroy_pages);
+
+void agp_generic_destroy_page(struct page *page, int flags)
+{
+	if (page == NULL)
+		return;
+
+	if (flags & AGP_PAGE_DESTROY_UNMAP)
+		unmap_page_from_agp(page);
+
+	if (flags & AGP_PAGE_DESTROY_FREE) {
+		put_page(page);
+		__free_page(page);
+		atomic_dec(&agp_bridge->current_memory_agp);
+	}
 }
 EXPORT_SYMBOL(agp_generic_destroy_page);
 
@@ -1110,13 +1326,13 @@ static void ipi_handler(void *null)
 
 void global_cache_flush(void)
 {
-	if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
+	if (on_each_cpu(ipi_handler, NULL, 1) != 0)
 		panic(PFX "timed out waiting for the other CPUs!\n");
 }
 EXPORT_SYMBOL(global_cache_flush);
 
 unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
-	unsigned long addr, int type)
+				      dma_addr_t addr, int type)
 {
 	/* memory type is ignored in the generic routine */
 	if (bridge->driver->masks)
@@ -1126,6 +1342,15 @@ unsigned long agp_generic_mask_memory(struct agp_bridge_data *bridge,
 }
 EXPORT_SYMBOL(agp_generic_mask_memory);
 
+int agp_generic_type_to_mask_type(struct agp_bridge_data *bridge,
+				  int type)
+{
+	if (type >= AGP_USER_TYPES)
+		return 0;
+	return type;
+}
+EXPORT_SYMBOL(agp_generic_type_to_mask_type);
+
 /*
  * These functions are implemented according to the AGPv3 spec,
  * which covers implementation details that had previously been
@@ -1170,8 +1395,8 @@ int agp3_generic_configure(void)
 	current_size = A_SIZE_16(agp_bridge->current_size);
 
-	pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
-	agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+	agp_bridge->gart_bus_addr = pci_bus_address(agp_bridge->dev,
+						    AGP_APERTURE_BAR);
 
 	/* set aperture size */
 	pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE,
 			      current_size->size_value);
@@ -1192,7 +1417,7 @@ void agp3_generic_cleanup(void)
 }
 EXPORT_SYMBOL(agp3_generic_cleanup);
 
-struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
+const struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
 {
 	{4096, 1048576, 10,0x000},
 	{2048, 524288, 9, 0x800},
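As a companion to the patch above, the allocation-helper pattern it introduces in agp_alloc_page_array()/agp_free_page_array() (try a small, physically contiguous kmalloc() first, silently fall back to vmalloc() for larger page arrays, and let is_vmalloc_addr() pick the matching free path) is restated below as a standalone sketch. The demo_* identifiers are hypothetical and the snippet is not part of drivers/char/agp/generic.c; it also folds in the INT_MAX-style overflow guard that agp_create_user_memory() applies before computing the array size.

/* Illustration only: mirrors the kmalloc-with-vmalloc-fallback helpers added
 * by the patch above. The demo_* names are hypothetical, not kernel API.
 */
#include <linux/kernel.h>	/* INT_MAX */
#include <linux/slab.h>		/* kmalloc(), kfree() */
#include <linux/vmalloc.h>	/* vmalloc(), vfree() */
#include <linux/mm.h>		/* is_vmalloc_addr(), struct page, PAGE_SIZE */

static struct page **demo_alloc_page_array(unsigned long num_pages)
{
	struct page **pages = NULL;
	size_t size;

	/* Overflow guard, same idea as the INT_MAX check in
	 * agp_create_user_memory(): refuse counts whose byte size would wrap. */
	if (INT_MAX / sizeof(struct page *) < num_pages)
		return NULL;
	size = num_pages * sizeof(struct page *);

	/* Small arrays: try a cheap, physically contiguous kmalloc() first,
	 * without warning on failure... */
	if (size <= 2 * PAGE_SIZE)
		pages = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
	/* ...and fall back to vmalloc() for bigger arrays or when kmalloc fails. */
	if (pages == NULL)
		pages = vmalloc(size);
	return pages;
}

static void demo_free_page_array(struct page **pages)
{
	/* is_vmalloc_addr() recovers which allocator produced the buffer,
	 * so callers never have to track it themselves. */
	if (is_vmalloc_addr(pages))
		vfree(pages);
	else
		kfree(pages);
}

Compared with the old unconditional vmalloc() of the scratch-page list, this keeps small AGP regions out of the vmalloc area entirely, which is the speed and memory saving the comment in the patch refers to.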
