3 * Copyright (C) 2002-2003 Dave Jones.
4 * Copyright (C) 1999 Jeff Hartmann.
5 * Copyright (C) 1999 Precision Insight, Inc.
6 * Copyright (C) 1999 Xi Graphics, Inc.
8 * Permission is hereby granted, free of charge, to any person obtaining a
9 * copy of this software and associated documentation files (the "Software"),
10 * to deal in the Software without restriction, including without limitation
11 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
12 * and/or sell copies of the Software, and to permit persons to whom the
13 * Software is furnished to do so, subject to the following conditions:
15 * The above copyright notice and this permission notice shall be included
16 * in all copies or substantial portions of the Software.
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
19 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
21 * JEFF HARTMANN, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE
24 * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
27 * - Allocate more than order 0 pages to avoid too much linear map splitting.
29 #include <linux/config.h>
30 #include <linux/module.h>
31 #include <linux/pci.h>
32 #include <linux/init.h>
33 #include <linux/pagemap.h>
34 #include <linux/miscdevice.h>
36 #include <linux/agp_backend.h>
37 #include <linux/vmalloc.h>
/* Kernel-virtual pointer to the GATT; set by agp_generic_create_gatt_table(). */
40 __u32 *agp_gatt_table;
/* Bytes of the aperture withheld from AGP use; subtracted from the size and
 * entry counts reported by agp_return_size()/agp_num_entries() below. */
41 int agp_memory_reserved;
44 * Needed by the Nforce GART driver for the time being. Would be
45 * nice to do this some other way instead of needing this export.
47 EXPORT_SYMBOL_GPL(agp_memory_reserved);
50 * Generic routines for handling agp_memory structures -
51 * They use the basic page allocation routines to do the brunt of the work.
/*
 * agp_free_key - return a memory key to the pool.
 * Clears the key's bit in the bridge-wide key bitmap so agp_get_key()
 * can hand it out again.  (NOTE(review): guard lines elided in this view.)
 */
54 void agp_free_key(int key)
60 clear_bit(key, agp_bridge->key_list);
62 EXPORT_SYMBOL(agp_free_key);
/*
 * agp_get_key - allocate the lowest unused memory key.
 * Finds the first clear bit in the bridge's key bitmap (bounded by MAXKEY)
 * and marks it used.
 */
65 static int agp_get_key(void)
69 bit = find_first_zero_bit(agp_bridge->key_list, MAXKEY);
71 set_bit(bit, agp_bridge->key_list);
/*
 * agp_create_memory - allocate and initialise an agp_memory descriptor.
 * @scratch_pages: pages worth of entries to reserve in the page array.
 *
 * kmallocs and zeroes the struct, assigns it a fresh key, then vmallocs
 * the page-address array.  On vmalloc failure the key is released again
 * (remaining error-path lines elided in this view).
 */
78 struct agp_memory *agp_create_memory(int scratch_pages)
80 struct agp_memory *new;
82 new = kmalloc(sizeof(struct agp_memory), GFP_KERNEL);
87 memset(new, 0, sizeof(struct agp_memory));
88 new->key = agp_get_key();
94 new->memory = vmalloc(PAGE_SIZE * scratch_pages);
96 if (new->memory == NULL) {
97 agp_free_key(new->key);
101 new->num_scratch_pages = scratch_pages;
104 EXPORT_SYMBOL(agp_create_memory);
107 * agp_free_memory - free memory associated with an agp_memory pointer.
109 * @curr: agp_memory pointer to be freed.
111 * It is the only function that can be called when the backend is not owned
112 * by the caller. (So it can free memory on client death.)
114 void agp_free_memory(struct agp_memory *curr)
/* Nothing to do for an unsupported bridge or a NULL descriptor. */
118 if ((agp_bridge->type == NOT_SUPPORTED) || (curr == NULL))
/* Unbind from the GATT first if the memory is still mapped. */
121 if (curr->is_bound == TRUE)
122 agp_unbind_memory(curr);
/* Non-zero types are driver-specific; delegate teardown entirely. */
124 if (curr->type != 0) {
125 agp_bridge->driver->free_by_type(curr);
/* Type 0: release each backing page through the driver hook. */
128 if (curr->page_count != 0) {
129 for (i = 0; i < curr->page_count; i++) {
130 agp_bridge->driver->agp_destroy_page(phys_to_virt(curr->memory[i]));
133 agp_free_key(curr->key);
137 EXPORT_SYMBOL(agp_free_memory);
/* Number of page-address entries that fit into one page of the array. */
139 #define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(unsigned long))
142 * agp_allocate_memory - allocate a group of pages of a certain type.
144 * @page_count: size_t argument of the number of pages
145 * @type: u32 argument of the type of memory to be allocated.
147 * Every agp bridge device will allow you to allocate AGP_NORMAL_MEMORY which
148 * maps to physical ram. Any other type is device dependent.
150 * It returns NULL whenever memory is unavailable.
152 struct agp_memory *agp_allocate_memory(size_t page_count, u32 type)
155 struct agp_memory *new;
158 if (agp_bridge->type == NOT_SUPPORTED)
/* Refuse requests that would exceed the bridge's AGP memory budget. */
161 if ((atomic_read(&agp_bridge->current_memory_agp) + page_count) > agp_bridge->max_memory_agp)
/* Non-zero types are handled entirely by the bridge driver. */
165 new = agp_bridge->driver->alloc_by_type(page_count, type);
/* Round up so the scratch array can hold page_count entries. */
169 scratch_pages = (page_count + ENTRIES_PER_PAGE - 1) / ENTRIES_PER_PAGE;
171 new = agp_create_memory(scratch_pages);
176 for (i = 0; i < page_count; i++) {
177 void *addr = agp_bridge->driver->agp_alloc_page();
/* Allocation failed part-way: release everything gathered so far. */
180 agp_free_memory(new);
/* Store the masked physical address of the new page. */
184 agp_bridge->driver->mask_memory(virt_to_phys(addr), type);
188 flush_agp_mappings();
192 EXPORT_SYMBOL(agp_allocate_memory);
195 /* End - Generic routines for handling agp_memory structures */
/*
 * agp_return_size - current aperture size, minus reserved space.
 * Reads the size out of the driver's current_size record, whose layout
 * depends on the driver's size_type discriminator.
 */
198 static int agp_return_size(void)
203 temp = agp_bridge->current_size;
205 switch (agp_bridge->driver->size_type) {
207 current_size = A_SIZE_8(temp)->size;
210 current_size = A_SIZE_16(temp)->size;
213 current_size = A_SIZE_32(temp)->size;
216 current_size = A_SIZE_LVL2(temp)->size;
218 case FIXED_APER_SIZE:
219 current_size = A_SIZE_FIX(temp)->size;
/* agp_memory_reserved is in bytes; convert to MB before subtracting. */
226 current_size -= (agp_memory_reserved / (1024*1024));
/*
 * agp_num_entries - number of usable GATT entries for the current
 * aperture size, less the entries covered by agp_memory_reserved.
 */
233 int agp_num_entries(void)
238 temp = agp_bridge->current_size;
240 switch (agp_bridge->driver->size_type) {
242 num_entries = A_SIZE_8(temp)->num_entries;
245 num_entries = A_SIZE_16(temp)->num_entries;
248 num_entries = A_SIZE_32(temp)->num_entries;
251 num_entries = A_SIZE_LVL2(temp)->num_entries;
253 case FIXED_APER_SIZE:
254 num_entries = A_SIZE_FIX(temp)->num_entries;
/* One GATT entry maps one page, hence the PAGE_SHIFT conversion. */
261 num_entries -= agp_memory_reserved>>PAGE_SHIFT;
266 EXPORT_SYMBOL_GPL(agp_num_entries);
270 * agp_copy_info - copy bridge state information
272 * @info: agp_kern_info pointer. The caller should ensure that this pointer is valid.
274 * This function copies information about the agp bridge device and the state of
275 * the agp backend into an agp_kern_info pointer.
277 int agp_copy_info(struct agp_kern_info *info)
279 memset(info, 0, sizeof(struct agp_kern_info));
/* No bridge, unsupported chipset, or no version info: report NOT_SUPPORTED. */
280 if (!agp_bridge || agp_bridge->type == NOT_SUPPORTED ||
281 !agp_bridge->version) {
282 info->chipset = NOT_SUPPORTED;
286 info->version.major = agp_bridge->version->major;
287 info->version.minor = agp_bridge->version->minor;
288 info->chipset = agp_bridge->type;
289 info->device = agp_bridge->dev;
290 info->mode = agp_bridge->mode;
291 info->aper_base = agp_bridge->gart_bus_addr;
292 info->aper_size = agp_return_size();
293 info->max_memory = agp_bridge->max_memory_agp;
294 info->current_memory = atomic_read(&agp_bridge->current_memory_agp);
295 info->cant_use_aperture = agp_bridge->driver->cant_use_aperture;
296 info->vm_ops = agp_bridge->vm_ops;
297 info->page_mask = ~0UL;
300 EXPORT_SYMBOL(agp_copy_info);
303 /* End - Routine to copy over information structure */
307 * Routines for handling swapping of agp_memory into the GATT -
308 * These routines take agp_memory and insert them into the GATT.
309 * They call device specific routines to actually write to the GATT.
313 * agp_bind_memory - Bind an agp_memory structure into the GATT.
315 * @curr: agp_memory pointer
316 * @pg_start: an offset into the graphics aperture translation table
318 * It returns -EINVAL if the pointer == NULL.
319 * It returns -EBUSY if the area of the table requested is already in use.
321 int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
325 if ((agp_bridge->type == NOT_SUPPORTED) || (curr == NULL))
/* Double-binding is an error; warn and bail. */
328 if (curr->is_bound == TRUE) {
329 printk (KERN_INFO PFX "memory %p is already bound!\n", curr);
/* Make sure CPU caches are clean before the pages go into the GATT. */
332 if (curr->is_flushed == FALSE) {
333 agp_bridge->driver->cache_flush();
334 curr->is_flushed = TRUE;
336 ret_val = agp_bridge->driver->insert_memory(curr, pg_start, curr->type);
/* Record the binding only after the driver insert succeeded. */
341 curr->is_bound = TRUE;
342 curr->pg_start = pg_start;
345 EXPORT_SYMBOL(agp_bind_memory);
349 * agp_unbind_memory - Removes an agp_memory structure from the GATT
351 * @curr: agp_memory pointer to be removed from the GATT.
353 * It returns -EINVAL if this piece of agp_memory is not currently bound to
354 * the graphics aperture translation table or if the agp_memory pointer == NULL
356 int agp_unbind_memory(struct agp_memory *curr)
360 if ((agp_bridge->type == NOT_SUPPORTED) || (curr == NULL))
/* Can't unbind what was never bound; warn and bail. */
363 if (curr->is_bound != TRUE) {
364 printk (KERN_INFO PFX "memory %p was not bound!\n", curr);
368 ret_val = agp_bridge->driver->remove_memory(curr, curr->pg_start, curr->type);
373 curr->is_bound = FALSE;
377 EXPORT_SYMBOL(agp_unbind_memory);
379 /* End - Routines for handling swapping of agp_memory into the GATT */
382 /* Generic Agp routines - Start */
/*
 * agp_v2_parse_one - negotiate AGP v2 settings between the requested
 * mode, the bridge command register (*cmd) and one device's status (*tmp).
 * A capability survives only if all three parties advertise it; the final
 * speed is then reduced to a single rate, fastest first.
 */
383 static void agp_v2_parse_one(u32 *mode, u32 *cmd, u32 *tmp)
385 /* disable SBA if it's not supported */
386 if (!((*cmd & AGPSTAT_SBA) && (*tmp & AGPSTAT_SBA) && (*mode & AGPSTAT_SBA)))
387 *cmd &= ~AGPSTAT_SBA;
/* disable any speed not supported by all of mode, bridge, and device */
390 if (!((*cmd & AGPSTAT2_4X) && (*tmp & AGPSTAT2_4X) && (*mode & AGPSTAT2_4X)))
391 *cmd &= ~AGPSTAT2_4X;
393 if (!((*cmd & AGPSTAT2_2X) && (*tmp & AGPSTAT2_2X) && (*mode & AGPSTAT2_2X)))
394 *cmd &= ~AGPSTAT2_2X;
396 if (!((*cmd & AGPSTAT2_1X) && (*tmp & AGPSTAT2_1X) && (*mode & AGPSTAT2_1X)))
397 *cmd &= ~AGPSTAT2_1X;
399 /* Now we know what mode it should be, clear out the unwanted bits. */
400 if (*cmd & AGPSTAT2_4X)
401 *cmd &= ~(AGPSTAT2_1X | AGPSTAT2_2X); /* 4X */
403 if (*cmd & AGPSTAT2_2X)
404 *cmd &= ~(AGPSTAT2_1X | AGPSTAT2_4X); /* 2X */
406 if (*cmd & AGPSTAT2_1X)
407 *cmd &= ~(AGPSTAT2_2X | AGPSTAT2_4X); /* 1X */
/*
 * agp_v3_parse_one - negotiate AGP v3 settings between the requested
 * mode, the bridge command register (*cmd) and one device's status (*tmp).
 * Also sanitises AGP2.x-style speed bits passed by pre-AGP3 userspace.
 */
411 static void agp_v3_parse_one(u32 *mode, u32 *cmd, u32 *tmp)
413 /* ARQSZ - Set the value to the maximum one.
414 * Don't allow the mode register to override values. */
415 *cmd = ((*cmd & ~AGPSTAT_ARQSZ) |
416 max_t(u32,(*cmd & AGPSTAT_ARQSZ),(*tmp & AGPSTAT_ARQSZ)));
418 /* Calibration cycle.
419 * Don't allow the mode register to override values. */
420 *cmd = ((*cmd & ~AGPSTAT_CAL_MASK) |
421 min_t(u32,(*cmd & AGPSTAT_CAL_MASK),(*tmp & AGPSTAT_CAL_MASK)));
423 /* SBA *must* be supported for AGP v3 */
428 * Check for invalid speeds. This can happen when applications
429 * written before the AGP 3.0 standard pass AGP2.x modes to AGP3 hardware
431 if (*mode & AGPSTAT_MODE_3_0) {
433 * Caller hasn't a clue what it's doing. We are in 3.0 mode,
434 * have been passed a 3.0 mode, but with 2.x speed bits set.
435 * AGP2.x 4x -> AGP3.0 4x.
437 if (*mode & AGPSTAT2_4X) {
438 printk (KERN_INFO PFX "%s passes broken AGP3 flags (%x). Fixed.\n",
439 current->comm, *mode);
440 *mode &= ~AGPSTAT2_4X;
441 *mode |= AGPSTAT3_4X;
445 * The caller doesn't know what they are doing. We are in 3.0 mode,
446 * but have been passed an AGP 2.x mode.
447 * Convert AGP 1x,2x,4x -> AGP 3.0 4x.
449 printk (KERN_INFO PFX "%s passes broken AGP2 flags (%x) in AGP3 mode. Fixed.\n",
450 current->comm, *mode);
451 *mode &= ~(AGPSTAT2_4X | AGPSTAT2_2X | AGPSTAT2_1X);
452 *mode |= AGPSTAT3_4X;
/* disable any v3 speed not supported by all of mode, bridge, and device */
455 if (!((*cmd & AGPSTAT3_8X) && (*tmp & AGPSTAT3_8X) && (*mode & AGPSTAT3_8X)))
456 *cmd &= ~AGPSTAT3_8X;
458 if (!((*cmd & AGPSTAT3_4X) && (*tmp & AGPSTAT3_4X) && (*mode & AGPSTAT3_4X)))
459 *cmd &= ~AGPSTAT3_4X;
461 /* Clear out unwanted bits. */
462 if (*cmd & AGPSTAT3_8X)
463 *cmd &= ~(AGPSTAT3_4X | AGPSTAT3_RSVD);
464 if (*cmd & AGPSTAT3_4X)
465 *cmd &= ~(AGPSTAT3_8X | AGPSTAT3_RSVD);
468 //FIXME: This doesn't smell right.
469 //We need a function we pass an agp_device to.
/*
 * agp_collect_device_status - walk every PCI device with an AGP
 * capability and fold its status register into the command value,
 * intersecting capabilities and taking the minimum request-queue depth.
 * Dispatches to the v3 or v2 parser per device depending on its mode.
 */
470 u32 agp_collect_device_status(u32 mode, u32 cmd)
472 struct pci_dev *device = NULL;
477 while ((device = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device)) != NULL) {
478 cap_ptr = pci_find_capability(device, PCI_CAP_ID_AGP);
483 * Ok, here we have an AGP device. Disable impossible
484 * settings, and adjust the readqueue to the minimum.
486 pci_read_config_dword(device, cap_ptr+PCI_AGP_STATUS, &tmp);
488 /* adjust RQ depth */
489 cmd = ((cmd & ~AGPSTAT_RQ_DEPTH) |
490 min_t(u32, (mode & AGPSTAT_RQ_DEPTH),
491 min_t(u32, (cmd & AGPSTAT_RQ_DEPTH), (tmp & AGPSTAT_RQ_DEPTH))));
493 /* disable FW if it's not supported */
494 if (!((cmd & AGPSTAT_FW) && (tmp & AGPSTAT_FW) && (mode & AGPSTAT_FW)))
497 /* Check to see if we are operating in 3.0 mode */
498 pci_read_config_dword(device, cap_ptr+AGPSTAT, &agp3);
499 if (agp3 & AGPSTAT_MODE_3_0) {
500 agp_v3_parse_one(&mode, &cmd, &tmp);
502 agp_v2_parse_one(&mode, &cmd, &tmp);
507 EXPORT_SYMBOL(agp_collect_device_status);
/*
 * agp_device_command - write the negotiated AGP command value to every
 * PCI device that has an AGP capability, logging the mode chosen.
 * @command: final AGP command register value.
 * @agp_v3: non-zero when operating in AGP 3.0 mode (log message only).
 */
510 void agp_device_command(u32 command, int agp_v3)
512 struct pci_dev *device = NULL;
/* Low three bits of the command encode the speed multiplier. */
515 mode = command & 0x7;
519 while ((device = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, device)) != NULL) {
520 u8 agp = pci_find_capability(device, PCI_CAP_ID_AGP);
524 printk(KERN_INFO PFX "Putting AGP V%d device at %s into %dx mode\n",
525 agp_v3 ? 3 : 2, pci_name(device), mode);
526 pci_write_config_dword(device, agp + PCI_AGP_COMMAND, command);
529 EXPORT_SYMBOL(agp_device_command);
/*
 * get_agp_version - read the AGP major/minor version from the bridge's
 * capability register into agp_bridge, unless an errata workaround has
 * already forced a version.
 */
532 void get_agp_version(struct agp_bridge_data *bridge)
536 /* Exit early if already set by errata workarounds. */
537 if (agp_bridge->major_version != 0)
540 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx, &ncapid);
541 agp_bridge->major_version = (ncapid >> AGP_MAJOR_VERSION_SHIFT) & 0xf;
542 agp_bridge->minor_version = (ncapid >> AGP_MINOR_VERSION_SHIFT) & 0xf;
544 EXPORT_SYMBOL(get_agp_version);
/*
 * agp_generic_enable - bring up the AGP link in the requested mode.
 * Reads the bridge status, intersects it with every AGP device's
 * capabilities via agp_collect_device_status(), then programs the final
 * command value.  AGP 3.x bridges get extra handling (3.5 isochronous
 * setup, or a fallback to 2.x when the bridge is in legacy mode).
 */
547 void agp_generic_enable(u32 mode)
552 get_agp_version(agp_bridge);
554 printk(KERN_INFO PFX "Found an AGP %d.%d compliant device at %s.\n",
555 agp_bridge->major_version,
556 agp_bridge->minor_version,
557 agp_bridge->dev->slot_name);
559 pci_read_config_dword(agp_bridge->dev,
560 agp_bridge->capndx + PCI_AGP_STATUS, &command);
562 command = agp_collect_device_status(mode, command);
563 command |= AGPSTAT_AGP_ENABLE;
565 /* Do AGP version specific frobbing. */
566 if(agp_bridge->major_version >= 3) {
567 pci_read_config_dword(agp_bridge->dev,
568 agp_bridge->capndx+AGPSTAT, &agp3);
570 /* Check to see if we are operating in 3.0 mode */
571 if (agp3 & AGPSTAT_MODE_3_0) {
572 /* If we have 3.5, we can do the isoch stuff. */
573 if (agp_bridge->minor_version >= 5)
574 agp_3_5_enable(agp_bridge);
575 agp_device_command(command, TRUE);
578 /* Disable calibration cycle in RX91<1> when not in AGP3.0 mode of operation.*/
579 command &= ~(7<<10) ;
580 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
582 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp);
584 printk (KERN_INFO PFX "Device is in legacy mode,"
585 " falling back to 2.x\n");
/* AGP v2 path (also the fallback for legacy-mode v3 bridges). */
590 agp_device_command(command, FALSE);
592 EXPORT_SYMBOL(agp_generic_enable);
/*
 * agp_generic_create_gatt_table - allocate and map the GATT.
 * Determines size/page_order/num_entries from the driver's size table,
 * grabs physically contiguous pages for the table (retrying smaller
 * aperture sizes for non-fixed size types), marks them reserved, remaps
 * the table uncached via ioremap_nocache, and points every entry at the
 * scratch page.  Two-level GATTs are not supported here.
 */
595 int agp_generic_create_gatt_table(void)
606 /* The generic routines can't handle 2 level gatt's */
607 if (agp_bridge->driver->size_type == LVL2_APER_SIZE)
611 i = agp_bridge->aperture_size_idx;
612 temp = agp_bridge->current_size;
613 size = page_order = num_entries = 0;
615 if (agp_bridge->driver->size_type != FIXED_APER_SIZE) {
/* Variable aperture: read geometry for the current size index. */
617 switch (agp_bridge->driver->size_type) {
619 size = A_SIZE_8(temp)->size;
621 A_SIZE_8(temp)->page_order;
623 A_SIZE_8(temp)->num_entries;
626 size = A_SIZE_16(temp)->size;
627 page_order = A_SIZE_16(temp)->page_order;
628 num_entries = A_SIZE_16(temp)->num_entries;
631 size = A_SIZE_32(temp)->size;
632 page_order = A_SIZE_32(temp)->page_order;
633 num_entries = A_SIZE_32(temp)->num_entries;
635 /* This case will never really happen. */
636 case FIXED_APER_SIZE:
639 size = page_order = num_entries = 0;
/* Contiguous allocation for the table itself. */
643 table = (char *) __get_free_pages(GFP_KERNEL,
/* On failure, fall back to the next (smaller) aperture size. */
648 switch (agp_bridge->driver->size_type) {
650 agp_bridge->current_size = A_IDX8(agp_bridge);
653 agp_bridge->current_size = A_IDX16(agp_bridge);
656 agp_bridge->current_size = A_IDX32(agp_bridge);
658 /* This case will never really happen. */
659 case FIXED_APER_SIZE:
662 agp_bridge->current_size =
663 agp_bridge->current_size;
666 temp = agp_bridge->current_size;
668 agp_bridge->aperture_size_idx = i;
670 } while (!table && (i < agp_bridge->driver->num_aperture_sizes));
/* Fixed aperture: geometry comes straight from the fixed-size record. */
672 size = ((struct aper_size_info_fixed *) temp)->size;
673 page_order = ((struct aper_size_info_fixed *) temp)->page_order;
674 num_entries = ((struct aper_size_info_fixed *) temp)->num_entries;
675 table = (char *) __get_free_pages(GFP_KERNEL, page_order);
681 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
/* Reserve the table pages so they are never swapped/reclaimed. */
683 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
684 SetPageReserved(page);
686 agp_bridge->gatt_table_real = (u32 *) table;
687 agp_gatt_table = (void *)table;
/* Flush caches around the uncached remap of the table. */
689 agp_bridge->driver->cache_flush();
690 agp_bridge->gatt_table = ioremap_nocache(virt_to_phys(table),
691 (PAGE_SIZE * (1 << page_order)));
692 agp_bridge->driver->cache_flush();
/* ioremap failed: undo the reservation and free the pages. */
694 if (agp_bridge->gatt_table == NULL) {
695 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
696 ClearPageReserved(page);
698 free_pages((unsigned long) table, page_order);
702 agp_bridge->gatt_bus_addr = virt_to_phys(agp_bridge->gatt_table_real);
704 /* AK: bogus, should encode addresses > 4GB */
705 for (i = 0; i < num_entries; i++)
706 agp_bridge->gatt_table[i] = (unsigned long) agp_bridge->scratch_page;
710 EXPORT_SYMBOL(agp_generic_create_gatt_table);
/*
 * agp_generic_free_gatt_table - tear down the GATT created by
 * agp_generic_create_gatt_table(): unmap it, clear the page
 * reservations, free the pages, and reset the bridge bookkeeping.
 */
712 int agp_generic_free_gatt_table(void)
715 char *table, *table_end;
719 temp = agp_bridge->current_size;
721 switch (agp_bridge->driver->size_type) {
723 page_order = A_SIZE_8(temp)->page_order;
726 page_order = A_SIZE_16(temp)->page_order;
729 page_order = A_SIZE_32(temp)->page_order;
731 case FIXED_APER_SIZE:
732 page_order = A_SIZE_FIX(temp)->page_order;
735 /* The generic routines can't deal with 2 level gatt's */
743 /* Do not worry about freeing memory, because if this is
744 * called, then all agp memory is deallocated and removed
747 iounmap(agp_bridge->gatt_table);
748 table = (char *) agp_bridge->gatt_table_real;
749 table_end = table + ((PAGE_SIZE * (1 << page_order)) - 1);
751 for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
752 ClearPageReserved(page);
754 free_pages((unsigned long) agp_bridge->gatt_table_real, page_order);
/* Clear all cached table pointers/addresses. */
756 agp_gatt_table = NULL;
757 agp_bridge->gatt_table = NULL;
758 agp_bridge->gatt_table_real = NULL;
759 agp_bridge->gatt_bus_addr = 0;
763 EXPORT_SYMBOL(agp_generic_free_gatt_table);
/*
 * agp_generic_insert_memory - write a memory block's pages into the
 * GATT starting at @pg_start.  Validates the range against the usable
 * entry count (minus reserved entries), refuses non-zero memory types,
 * checks the target slots are empty, then writes masked entries and
 * flushes the TLB.
 */
766 int agp_generic_insert_memory(struct agp_memory * mem, off_t pg_start, int type)
773 temp = agp_bridge->current_size;
775 switch (agp_bridge->driver->size_type) {
777 num_entries = A_SIZE_8(temp)->num_entries;
780 num_entries = A_SIZE_16(temp)->num_entries;
783 num_entries = A_SIZE_32(temp)->num_entries;
785 case FIXED_APER_SIZE:
786 num_entries = A_SIZE_FIX(temp)->num_entries;
789 /* The generic routines can't deal with 2 level gatt's */
/* Exclude reserved entries; clamp rather than go negative. */
797 num_entries -= agp_memory_reserved/PAGE_SIZE;
798 if (num_entries < 0) num_entries = 0;
800 if (type != 0 || mem->type != 0) {
801 /* The generic routines know nothing of memory types */
/* Reject bindings that would run past the end of the table. */
806 if ((pg_start + mem->page_count) > num_entries)
/* Every target slot must currently be empty. */
811 while (j < (pg_start + mem->page_count)) {
812 if (!PGE_EMPTY(agp_bridge, agp_bridge->gatt_table[j])) {
/* Flush CPU caches once before the pages become GART-visible. */
818 if (mem->is_flushed == FALSE) {
819 agp_bridge->driver->cache_flush();
820 mem->is_flushed = TRUE;
823 for (i = 0, j = pg_start; i < mem->page_count; i++, j++)
824 agp_bridge->gatt_table[j] =
825 agp_bridge->driver->mask_memory(
826 mem->memory[i], mem->type);
828 agp_bridge->driver->tlb_flush(mem);
831 EXPORT_SYMBOL(agp_generic_insert_memory);
/*
 * agp_generic_remove_memory - point the GATT entries for @mem back at
 * the scratch page and flush the TLB.  Non-zero memory types are not
 * handled by the generic code.
 */
834 int agp_generic_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
838 if (type != 0 || mem->type != 0) {
839 /* The generic routines know nothing of memory types */
843 /* AK: bogus, should encode addresses > 4GB */
844 for (i = pg_start; i < (mem->page_count + pg_start); i++) {
845 agp_bridge->gatt_table[i] =
846 (unsigned long) agp_bridge->scratch_page;
849 agp_bridge->driver->tlb_flush(mem);
852 EXPORT_SYMBOL(agp_generic_remove_memory);
/*
 * agp_generic_alloc_by_type - generic bridges support no special memory
 * types; this stub fails the request.  (Body elided in this view —
 * NOTE(review): presumably returns NULL; confirm against full source.)
 */
855 struct agp_memory *agp_generic_alloc_by_type(size_t page_count, int type)
859 EXPORT_SYMBOL(agp_generic_alloc_by_type);
/*
 * agp_generic_free_by_type - release a typed agp_memory descriptor:
 * free its page array (if any) and return its key to the pool.
 */
862 void agp_generic_free_by_type(struct agp_memory *curr)
864 if (curr->memory != NULL)
867 agp_free_key(curr->key);
870 EXPORT_SYMBOL(agp_generic_free_by_type);
874 * Basic Page Allocation Routines -
875 * These routines handle page allocation and by default they reserve the allocated
876 * memory. They also handle incrementing the current_memory_agp value, Which is checked
877 * against a maximum value.
/*
 * agp_generic_alloc_page - allocate one page for AGP use: map it into
 * the AGP aperture view (uncached), bump the usage counter, and return
 * its kernel-virtual address.  (Error handling lines elided in this view.)
 */
880 void *agp_generic_alloc_page(void)
884 page = alloc_page(GFP_KERNEL);
888 map_page_into_agp(page);
892 atomic_inc(&agp_bridge->current_memory_agp);
893 return page_address(page);
895 EXPORT_SYMBOL(agp_generic_alloc_page);
/*
 * agp_generic_destroy_page - undo agp_generic_alloc_page(): unmap the
 * page from the AGP view, free it, and decrement the usage counter.
 * @addr: kernel-virtual address previously returned by the allocator.
 */
898 void agp_generic_destroy_page(void *addr)
905 page = virt_to_page(addr);
906 unmap_page_from_agp(page);
909 free_page((unsigned long)addr);
910 atomic_dec(&agp_bridge->current_memory_agp);
912 EXPORT_SYMBOL(agp_generic_destroy_page);
914 /* End Basic Page Allocation Routines */
918 * agp_enable - initialise the agp point-to-point connection.
920 * @mode: agp mode register value to configure with.
/* Thin dispatcher: bails on unsupported bridges, otherwise delegates to
 * the driver's agp_enable hook. */
922 void agp_enable(u32 mode)
924 if (agp_bridge->type == NOT_SUPPORTED)
926 agp_bridge->driver->agp_enable(mode);
928 EXPORT_SYMBOL(agp_enable);
/* Per-CPU IPI callback used by global_cache_flush(); flushes caches on
 * the receiving CPU (body elided in this view). */
932 static void ipi_handler(void *null)
/*
 * global_cache_flush - flush CPU caches on every processor by running
 * ipi_handler on each CPU; panics if the cross-CPU call fails.
 */
938 void global_cache_flush(void)
941 if (on_each_cpu(ipi_handler, NULL, 1, 1) != 0)
942 panic(PFX "timed out waiting for the other CPUs!\n");
947 EXPORT_SYMBOL(global_cache_flush);
/*
 * agp_generic_mask_memory - convert a physical address into a GATT
 * entry by OR-ing in the driver's first mask value, when a mask table
 * exists.  (Fallback path elided in this view.)
 */
949 unsigned long agp_generic_mask_memory(unsigned long addr, int type)
951 /* memory type is ignored in the generic routine */
952 if (agp_bridge->driver->masks)
953 return addr | agp_bridge->driver->masks[0].mask;
957 EXPORT_SYMBOL(agp_generic_mask_memory);
960 * These functions are implemented according to the AGPv3 spec,
961 * which covers implementation details that had previously been
/*
 * agp3_generic_fetch_size - read the AGPAPSIZE register and match it
 * against the driver's 16-bit aperture size table; on a hit, record the
 * matching entry as previous/current size and return its size in MB.
 * (Fall-through failure path elided in this view.)
 */
965 int agp3_generic_fetch_size(void)
969 struct aper_size_info_16 *values;
971 pci_read_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, &temp_size);
972 values = A_SIZE_16(agp_bridge->driver->aperture_sizes);
974 for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
975 if (temp_size == values[i].size_value) {
976 agp_bridge->previous_size =
977 agp_bridge->current_size = (void *) (values + i);
979 agp_bridge->aperture_size_idx = i;
980 return values[i].size;
985 EXPORT_SYMBOL(agp3_generic_fetch_size);
/*
 * agp3_generic_tlbflush - flush the GART TLB by briefly clearing and
 * then restoring the GTLBEN bit in the AGPCTRL register.
 */
987 void agp3_generic_tlbflush(struct agp_memory *mem)
990 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
991 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_GTLBEN);
992 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl);
994 EXPORT_SYMBOL(agp3_generic_tlbflush);
/*
 * agp3_generic_configure - program an AGPv3 bridge: record the aperture
 * bus address from AGP_APBASE, write the aperture size and GATT base
 * registers, then enable the aperture and the GART TLB in AGPCTRL.
 */
996 int agp3_generic_configure(void)
999 struct aper_size_info_16 *current_size;
1001 current_size = A_SIZE_16(agp_bridge->current_size);
1003 pci_read_config_dword(agp_bridge->dev, AGP_APBASE, &temp);
1004 agp_bridge->gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
1006 /* set aperture size */
1007 pci_write_config_word(agp_bridge->dev, agp_bridge->capndx+AGPAPSIZE, current_size->size_value);
1008 /* set gart pointer */
1009 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPGARTLO, agp_bridge->gatt_bus_addr);
1010 /* enable aperture and GTLB */
1011 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &temp);
1012 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, temp | AGPCTRL_APERENB | AGPCTRL_GTLBEN);
1015 EXPORT_SYMBOL(agp3_generic_configure);
/*
 * agp3_generic_cleanup - disable the aperture by clearing APERENB in
 * the AGPCTRL register.
 */
1017 void agp3_generic_cleanup(void)
1020 pci_read_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, &ctrl);
1021 pci_write_config_dword(agp_bridge->dev, agp_bridge->capndx+AGPCTRL, ctrl & ~AGPCTRL_APERENB);
1023 EXPORT_SYMBOL(agp3_generic_cleanup);
/* Standard AGPv3 aperture size table, largest first.  Columns per
 * aper_size_info_16: {size, num_entries, page_order, size_value}; each
 * entry halves the previous one (NOTE(review): size appears to be MB —
 * num_entries * 4KB pages matches — confirm against the struct docs). */
1025 struct aper_size_info_16 agp3_generic_sizes[AGP_GENERIC_SIZES_ENTRIES] =
1027 {4096, 1048576, 10,0x000},
1028 {2048, 524288, 9, 0x800},
1029 {1024, 262144, 8, 0xc00},
1030 { 512, 131072, 7, 0xe00},
1031 { 256, 65536, 6, 0xf00},
1032 { 128, 32768, 5, 0xf20},
1033 { 64, 16384, 4, 0xf30},
1034 { 32, 8192, 3, 0xf38},
1035 { 16, 4096, 2, 0xf3c},
1036 { 8, 2048, 1, 0xf3e},
1037 { 4, 1024, 0, 0xf3f}
1039 EXPORT_SYMBOL(agp3_generic_sizes);