diff -ruN orig/linux-2.4.21-pre5/drivers/char/Config.in linux-2.4.21-pre5/drivers/char/Config.in
--- orig/linux-2.4.21-pre5/drivers/char/Config.in	Fri Mar 14 21:05:03 2003
+++ linux-2.4.21-pre5/drivers/char/Config.in	Fri Mar 14 22:35:31 2003
@@ -295,6 +295,7 @@
    tristate '/dev/agpgart (AGP Support)' CONFIG_AGP
 fi
 if [ "$CONFIG_AGP" != "n" ]; then
+   bool 'AGP 3.0 Support (Only for VIA KT400, at the moment)' CONFIG_AGP3
    bool ' Intel 440LX/BX/GX and I815/I820/I830M/I830MP/I840/I845/I850/I860 support' CONFIG_AGP_INTEL
    bool ' Intel I810/I815/I830M (on-board) support' CONFIG_AGP_I810
    bool ' VIA chipset support' CONFIG_AGP_VIA
diff -ruN orig/linux-2.4.21-pre5/drivers/char/agp/agp.h linux-2.4.21-pre5/drivers/char/agp/agp.h
--- orig/linux-2.4.21-pre5/drivers/char/agp/agp.h	Fri Mar 14 21:04:58 2003
+++ linux-2.4.21-pre5/drivers/char/agp/agp.h	Fri Mar 14 23:21:51 2003
@@ -349,6 +349,12 @@
 #define VIA_APSIZE	0x84
 #define VIA_ATTBASE	0x88
 
+/* VIA KT400 */
+#define VIA_AGP3_GARTCTRL	0x90
+#define VIA_AGP3_APSIZE		0x94
+#define VIA_AGP3_ATTBASE	0x98
+#define VIA_AGPSEL		0xfd
+
 /* SiS registers */
 #define SIS_APBASE	0x10
 #define SIS_ATTBASE	0x90
diff -ruN orig/linux-2.4.21-pre5/drivers/char/agp/agpgart_be.c linux-2.4.21-pre5/drivers/char/agp/agpgart_be.c
--- orig/linux-2.4.21-pre5/drivers/char/agp/agpgart_be.c	Fri Mar 14 21:04:58 2003
+++ linux-2.4.21-pre5/drivers/char/agp/agpgart_be.c	Sat Mar 15 17:51:50 2003
@@ -577,7 +577,7 @@
 	for (page = virt_to_page(table); page <= virt_to_page(table_end); page++)
 		SetPageReserved(page);
 
-	agp_bridge.gatt_table_real = (unsigned long *) table;
+	agp_bridge.gatt_table_real = (u32 *) table;
 	agp_gatt_table = (void *)table;
 #ifdef CONFIG_X86
 	err = change_page_attr(virt_to_page(table), 1<<page_order, PAGE_KERNEL_NOCACHE);
+#ifdef CONFIG_AGP3
+struct agp_3_0_dev {
+	struct list_head list;
+	u8 capndx;
+	u32 maxbw;
+	struct pci_dev *dev;
+};
+
+static int agp_3_0_dev_list_insert(struct list_head *head, struct list_head *new)
+{
+	struct agp_3_0_dev *cur, *n = list_entry(new, struct agp_3_0_dev, list);
+	struct list_head *pos;
+
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_0_dev, list);
+		if(cur->maxbw > n->maxbw)
+			break;
+	}
+	list_add_tail(new, pos);
+
+	return 0;
+}
+
+static int agp_3_0_dev_list_sort(struct agp_3_0_dev *list, unsigned int ndevs)
+{
+	struct agp_3_0_dev *cur;
+	struct pci_dev *dev;
+	struct list_head *pos, *tmp, *head = &list->list, *start = head->next;
+	u32 nistat;
+
+	INIT_LIST_HEAD(head);
+
+	for(pos = start; pos != head;) {
+		cur = list_entry(pos, struct agp_3_0_dev, list);
+		dev = cur->dev;
+
+		pci_read_config_dword(dev, cur->capndx + 0x0c, &nistat);
+		cur->maxbw = (nistat >> 16) & 0xff;
+
+		tmp = pos;
+		pos = pos->next;
+		agp_3_0_dev_list_insert(head, tmp);
+	}
+	return 0;
+}
+
+/*
+ * Initialize all isochronous transfer parameters for an AGP 3.0
+ * node (i.e. a host bridge in combination with the adapters
+ * lying behind it...)
+ */
+static int agp_3_0_isochronous_node_enable(struct agp_3_0_dev *dev_list, unsigned int ndevs)
+{
+	/*
+	 * Convenience structure to make the calculations clearer
+	 * here.  The field names come straight from the AGP 3.0 spec.
+	 */
+	struct isoch_data {
+		u32 maxbw;
+		u32 n;
+		u32 y;
+		u32 l;
+		u32 rq;
+		struct agp_3_0_dev *dev;
+	};
+
+	struct pci_dev *td = agp_bridge.dev, *dev;
+	struct list_head *head = &dev_list->list, *pos;
+	struct agp_3_0_dev *cur;
+	struct isoch_data *master, target;
+	unsigned int cdev = 0;
+	u32 mnistat, tnistat, tstatus, mcmd;
+	u16 tnicmd, mnicmd;
+	u8 mcapndx;
+	u32 tot_bw = 0, tot_n = 0, tot_rq = 0, y_max, rq_isoch, rq_async;
+	u32 step, rem, rem_isoch, rem_async;
+	int ret = 0;
+
+	/*
+	 * We'll work with an array of isoch_data's (one for each
+	 * device in dev_list) throughout this function.
+	 */
+	if((master = kmalloc(ndevs * sizeof(*master), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto get_out;
+	}
+
+	/*
+	 * Sort the device list by maxbw.  We need to do this because the
+	 * spec suggests that the devices with the smallest requirements
+	 * have their resources allocated first, with all remaining resources
+	 * falling to the device with the largest requirement.
+	 *
+	 * We don't do exactly this; we divide target resources by ndevs
+	 * and split them amongst the AGP 3.0 devices.  The remainder of such
+	 * division operations is dropped on the last device, sort of like
+	 * the spec mentions it should be done.
+	 *
+	 * We can't do this sort when we initially construct the dev_list
+	 * because we don't know until this function whether isochronous
+	 * transfers are enabled and consequently whether maxbw will mean
+	 * anything.
+	 */
+	if((ret = agp_3_0_dev_list_sort(dev_list, ndevs)) != 0)
+		goto free_and_exit;
+
+	pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat);
+	pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus);
+
+	/* Extract power-on defaults from the target */
+	target.maxbw = (tnistat >> 16) & 0xff;
+	target.n = (tnistat >> 8) & 0xff;
+	target.y = (tnistat >> 6) & 0x3;
+	target.l = (tnistat >> 3) & 0x7;
+	target.rq = (tstatus >> 24) & 0xff;
+
+	y_max = target.y;
+
+	/*
+	 * Extract power-on defaults for each device in dev_list.  Along
+	 * the way, calculate the total isochronous bandwidth required
+	 * by these devices and the largest requested payload size.
+	 */
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_0_dev, list);
+		dev = cur->dev;
+
+		mcapndx = cur->capndx;
+
+		pci_read_config_dword(dev, cur->capndx + 0x0c, &mnistat);
+
+		master[cdev].maxbw = (mnistat >> 16) & 0xff;
+		master[cdev].n = (mnistat >> 8) & 0xff;
+		master[cdev].y = (mnistat >> 6) & 0x3;
+		master[cdev].dev = cur;
+
+		tot_bw += master[cdev].maxbw;
+		y_max = max(y_max, master[cdev].y);
+
+		cdev++;
+	}
+
+	/* Check if this configuration has any chance of working */
+	if(tot_bw > target.maxbw) {
+		printk(KERN_ERR PFX "isochronous bandwidth required "
+			"by AGP 3.0 devices exceeds that which is supported by "
+			"the AGP 3.0 bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	target.y = y_max;
+
+	/*
+	 * Write the calculated payload size into the target's NICMD
+	 * register.  Doing this directly affects the ISOCH_N value
+	 * in the target's NISTAT register, so we need to do this now
+	 * to get an accurate value for ISOCH_N later.
+	 */
+	pci_read_config_word(td, agp_bridge.capndx + 0x20, &tnicmd);
+	tnicmd &= ~(0x3 << 6);
+	tnicmd |= target.y << 6;
+	pci_write_config_word(td, agp_bridge.capndx + 0x20, tnicmd);
+
+	/* Reread the target's ISOCH_N */
+	pci_read_config_dword(td, agp_bridge.capndx + 0x0c, &tnistat);
+	target.n = (tnistat >> 8) & 0xff;
+
+	/* Calculate the minimum ISOCH_N needed by each master */
+	for(cdev = 0; cdev < ndevs; cdev++) {
+		master[cdev].y = target.y;
+		master[cdev].n = master[cdev].maxbw / (master[cdev].y + 1);
+
+		tot_n += master[cdev].n;
+	}
+
+	/* Exit if the minimal ISOCH_N allocation among the masters is more
+	 * than the target can handle. */
+	if(tot_n > target.n) {
+		printk(KERN_ERR PFX "number of isochronous "
+			"transactions per period required by AGP 3.0 devices "
+			"exceeds that which is supported by the AGP 3.0 "
+			"bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	/* Calculate left over ISOCH_N capability in the target.  We'll give
+	 * this to the hungriest device (as per the spec) */
+	rem = target.n - tot_n;
+
+	/*
+	 * Calculate the minimum isochronous RQ depth needed by each master.
+	 * Along the way, distribute the extra ISOCH_N capability calculated
+	 * above.
+	 */
+	for(cdev = 0; cdev < ndevs; cdev++) {
+		/*
+		 * This is a little subtle.  If ISOCH_Y > 64B, then ISOCH_Y
+		 * byte isochronous writes will be broken into 64B pieces.
+		 * This means we need to budget more RQ depth to account for
+		 * these kinds of writes (each isochronous write is actually
+		 * many writes on the AGP bus).
+		 */
+		master[cdev].rq = master[cdev].n;
+		if(master[cdev].y > 0x1) {
+			master[cdev].rq *= (1 << (master[cdev].y - 1));
+		}
+
+		tot_rq += master[cdev].rq;
+
+		if(cdev == ndevs - 1)
+			master[cdev].n += rem;
+	}
+
+	/* Figure the number of isochronous and asynchronous RQ slots the
+	 * target is providing. */
+	rq_isoch = (target.y > 0x1) ? target.n * (1 << (target.y - 1)) : target.n;
+	rq_async = target.rq - rq_isoch;
+
+	/* Exit if the minimal RQ needs of the masters exceeds what the target
+	 * can provide. */
+	if(tot_rq > rq_isoch) {
+		printk(KERN_ERR PFX "number of request queue slots "
+			"required by the isochronous bandwidth requested by "
+			"AGP 3.0 devices exceeds the number provided by the "
+			"AGP 3.0 bridge!\n");
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	/* Calculate asynchronous RQ capability in the target (per master) as
+	 * well as the total number of leftover isochronous RQ slots. */
+	step = rq_async / ndevs;
+	rem_async = step + (rq_async % ndevs);
+	rem_isoch = rq_isoch - tot_rq;
+
+	/* Distribute the extra RQ slots calculated above and write our
+	 * isochronous settings out to the actual devices. */
+	for(cdev = 0; cdev < ndevs; cdev++) {
+		cur = master[cdev].dev;
+		dev = cur->dev;
+
+		mcapndx = cur->capndx;
+
+		master[cdev].rq += (cdev == ndevs - 1)
+			? (rem_async + rem_isoch) : step;
+
+		pci_read_config_word(dev, cur->capndx + 0x20, &mnicmd);
+		pci_read_config_dword(dev, cur->capndx + 0x08, &mcmd);
+
+		mnicmd &= ~(0xff << 8);
+		mnicmd &= ~(0x3 << 6);
+		mcmd &= ~(0xff << 24);
+
+		mnicmd |= master[cdev].n << 8;
+		mnicmd |= master[cdev].y << 6;
+		mcmd |= master[cdev].rq << 24;
+
+		pci_write_config_dword(dev, cur->capndx + 0x08, mcmd);
+		pci_write_config_word(dev, cur->capndx + 0x20, mnicmd);
+	}
+
+free_and_exit:
+	kfree(master);
+
+get_out:
+	return ret;
+}
+
+/*
+ * This function basically allocates request queue slots among the
+ * AGP 3.0 systems in nonisochronous nodes.  The algorithm is
+ * pretty stupid: divide the total number of RQ slots provided by the
+ * target by ndevs.  Distribute this many slots to each AGP 3.0 device,
+ * giving any left over slots to the last device in dev_list.
+ */
+static int agp_3_0_nonisochronous_node_enable(struct agp_3_0_dev *dev_list, unsigned int ndevs)
+{
+	struct agp_3_0_dev *cur;
+	struct list_head *head = &dev_list->list, *pos;
+	u32 tstatus, mcmd;
+	u32 trq, mrq, rem;
+	unsigned int cdev = 0;
+
+	pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x04, &tstatus);
+
+	trq = (tstatus >> 24) & 0xff;
+	mrq = trq / ndevs;
+
+	rem = mrq + (trq % ndevs);
+
+	for(pos = head->next; cdev < ndevs; cdev++, pos = pos->next) {
+		cur = list_entry(pos, struct agp_3_0_dev, list);
+
+		pci_read_config_dword(cur->dev, cur->capndx + 0x08, &mcmd);
+		mcmd &= ~(0xff << 24);
+		mcmd |= ((cdev == ndevs - 1) ? rem : mrq) << 24;
+		pci_write_config_dword(cur->dev, cur->capndx + 0x08, mcmd);
+	}
+
+	return 0;
+}
+
+/*
+ * Fully configure and enable an AGP 3.0 host bridge and all the devices
+ * lying behind it.
+ */
+static int agp_3_0_node_enable(u32 mode, u32 minor)
+{
+	struct pci_dev *td = agp_bridge.dev, *dev;
+	u8 bus_num, mcapndx;
+	u32 isoch, arqsz, cal_cycle, tmp, rate;
+	u32 tstatus, tcmd, mcmd, mstatus, ncapid;
+	u32 mmajor, mminor;
+	u16 mpstat;
+	struct agp_3_0_dev *dev_list, *cur;
+	struct list_head *head, *pos;
+	unsigned int ndevs = 0;
+	int ret = 0;
+
+	/*
+	 * Allocate a head for our AGP 3.0 device list (multiple AGP 3.0
+	 * devices are allowed behind a single bridge).
+	 */
+	if((dev_list = kmalloc(sizeof(*dev_list), GFP_KERNEL)) == NULL) {
+		ret = -ENOMEM;
+		goto get_out;
+	}
+	head = &dev_list->list;
+	INIT_LIST_HEAD(head);
+
+	/*
+	 * Find all the devices on this bridge's secondary bus and add them
+	 * to dev_list.
+	 */
+	pci_read_config_byte(td, PCI_SECONDARY_BUS, &bus_num);
+	pci_for_each_dev(dev) {
+		if(dev->bus->number == bus_num) {
+			if((cur = kmalloc(sizeof(*cur), GFP_KERNEL)) == NULL) {
+				ret = -ENOMEM;
+				goto free_and_exit;
+			}
+
+			cur->dev = dev;
+
+			pos = &cur->list;
+			list_add(pos, head);
+			ndevs++;
+		}
+	}
+
+	/* Extract some power-on defaults from the target */
+	pci_read_config_dword(td, agp_bridge.capndx + 0x04, &tstatus);
+	isoch = (tstatus >> 17) & 0x1;
+	arqsz = (tstatus >> 13) & 0x7;
+	cal_cycle = (tstatus >> 10) & 0x7;
+	rate = tstatus & 0x7;
+
+	/*
+	 * Take an initial pass through the devices lying behind our host
+	 * bridge.  Make sure each one is actually an AGP 3.0 device, otherwise
+	 * exit with an error message.  Along the way store the AGP 3.0
+	 * cap_ptr for each device, the minimum supported cal_cycle, and the
+	 * minimum supported data rate.
+	 */
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_0_dev, list);
+		dev = cur->dev;
+
+		pci_read_config_word(dev, PCI_STATUS, &mpstat);
+		if((mpstat & PCI_STATUS_CAP_LIST) == 0)
+			continue;
+
+		pci_read_config_byte(dev, PCI_CAPABILITY_LIST, &mcapndx);
+		if (mcapndx != 0x00) {
+			do {
+				pci_read_config_dword(dev, mcapndx, &ncapid);
+				if ((ncapid & 0xff) != 0x02)
+					mcapndx = (ncapid >> 8) & 0xff;
+			}
+			while (((ncapid & 0xff) != 0x02) && (mcapndx != 0x00));
+		}
+
+		if(mcapndx == 0) {
+			printk(KERN_ERR PFX "woah! Non-AGP device "
+				"found on the secondary bus of an AGP 3.0 bridge!\n");
+			ret = -ENODEV;
+			goto free_and_exit;
+		}
+
+		mmajor = (ncapid >> 20) & 0xf;
+		mminor = (ncapid >> 16) & 0xf;
+
+		if(mmajor < 3) {
+			printk(KERN_ERR PFX "woah! AGP 2.0 device "
+				"found on the secondary bus of an AGP 3.0 "
+				"bridge operating with AGP 3.0 electricals!\n");
+			ret = -ENODEV;
+			goto free_and_exit;
+		}
+
+		cur->capndx = mcapndx;
+
+		pci_read_config_dword(dev, cur->capndx + 0x04, &mstatus);
+
+		if(((mstatus >> 3) & 0x1) == 0) {
+			printk(KERN_ERR PFX "woah! AGP 3.0 device "
+				"not operating in AGP 3.0 mode found on the "
+				"secondary bus of an AGP 3.0 bridge operating "
+				"with AGP 3.0 electricals!\n");
+			ret = -ENODEV;
+			goto free_and_exit;
+		}
+
+		tmp = (mstatus >> 10) & 0x7;
+		cal_cycle = min(cal_cycle, tmp);
+
+		/* figure the lesser rate */
+		tmp = mstatus & 0x7;
+		if(tmp < rate)
+			rate = tmp;
+	}
+
+	/* Turn rate into something we can actually write out to AGPCMD */
+	switch(rate) {
+	case 0x1:
+	case 0x2:
+		break;
+	case 0x3:
+		rate = 0x2;
+		break;
+	default:
+		printk(KERN_ERR PFX "woah! Bogus AGP rate (%d) "
+			"value found advertised behind an AGP 3.0 bridge!\n", rate);
+		ret = -ENODEV;
+		goto free_and_exit;
+	}
+
+	/*
+	 * Call functions to divide target resources amongst the AGP 3.0
+	 * masters.  This process is dramatically different depending on
+	 * whether isochronous transfers are supported.
+	 */
+	if(isoch != 0) {
+		if((ret = agp_3_0_isochronous_node_enable(dev_list, ndevs)) != 0)
+			goto free_and_exit;
+	} else {
+		if((ret = agp_3_0_nonisochronous_node_enable(dev_list, ndevs)) != 0)
+			goto free_and_exit;
+	}
+
+	/*
+	 * Set the calculated minimum supported cal_cycle and minimum
+	 * supported transfer rate in the target's AGPCMD register.
+	 * Also set the AGP_ENABLE bit, effectively 'turning on' the
+	 * target (this has to be done _before_ turning on the masters).
+	 */
+	pci_read_config_dword(td, agp_bridge.capndx + 0x08, &tcmd);
+
+	tcmd &= ~(0x7 << 10);
+	tcmd &= ~0x7;
+
+	tcmd |= cal_cycle << 10;
+	tcmd |= 0x1 << 8;
+	tcmd |= rate;
+
+	pci_write_config_dword(td, agp_bridge.capndx + 0x08, tcmd);
+
+	/*
+	 * Set the target's advertised arqsz value, the minimum supported
+	 * transfer rate, and the AGP_ENABLE bit in each master's AGPCMD
+	 * register.
+	 */
+	list_for_each(pos, head) {
+		cur = list_entry(pos, struct agp_3_0_dev, list);
+		dev = cur->dev;
+
+		mcapndx = cur->capndx;
+
+		pci_read_config_dword(dev, cur->capndx + 0x08, &mcmd);
+
+		mcmd &= ~(0x7 << 13);
+		mcmd &= ~0x7;
+
+		mcmd |= arqsz << 13;
+		mcmd |= 0x1 << 8;
+		mcmd |= rate;
+
+		pci_write_config_dword(dev, cur->capndx + 0x08, mcmd);
+	}
+
+free_and_exit:
+	/* Be sure to free the dev_list */
+	for(pos = head->next; pos != head;) {
+		cur = list_entry(pos, struct agp_3_0_dev, list);
+
+		pos = pos->next;
+		kfree(cur);
+	}
+	kfree(dev_list);
+
+get_out:
+	return ret;
+}
+
+/*
+ * Entry point to AGP 3.0 host bridge init.  Check to see if we
+ * have an AGP 3.0 device operating in 3.0 mode.  Call
+ * agp_3_0_node_enable or agp_generic_agp_enable if we don't
+ * (AGP 3.0 devices are required to operate as AGP 2.0 devices
+ * when not using 3.0 electricals).
+ */
+void agp_generic_agp_3_0_enable(u32 mode)
+{
+	u32 ncapid, major, minor, agp_3_0;
+
+	pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx, &ncapid);
+
+	major = (ncapid >> 20) & 0xf;
+	minor = (ncapid >> 16) & 0xf;
+
+	printk(KERN_INFO PFX "Found an AGP %d.%d compliant device.\n", major, minor);
+
+	if(major >= 3) {
+		pci_read_config_dword(agp_bridge.dev, agp_bridge.capndx + 0x4, &agp_3_0);
+		/*
+		 * Check to see if we are operating in 3.0 mode
+		 */
+		if((agp_3_0 >> 3) & 0x1)
+			agp_3_0_node_enable(mode, minor);
+	}
+}
+#endif /* CONFIG_AGP3 */
+
 #ifdef CONFIG_AGP_I810
 static aper_size_info_fixed intel_i810_sizes[] =
 {
@@ -2100,7 +2639,6 @@
 			return values[i].size;
 		}
 	}
-
 	return 0;
 }
@@ -2164,11 +2702,143 @@
 
 static gatt_mask via_generic_masks[] =
 {
-	{0x00000000, 0}
+	{.mask = 0x00000000, .type = 0}
 };
 
+/* VIA chipset specific AGP 3.0 routines */
+#ifdef CONFIG_AGP3
+static int via_fetch_size_agp3(void)
+{
+	int i;
+	u16 temp;
+	aper_size_info_16 *values;
+
+	values = A_SIZE_16(agp_bridge.aperture_sizes);
+	pci_read_config_word(agp_bridge.dev, VIA_AGP3_APSIZE, &temp);
+	temp &= 0xfff;
+
+	for (i = 0; i < agp_bridge.num_aperture_sizes; i++) {
+		if (temp == values[i].size_value) {
+			agp_bridge.previous_size =
+				agp_bridge.current_size = (void *) (values + i);
+			agp_bridge.aperture_size_idx = i;
+			return values[i].size;
+		}
+	}
+	return 0;
+}
+
+static int via_configure_agp3(void)
+{
+	u32 temp;
+	aper_size_info_16 *current_size;
+
+	current_size = A_SIZE_16(agp_bridge.current_size);
+
+	/* address to map to */
+	pci_read_config_dword(agp_bridge.dev, VIA_APBASE, &temp);
+	agp_bridge.gart_bus_addr = (temp & PCI_BASE_ADDRESS_MEM_MASK);
+
+	/* attbase - aperture GATT base */
+	pci_write_config_dword(agp_bridge.dev, VIA_AGP3_ATTBASE,
+		agp_bridge.gatt_bus_addr & 0xfffff000);
+
+	return 0;
+}
+
+static void via_cleanup_agp3(void)
+{
+	aper_size_info_16 *previous_size;
+
+	previous_size = A_SIZE_16(agp_bridge.previous_size);
+	pci_write_config_byte(agp_bridge.dev, VIA_APSIZE, previous_size->size_value);
+}
+
+static void via_tlbflush_agp3(agp_memory * mem)
+{
+	u32 temp;
+
+	pci_read_config_dword(agp_bridge.dev, VIA_AGP3_GARTCTRL, &temp);
+	pci_write_config_dword(agp_bridge.dev, VIA_AGP3_GARTCTRL, temp & ~(1<<7));
+	pci_write_config_dword(agp_bridge.dev, VIA_AGP3_GARTCTRL, temp);
+}
+
+static aper_size_info_16 via_generic_agp3_sizes[11] =
+{
+	{ 4,    1024,   0, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2|1<<1|1<<0 },
+	{ 8,    2048,   1, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2|1<<1},
+	{ 16,   4096,   2, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3|1<<2},
+	{ 32,   8192,   3, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4|1<<3},
+	{ 64,   16384,  4, 1<<11|1<<10|1<<9|1<<8|1<<5|1<<4},
+	{ 128,  32768,  5, 1<<11|1<<10|1<<9|1<<8|1<<5},
+	{ 256,  65536,  6, 1<<11|1<<10|1<<9|1<<8},
+	{ 512,  131072, 7, 1<<11|1<<10|1<<9},
+	{ 1024, 262144, 8, 1<<11|1<<10},
+	{ 2048, 524288, 9, 1<<11}	/* 2GB <- Max supported */
+};
+
+static int __init via_generic_agp3_setup (struct pci_dev *pdev)
+{
+	agp_bridge.dev = pdev;
+	agp_bridge.type = VIA_GENERIC;
+	agp_bridge.masks = via_generic_masks;
+	agp_bridge.aperture_sizes = (void *) via_generic_agp3_sizes;
+	agp_bridge.size_type = U16_APER_SIZE;
+	agp_bridge.num_aperture_sizes = 10;
+	agp_bridge.dev_private_data = NULL;
+	agp_bridge.needs_scratch_page = FALSE;
+	agp_bridge.agp_enable = agp_generic_agp_3_0_enable;
+	agp_bridge.configure = via_configure_agp3;
+	agp_bridge.fetch_size = via_fetch_size_agp3;
+	agp_bridge.cleanup = via_cleanup_agp3;
+	agp_bridge.tlb_flush = via_tlbflush_agp3;
+	agp_bridge.mask_memory = via_mask_memory;
+	agp_bridge.cache_flush = global_cache_flush;
+	agp_bridge.create_gatt_table = agp_generic_create_gatt_table;
+	agp_bridge.free_gatt_table = agp_generic_free_gatt_table;
+	agp_bridge.insert_memory = agp_generic_insert_memory;
+	agp_bridge.remove_memory = agp_generic_remove_memory;
+	agp_bridge.alloc_by_type = agp_generic_alloc_by_type;
+	agp_bridge.free_by_type = agp_generic_free_by_type;
+	agp_bridge.agp_alloc_page = agp_generic_alloc_page;
+	agp_bridge.agp_destroy_page = agp_generic_destroy_page;
+	agp_bridge.suspend = agp_generic_suspend;
+	agp_bridge.resume = agp_generic_resume;
+	agp_bridge.cant_use_aperture = 0;
+	return 0;
+}
+#else
+
+static int __init via_generic_agp3_setup (struct pci_dev *pdev)
+{
+	printk (KERN_INFO PFX "Bridge in AGP3 mode, but CONFIG_AGP3=n\n");
+	return -ENODEV;
+}
+
+#endif /* CONFIG_AGP3 */
+
+
 static int __init via_generic_setup (struct pci_dev *pdev)
 {
+#ifdef CONFIG_AGP3
+	/* Garg, there are KT400s with KT266 IDs. */
+	if (pdev->device == PCI_DEVICE_ID_VIA_8367_0) {
+
+		/* Is there a KT400 subsystem? */
+		if (pdev->subsystem_device==PCI_DEVICE_ID_VIA_8377_0) {
+			u8 reg;
+
+			printk (KERN_INFO PFX "Found KT400 in disguise as a KT266.\n");
+
+			/* Check AGP compatibility mode. */
+			pci_read_config_byte(pdev, VIA_AGPSEL, &reg);
+			if ((reg & (1<<1))==0)
+				return via_generic_agp3_setup(pdev);
+
+			/* It's in 2.0 mode, drop through. */
+		}
+	}
+#endif
+
 	agp_bridge.masks = via_generic_masks;
 	agp_bridge.aperture_sizes = (void *) via_generic_sizes;
 	agp_bridge.size_type = U8_APER_SIZE;
@@ -2193,10 +2863,25 @@
 	agp_bridge.suspend = agp_generic_suspend;
 	agp_bridge.resume = agp_generic_resume;
 	agp_bridge.cant_use_aperture = 0;
-
 	return 0;
-
-	(void) pdev; /* unused */
+}
+
+
+/* The KT400 does magick to make the AGP bridge compliant with the same
+ * standards version as the graphics card. */
+static int __init via_kt400_setup(struct pci_dev *pdev)
+{
+	u8 reg;
+	pci_read_config_byte(pdev, VIA_AGPSEL, &reg);
+	/* Check AGP 2.0 compatibility mode. */
+	if ((reg & (1<<1))==0) {
+		printk (KERN_INFO PFX "Setting up AGP 3.0\n");
+		return via_generic_agp3_setup(pdev);
+	}
+	else {
+		printk (KERN_INFO PFX "Setting up AGP 2.0 for compatibility.\n");
+		return via_generic_setup(pdev);
+	}
 }
 
 #endif /* CONFIG_AGP_VIA */
@@ -2450,8 +3135,8 @@
 		return retval;
 	}
 
-	agp_bridge.gatt_table_real = page_dir.real;
-	agp_bridge.gatt_table = page_dir.remapped;
+	agp_bridge.gatt_table_real = (u32 *) page_dir.real;
+	agp_bridge.gatt_table = (u32 *) page_dir.remapped;
 	agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);
 
 	/* Get the address for the gart region.
@@ -2477,8 +3162,8 @@
 {
 	amd_page_map page_dir;
 
-	page_dir.real = agp_bridge.gatt_table_real;
-	page_dir.remapped = agp_bridge.gatt_table;
+	page_dir.real = (unsigned long *) agp_bridge.gatt_table_real;
+	page_dir.remapped = (unsigned long *) agp_bridge.gatt_table;
 
 	amd_free_gatt_pages();
 	amd_free_page_map(&page_dir);
@@ -3559,8 +4244,8 @@
 		return retval;
 	}
 
-	agp_bridge.gatt_table_real = page_dir.real;
-	agp_bridge.gatt_table = page_dir.remapped;
+	agp_bridge.gatt_table_real = (u32 *) page_dir.real;
+	agp_bridge.gatt_table = (u32 *) page_dir.remapped;
 	agp_bridge.gatt_bus_addr = virt_to_bus(page_dir.real);
 
 	/* Get the address for the gart region.
@@ -3588,8 +4273,8 @@
 {
 	serverworks_page_map page_dir;
 
-	page_dir.real = agp_bridge.gatt_table_real;
-	page_dir.remapped = agp_bridge.gatt_table;
+	page_dir.real = (unsigned long *) agp_bridge.gatt_table_real;
+	page_dir.remapped = (unsigned long *) agp_bridge.gatt_table;
 
 	serverworks_free_gatt_pages();
 	serverworks_free_page_map(&page_dir);
@@ -4687,7 +5372,7 @@
 		VIA_APOLLO_KT400,
 		"Via",
 		"Apollo Pro KT400",
-		via_generic_setup },
+		via_kt400_setup },
 	{ PCI_DEVICE_ID_VIA_P4X333,
 		PCI_VENDOR_ID_VIA,
 		VIA_APOLLO_P4X400,