# This is a BitKeeper generated diff -Nru style patch. # # ChangeSet # 2005/01/31 01:36:12-05:00 len.brown@intel.com # Merge intel.com:/home/lenb/src/24-stable-dev # into intel.com:/home/lenb/src/24-latest-dev # # arch/i386/kernel/pci-irq.c # 2005/01/31 01:36:10-05:00 len.brown@intel.com +0 -0 # Auto merged # # ChangeSet # 2005/01/31 00:38:19-05:00 len.brown@intel.com # Merge intel.com:/home/lenb/bk/linux-2.4.29 # into intel.com:/home/lenb/src/24-stable-dev # # arch/i386/kernel/pci-irq.c # 2005/01/31 00:38:18-05:00 len.brown@intel.com +0 -0 # Auto merged # # ChangeSet # 2005/01/31 00:13:49-05:00 len.brown@intel.com # Merge intel.com:/home/lenb/src/24-stable-dev # into intel.com:/home/lenb/src/24-latest-dev # # arch/i386/kernel/pci-irq.c # 2005/01/31 00:13:48-05:00 len.brown@intel.com +0 -0 # Auto merged # # ChangeSet # 2005/01/26 03:49:55-05:00 len.brown@intel.com # [ACPI] via interrupt quirk fix from 2.6 # http://bugzilla.kernel.org/show_bug.cgi?id=3319 # # Signed-off-by: David Shaohua Li # Signed-off-by: Len Brown vm_vfs_scan_ratio: # > ------------------ # > is what proportion of the VFS queues we will scan in one go. # > A value of 6 for vm_vfs_scan_ratio implies that 1/6th of the # > unused-inode, dentry and dquot caches will be freed during a # > normal aging round. # > Big fileservers (NFS, SMB etc.) probably want to set this # > value to 3 or 2. # > # > The default value is 6. # > ============================================================= # # Thanks for the info - but doesn't increasing the value of # vm_vfs_scan_ratio mean that less of the caches will be freed? 
# # Doing a few tests (on another test file system with 2 million or so # files and 1Gb of memory) running 'find $disk -type f', with # vm_vfs_scan_ratio set to 6 (or 10), the first two column values for # xfs_inode, linvfs_icache and dentry_cache in /proc/slabinfo reach about # 900000 and stay around that value, but setting vm_vfs_scan_ratio to 1, # then each value still reaches 900000, but then falls to a few thousand # and increases up to 900000 and then drops away again and repeats. # # This still happens when I cat many large files (100Mb) to /dev/null at # the same time as running the find i.e. the inode caches can still reach # 90% of the memory before being reclaimed (with vm_vfs_scan_ratio set to 1). # # If I stop the find process when the inode caches reach about 90% of the # memory, and then start cat'ing the large files, it appears the inode # caches are never reclaimed (or longer than it takes to cat 100Gb of data # to /dev/null) - is this expected behaviour? # # It seems the inode cache has priority over cached file data. # # What triggers the 'normal ageing round'? Is it possible to trigger this # earlier (at a lower memory usage), or give a higher priority to cached data? # # From: Andrew Morton # It does. If the machine is full of unmapped clean pagecache pages the # kernel won't even try to reclaim inodes. 
This should help a bit: # diff -Nru a/arch/i386/kernel/pci-irq.c b/arch/i386/kernel/pci-irq.c --- a/arch/i386/kernel/pci-irq.c 2005-01-31 03:29:46 -05:00 +++ b/arch/i386/kernel/pci-irq.c 2005-01-31 03:29:46 -05:00 @@ -1120,7 +1120,7 @@ void pcibios_enable_irq(struct pci_dev *dev) { u8 pin; - extern int interrupt_line_quirk; + extern int via_interrupt_line_quirk; struct pci_dev *temp_dev; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); @@ -1173,7 +1173,7 @@ } /* VIA bridges use interrupt line for apic/pci steering across the V-Link */ - else if (interrupt_line_quirk) + else if (via_interrupt_line_quirk) pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq & 15); } diff -Nru a/arch/x86_64/kernel/pci-irq.c b/arch/x86_64/kernel/pci-irq.c --- a/arch/x86_64/kernel/pci-irq.c 2005-01-31 03:29:46 -05:00 +++ b/arch/x86_64/kernel/pci-irq.c 2005-01-31 03:29:46 -05:00 @@ -742,7 +742,7 @@ void pcibios_enable_irq(struct pci_dev *dev) { u8 pin; - extern int interrupt_line_quirk; + extern int via_interrupt_line_quirk; pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) { @@ -762,6 +762,6 @@ } /* VIA bridges use interrupt line for apic/pci steering across the V-Link */ - else if (interrupt_line_quirk) + else if (via_interrupt_line_quirk) pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); } diff -Nru a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c --- a/drivers/acpi/pci_irq.c 2005-01-31 03:29:46 -05:00 +++ b/drivers/acpi/pci_irq.c 2005-01-31 03:29:46 -05:00 @@ -335,6 +335,7 @@ { int irq = 0; u8 pin = 0; + extern int via_interrupt_line_quirk; ACPI_FUNCTION_TRACE("acpi_pci_irq_enable"); @@ -382,6 +383,9 @@ return_VALUE(0); } } + + if (via_interrupt_line_quirk) + pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq & 15); dev->irq = irq; diff -Nru a/drivers/pci/quirks.c b/drivers/pci/quirks.c --- a/drivers/pci/quirks.c 2005-01-31 03:29:46 -05:00 +++ b/drivers/pci/quirks.c 2005-01-31 03:29:46 -05:00 @@ -368,9 +368,6 @@ * 
non-x86 architectures (yes Via exists on PPC among other places), * we must mask the PCI_INTERRUPT_LINE value versus 0xf to get * interrupts delivered properly. - * - * TODO: When we have device-specific interrupt routers, - * quirk_via_irqpic will go away from quirks. */ /* @@ -393,22 +390,6 @@ d->irq = irq; } -static void __init quirk_via_irqpic(struct pci_dev *dev) -{ - u8 irq, new_irq = dev->irq & 0xf; - - pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq); - - if (new_irq != irq) { - printk(KERN_INFO "PCI: Via IRQ fixup for %s, from %d to %d\n", - dev->slot_name, irq, new_irq); - - udelay(15); - pci_write_config_byte(dev, PCI_INTERRUPT_LINE, new_irq); - } -} - - /* * PIIX3 USB: We have to disable USB interrupts that are * hardwired to PIRQD# and may be shared with an @@ -639,12 +620,14 @@ * VIA northbridges care about PCI_INTERRUPT_LINE */ -int interrupt_line_quirk; +int via_interrupt_line_quirk; static void __init quirk_via_bridge(struct pci_dev *pdev) { - if(pdev->devfn == 0) - interrupt_line_quirk = 1; + if(pdev->devfn == 0) { + printk(KERN_INFO "PCI: Via IRQ fixup\n"); + via_interrupt_line_quirk = 1; + } } /* @@ -773,9 +756,6 @@ #endif { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_3, quirk_via_acpi }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_via_acpi }, - { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C586_2, quirk_via_irqpic }, - { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_5, quirk_via_irqpic }, - { PCI_FIXUP_FINAL, PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_6, quirk_via_irqpic }, { PCI_FIXUP_FINAL, PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic }, { PCI_FIXUP_FINAL, PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C, quirk_amd_ordering },