diff -urN linux-2.4.26-pre6/Documentation/DocBook/Makefile linux-2.4.26-pre6-ata14/Documentation/DocBook/Makefile --- linux-2.4.26-pre6/Documentation/DocBook/Makefile Tue Jan 7 15:50:24 2003 +++ linux-2.4.26-pre6-ata14/Documentation/DocBook/Makefile Thu Mar 25 22:45:24 2004 @@ -2,7 +2,7 @@ kernel-api.sgml parportbook.sgml kernel-hacking.sgml \ kernel-locking.sgml via-audio.sgml mousedrivers.sgml sis900.sgml \ deviceiobook.sgml procfs-guide.sgml tulip-user.sgml \ - journal-api.sgml + journal-api.sgml libata.sgml PS := $(patsubst %.sgml, %.ps, $(BOOKS)) PDF := $(patsubst %.sgml, %.pdf, $(BOOKS)) @@ -78,6 +78,18 @@ mcabook.sgml: mcabook.tmpl $(TOPDIR)/arch/i386/kernel/mca.c $(TOPDIR)/scripts/docgen $(TOPDIR)/arch/i386/kernel/mca.c \ mcabook.sgml + +libata.sgml: libata.tmpl $(TOPDIR)/drivers/scsi/libata-core.c \ + $(TOPDIR)/drivers/scsi/libata-scsi.c \ + $(TOPDIR)/drivers/scsi/sata_sil.c \ + $(TOPDIR)/drivers/scsi/ata_piix.c \ + $(TOPDIR)/drivers/scsi/sata_via.c + $(TOPDIR)/scripts/docgen $(TOPDIR)/drivers/scsi/libata-core.c \ + $(TOPDIR)/drivers/scsi/libata-scsi.c \ + $(TOPDIR)/drivers/scsi/sata_sil.c \ + $(TOPDIR)/drivers/scsi/ata_piix.c \ + $(TOPDIR)/drivers/scsi/sata_via.c \ + < libata.tmpl > libata.sgml videobook.sgml: videobook.tmpl $(TOPDIR)/drivers/media/video/videodev.c $(TOPDIR)/scripts/docgen $(TOPDIR)/drivers/media/video/videodev.c \ diff -urN linux-2.4.26-pre6/Documentation/DocBook/libata.tmpl linux-2.4.26-pre6-ata14/Documentation/DocBook/libata.tmpl --- linux-2.4.26-pre6/Documentation/DocBook/libata.tmpl Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/Documentation/DocBook/libata.tmpl Thu Mar 25 22:45:24 2004 @@ -0,0 +1,91 @@ + + + + + libATA Developer's Guide + + + + Jeff + Garzik + + + + + 2003 + Jeff Garzik + + + + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + + + + + + + + Thanks + + The bulk of the ATA knowledge comes thanks to long conversations with + Andre Hedrick (www.linux-ide.org). + + + Thanks to Alan Cox for pointing out similarities + between SATA and SCSI, and in general for motivation to hack on + libata. + + + libata's device detection + method, ata_pio_devchk, and in general all the early probing was + based on extensive study of Hale Landis's probe/reset code in his + ATADRVR driver (www.ata-atapi.com). 
+ + + + + libata Library +!Edrivers/scsi/libata-core.c +!Edrivers/scsi/libata-scsi.c + + + + libata Internals +!Idrivers/scsi/libata-core.c +!Idrivers/scsi/libata-scsi.c + + + + ata_piix Internals +!Idrivers/scsi/ata_piix.c + + + + ata_sil Internals +!Idrivers/scsi/sata_sil.c + + + + ata_via Internals +!Idrivers/scsi/sata_via.c + + + diff -urN linux-2.4.26-pre6/drivers/ide/pci/piix.c linux-2.4.26-pre6-ata14/drivers/ide/pci/piix.c --- linux-2.4.26-pre6/drivers/ide/pci/piix.c Sun Mar 21 17:23:17 2004 +++ linux-2.4.26-pre6-ata14/drivers/ide/pci/piix.c Thu Mar 25 22:45:24 2004 @@ -880,7 +880,9 @@ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_11,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15}, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801E_11, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16}, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_10,PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17}, +#ifndef CONFIG_SCSI_SATA { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 18}, +#endif /* !CONFIG_SCSI_SATA */ { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 19}, { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 20}, { 0, }, diff -urN linux-2.4.26-pre6/drivers/pci/quirks.c linux-2.4.26-pre6-ata14/drivers/pci/quirks.c --- linux-2.4.26-pre6/drivers/pci/quirks.c Fri Oct 31 10:44:20 2003 +++ linux-2.4.26-pre6-ata14/drivers/pci/quirks.c Thu Mar 25 22:45:24 2004 @@ -719,6 +719,62 @@ } } +#ifdef CONFIG_SCSI_SATA +static void __init quirk_intel_ide_combined(struct pci_dev *pdev) +{ + u8 prog, comb, tmp; + + /* + * Narrow down to Intel SATA PCI devices. + */ + switch (pdev->device) { + /* PCI ids taken from drivers/scsi/ata_piix.c */ + case 0x24d1: + case 0x24df: + case 0x25a3: + case 0x25b0: + case 0x2651: + case 0x2652: + break; + default: + /* we do not handle this PCI device */ + return; + } + + /* + * Read combined mode register. + */ + pci_read_config_byte(pdev, 0x90, &tmp); /* combined mode reg */ + tmp &= 0x6; /* interesting bits 2:1, PATA primary/secondary */ + if (tmp == 0x4) /* bits 10x */ + comb = (1 << 0); /* SATA port 0, PATA port 1 */ + else if (tmp == 0x6) /* bits 11x */ + comb = (1 << 2); /* PATA port 0, SATA port 1 */ + else + return; /* not in combined mode */ + + /* + * Read programming interface register. + * (Tells us if it's legacy or native mode) + */ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog); + + /* if SATA port is in native mode, we're ok. */ + if (prog & comb) + return; + + /* SATA port is in legacy mode. Reserve port so that + * IDE driver does not attempt to use it. If request_region + * fails, it will be obvious at boot time, so we don't bother + * checking return values. + */ + if (comb == (1 << 0)) + request_region(0x1f0, 8, "libata"); /* port 0 */ + else + request_region(0x170, 8, "libata"); /* port 1 */ +} +#endif /* CONFIG_SCSI_SATA */ + /* * The main table of quirks. */ @@ -807,6 +863,14 @@ { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_7205_0, asus_hides_smbus_hostbridge }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0, asus_hides_smbus_lpc }, { PCI_FIXUP_HEADER, PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_0, asus_hides_smbus_lpc }, + +#ifdef CONFIG_SCSI_SATA + /* Fixup BIOSes that configure Parallel ATA (PATA / IDE) and + * Serial ATA (SATA) into the same PCI ID. 
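+ * quirk_intel_ide_combined() (defined above) reads the ICH5 port
+ * mapping register (0x90); when the controller is in combined mode
+ * and the SATA port is still in legacy mode, the matching legacy I/O
+ * range (0x1f0 or 0x170) is reserved so the IDE driver does not
+ * claim it.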
+ */ + { PCI_FIXUP_FINAL, PCI_VENDOR_ID_INTEL, PCI_ANY_ID, + quirk_intel_ide_combined }, +#endif /* CONFIG_SCSI_SATA */ { 0 } }; diff -urN linux-2.4.26-pre6/drivers/scsi/Config.in linux-2.4.26-pre6-ata14/drivers/scsi/Config.in --- linux-2.4.26-pre6/drivers/scsi/Config.in Fri Jan 16 11:26:16 2004 +++ linux-2.4.26-pre6-ata14/drivers/scsi/Config.in Thu Mar 25 22:45:38 2004 @@ -68,6 +68,14 @@ dep_tristate 'AM53/79C974 PCI SCSI support' CONFIG_SCSI_AM53C974 $CONFIG_SCSI $CONFIG_PCI dep_tristate 'AMI MegaRAID support' CONFIG_SCSI_MEGARAID $CONFIG_SCSI dep_tristate 'AMI MegaRAID2 support' CONFIG_SCSI_MEGARAID2 $CONFIG_SCSI +dep_mbool 'Serial ATA (SATA) support' CONFIG_SCSI_SATA $CONFIG_SCSI +dep_tristate ' ServerWorks Frodo / Apple K2 SATA support (EXPERIMENTAL)' CONFIG_SCSI_SATA_SVW $CONFIG_SCSI_SATA $CONFIG_PCI $CONFIG_EXPERIMENTAL +dep_tristate ' Intel PIIX/ICH SATA support' CONFIG_SCSI_ATA_PIIX $CONFIG_SCSI_SATA $CONFIG_PCI +dep_tristate ' Promise SATA support (EXPERIMENTAL)' CONFIG_SCSI_SATA_PROMISE $CONFIG_SCSI_SATA $CONFIG_PCI $CONFIG_EXPERIMENTAL +dep_tristate ' Silicon Image SATA support (EXPERIMENTAL)' CONFIG_SCSI_SATA_SIL $CONFIG_SCSI_SATA $CONFIG_PCI $CONFIG_EXPERIMENTAL +dep_tristate ' SiS 964/180 SATA support (EXPERIMENTAL)' CONFIG_SCSI_SATA_SIS $CONFIG_SCSI_SATA $CONFIG_PCI $CONFIG_EXPERIMENTAL +dep_tristate ' VIA SATA support (EXPERIMENTAL)' CONFIG_SCSI_SATA_VIA $CONFIG_SCSI_SATA $CONFIG_PCI $CONFIG_EXPERIMENTAL +dep_tristate ' Vitesse VSC-7174 SATA support (EXPERIMENTAL)' CONFIG_SCSI_SATA_VITESSE $CONFIG_SCSI_SATA $CONFIG_PCI $CONFIG_EXPERIMENTAL dep_tristate 'BusLogic SCSI support' CONFIG_SCSI_BUSLOGIC $CONFIG_SCSI if [ "$CONFIG_SCSI_BUSLOGIC" != "n" ]; then diff -urN linux-2.4.26-pre6/drivers/scsi/Makefile linux-2.4.26-pre6-ata14/drivers/scsi/Makefile --- linux-2.4.26-pre6/drivers/scsi/Makefile Fri Jan 16 11:26:16 2004 +++ linux-2.4.26-pre6-ata14/drivers/scsi/Makefile Thu Mar 25 22:45:24 2004 @@ -21,7 +21,7 @@ O_TARGET := scsidrv.o -export-objs := scsi_syms.o 53c700.o +export-objs := scsi_syms.o 53c700.o libata-core.o mod-subdirs := pcmcia ../acorn/scsi @@ -130,6 +130,13 @@ obj-$(CONFIG_SCSI_CPQFCTS) += cpqfc.o obj-$(CONFIG_SCSI_LASI700) += lasi700.o 53c700.o obj-$(CONFIG_SCSI_NSP32) += nsp32.o +obj-$(CONFIG_SCSI_SATA_SVW) += libata.o sata_svw.o +obj-$(CONFIG_SCSI_ATA_PIIX) += libata.o ata_piix.o +obj-$(CONFIG_SCSI_SATA_PROMISE) += libata.o sata_promise.o +obj-$(CONFIG_SCSI_SATA_SIL) += libata.o sata_sil.o +obj-$(CONFIG_SCSI_SATA_VIA) += libata.o sata_via.o +obj-$(CONFIG_SCSI_SATA_VITESSE) += libata.o sata_vsc.o +obj-$(CONFIG_SCSI_SATA_SIS) += libata.o sata_sis.o subdir-$(CONFIG_ARCH_ACORN) += ../acorn/scsi obj-$(CONFIG_ARCH_ACORN) += ../acorn/scsi/acorn-scsi.o @@ -141,7 +148,7 @@ obj-$(CONFIG_CHR_DEV_SG) += sg.o list-multi := scsi_mod.o sd_mod.o sr_mod.o initio.o a100u2w.o cpqfc.o \ - zalon7xx_mod.o + zalon7xx_mod.o libata.o scsi_mod-objs := scsi.o hosts.o scsi_ioctl.o constants.o \ scsicam.o scsi_proc.o scsi_error.o \ scsi_obsolete.o scsi_queue.o scsi_lib.o \ @@ -154,6 +161,7 @@ zalon7xx_mod-objs := zalon7xx.o ncr53c8xx.o cpqfc-objs := cpqfcTSinit.o cpqfcTScontrol.o cpqfcTSi2c.o \ cpqfcTSworker.o cpqfcTStrigger.o +libata-objs := libata-core.o libata-scsi.o include $(TOPDIR)/Rules.make @@ -178,6 +186,9 @@ cpqfc.o: $(cpqfc-objs) $(LD) -r -o $@ $(cpqfc-objs) + +libata.o: $(libata-objs) + $(LD) -r -o $@ $(libata-objs) 53c8xx_d.h: 53c7,8xx.scr script_asm.pl ln -sf 53c7,8xx.scr fake8.c diff -urN linux-2.4.26-pre6/drivers/scsi/ata_piix.c 
linux-2.4.26-pre6-ata14/drivers/scsi/ata_piix.c --- linux-2.4.26-pre6/drivers/scsi/ata_piix.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/ata_piix.c Thu Mar 25 22:45:24 2004 @@ -0,0 +1,595 @@ +/* + + ata_piix.c - Intel PATA/SATA controllers + + + Copyright 2003-2004 Red Hat Inc + Copyright 2003-2004 Jeff Garzik + + + Copyright header from piix.c: + + Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer + Copyright (C) 1998-2000 Andre Hedrick + Copyright (C) 2003 Red Hat Inc + + May be copied or modified under the terms of the GNU General Public License + + */ + +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "ata_piix" +#define DRV_VERSION "1.02" + +enum { + PIIX_IOCFG = 0x54, /* IDE I/O configuration register */ + ICH5_PMR = 0x90, /* port mapping register */ + ICH5_PCS = 0x92, /* port control and status */ + + PIIX_FLAG_CHECKINTR = (1 << 29), /* make sure PCI INTx enabled */ + PIIX_FLAG_COMBINED = (1 << 30), /* combined mode possible */ + + /* combined mode. if set, PATA is channel 0. + * if clear, PATA is channel 1. + */ + PIIX_COMB_PATA_P0 = (1 << 1), + PIIX_COMB = (1 << 2), /* combined mode enabled? */ + + PIIX_PORT_PRESENT = (1 << 0), + PIIX_PORT_ENABLED = (1 << 4), + + PIIX_80C_PRI = (1 << 5) | (1 << 4), + PIIX_80C_SEC = (1 << 7) | (1 << 6), + + ich5_pata = 0, + ich5_sata = 1, + piix4_pata = 2, +}; + +static int piix_init_one (struct pci_dev *pdev, + const struct pci_device_id *ent); + +static void piix_pata_phy_reset(struct ata_port *ap); +static void piix_sata_phy_reset(struct ata_port *ap); +static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio); +static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma); + +static unsigned int in_module_init = 1; + +static struct pci_device_id piix_pci_tbl[] = { +#ifdef ATA_ENABLE_PATA + { 0x8086, 0x7111, PCI_ANY_ID, PCI_ANY_ID, 0, 0, piix4_pata }, + { 0x8086, 0x24db, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, + { 0x8086, 0x25a2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_pata }, +#endif + + /* NOTE: The following PCI ids must be kept in sync with the + * list in drivers/pci/quirks.c. + */ + + { 0x8086, 0x24d1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x24df, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x25a3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + { 0x8086, 0x25b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich5_sata }, + + /* ICH6 operates in two modes, "looks-like-ICH5" mode, + * and enhanced mode, with queueing and other fancy stuff. + * This is distinguished by PCI class code. 
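+ * Only the looks-like-ICH5 mode is handled here: the class value
+ * (PCI_CLASS_STORAGE_IDE << 8) and class_mask (0xffff00) in the two
+ * ICH6 entries below restrict the match to IDE-class functions.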
+ */ + { 0x8086, 0x2651, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich5_sata }, + { 0x8086, 0x2652, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_IDE << 8, 0xffff00, ich5_sata }, + + { } /* terminate list */ +}; + +static struct pci_driver piix_pci_driver = { + .name = DRV_NAME, + .id_table = piix_pci_tbl, + .probe = piix_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template piix_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = LIBATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .bios_param = ata_std_bios_param, +}; + +static struct ata_port_operations piix_pata_ops = { + .port_disable = ata_port_disable, + .set_piomode = piix_set_piomode, + .set_udmamode = piix_set_udmamode, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .check_status = ata_check_status_pio, + .exec_command = ata_exec_command_pio, + + .phy_reset = piix_pata_phy_reset, + + .bmdma_start = ata_bmdma_start_pio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + + .irq_handler = ata_interrupt, + + .port_start = ata_port_start, + .port_stop = ata_port_stop, +}; + +static struct ata_port_operations piix_sata_ops = { + .port_disable = ata_port_disable, + .set_piomode = piix_set_piomode, + .set_udmamode = piix_set_udmamode, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .check_status = ata_check_status_pio, + .exec_command = ata_exec_command_pio, + + .phy_reset = piix_sata_phy_reset, + + .bmdma_start = ata_bmdma_start_pio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + + .irq_handler = ata_interrupt, + + .port_start = ata_port_start, + .port_stop = ata_port_stop, +}; + +static struct ata_port_info piix_port_info[] = { + /* ich5_pata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | + PIIX_FLAG_CHECKINTR, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */ + .port_ops = &piix_pata_ops, + }, + + /* ich5_sata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | + PIIX_FLAG_COMBINED | PIIX_FLAG_CHECKINTR, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &piix_sata_ops, + }, + + /* piix4_pata */ + { + .sht = &piix_sht, + .host_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = ATA_UDMA_MASK_40C, /* FIXME: cbl det */ + .port_ops = &piix_pata_ops, + }, +}; + +static struct pci_bits piix_enable_bits[] = { + { 0x41U, 1U, 0x80UL, 0x80UL }, /* port 0 */ + { 0x43U, 1U, 0x80UL, 0x80UL }, /* port 1 */ +}; + +MODULE_AUTHOR("Andre Hedrick, Alan Cox, Andrzej Krzysztofowicz, Jeff Garzik"); +MODULE_DESCRIPTION("SCSI low-level driver for Intel PIIX/ICH ATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, piix_pci_tbl); + +/** + * piix_pata_cbl_detect - Probe host controller cable detect info + * @ap: Port for which cable detect info is desired + * + * Read 80c cable indicator from SATA PCI device's PCI config + * register. This register is normally set by firmware (BIOS). + * + * LOCKING: + * None (inherited from caller). 
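+ *
+ * If the controller has no 80c capability in its UDMA mask, or the
+ * BIOS did not set the 80-wire bit in PIIX_IOCFG for this channel,
+ * the cable is treated as 40-wire and ap->udma_mask is clamped to
+ * ATA_UDMA_MASK_40C.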
+ */ +static void piix_pata_cbl_detect(struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + u8 tmp, mask; + + /* no 80c support in host controller? */ + if ((ap->udma_mask & ~ATA_UDMA_MASK_40C) == 0) + goto cbl40; + + /* check BIOS cable detect results */ + mask = ap->port_no == 0 ? PIIX_80C_PRI : PIIX_80C_SEC; + pci_read_config_byte(pdev, PIIX_IOCFG, &tmp); + if ((tmp & mask) == 0) + goto cbl40; + + ap->cbl = ATA_CBL_PATA80; + return; + +cbl40: + ap->cbl = ATA_CBL_PATA40; + ap->udma_mask &= ATA_UDMA_MASK_40C; +} + +/** + * piix_pata_phy_reset - Probe specified port on PATA host controller + * @ap: Port to probe + * + * Probe PATA phy. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_pata_phy_reset(struct ata_port *ap) +{ + if (!pci_test_config_bits(ap->host_set->pdev, + &piix_enable_bits[ap->port_no])) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); + return; + } + + piix_pata_cbl_detect(ap); + + ata_port_probe(ap); + + ata_bus_reset(ap); +} + +/** + * piix_sata_probe - Probe PCI device for present SATA devices + * @pdev: PCI device to probe + * + * Reads SATA PCI device's PCI config register Port Configuration + * and Status (PCS) to determine port and device availability. + * + * LOCKING: + * None (inherited from caller). + * + * RETURNS: + * Non-zero if device detected, zero otherwise. + */ +static int piix_sata_probe (struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + int combined = (ap->flags & ATA_FLAG_SLAVE_POSS); + int orig_mask, mask, i; + u8 pcs; + + mask = (PIIX_PORT_PRESENT << ap->port_no) | + (PIIX_PORT_ENABLED << ap->port_no); + + pci_read_config_byte(pdev, ICH5_PCS, &pcs); + orig_mask = (int) pcs & 0xff; + + /* TODO: this is vaguely wrong for ICH6 combined mode, + * where only two of the four SATA ports are mapped + * onto a single ATA channel. It is also vaguely inaccurate + * for ICH5, which has only two ports. However, this is ok, + * as further device presence detection code will handle + * any false positives produced here. + */ + + for (i = 0; i < 4; i++) { + mask = (PIIX_PORT_PRESENT << i) | (PIIX_PORT_ENABLED << i); + + if ((orig_mask & mask) == mask) + if (combined || (i == ap->port_no)) + return 1; + } + + return 0; +} + +/** + * piix_sata_phy_reset - Probe specified port on SATA host controller + * @ap: Port to probe + * + * Probe SATA phy. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_sata_phy_reset(struct ata_port *ap) +{ + if (!pci_test_config_bits(ap->host_set->pdev, + &piix_enable_bits[ap->port_no])) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: port disabled. ignoring.\n", ap->id); + return; + } + + if (!piix_sata_probe(ap)) { + ata_port_disable(ap); + printk(KERN_INFO "ata%u: SATA port has no device.\n", ap->id); + return; + } + + ap->cbl = ATA_CBL_SATA; + + ata_port_probe(ap); + + ata_bus_reset(ap); +} + +/** + * piix_set_piomode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: um + * @pio: PIO mode, 0 - 4 + * + * Set PIO mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_set_piomode (struct ata_port *ap, struct ata_device *adev, + unsigned int pio) +{ + struct pci_dev *dev = ap->host_set->pdev; + unsigned int is_slave = (adev->flags & ATA_DFLAG_MASTER) ? 0 : 1; + unsigned int master_port= ap->port_no ? 
0x42 : 0x40; + unsigned int slave_port = 0x44; + u16 master_data; + u8 slave_data; + + static const /* ISP RTC */ + u8 timings[][2] = { { 0, 0 }, + { 0, 0 }, + { 1, 0 }, + { 2, 1 }, + { 2, 3 }, }; + + pci_read_config_word(dev, master_port, &master_data); + if (is_slave) { + master_data |= 0x4000; + /* enable PPE, IE and TIME */ + master_data |= 0x0070; + pci_read_config_byte(dev, slave_port, &slave_data); + slave_data &= (ap->port_no ? 0x0f : 0xf0); + slave_data |= + (timings[pio][0] << 2) | + (timings[pio][1] << (ap->port_no ? 4 : 0)); + } else { + master_data &= 0xccf8; + /* enable PPE, IE and TIME */ + master_data |= 0x0007; + master_data |= + (timings[pio][0] << 12) | + (timings[pio][1] << 8); + } + pci_write_config_word(dev, master_port, master_data); + if (is_slave) + pci_write_config_byte(dev, slave_port, slave_data); +} + +/** + * piix_set_udmamode - Initialize host controller PATA PIO timings + * @ap: Port whose timings we are configuring + * @adev: um + * @udma: udma mode, 0 - 6 + * + * Set UDMA mode for device, in host controller PCI config space. + * + * LOCKING: + * None (inherited from caller). + */ + +static void piix_set_udmamode (struct ata_port *ap, struct ata_device *adev, + unsigned int udma) +{ + struct pci_dev *dev = ap->host_set->pdev; + u8 maslave = ap->port_no ? 0x42 : 0x40; + u8 speed = udma; + unsigned int drive_dn = (ap->port_no ? 2 : 0) + adev->devno; + int a_speed = 3 << (drive_dn * 4); + int u_flag = 1 << drive_dn; + int v_flag = 0x01 << drive_dn; + int w_flag = 0x10 << drive_dn; + int u_speed = 0; + int sitre; + u16 reg4042, reg44, reg48, reg4a, reg54; + u8 reg55; + + pci_read_config_word(dev, maslave, ®4042); + DPRINTK("reg4042 = 0x%04x\n", reg4042); + sitre = (reg4042 & 0x4000) ? 1 : 0; + pci_read_config_word(dev, 0x44, ®44); + pci_read_config_word(dev, 0x48, ®48); + pci_read_config_word(dev, 0x4a, ®4a); + pci_read_config_word(dev, 0x54, ®54); + pci_read_config_byte(dev, 0x55, ®55); + + switch(speed) { + case XFER_UDMA_4: + case XFER_UDMA_2: u_speed = 2 << (drive_dn * 4); break; + case XFER_UDMA_6: + case XFER_UDMA_5: + case XFER_UDMA_3: + case XFER_UDMA_1: u_speed = 1 << (drive_dn * 4); break; + case XFER_UDMA_0: u_speed = 0 << (drive_dn * 4); break; + default: + BUG(); + return; + } + + if (!(reg48 & u_flag)) + pci_write_config_word(dev, 0x48, reg48|u_flag); + if (speed == XFER_UDMA_5) { + pci_write_config_byte(dev, 0x55, (u8) reg55|w_flag); + } else { + pci_write_config_byte(dev, 0x55, (u8) reg55 & ~w_flag); + } + if (!(reg4a & u_speed)) { + pci_write_config_word(dev, 0x4a, reg4a & ~a_speed); + pci_write_config_word(dev, 0x4a, reg4a|u_speed); + } + if (speed > XFER_UDMA_2) { + if (!(reg54 & v_flag)) { + pci_write_config_word(dev, 0x54, reg54|v_flag); + } + } else { + pci_write_config_word(dev, 0x54, reg54 & ~v_flag); + } +} + +/* move to PCI layer, integrate w/ MSI stuff */ +static void pci_enable_intx(struct pci_dev *pdev) +{ + u16 pci_command; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_command); + if (pci_command & PCI_COMMAND_INTX_DISABLE) { + pci_command &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, pci_command); + } +} + +/** + * piix_init_one - Register PIIX ATA PCI device with kernel services + * @pdev: PCI device to register + * @ent: Entry in piix_pci_tbl matching with @pdev + * + * Called from kernel PCI layer. We probe for combined mode (sigh), + * and then hand over control to libata, for it to do the rest. + * + * LOCKING: + * Inherited from PCI layer (may sleep). 
+ * + * RETURNS: + * Zero on success, or -ERRNO value. + */ + +static int piix_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_port_info *port_info[2]; + unsigned int combined = 0, n_ports = 1; + unsigned int pata_chan = 0, sata_chan = 0; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* no hotplugging support (FIXME) */ + if (!in_module_init) + return -ENODEV; + + port_info[0] = &piix_port_info[ent->driver_data]; + port_info[1] = NULL; + + if (port_info[0]->host_flags & PIIX_FLAG_COMBINED) { + u8 tmp; + pci_read_config_byte(pdev, ICH5_PMR, &tmp); + + if (tmp & PIIX_COMB) { + combined = 1; + if (tmp & PIIX_COMB_PATA_P0) + sata_chan = 1; + else + pata_chan = 1; + } + } + + /* On ICH5, some BIOSen disable the interrupt using the + * PCI_COMMAND_INTX_DISABLE bit added in PCI 2.3. + * On ICH6, this bit has the same effect, but only when + * MSI is disabled (and it is disabled, as we don't use + * message-signalled interrupts currently). + */ + if (port_info[0]->host_flags & PIIX_FLAG_CHECKINTR) + pci_enable_intx(pdev); + + if (combined) { + port_info[sata_chan] = &piix_port_info[ent->driver_data]; + port_info[sata_chan]->host_flags |= ATA_FLAG_SLAVE_POSS; + port_info[pata_chan] = &piix_port_info[ich5_pata]; + n_ports++; + + printk(KERN_WARNING DRV_NAME ": combined mode detected\n"); + } + + return ata_pci_init_one(pdev, port_info, n_ports); +} + +/** + * piix_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init piix_init(void) +{ + int rc; + + DPRINTK("pci_module_init\n"); + rc = pci_module_init(&piix_pci_driver); + if (rc) + return rc; + + in_module_init = 0; + + DPRINTK("scsi_register_host\n"); + rc = scsi_register_module(MODULE_SCSI_HA, &piix_sht); + if (rc) { + rc = -ENODEV; + goto err_out; + } + + DPRINTK("done\n"); + return 0; + +err_out: + pci_unregister_driver(&piix_pci_driver); + return rc; +} + +/** + * piix_exit - + * + * LOCKING: + * + */ + +static void __exit piix_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &piix_sht); + pci_unregister_driver(&piix_pci_driver); +} + +module_init(piix_init); +module_exit(piix_exit); + diff -urN linux-2.4.26-pre6/drivers/scsi/libata-core.c linux-2.4.26-pre6-ata14/drivers/scsi/libata-core.c --- linux-2.4.26-pre6/drivers/scsi/libata-core.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/libata-core.c Thu Mar 25 22:45:38 2004 @@ -0,0 +1,3498 @@ +/* + libata-core.c - helper library for ATA + + Copyright 2003-2004 Red Hat, Inc. All rights reserved. + Copyright 2003-2004 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. 
+ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include +#include +#include + +#include "libata.h" + +static void atapi_cdb_send(struct ata_port *ap); +static unsigned int ata_busy_sleep (struct ata_port *ap, + unsigned long tmout_pat, + unsigned long tmout); +static void __ata_dev_select (struct ata_port *ap, unsigned int device); +#if 0 /* to be used eventually */ +static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append); +#endif +static void ata_dma_complete(struct ata_port *ap, u8 host_stat, + unsigned int done_late); +static void ata_host_set_pio(struct ata_port *ap); +static void ata_host_set_udma(struct ata_port *ap); +static void ata_dev_set_pio(struct ata_port *ap, unsigned int device); +static void ata_dev_set_udma(struct ata_port *ap, unsigned int device); +static void ata_set_mode(struct ata_port *ap); + +static unsigned int ata_unique_id = 1; +static LIST_HEAD(ata_probe_list); +static spinlock_t ata_module_lock = SPIN_LOCK_UNLOCKED; + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("Library module for ATA devices"); +MODULE_LICENSE("GPL"); + +static const char * thr_state_name[] = { + "THR_UNKNOWN", + "THR_PORT_RESET", + "THR_AWAIT_DEATH", + "THR_PROBE_FAILED", + "THR_IDLE", + "THR_PROBE_SUCCESS", + "THR_PROBE_START", + "THR_PIO_POLL", + "THR_PIO_TMOUT", + "THR_PIO", + "THR_PIO_LAST", + "THR_PIO_LAST_POLL", + "THR_PIO_ERR", + "THR_PACKET", +}; + +/** + * ata_thr_state_name - convert thread state enum to string + * @thr_state: thread state to be converted to string + * + * Converts the specified thread state id to a constant C string. + * + * LOCKING: + * None. + * + * RETURNS: + * The THR_xxx-prefixed string naming the specified thread + * state id, or the string "". + */ + +static const char *ata_thr_state_name(unsigned int thr_state) +{ + if (thr_state < ARRAY_SIZE(thr_state_name)) + return thr_state_name[thr_state]; + return ""; +} + +/** + * msleep - sleep for a number of milliseconds + * @msecs: number of milliseconds to sleep + * + * Issues schedule_timeout call for the specified number + * of milliseconds. + * + * LOCKING: + * None. + */ + +static void msleep(unsigned long msecs) +{ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(msecs_to_jiffies(msecs)); +} + +/** + * ata_tf_load_pio - send taskfile registers to host controller + * @ioaddr: set of IO ports to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller using PIO. + * + * LOCKING: + * Inherited from caller. 
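+ *
+ * The device control register is written first (only if it changed),
+ * then the HOB registers for LBA48 taskfiles, then the standard
+ * taskfile registers, and finally the device register;
+ * ata_wait_idle() is called after the ctl write and again at the end.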
+ */ + +void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + outb(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + outb(tf->hob_feature, ioaddr->feature_addr); + outb(tf->hob_nsect, ioaddr->nsect_addr); + outb(tf->hob_lbal, ioaddr->lbal_addr); + outb(tf->hob_lbam, ioaddr->lbam_addr); + outb(tf->hob_lbah, ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + outb(tf->feature, ioaddr->feature_addr); + outb(tf->nsect, ioaddr->nsect_addr); + outb(tf->lbal, ioaddr->lbal_addr); + outb(tf->lbam, ioaddr->lbam_addr); + outb(tf->lbah, ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + outb(tf->device, ioaddr->device_addr); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * ata_tf_load_mmio - send taskfile registers to host controller + * @ioaddr: set of IO ports to which output is sent + * @tf: ATA taskfile register set + * + * Outputs ATA taskfile to standard ATA host controller using MMIO. + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + writeb(tf->ctl, ap->ioaddr.ctl_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + writeb(tf->hob_feature, (void *) ioaddr->feature_addr); + writeb(tf->hob_nsect, (void *) ioaddr->nsect_addr); + writeb(tf->hob_lbal, (void *) ioaddr->lbal_addr); + writeb(tf->hob_lbam, (void *) ioaddr->lbam_addr); + writeb(tf->hob_lbah, (void *) ioaddr->lbah_addr); + VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n", + tf->hob_feature, + tf->hob_nsect, + tf->hob_lbal, + tf->hob_lbam, + tf->hob_lbah); + } + + if (is_addr) { + writeb(tf->feature, (void *) ioaddr->feature_addr); + writeb(tf->nsect, (void *) ioaddr->nsect_addr); + writeb(tf->lbal, (void *) ioaddr->lbal_addr); + writeb(tf->lbam, (void *) ioaddr->lbam_addr); + writeb(tf->lbah, (void *) ioaddr->lbah_addr); + VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n", + tf->feature, + tf->nsect, + tf->lbal, + tf->lbam, + tf->lbah); + } + + if (tf->flags & ATA_TFLAG_DEVICE) { + writeb(tf->device, (void *) ioaddr->device_addr); + VPRINTK("device 0x%X\n", tf->device); + } + + ata_wait_idle(ap); +} + +/** + * ata_exec_command_pio - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues PIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. 
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + + outb(tf->command, ap->ioaddr.command_addr); + ata_pause(ap); +} + + +/** + * ata_exec_command_mmio - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues MMIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + + writeb(tf->command, (void *) ap->ioaddr.command_addr); + ata_pause(ap); +} + +/** + * ata_exec - issue ATA command to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues PIO write to ATA command register, with proper + * synchronization with interrupt handler / other threads. + * + * LOCKING: + * Obtains host_set lock. + */ + +static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf) +{ + unsigned long flags; + + DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command); + spin_lock_irqsave(&ap->host_set->lock, flags); + ap->ops->exec_command(ap, tf); + spin_unlock_irqrestore(&ap->host_set->lock, flags); +} + +/** + * ata_tf_to_host - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * via PIO, with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * Obtains host_set lock. + */ + +static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf) +{ + init_MUTEX_LOCKED(&ap->sem); + + ap->ops->tf_load(ap, tf); + + ata_exec(ap, tf); +} + +/** + * ata_tf_to_host_nolock - issue ATA taskfile to host controller + * @ap: port to which command is being issued + * @tf: ATA taskfile register set + * + * Issues ATA taskfile register set to ATA host controller, + * via PIO, with proper synchronization with interrupt handler and + * other threads. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf) +{ + init_MUTEX_LOCKED(&ap->sem); + + ap->ops->tf_load(ap, tf); + ap->ops->exec_command(ap, tf); +} + +/** + * ata_tf_read_pio - input device's ATA taskfile shadow registers + * @ioaddr: set of IO ports from which input is read + * @tf: ATA taskfile register set for storing input + * + * Reads ATA taskfile registers for currently-selected device + * into @tf via PIO. + * + * LOCKING: + * Inherited from caller. 
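+ *
+ * For LBA48 taskfiles, ATA_HOB is set in the device control register
+ * before the high-order-byte shadow registers are read back.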
+ */ + +void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->nsect = inb(ioaddr->nsect_addr); + tf->lbal = inb(ioaddr->lbal_addr); + tf->lbam = inb(ioaddr->lbam_addr); + tf->lbah = inb(ioaddr->lbah_addr); + tf->device = inb(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr); + tf->hob_feature = inb(ioaddr->error_addr); + tf->hob_nsect = inb(ioaddr->nsect_addr); + tf->hob_lbal = inb(ioaddr->lbal_addr); + tf->hob_lbam = inb(ioaddr->lbam_addr); + tf->hob_lbah = inb(ioaddr->lbah_addr); + } +} + +/** + * ata_tf_read_mmio - input device's ATA taskfile shadow registers + * @ioaddr: set of IO ports from which input is read + * @tf: ATA taskfile register set for storing input + * + * Reads ATA taskfile registers for currently-selected device + * into @tf via MMIO. + * + * LOCKING: + * Inherited from caller. + */ + +void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + tf->nsect = readb((void *)ioaddr->nsect_addr); + tf->lbal = readb((void *)ioaddr->lbal_addr); + tf->lbam = readb((void *)ioaddr->lbam_addr); + tf->lbah = readb((void *)ioaddr->lbah_addr); + tf->device = readb((void *)ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + writeb(tf->ctl | ATA_HOB, ap->ioaddr.ctl_addr); + tf->hob_feature = readb((void *)ioaddr->error_addr); + tf->hob_nsect = readb((void *)ioaddr->nsect_addr); + tf->hob_lbal = readb((void *)ioaddr->lbal_addr); + tf->hob_lbam = readb((void *)ioaddr->lbam_addr); + tf->hob_lbah = readb((void *)ioaddr->lbah_addr); + } +} + +/** + * ata_check_status_pio - Read device status reg & clear interrupt + * @ap: port where the device is + * + * Reads ATA taskfile status register for currently-selected device + * via PIO and return it's value. This also clears pending interrupts + * from this device + * + * LOCKING: + * Inherited from caller. + */ +u8 ata_check_status_pio(struct ata_port *ap) +{ + return inb(ap->ioaddr.status_addr); +} + +/** + * ata_check_status_mmio - Read device status reg & clear interrupt + * @ap: port where the device is + * + * Reads ATA taskfile status register for currently-selected device + * via MMIO and return it's value. This also clears pending interrupts + * from this device + * + * LOCKING: + * Inherited from caller. 
+ */ +u8 ata_check_status_mmio(struct ata_port *ap) +{ + return readb((void *) ap->ioaddr.status_addr); +} + +static int ata_prot_to_cmd(int protocol, int lba48) +{ + int rcmd = 0, wcmd = 0; + + switch (protocol) { + case ATA_PROT_PIO: + if (lba48) { + rcmd = ATA_CMD_PIO_READ_EXT; + wcmd = ATA_CMD_PIO_WRITE_EXT; + } else { + rcmd = ATA_CMD_PIO_READ; + wcmd = ATA_CMD_PIO_WRITE; + } + break; + + case ATA_PROT_DMA: + if (lba48) { + rcmd = ATA_CMD_READ_EXT; + wcmd = ATA_CMD_WRITE_EXT; + } else { + rcmd = ATA_CMD_READ; + wcmd = ATA_CMD_WRITE; + } + break; + + default: + return -1; + } + + return rcmd | (wcmd << 8); +} + +static void ata_dev_set_protocol(struct ata_device *dev) +{ + int pio = (dev->flags & ATA_DFLAG_PIO); + int lba48 = (dev->flags & ATA_DFLAG_LBA48); + int proto, cmd; + + if (pio) + proto = dev->xfer_protocol = ATA_PROT_PIO; + else + proto = dev->xfer_protocol = ATA_PROT_DMA; + + cmd = ata_prot_to_cmd(proto, lba48); + if (cmd < 0) + BUG(); + + dev->read_cmd = cmd & 0xff; + dev->write_cmd = (cmd >> 8) & 0xff; +} + +static const char * udma_str[] = { + "UDMA/16", + "UDMA/25", + "UDMA/33", + "UDMA/44", + "UDMA/66", + "UDMA/100", + "UDMA/133", + "UDMA7", +}; + +/** + * ata_udma_string - convert UDMA bit offset to string + * @udma_mask: mask of bits supported; only highest bit counts. + * + * Determine string which represents the highest speed + * (highest bit in @udma_mask). + * + * LOCKING: + * None. + * + * RETURNS: + * Constant C string representing highest speed listed in + * @udma_mask, or the constant C string "". + */ + +static const char *ata_udma_string(unsigned int udma_mask) +{ + int i; + + for (i = 7; i >= 0; i--) { + if (udma_mask & (1 << i)) + return udma_str[i]; + } + + return ""; +} + +/** + * ata_pio_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_pio_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + __ata_dev_select(ap, device); + + outb(0x55, ioaddr->nsect_addr); + outb(0xaa, ioaddr->lbal_addr); + + outb(0xaa, ioaddr->nsect_addr); + outb(0x55, ioaddr->lbal_addr); + + outb(0x55, ioaddr->nsect_addr); + outb(0xaa, ioaddr->lbal_addr); + + nsect = inb(ioaddr->nsect_addr); + lbal = inb(ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * ata_mmio_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_mmio_devchk(struct ata_port *ap, + unsigned int device) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u8 nsect, lbal; + + __ata_dev_select(ap, device); + + writeb(0x55, (void *) ioaddr->nsect_addr); + writeb(0xaa, (void *) ioaddr->lbal_addr); + + writeb(0xaa, (void *) ioaddr->nsect_addr); + writeb(0x55, (void *) ioaddr->lbal_addr); + + writeb(0x55, (void *) ioaddr->nsect_addr); + writeb(0xaa, (void *) ioaddr->lbal_addr); + + nsect = readb((void *) ioaddr->nsect_addr); + lbal = readb((void *) ioaddr->lbal_addr); + + if ((nsect == 0x55) && (lbal == 0xaa)) + return 1; /* we found a device */ + + return 0; /* nothing found */ +} + +/** + * ata_dev_devchk - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static unsigned int ata_dev_devchk(struct ata_port *ap, + unsigned int device) +{ + if (ap->flags & ATA_FLAG_MMIO) + return ata_mmio_devchk(ap, device); + return ata_pio_devchk(ap, device); +} + +/** + * ata_dev_classify - determine device type based on ATA-spec signature + * @tf: ATA taskfile register set for device to be identified + * + * 
Determine from taskfile register contents whether a device is + * ATA or ATAPI, as per "Signature and persistence" section + * of ATA/PI spec (volume 1, sect 5.14). + * + * LOCKING: + * None. + * + * RETURNS: + * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN + * the event of failure. + */ + +static unsigned int ata_dev_classify(struct ata_taskfile *tf) +{ + /* Apple's open source Darwin code hints that some devices only + * put a proper signature into the LBA mid/high registers, + * So, we only check those. It's sufficient for uniqueness. + */ + + if (((tf->lbam == 0) && (tf->lbah == 0)) || + ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) { + DPRINTK("found ATA device by sig\n"); + return ATA_DEV_ATA; + } + + if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) || + ((tf->lbam == 0x69) && (tf->lbah == 0x96))) { + DPRINTK("found ATAPI device by sig\n"); + return ATA_DEV_ATAPI; + } + + DPRINTK("unknown device\n"); + return ATA_DEV_UNKNOWN; +} + +/** + * ata_dev_try_classify - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device, + unsigned int maybe_have_dev) +{ + struct ata_device *dev = &ap->device[device]; + struct ata_taskfile tf; + unsigned int class; + u8 err; + + __ata_dev_select(ap, device); + + memset(&tf, 0, sizeof(tf)); + + err = ata_chk_err(ap); + ap->ops->tf_read(ap, &tf); + + dev->class = ATA_DEV_NONE; + + /* see if device passed diags */ + if (err == 1) + /* do nothing */ ; + else if ((device == 0) && (err == 0x81)) + /* do nothing */ ; + else + return err; + + /* determine if device if ATA or ATAPI */ + class = ata_dev_classify(&tf); + if (class == ATA_DEV_UNKNOWN) + return err; + if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0)) + return err; + + dev->class = class; + + return err; +} + +/** + * ata_dev_id_string - + * @dev: + * @s: + * @ofs: + * @len: + * + * LOCKING: + * + * RETURNS: + * + */ + +unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s, + unsigned int ofs, unsigned int len) +{ + unsigned int c, ret = 0; + + while (len > 0) { + c = dev->id[ofs] >> 8; + *s = c; + s++; + + ret = c = dev->id[ofs] & 0xff; + *s = c; + s++; + + ofs++; + len -= 2; + } + + return ret; +} + +/** + * ata_dev_parse_strings - + * @dev: + * + * LOCKING: + */ + +static void ata_dev_parse_strings(struct ata_device *dev) +{ + assert (dev->class == ATA_DEV_ATA); + memcpy(dev->vendor, "ATA ", 8); + + ata_dev_id_string(dev, dev->product, ATA_ID_PROD_OFS, + sizeof(dev->product)); +} + +/** + * __ata_dev_select - + * @ap: + * @device: + * + * LOCKING: + * + */ + +static void __ata_dev_select (struct ata_port *ap, unsigned int device) +{ + u8 tmp; + + if (device == 0) + tmp = ATA_DEVICE_OBS; + else + tmp = ATA_DEVICE_OBS | ATA_DEV1; + + if (ap->flags & ATA_FLAG_MMIO) { + writeb(tmp, (void *) ap->ioaddr.device_addr); + } else { + outb(tmp, ap->ioaddr.device_addr); + } + ata_pause(ap); /* needed; also flushes, for mmio */ +} + +/** + * ata_dev_select - + * @ap: + * @device: + * @wait: + * @can_sleep: + * + * LOCKING: + * + * RETURNS: + * + */ + +void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep) +{ + VPRINTK("ENTER, ata%u: device %u, wait %u\n", + ap->id, device, wait); + + if (wait) + ata_wait_idle(ap); + + __ata_dev_select(ap, device); + + if (wait) { + if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI) + msleep(150); + ata_wait_idle(ap); + } +} + +/** + * ata_dump_id - + * @dev: + * + * LOCKING: + */ + +static inline void 
ata_dump_id(struct ata_device *dev) +{ + DPRINTK("49==0x%04x " + "53==0x%04x " + "63==0x%04x " + "64==0x%04x " + "75==0x%04x \n", + dev->id[49], + dev->id[53], + dev->id[63], + dev->id[64], + dev->id[75]); + DPRINTK("80==0x%04x " + "81==0x%04x " + "82==0x%04x " + "83==0x%04x " + "84==0x%04x \n", + dev->id[80], + dev->id[81], + dev->id[82], + dev->id[83], + dev->id[84]); + DPRINTK("88==0x%04x " + "93==0x%04x\n", + dev->id[88], + dev->id[93]); +} + +/** + * ata_dev_identify - obtain IDENTIFY x DEVICE page + * @ap: port on which device we wish to probe resides + * @device: device bus address, starting at zero + * + * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE + * command, and read back the 512-byte device information page. + * The device information page is fed to us via the standard + * PIO-IN protocol, but we hand-code it here. (TODO: investigate + * using standard PIO-IN paths) + * + * After reading the device information page, we use several + * bits of information from it to initialize data structures + * that will be used during the lifetime of the ata_device. + * Other data from the info page is used to disqualify certain + * older ATA devices we do not wish to support. + * + * LOCKING: + * Inherited from caller. Some functions called by this function + * obtain the host_set lock. + */ + +static void ata_dev_identify(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + unsigned int i; + u16 tmp, udma_modes; + u8 status; + struct ata_taskfile tf; + unsigned int using_edd; + + if (!ata_dev_present(dev)) { + DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", + ap->id, device); + return; + } + + if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET)) + using_edd = 0; + else + using_edd = 1; + + DPRINTK("ENTER, host %u, dev %u\n", ap->id, device); + + assert (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI || + dev->class == ATA_DEV_NONE); + + ata_dev_select(ap, device, 1, 1); /* select device 0/1 */ + +retry: + ata_tf_init(ap, &tf, device); + tf.ctl |= ATA_NIEN; + tf.protocol = ATA_PROT_PIO; + + if (dev->class == ATA_DEV_ATA) { + tf.command = ATA_CMD_ID_ATA; + DPRINTK("do ATA identify\n"); + } else { + tf.command = ATA_CMD_ID_ATAPI; + DPRINTK("do ATAPI identify\n"); + } + + ata_tf_to_host(ap, &tf); + + /* crazy ATAPI devices... */ + if (dev->class == ATA_DEV_ATAPI) + msleep(150); + + if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) + goto err_out; + + status = ata_chk_status(ap); + if (status & ATA_ERR) { + /* + * arg! EDD works for all test cases, but seems to return + * the ATA signature for some ATAPI devices. Until the + * reason for this is found and fixed, we fix up the mess + * here. If IDENTIFY DEVICE returns command aborted + * (as ATAPI devices do), then we issue an + * IDENTIFY PACKET DEVICE. + * + * ATA software reset (SRST, the default) does not appear + * to have this problem. + */ + if ((using_edd) && (tf.command == ATA_CMD_ID_ATA)) { + u8 err = ata_chk_err(ap); + if (err & ATA_ABORTED) { + dev->class = ATA_DEV_ATAPI; + goto retry; + } + } + goto err_out; + } + + /* make sure we have BSY=0, DRQ=1 */ + if ((status & ATA_DRQ) == 0) { + printk(KERN_WARNING "ata%u: dev %u (ATA%s?) not returning id page (0x%x)\n", + ap->id, device, + dev->class == ATA_DEV_ATA ? 
"" : "PI", + status); + goto err_out; + } + + /* read IDENTIFY [X] DEVICE page */ + if (ap->flags & ATA_FLAG_MMIO) { + for (i = 0; i < ATA_ID_WORDS; i++) + dev->id[i] = readw((void *)ap->ioaddr.data_addr); + } else + for (i = 0; i < ATA_ID_WORDS; i++) + dev->id[i] = inw(ap->ioaddr.data_addr); + + /* wait for host_idle */ + status = ata_wait_idle(ap); + if (status & (ATA_BUSY | ATA_DRQ)) { + printk(KERN_WARNING "ata%u: dev %u (ATA%s?) error after id page (0x%x)\n", + ap->id, device, + dev->class == ATA_DEV_ATA ? "" : "PI", + status); + goto err_out; + } + + ata_irq_on(ap); /* re-enable interrupts */ + + /* print device capabilities */ + printk(KERN_DEBUG "ata%u: dev %u cfg " + "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n", + ap->id, device, dev->id[49], + dev->id[82], dev->id[83], dev->id[84], + dev->id[85], dev->id[86], dev->id[87], + dev->id[88]); + + /* + * common ATA, ATAPI feature tests + */ + + /* we require LBA and DMA support (bits 8 & 9 of word 49) */ + if (!ata_id_has_dma(dev) || !ata_id_has_lba(dev)) { + printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id); + goto err_out_nosup; + } + + /* we require UDMA support */ + udma_modes = + tmp = dev->id[ATA_ID_UDMA_MODES]; + if ((tmp & 0xff) == 0) { + printk(KERN_DEBUG "ata%u: no udma\n", ap->id); + goto err_out_nosup; + } + + ata_dump_id(dev); + + ata_dev_parse_strings(dev); + + /* ATA-specific feature tests */ + if (dev->class == ATA_DEV_ATA) { + if (!ata_id_is_ata(dev)) /* sanity check */ + goto err_out_nosup; + + tmp = dev->id[ATA_ID_MAJOR_VER]; + for (i = 14; i >= 1; i--) + if (tmp & (1 << i)) + break; + + /* we require at least ATA-3 */ + if (i < 3) { + printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id); + goto err_out_nosup; + } + + if (ata_id_has_lba48(dev)) { + dev->flags |= ATA_DFLAG_LBA48; + dev->n_sectors = ata_id_u64(dev, 100); + } else { + dev->n_sectors = ata_id_u32(dev, 60); + } + + ap->host->max_cmd_len = 16; + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors%s\n", + ap->id, device, + ata_udma_string(udma_modes), + (unsigned long long)dev->n_sectors, + dev->flags & ATA_DFLAG_LBA48 ? 
" (lba48)" : ""); + } + + /* ATAPI-specific feature tests */ + else { + if (ata_id_is_ata(dev)) /* sanity check */ + goto err_out_nosup; + + /* see if 16-byte commands supported */ + tmp = dev->id[0] & 0x3; + if (tmp == 1) + ap->host->max_cmd_len = 16; + + /* print device info to dmesg */ + printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n", + ap->id, device, + ata_udma_string(udma_modes)); + } + + DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap)); + return; + +err_out_nosup: + printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n", + ap->id, device); +err_out: + ata_irq_on(ap); /* re-enable interrupts */ + dev->class++; /* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */ + DPRINTK("EXIT, err\n"); +} + +/** + * ata_port_reset - + * @ap: + * + * LOCKING: + */ + +static void ata_port_reset(struct ata_port *ap) +{ + unsigned int i, found = 0; + + ap->ops->phy_reset(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + goto err_out; + + for (i = 0; i < ATA_MAX_DEVICES; i++) { + ata_dev_identify(ap, i); + if (ata_dev_present(&ap->device[i])) { + found = 1; + if (ap->ops->dev_config) + ap->ops->dev_config(ap, &ap->device[i]); + } + } + + if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + goto err_out_disable; + + ata_set_mode(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + goto err_out_disable; + + ap->thr_state = THR_PROBE_SUCCESS; + + return; + +err_out_disable: + ap->ops->port_disable(ap); +err_out: + ap->thr_state = THR_PROBE_FAILED; +} + +/** + * ata_port_probe - + * @ap: + * + * LOCKING: + */ + +void ata_port_probe(struct ata_port *ap) +{ + ap->flags &= ~ATA_FLAG_PORT_DISABLED; +} + +/** + * sata_phy_reset - + * @ap: + * + * LOCKING: + * + */ +void sata_phy_reset(struct ata_port *ap) +{ + u32 sstatus; + unsigned long timeout = jiffies + (HZ * 5); + + if (ap->flags & ATA_FLAG_SATA_RESET) { + scr_write(ap, SCR_CONTROL, 0x301); /* issue phy wake/reset */ + scr_read(ap, SCR_STATUS); /* dummy read; flush */ + udelay(400); /* FIXME: a guess */ + } + scr_write(ap, SCR_CONTROL, 0x300); /* issue phy wake/clear reset */ + + /* wait for phy to become ready, if necessary */ + do { + msleep(200); + sstatus = scr_read(ap, SCR_STATUS); + if ((sstatus & 0xf) != 1) + break; + } while (time_before(jiffies, timeout)); + + /* TODO: phy layer with polling, timeouts, etc. 
*/ + if (sata_dev_present(ap)) + ata_port_probe(ap); + else { + sstatus = scr_read(ap, SCR_STATUS); + printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n", + ap->id, sstatus); + ata_port_disable(ap); + } + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { + ata_port_disable(ap); + return; + } + + ata_bus_reset(ap); +} + +/** + * ata_port_disable - + * @ap: + * + * LOCKING: + */ + +void ata_port_disable(struct ata_port *ap) +{ + ap->device[0].class = ATA_DEV_NONE; + ap->device[1].class = ATA_DEV_NONE; + ap->flags |= ATA_FLAG_PORT_DISABLED; +} + +/** + * ata_set_mode - Program timings and issue SET FEATURES - XFER + * @ap: port on which timings will be programmed + * + * LOCKING: + * + */ +static void ata_set_mode(struct ata_port *ap) +{ + unsigned int force_pio, i; + + ata_host_set_pio(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + ata_host_set_udma(ap); + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + +#ifdef ATA_FORCE_PIO + force_pio = 1; +#else + force_pio = 0; +#endif + + if (force_pio) { + ata_dev_set_pio(ap, 0); + ata_dev_set_pio(ap, 1); + } else { + ata_dev_set_udma(ap, 0); + ata_dev_set_udma(ap, 1); + } + + if (ap->flags & ATA_FLAG_PORT_DISABLED) + return; + + if (ap->ops->post_set_mode) + ap->ops->post_set_mode(ap); + + for (i = 0; i < 2; i++) { + struct ata_device *dev = &ap->device[i]; + ata_dev_set_protocol(dev); + } +} + +/** + * ata_busy_sleep - sleep until BSY clears, or timeout + * @ap: port containing status register to be polled + * @tmout_pat: impatience timeout + * @tmout: overall timeout + * + * LOCKING: + * + */ + +static unsigned int ata_busy_sleep (struct ata_port *ap, + unsigned long tmout_pat, + unsigned long tmout) +{ + unsigned long timer_start, timeout; + u8 status; + + status = ata_busy_wait(ap, ATA_BUSY, 300); + timer_start = jiffies; + timeout = timer_start + tmout_pat; + while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { + msleep(50); + status = ata_busy_wait(ap, ATA_BUSY, 3); + } + + if (status & ATA_BUSY) + printk(KERN_WARNING "ata%u is slow to respond, " + "please be patient\n", ap->id); + + timeout = timer_start + tmout; + while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) { + msleep(50); + status = ata_chk_status(ap); + } + + if (status & ATA_BUSY) { + printk(KERN_ERR "ata%u failed to respond (%lu secs)\n", + ap->id, tmout / HZ); + return 1; + } + + return 0; +} + +static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int dev0 = devmask & (1 << 0); + unsigned int dev1 = devmask & (1 << 1); + unsigned long timeout; + + /* if device 0 was found in ata_dev_devchk, wait for its + * BSY bit to clear + */ + if (dev0) + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* if device 1 was found in ata_dev_devchk, wait for + * register access, then wait for BSY to clear + */ + timeout = jiffies + ATA_TMOUT_BOOT; + while (dev1) { + u8 nsect, lbal; + + __ata_dev_select(ap, 1); + if (ap->flags & ATA_FLAG_MMIO) { + nsect = readb((void *) ioaddr->nsect_addr); + lbal = readb((void *) ioaddr->lbal_addr); + } else { + nsect = inb(ioaddr->nsect_addr); + lbal = inb(ioaddr->lbal_addr); + } + if ((nsect == 1) && (lbal == 1)) + break; + if (time_after(jiffies, timeout)) { + dev1 = 0; + break; + } + msleep(50); /* give drive a breather */ + } + if (dev1) + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + /* is all this really necessary? 
*/ + __ata_dev_select(ap, 0); + if (dev1) + __ata_dev_select(ap, 1); + if (dev0) + __ata_dev_select(ap, 0); +} + +/** + * ata_bus_edd - + * @ap: + * + * LOCKING: + * + */ + +static unsigned int ata_bus_edd(struct ata_port *ap) +{ + struct ata_taskfile tf; + + /* set up execute-device-diag (bus reset) taskfile */ + /* also, take interrupts to a known state (disabled) */ + DPRINTK("execute-device-diag\n"); + ata_tf_init(ap, &tf, 0); + tf.ctl |= ATA_NIEN; + tf.command = ATA_CMD_EDD; + tf.protocol = ATA_PROT_NODATA; + + /* do bus reset */ + ata_tf_to_host(ap, &tf); + + /* spec says at least 2ms. but who knows with those + * crazy ATAPI devices... + */ + msleep(150); + + return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); +} + +static unsigned int ata_bus_softreset(struct ata_port *ap, + unsigned int devmask) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + DPRINTK("ata%u: bus reset via SRST\n", ap->id); + + /* software reset. causes dev0 to be selected */ + if (ap->flags & ATA_FLAG_MMIO) { + writeb(ap->ctl, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + writeb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(20); /* FIXME: flush */ + writeb(ap->ctl, ioaddr->ctl_addr); + } else { + outb(ap->ctl, ioaddr->ctl_addr); + udelay(10); + outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr); + udelay(10); + outb(ap->ctl, ioaddr->ctl_addr); + } + + /* spec mandates ">= 2ms" before checking status. + * We wait 150ms, because that was the magic delay used for + * ATAPI devices in Hale Landis's ATADRVR, for the period of time + * between when the ATA command register is written, and then + * status is checked. Because waiting for "a while" before + * checking status is fine, post SRST, we perform this magic + * delay here as well. + */ + msleep(150); + + ata_bus_post_reset(ap, devmask); + + return 0; +} + +/** + * ata_bus_reset - reset host port and associated ATA channel + * @ap: port to reset + * + * This is typically the first time we actually start issuing + * commands to the ATA channel. We wait for BSY to clear, then + * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its + * result. Determine what devices, if any, are on the channel + * by looking at the device 0/1 error register. Look at the signature + * stored in each device's taskfile registers, to determine if + * the device is ATA or ATAPI. + * + * LOCKING: + * Inherited from caller. Some functions called by this function + * obtain the host_set lock. + * + * SIDE EFFECTS: + * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. 
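+ *
+ * Note that for SATA ports (ATA_FLAG_SATA_RESET) device 0 is assumed
+ * present and the ata_dev_devchk() presence test is skipped.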
+ */ + +void ata_bus_reset(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; + u8 err; + unsigned int dev0, dev1 = 0, rc = 0, devmask = 0; + + DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no); + + /* determine if device 0/1 are present */ + if (ap->flags & ATA_FLAG_SATA_RESET) + dev0 = 1; + else { + dev0 = ata_dev_devchk(ap, 0); + if (slave_possible) + dev1 = ata_dev_devchk(ap, 1); + } + + if (dev0) + devmask |= (1 << 0); + if (dev1) + devmask |= (1 << 1); + + /* select device 0 again */ + __ata_dev_select(ap, 0); + + /* issue bus reset */ + if (ap->flags & ATA_FLAG_SRST) + rc = ata_bus_softreset(ap, devmask); + else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) { + /* set up device control */ + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + rc = ata_bus_edd(ap); + } + + if (rc) + goto err_out; + + /* + * determine by signature whether we have ATA or ATAPI devices + */ + err = ata_dev_try_classify(ap, 0, dev0); + if ((slave_possible) && (err != 0x81)) + ata_dev_try_classify(ap, 1, dev1); + + /* re-enable interrupts */ + ata_irq_on(ap); + + /* is double-select really necessary? */ + if (ap->device[1].class != ATA_DEV_NONE) + __ata_dev_select(ap, 1); + if (ap->device[0].class != ATA_DEV_NONE) + __ata_dev_select(ap, 0); + + /* if no devices were detected, disable this port */ + if ((ap->device[0].class == ATA_DEV_NONE) && + (ap->device[1].class == ATA_DEV_NONE)) + goto err_out; + + if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { + /* set up device control for ATA_FLAG_SATA_RESET */ + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + } + + DPRINTK("EXIT\n"); + return; + +err_out: + printk(KERN_ERR "ata%u: disabling port\n", ap->id); + ap->ops->port_disable(ap); + + DPRINTK("EXIT\n"); +} + +/** + * ata_host_set_pio - + * @ap: + * + * LOCKING: + */ + +static void ata_host_set_pio(struct ata_port *ap) +{ + struct ata_device *master, *slave; + unsigned int pio, i; + u16 mask; + + master = &ap->device[0]; + slave = &ap->device[1]; + + assert (ata_dev_present(master) || ata_dev_present(slave)); + + mask = ap->pio_mask; + if (ata_dev_present(master)) + mask &= (master->id[ATA_ID_PIO_MODES] & 0x03); + if (ata_dev_present(slave)) + mask &= (slave->id[ATA_ID_PIO_MODES] & 0x03); + + /* require pio mode 3 or 4 support for host and all devices */ + if (mask == 0) { + printk(KERN_WARNING "ata%u: no PIO3/4 support, ignoring\n", + ap->id); + goto err_out; + } + + pio = (mask & ATA_ID_PIO4) ? 4 : 3; + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (ata_dev_present(&ap->device[i])) { + ap->device[i].pio_mode = (pio == 3) ? + XFER_PIO_3 : XFER_PIO_4; + if (ap->ops->set_piomode) + ap->ops->set_piomode(ap, &ap->device[i], pio); + } + + return; + +err_out: + ap->ops->port_disable(ap); +} + +/** + * ata_host_set_udma - + * @ap: + * + * LOCKING: + */ + +static void ata_host_set_udma(struct ata_port *ap) +{ + struct ata_device *master, *slave; + u16 mask; + unsigned int i, j; + int udma_mode = -1; + + master = &ap->device[0]; + slave = &ap->device[1]; + + assert (ata_dev_present(master) || ata_dev_present(slave)); + assert ((ap->flags & ATA_FLAG_PORT_DISABLED) == 0); + + DPRINTK("udma masks: host 0x%X, master 0x%X, slave 0x%X\n", + ap->udma_mask, + (!ata_dev_present(master)) ? 0xff : + (master->id[ATA_ID_UDMA_MODES] & 0xff), + (!ata_dev_present(slave)) ? 
0xff : + (slave->id[ATA_ID_UDMA_MODES] & 0xff)); + + mask = ap->udma_mask; + if (ata_dev_present(master)) + mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff); + if (ata_dev_present(slave)) + mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff); + + i = XFER_UDMA_7; + while (i >= XFER_UDMA_0) { + j = i - XFER_UDMA_0; + DPRINTK("mask 0x%X i 0x%X j %u\n", mask, i, j); + if (mask & (1 << j)) { + udma_mode = i; + break; + } + + i--; + } + + /* require udma for host and all attached devices */ + if (udma_mode < 0) { + printk(KERN_WARNING "ata%u: no UltraDMA support, ignoring\n", + ap->id); + goto err_out; + } + + for (i = 0; i < ATA_MAX_DEVICES; i++) + if (ata_dev_present(&ap->device[i])) { + ap->device[i].udma_mode = udma_mode; + if (ap->ops->set_udmamode) + ap->ops->set_udmamode(ap, &ap->device[i], + udma_mode); + } + + return; + +err_out: + ap->ops->port_disable(ap); +} + +/** + * ata_dev_set_xfermode - + * @ap: + * @dev: + * + * LOCKING: + */ + +static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev) +{ + struct ata_taskfile tf; + + /* set up set-features taskfile */ + DPRINTK("set features - xfer mode\n"); + ata_tf_init(ap, &tf, dev->devno); + tf.ctl |= ATA_NIEN; + tf.command = ATA_CMD_SET_FEATURES; + tf.feature = SETFEATURES_XFER; + tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf.protocol = ATA_PROT_NODATA; + if (dev->flags & ATA_DFLAG_PIO) + tf.nsect = dev->pio_mode; + else + tf.nsect = dev->udma_mode; + + /* do bus reset */ + ata_tf_to_host(ap, &tf); + + /* crazy ATAPI devices... */ + if (dev->class == ATA_DEV_ATAPI) + msleep(150); + + ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); + + ata_irq_on(ap); /* re-enable interrupts */ + + ata_wait_idle(ap); + + DPRINTK("EXIT\n"); +} + +/** + * ata_dev_set_udma - + * @ap: + * @device: + * + * LOCKING: + */ + +static void ata_dev_set_udma(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + + if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + return; + + ata_dev_set_xfermode(ap, dev); + + assert((dev->udma_mode >= XFER_UDMA_0) && + (dev->udma_mode <= XFER_UDMA_7)); + printk(KERN_INFO "ata%u: dev %u configured for %s\n", + ap->id, device, + udma_str[dev->udma_mode - XFER_UDMA_0]); +} + +/** + * ata_dev_set_pio - + * @ap: + * @device: + * + * LOCKING: + */ + +static void ata_dev_set_pio(struct ata_port *ap, unsigned int device) +{ + struct ata_device *dev = &ap->device[device]; + + if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED)) + return; + + /* force PIO mode */ + dev->flags |= ATA_DFLAG_PIO; + + ata_dev_set_xfermode(ap, dev); + + assert((dev->pio_mode >= XFER_PIO_3) && + (dev->pio_mode <= XFER_PIO_4)); + printk(KERN_INFO "ata%u: dev %u configured for PIO%c\n", + ap->id, device, + dev->pio_mode == 3 ? 
'3' : '4'); +} + +/** + * ata_sg_clean - + * @qc: + * + * LOCKING: + */ + +static void ata_sg_clean(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct scsi_cmnd *cmd = qc->scsicmd; + struct scatterlist *sg = qc->sg; + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + + assert(dir == SCSI_DATA_READ || dir == SCSI_DATA_WRITE); + assert(qc->flags & ATA_QCFLAG_SG); + assert(sg != NULL); + + if (!cmd->use_sg) + assert(qc->n_elem == 1); + + DPRINTK("unmapping %u sg elements\n", qc->n_elem); + + if (cmd->use_sg) + pci_unmap_sg(ap->host_set->pdev, sg, qc->n_elem, dir); + else + pci_unmap_single(ap->host_set->pdev, sg_dma_address(&sg[0]), + sg_dma_len(&sg[0]), dir); + + qc->flags &= ~ATA_QCFLAG_SG; + qc->sg = NULL; +} + +/** + * ata_fill_sg - + * @qc: + * + * LOCKING: + * + */ +void ata_fill_sg(struct ata_queued_cmd *qc) +{ + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + unsigned int idx, nelem; + + assert(sg != NULL); + assert(qc->n_elem > 0); + + idx = 0; + for (nelem = qc->n_elem; nelem; nelem--,sg++) { + u32 addr, boundary; + u32 sg_len, len; + + /* determine if physical DMA addr spans 64K boundary. + * Note h/w doesn't support 64-bit, so we unconditionally + * truncate dma_addr_t to u32. + */ + addr = (u32) sg_dma_address(sg); + sg_len = sg_dma_len(sg); + + while (sg_len) { + boundary = (addr & ~0xffff) + (0xffff + 1); + len = sg_len; + if ((addr + sg_len) > boundary) + len = boundary - addr; + + ap->prd[idx].addr = cpu_to_le32(addr); + ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff); + VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len); + + idx++; + sg_len -= len; + addr += len; + } + } + + if (idx) + ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT); +} + +/** + * ata_sg_setup_one - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static int ata_sg_setup_one(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct scsi_cmnd *cmd = qc->scsicmd; + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + struct scatterlist *sg = qc->sg; + unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG); + + assert(sg == &qc->sgent); + assert(qc->n_elem == 1); + + sg->address = cmd->request_buffer; + sg->page = virt_to_page(cmd->request_buffer); + sg->offset = (unsigned long) cmd->request_buffer & ~PAGE_MASK; + sg_dma_len(sg) = cmd->request_bufflen; + + if (!have_sg) + return 0; + + sg_dma_address(sg) = pci_map_single(ap->host_set->pdev, + cmd->request_buffer, + cmd->request_bufflen, dir); + + DPRINTK("mapped buffer of %d bytes for %s\n", cmd->request_bufflen, + qc->tf.flags & ATA_TFLAG_WRITE ? 
"write" : "read"); + + return 0; +} + +/** + * ata_sg_setup - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static int ata_sg_setup(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct scsi_cmnd *cmd = qc->scsicmd; + struct scatterlist *sg; + int n_elem; + unsigned int have_sg = (qc->flags & ATA_QCFLAG_SG); + + VPRINTK("ENTER, ata%u, use_sg %d\n", ap->id, cmd->use_sg); + assert(cmd->use_sg > 0); + + sg = (struct scatterlist *)cmd->request_buffer; + if (have_sg) { + int dir = scsi_to_pci_dma_dir(cmd->sc_data_direction); + n_elem = pci_map_sg(ap->host_set->pdev, sg, cmd->use_sg, dir); + if (n_elem < 1) + return -1; + DPRINTK("%d sg elements mapped\n", n_elem); + } else { + n_elem = cmd->use_sg; + } + qc->n_elem = n_elem; + + return 0; +} + +/** + * ata_pio_poll - + * @ap: + * + * LOCKING: + * + * RETURNS: + * + */ + +static unsigned long ata_pio_poll(struct ata_port *ap) +{ + u8 status; + unsigned int poll_state = THR_UNKNOWN; + unsigned int reg_state = THR_UNKNOWN; + const unsigned int tmout_state = THR_PIO_TMOUT; + + switch (ap->thr_state) { + case THR_PIO: + case THR_PIO_POLL: + poll_state = THR_PIO_POLL; + reg_state = THR_PIO; + break; + case THR_PIO_LAST: + case THR_PIO_LAST_POLL: + poll_state = THR_PIO_LAST_POLL; + reg_state = THR_PIO_LAST; + break; + default: + BUG(); + break; + } + + status = ata_chk_status(ap); + if (status & ATA_BUSY) { + if (time_after(jiffies, ap->thr_timeout)) { + ap->thr_state = tmout_state; + return 0; + } + ap->thr_state = poll_state; + if (current->need_resched) + return 0; + return ATA_SHORT_PAUSE; + } + + ap->thr_state = reg_state; + return 0; +} + +/** + * ata_pio_start - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static void ata_pio_start (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + + assert(qc->tf.protocol == ATA_PROT_PIO); + + qc->flags |= ATA_QCFLAG_POLL; + qc->tf.ctl |= ATA_NIEN; /* disable interrupts */ + ata_tf_to_host_nolock(ap, &qc->tf); + ata_thread_wake(ap, THR_PIO); +} + +/** + * ata_pio_complete - + * @ap: + * + * LOCKING: + */ + +static void ata_pio_complete (struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + unsigned long flags; + u8 drv_stat; + + /* + * This is purely hueristic. This is a fast path. + * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, fall back to + * THR_PIO_POLL state. + */ + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + msleep(2); + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + ap->thr_state = THR_PIO_LAST_POLL; + ap->thr_timeout = jiffies + ATA_TMOUT_PIO; + return; + } + } + + drv_stat = ata_wait_idle(ap); + if (drv_stat & (ATA_BUSY | ATA_DRQ)) { + ap->thr_state = THR_PIO_ERR; + return; + } + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + + spin_lock_irqsave(&ap->host_set->lock, flags); + ap->thr_state = THR_IDLE; + spin_unlock_irqrestore(&ap->host_set->lock, flags); + + ata_irq_on(ap); + + ata_qc_complete(qc, drv_stat, 0); +} + +/** + * ata_pio_sector - + * @ap: + * + * LOCKING: + */ + +static void ata_pio_sector(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + struct scatterlist *sg; + struct scsi_cmnd *cmd; + unsigned char *buf; + u8 status; + + /* + * This is purely hueristic. This is a fast path. 
+ * Sometimes when we enter, BSY will be cleared in + * a chk-status or two. If not, the drive is probably seeking + * or something. Snooze for a couple msecs, then + * chk-status again. If still busy, fall back to + * THR_PIO_POLL state. + */ + status = ata_busy_wait(ap, ATA_BUSY, 5); + if (status & ATA_BUSY) { + msleep(2); + status = ata_busy_wait(ap, ATA_BUSY, 10); + if (status & ATA_BUSY) { + ap->thr_state = THR_PIO_POLL; + ap->thr_timeout = jiffies + ATA_TMOUT_PIO; + return; + } + } + + /* handle BSY=0, DRQ=0 as error */ + if ((status & ATA_DRQ) == 0) { + ap->thr_state = THR_PIO_ERR; + return; + } + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + + cmd = qc->scsicmd; + sg = qc->sg; + + if (qc->cursect == (qc->nsect - 1)) + ap->thr_state = THR_PIO_LAST; + + buf = kmap(sg[qc->cursg].page) + + sg[qc->cursg].offset + (qc->cursg_ofs * ATA_SECT_SIZE); + + qc->cursect++; + qc->cursg_ofs++; + + if (cmd->use_sg) + if ((qc->cursg_ofs * ATA_SECT_SIZE) == sg_dma_len(&sg[qc->cursg])) { + qc->cursg++; + qc->cursg_ofs = 0; + } + + DPRINTK("data %s, drv_stat 0x%X\n", + qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read", + status); + + /* do the actual data transfer */ + /* FIXME: mmio-ize */ + if (qc->tf.flags & ATA_TFLAG_WRITE) + outsl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS); + else + insl(ap->ioaddr.data_addr, buf, ATA_SECT_DWORDS); + + kunmap(sg[qc->cursg].page); +} + +#if 0 /* to be used eventually */ +/** + * ata_eng_schedule - run an iteration of the pio/dma/whatever engine + * @ap: port on which activity will occur + * @eng: instance of engine + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ +static void ata_eng_schedule (struct ata_port *ap, struct ata_engine *eng) +{ + /* FIXME */ +} +#endif + +/** + * ata_eng_timeout - Handle timeout of queued command + * @ap: Port on which timed-out command is active + * + * Some part of the kernel (currently, only the SCSI layer) + * has noticed that the active command on port @ap has not + * completed after a specified length of time. Handle this + * condition by disabling DMA (if necessary) and completing + * transactions, with error if necessary. + * + * This also handles the case of the "lost interrupt", where + * for some reason (possibly hardware bug, possibly driver bug) + * an interrupt was not delivered to the driver, even though the + * transaction completed successfully. + * + * LOCKING: + * Inherited from SCSI layer (none, can sleep) + */ + +void ata_eng_timeout(struct ata_port *ap) +{ + u8 host_stat, drv_stat; + struct ata_queued_cmd *qc; + + DPRINTK("ENTER\n"); + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (!qc) { + printk(KERN_ERR "ata%u: BUG: timeout without command\n", + ap->id); + goto out; + } + + /* hack alert! We cannot use the supplied completion + * function from inside the ->eh_strategy_handler() thread. + * libata is the only user of ->eh_strategy_handler() in + * any kernel, so the default scsi_done() assumes it is + * not being called from the SCSI EH. 
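The cursect/cursg/cursg_ofs bookkeeping in ata_pio_sector above amounts to walking a scatterlist one 512-byte sector at a time, moving to the next segment when the current one is exhausted. A standalone sketch of the same walk (plain C, segment lengths hypothetical):

#include <stdio.h>

#define SECT_SIZE 512

int main(void)
{
	/* hypothetical scatterlist: three segments, lengths in bytes */
	unsigned int sg_len[3] = { 4096, 1024, 2048 };
	unsigned int nsect = (4096 + 1024 + 2048) / SECT_SIZE;
	unsigned int cursg = 0, cursg_ofs = 0, cursect;

	for (cursect = 0; cursect < nsect; cursect++) {
		printf("sector %2u -> segment %u, byte offset %u\n",
		       cursect, cursg, cursg_ofs * SECT_SIZE);

		cursg_ofs++;
		if (cursg_ofs * SECT_SIZE == sg_len[cursg]) {
			cursg++;		/* segment exhausted */
			cursg_ofs = 0;
		}
	}
	return 0;
}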
+ */ + qc->scsidone = scsi_finish_command; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + host_stat = readb(mmio + ATA_DMA_STATUS); + } else + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + + printk(KERN_ERR "ata%u: DMA timeout, stat 0x%x\n", + ap->id, host_stat); + + ata_dma_complete(ap, host_stat, 1); + break; + + case ATA_PROT_NODATA: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + + default: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + } + +out: + DPRINTK("EXIT\n"); +} + +/** + * ata_qc_new - + * @ap: + * @dev: + * + * LOCKING: + */ + +static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) +{ + struct ata_queued_cmd *qc = NULL; + unsigned int i; + + for (i = 0; i < ATA_MAX_QUEUE; i++) + if (!test_and_set_bit(i, &ap->qactive)) { + qc = ata_qc_from_tag(ap, i); + break; + } + + if (qc) + qc->tag = i; + + return qc; +} + +/** + * ata_qc_new_init - + * @ap: + * @dev: + * + * LOCKING: + */ + +struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, + struct ata_device *dev) +{ + struct ata_queued_cmd *qc; + + qc = ata_qc_new(ap); + if (qc) { + qc->sg = NULL; + qc->flags = 0; + qc->scsicmd = NULL; + qc->ap = ap; + qc->dev = dev; + INIT_LIST_HEAD(&qc->node); + init_MUTEX_LOCKED(&qc->sem); + + ata_tf_init(ap, &qc->tf, dev->devno); + + if (likely((dev->flags & ATA_DFLAG_PIO) == 0)) + qc->flags |= ATA_QCFLAG_DMA; + if (dev->flags & ATA_DFLAG_LBA48) + qc->tf.flags |= ATA_TFLAG_LBA48; + } + + return qc; +} + +/** + * ata_qc_complete - + * @qc: + * @drv_stat: + * @done_late: + * + * LOCKING: + * + */ + +void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat, unsigned int done_late) +{ + struct ata_port *ap = qc->ap; + struct scsi_cmnd *cmd = qc->scsicmd; + unsigned int tag, do_clear = 0; + + assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */ + assert(qc->flags & ATA_QCFLAG_ACTIVE); + + if (likely(qc->flags & ATA_QCFLAG_SG)) + ata_sg_clean(qc); + + if (cmd) { + if (unlikely(drv_stat & (ATA_ERR | ATA_BUSY | ATA_DRQ))) { + if (qc->flags & ATA_QCFLAG_ATAPI) + cmd->result = SAM_STAT_CHECK_CONDITION; + else + ata_to_sense_error(qc); + } else { + if (done_late) + cmd->done_late = 1; + cmd->result = SAM_STAT_GOOD; + } + + qc->scsidone(cmd); + } + + qc->flags &= ~ATA_QCFLAG_ACTIVE; + tag = qc->tag; + if (likely(ata_tag_valid(tag))) { + if (tag == ap->active_tag) + ap->active_tag = ATA_TAG_POISON; + qc->tag = ATA_TAG_POISON; + do_clear = 1; + } + + up(&qc->sem); + + if (likely(do_clear)) + clear_bit(tag, &ap->qactive); +} + +#if 0 /* to be used eventually */ +/** + * ata_qc_push - + * @qc: + * @append: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ +static void ata_qc_push (struct ata_queued_cmd *qc, unsigned int append) +{ + struct ata_port *ap = qc->ap; + struct ata_engine *eng = &ap->eng; + + if (likely(append)) + list_add_tail(&qc->node, &eng->q); + else + list_add(&qc->node, &eng->q); + + if (!test_and_set_bit(ATA_EFLG_ACTIVE, &eng->flags)) + ata_eng_schedule(ap, eng); +} +#endif + +/** + * ata_qc_issue - + * @qc: + * + * LOCKING: + * + * RETURNS: + * + */ +int ata_qc_issue(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct 
scsi_cmnd *cmd = qc->scsicmd; + unsigned int dma = qc->flags & ATA_QCFLAG_DMA; + + ata_dev_select(ap, qc->dev->devno, 1, 0); + + /* set up SG table */ + if (cmd->use_sg) { + if (ata_sg_setup(qc)) + goto err_out; + } else { + if (ata_sg_setup_one(qc)) + goto err_out; + } + + ap->ops->fill_sg(qc); + + qc->ap->active_tag = qc->tag; + qc->flags |= ATA_QCFLAG_ACTIVE; + + if (likely(dma)) { + ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ + ap->ops->bmdma_start(qc); /* initiate bmdma */ + } else + /* load tf registers, initiate polling pio */ + ata_pio_start(qc); + + return 0; + +err_out: + return -1; +} + +/** + * ata_bmdma_start_mmio - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_bmdma_start_mmio (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); + u8 host_stat, dmactl; + void *mmio = (void *) ap->ioaddr.bmdma_addr; + + /* load PRD table addr. */ + mb(); /* make sure PRD table writes are visible to controller */ + writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS); + + /* specify data direction, triple-check start bit is clear */ + dmactl = readb(mmio + ATA_DMA_CMD); + dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); + if (!rw) + dmactl |= ATA_DMA_WR; + writeb(dmactl, mmio + ATA_DMA_CMD); + + /* clear interrupt, error bits */ + host_stat = readb(mmio + ATA_DMA_STATUS); + writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, mmio + ATA_DMA_STATUS); + + /* issue r/w command */ + ap->ops->exec_command(ap, &qc->tf); + + /* start host DMA transaction */ + writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); + + /* Strictly, one may wish to issue a readb() here, to + * flush the mmio write. However, control also passes + * to the hardware at this point, and it will interrupt + * us when we are to resume control. So, in effect, + * we don't care when the mmio write flushes. + * Further, a read of the DMA status register _immediately_ + * following the write may not be what certain flaky hardware + * is expected, so I think it is best to not add a readb() + * without first all the MMIO ATA cards/mobos. + * Or maybe I'm just being paranoid. + */ +} + +/** + * ata_bmdma_start_pio - + * @qc: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_bmdma_start_pio (struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); + u8 host_stat, dmactl; + + /* load PRD table addr. 
*/ + outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS); + + /* specify data direction, triple-check start bit is clear */ + dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); + if (!rw) + dmactl |= ATA_DMA_WR; + outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + + /* clear interrupt, error bits */ + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + + /* issue r/w command */ + ap->ops->exec_command(ap, &qc->tf); + + /* start host DMA transaction */ + outb(dmactl | ATA_DMA_START, + ap->ioaddr.bmdma_addr + ATA_DMA_CMD); +} + +/** + * ata_dma_complete - + * @ap: + * @host_stat: + * @done_late: + * + * LOCKING: + */ + +static void ata_dma_complete(struct ata_port *ap, u8 host_stat, + unsigned int done_late) +{ + VPRINTK("ENTER\n"); + + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + + /* clear start/stop bit */ + writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START, + mmio + ATA_DMA_CMD); + + /* ack intr, err bits */ + writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + mmio + ATA_DMA_STATUS); + } else { + /* clear start/stop bit */ + outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START, + ap->ioaddr.bmdma_addr + ATA_DMA_CMD); + + /* ack intr, err bits */ + outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + } + + + /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */ + ata_altstatus(ap); /* dummy read */ + + DPRINTK("host %u, host_stat==0x%X, drv_stat==0x%X\n", + ap->id, (u32) host_stat, (u32) ata_chk_status(ap)); + + /* get drive status; clear intr; complete txn */ + ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag), + ata_wait_idle(ap), done_late); +} + +/** + * ata_host_intr - Handle host interrupt for given (port, task) + * @ap: Port on which interrupt arrived (possibly...) + * @qc: Taskfile currently active in engine + * + * Handle host interrupt for given queued command. Currently, + * only DMA interrupts are handled. All other commands are + * handled via polling with interrupts disabled (nIEN bit). + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * One if interrupt was handled, zero if not (shared irq). 
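The DMA branch of ata_host_intr below leans on the standard SFF bus-master status handshake: read the BMDMA status register, claim the interrupt only if the interrupt bit is set (the IRQ line may be shared), and acknowledge by writing the bit back, as ata_dma_complete does above. A compressed sketch of that decision, assuming the ATA_DMA_* constants from this patch and port I/O rather than MMIO:

/* Sketch only: claim-or-ignore decision for a shared ATA interrupt.
 * Returns 1 if this channel raised the interrupt, 0 otherwise.
 */
static int bmdma_irq_pending(unsigned long bmdma_addr)
{
	u8 host_stat = inb(bmdma_addr + ATA_DMA_STATUS);

	if (!(host_stat & ATA_DMA_INTR))
		return 0;	/* not ours; another device shares the line */

	/* both INTR and ERR are write-one-to-clear */
	outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
	     bmdma_addr + ATA_DMA_STATUS);
	return 1;
}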
+ */ + +inline unsigned int ata_host_intr (struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + u8 status, host_stat; + unsigned int handled = 0; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + host_stat = readb(mmio + ATA_DMA_STATUS); + } else + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + VPRINTK("BUS_DMA (host_stat 0x%X)\n", host_stat); + + if (!(host_stat & ATA_DMA_INTR)) { + ap->stats.idle_irq++; + break; + } + + ata_dma_complete(ap, host_stat, 0); + handled = 1; + break; + + case ATA_PROT_NODATA: /* command completion, but no data xfer */ + status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); + ata_qc_complete(qc, status, 0); + handled = 1; + break; + + default: + ap->stats.idle_irq++; + +#ifdef ATA_IRQ_TRAP + if ((ap->stats.idle_irq % 1000) == 0) { + handled = 1; + ata_irq_ack(ap, 0); /* debug trap */ + printk(KERN_WARNING "ata%d: irq trap\n", ap->id); + } +#endif + break; + } + + return handled; +} + +/** + * ata_interrupt - + * @irq: + * @dev_instance: + * @regs: + * + * LOCKING: + * + * RETURNS: + * + */ + +irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + unsigned int i; + unsigned int handled = 0; + unsigned long flags; + + /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ + spin_lock_irqsave(&host_set->lock, flags); + + for (i = 0; i < host_set->n_ports; i++) { + struct ata_port *ap; + + ap = host_set->ports[i]; + if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0)) + handled += ata_host_intr(ap, qc); + } + } + + spin_unlock_irqrestore(&host_set->lock, flags); + + return IRQ_RETVAL(handled); +} + +/** + * ata_thread_wake - + * @ap: + * @thr_state: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_thread_wake(struct ata_port *ap, unsigned int thr_state) +{ + assert(ap->thr_state == THR_IDLE); + ap->thr_state = thr_state; + up(&ap->thr_sem); +} + +/** + * ata_thread_timer - + * @opaque: + * + * LOCKING: + */ + +static void ata_thread_timer(unsigned long opaque) +{ + struct ata_port *ap = (struct ata_port *) opaque; + + up(&ap->thr_sem); +} + +/** + * ata_thread_iter - + * @ap: + * + * LOCKING: + * + * RETURNS: + * + */ + +static unsigned long ata_thread_iter(struct ata_port *ap) +{ + long timeout = 0; + + DPRINTK("ata%u: thr_state %s\n", + ap->id, ata_thr_state_name(ap->thr_state)); + + switch (ap->thr_state) { + case THR_UNKNOWN: + ap->thr_state = THR_PORT_RESET; + break; + + case THR_PROBE_START: + down(&ap->sem); + ap->thr_state = THR_PORT_RESET; + break; + + case THR_PORT_RESET: + ata_port_reset(ap); + break; + + case THR_PROBE_SUCCESS: + up(&ap->probe_sem); + ap->thr_state = THR_IDLE; + break; + + case THR_PROBE_FAILED: + up(&ap->probe_sem); + ap->thr_state = THR_AWAIT_DEATH; + break; + + case THR_AWAIT_DEATH: + timeout = -1; + break; + + case THR_IDLE: + timeout = 30 * HZ; + break; + + case THR_PIO: + ata_pio_sector(ap); + break; + + case THR_PIO_LAST: + ata_pio_complete(ap); + break; + + case THR_PIO_POLL: + case THR_PIO_LAST_POLL: + timeout = ata_pio_poll(ap); + break; + + case THR_PIO_TMOUT: + printk(KERN_ERR "ata%d: FIXME: THR_PIO_TMOUT\n", /* FIXME */ + ap->id); + timeout = 11 * HZ; + break; + + case THR_PIO_ERR: + printk(KERN_ERR "ata%d: FIXME: THR_PIO_ERR\n", /* FIXME */ + 
ap->id); + timeout = 11 * HZ; + break; + + case THR_PACKET: + atapi_cdb_send(ap); + break; + + default: + printk(KERN_DEBUG "ata%u: unknown thr state %s\n", + ap->id, ata_thr_state_name(ap->thr_state)); + break; + } + + DPRINTK("ata%u: new thr_state %s, returning %ld\n", + ap->id, ata_thr_state_name(ap->thr_state), timeout); + return timeout; +} + +/** + * ata_thread - + * @data: + * + * LOCKING: + * + * RETURNS: + * + */ + +static int ata_thread (void *data) +{ + struct ata_port *ap = data; + long timeout; + + daemonize (); + reparent_to_init(); + spin_lock_irq(¤t->sigmask_lock); + sigemptyset(¤t->blocked); + recalc_sigpending(current); + spin_unlock_irq(¤t->sigmask_lock); + + sprintf(current->comm, "katad-%u", ap->id); + + while (1) { + cond_resched(); + + timeout = ata_thread_iter(ap); + + if (signal_pending (current)) { + spin_lock_irq(¤t->sigmask_lock); + flush_signals(current); + spin_unlock_irq(¤t->sigmask_lock); + } + + if ((timeout < 0) || (ap->time_to_die)) + break; + + /* note sleeping for full timeout not guaranteed (that's ok) */ + if (timeout) { + mod_timer(&ap->thr_timer, jiffies + timeout); + down_interruptible(&ap->thr_sem); + + if (signal_pending (current)) { + spin_lock_irq(¤t->sigmask_lock); + flush_signals(current); + spin_unlock_irq(¤t->sigmask_lock); + } + + if (ap->time_to_die) + break; + } + } + + printk(KERN_DEBUG "ata%u: thread exiting\n", ap->id); + ap->thr_pid = -1; + del_timer_sync(&ap->thr_timer); + complete_and_exit (&ap->thr_exited, 0); +} + +/** + * ata_thread_kill - kill per-port kernel thread + * @ap: port those thread is to be killed + * + * LOCKING: + * + */ + +static int ata_thread_kill(struct ata_port *ap) +{ + int ret = 0; + + if (ap->thr_pid >= 0) { + ap->time_to_die = 1; + wmb(); + ret = kill_proc(ap->thr_pid, SIGTERM, 1); + if (ret) + printk(KERN_ERR "ata%d: unable to kill kernel thread\n", + ap->id); + else + wait_for_completion(&ap->thr_exited); + } + + return ret; +} + +/** + * atapi_cdb_send - Write CDB bytes to hardware + * @ap: Port to which ATAPI device is attached. + * + * When device has indicated its readiness to accept + * a CDB, this function is called. Send the CDB. + * If DMA is to be performed, exit immediately. + * Otherwise, we are in polling mode, so poll + * status under operation succeeds or fails. 
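The bounded sleep at the bottom of ata_thread above is built from a timer whose handler simply ups the thread's semaphore, so either the timeout or an explicit ata_thread_wake ends the nap. A minimal sketch of the same pattern with 2.4 primitives (names hypothetical, timer and semaphore initialization omitted):

struct napper {
	struct semaphore sem;
	struct timer_list timer;
};

static void napper_timeout(unsigned long opaque)
{
	struct napper *n = (struct napper *) opaque;
	up(&n->sem);			/* timeout ends the nap */
}

static void napper_sleep(struct napper *n, unsigned long timeout)
{
	mod_timer(&n->timer, jiffies + timeout);
	down_interruptible(&n->sem);	/* an early up() also lands here */
}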
+ * + * LOCKING: + * Kernel thread context (may sleep) + */ + +static void atapi_cdb_send(struct ata_port *ap) +{ + struct ata_queued_cmd *qc; + u8 status; + + qc = ata_qc_from_tag(ap, ap->active_tag); + assert(qc != NULL); + assert(qc->flags & ATA_QCFLAG_ACTIVE); + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) + goto err_out; + + /* make sure DRQ is set */ + status = ata_chk_status(ap); + if ((status & ATA_DRQ) == 0) + goto err_out; + + /* send SCSI cdb */ + /* FIXME: mmio-ize */ + DPRINTK("send cdb\n"); + outsl(ap->ioaddr.data_addr, + qc->scsicmd->cmnd, ap->host->max_cmd_len / 4); + + /* if we are DMA'ing, irq handler takes over from here */ + if (qc->tf.feature == ATAPI_PKT_DMA) + goto out; + + /* sleep-wait for BSY to clear */ + DPRINTK("busy wait 2\n"); + if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) + goto err_out; + + /* wait for BSY,DRQ to clear */ + status = ata_wait_idle(ap); + if (status & (ATA_BUSY | ATA_DRQ)) + goto err_out; + + /* transaction completed, indicate such to scsi stack */ + ata_qc_complete(qc, status, 0); + ata_irq_on(ap); + +out: + ap->thr_state = THR_IDLE; + return; + +err_out: + ata_qc_complete(qc, ATA_ERR, 0); + goto out; +} + +int ata_port_start (struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + + ap->prd = pci_alloc_consistent(pdev, ATA_PRD_TBL_SZ, &ap->prd_dma); + if (!ap->prd) + return -ENOMEM; + + DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma); + + return 0; +} + +void ata_port_stop (struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + + pci_free_consistent(pdev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma); +} + +/** + * ata_host_remove - + * @ap: + * @do_unregister: + * + * LOCKING: + */ + +static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister) +{ + struct Scsi_Host *sh = ap->host; + + DPRINTK("ENTER\n"); + + if (do_unregister) + scsi_unregister(sh); + + ap->ops->port_stop(ap); +} + +/** + * ata_host_init - + * @host: + * @ent: + * @port_no: + * + * LOCKING: + * + */ + +static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, + struct ata_host_set *host_set, + struct ata_probe_ent *ent, unsigned int port_no) +{ + unsigned int i; + + host->max_id = 16; + host->max_lun = 1; + host->max_channel = 1; + host->unique_id = ata_unique_id++; + host->max_cmd_len = 12; + host->pci_dev = ent->pdev; + + ap->flags = ATA_FLAG_PORT_DISABLED; + ap->id = host->unique_id; + ap->host = host; + ap->ctl = ATA_DEVCTL_OBS; + ap->host_set = host_set; + ap->port_no = port_no; + ap->pio_mask = ent->pio_mask; + ap->udma_mask = ent->udma_mask; + ap->flags |= ent->host_flags; + ap->ops = ent->port_ops; + ap->thr_state = THR_PROBE_START; + ap->cbl = ATA_CBL_NONE; + ap->device[0].flags = ATA_DFLAG_MASTER; + ap->active_tag = ATA_TAG_POISON; + ap->last_ctl = 0xFF; + + /* ata_engine init */ + ap->eng.flags = 0; + INIT_LIST_HEAD(&ap->eng.q); + + for (i = 0; i < ATA_MAX_DEVICES; i++) + ap->device[i].devno = i; + + init_completion(&ap->thr_exited); + init_MUTEX_LOCKED(&ap->probe_sem); + init_MUTEX_LOCKED(&ap->sem); + init_MUTEX_LOCKED(&ap->thr_sem); + + init_timer(&ap->thr_timer); + ap->thr_timer.function = ata_thread_timer; + ap->thr_timer.data = (unsigned long) ap; + +#ifdef ATA_IRQ_TRAP + ap->stats.unhandled_irq = 1; + ap->stats.idle_irq = 1; +#endif + + memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports)); +} + +/** + * ata_host_add - + * @ent: + * @host_set: + * @port_no: + * 
+ * LOCKING: + * + * RETURNS: + * + */ + +static struct ata_port * ata_host_add(struct ata_probe_ent *ent, + struct ata_host_set *host_set, + unsigned int port_no) +{ + struct Scsi_Host *host; + struct ata_port *ap; + int rc; + + DPRINTK("ENTER\n"); + host = scsi_register(ent->sht, sizeof(struct ata_port)); + if (!host) + return NULL; + + ap = (struct ata_port *) &host->hostdata[0]; + + ata_host_init(ap, host, host_set, ent, port_no); + + rc = ap->ops->port_start(ap); + if (rc) + goto err_out; + + ap->thr_pid = kernel_thread(ata_thread, ap, CLONE_FS | CLONE_FILES); + if (ap->thr_pid < 0) { + printk(KERN_ERR "ata%d: unable to start kernel thread\n", + ap->id); + goto err_out_free; + } + + return ap; + +err_out_free: + ap->ops->port_stop(ap); +err_out: + scsi_unregister(host); + return NULL; +} + +/** + * ata_device_add - + * @ent: + * + * LOCKING: + * + * RETURNS: + * + */ + +int ata_device_add(struct ata_probe_ent *ent) +{ + unsigned int count = 0, i; + struct pci_dev *pdev = ent->pdev; + struct ata_host_set *host_set; + + DPRINTK("ENTER\n"); + /* alloc a container for our list of ATA ports (buses) */ + host_set = kmalloc(sizeof(struct ata_host_set) + + (ent->n_ports * sizeof(void *)), GFP_KERNEL); + if (!host_set) + return 0; + memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *))); + spin_lock_init(&host_set->lock); + + host_set->pdev = pdev; + host_set->n_ports = ent->n_ports; + host_set->irq = ent->irq; + host_set->mmio_base = ent->mmio_base; + host_set->private_data = ent->private_data; + + /* register each port bound to this device */ + for (i = 0; i < ent->n_ports; i++) { + struct ata_port *ap; + + ap = ata_host_add(ent, host_set, i); + if (!ap) + goto err_out; + + host_set->ports[i] = ap; + + /* print per-port info to dmesg */ + printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX " + "bmdma 0x%lX irq %lu\n", + ap->id, + ap->flags & ATA_FLAG_SATA ? 
'S' : 'P', + ata_udma_string(ent->udma_mask), + ap->ioaddr.cmd_addr, + ap->ioaddr.ctl_addr, + ap->ioaddr.bmdma_addr, + ent->irq); + + count++; + } + + if (!count) { + kfree(host_set); + return 0; + } + + /* obtain irq, that is shared between channels */ + if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags, + DRV_NAME, host_set)) + goto err_out; + + /* perform each probe synchronously */ + DPRINTK("probe begin\n"); + for (i = 0; i < count; i++) { + struct ata_port *ap; + + ap = host_set->ports[i]; + + DPRINTK("ata%u: probe begin\n", ap->id); + up(&ap->sem); /* start probe */ + + DPRINTK("ata%u: probe-wait begin\n", ap->id); + down(&ap->probe_sem); /* wait for end */ + + DPRINTK("ata%u: probe-wait end\n", ap->id); + } + + pci_set_drvdata(pdev, host_set); + + VPRINTK("EXIT, returning %u\n", ent->n_ports); + return ent->n_ports; /* success */ + +err_out: + for (i = 0; i < count; i++) { + ata_host_remove(host_set->ports[i], 1); + } + kfree(host_set); + VPRINTK("EXIT, returning 0\n"); + return 0; +} + +/** + * ata_scsi_detect - + * @sht: + * + * LOCKING: + * + * RETURNS: + * + */ + +int ata_scsi_detect(Scsi_Host_Template *sht) +{ + struct list_head *node; + struct ata_probe_ent *ent; + int count = 0; + + VPRINTK("ENTER\n"); + + sht->use_new_eh_code = 1; /* IORL hack, part deux */ + + spin_lock(&ata_module_lock); + while (!list_empty(&ata_probe_list)) { + node = ata_probe_list.next; + ent = list_entry(node, struct ata_probe_ent, node); + list_del(node); + + spin_unlock(&ata_module_lock); + + count += ata_device_add(ent); + kfree(ent); + + spin_lock(&ata_module_lock); + } + spin_unlock(&ata_module_lock); + + VPRINTK("EXIT, returning %d\n", count); + return count; +} + +/** + * ata_scsi_release - SCSI layer callback hook for host unload + * @host: libata host to be unloaded + * + * Performs all duties necessary to shut down a libata port: + * Kill port kthread, disable port, and release resources. + * + * LOCKING: + * Inherited from SCSI layer. + * + * RETURNS: + * One. + */ + +int ata_scsi_release(struct Scsi_Host *host) +{ + struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; + + DPRINTK("ENTER\n"); + + ata_thread_kill(ap); /* FIXME: check return val */ + + ap->ops->port_disable(ap); + ata_host_remove(ap, 0); + + DPRINTK("EXIT\n"); + return 1; +} + +/** + * ata_std_ports - initialize ioaddr with standard port offsets. + * @ioaddr: + */ +void ata_std_ports(struct ata_ioports *ioaddr) +{ + ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; + ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; + ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; + ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; + ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; + ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; + ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; + ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; + ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; + ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; +} + +/** + * ata_pci_init_one - + * @pdev: + * @port_info: + * @n_ports: + * + * LOCKING: + * Inherited from PCI layer (may sleep). 
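ata_std_ports above only derives the taskfile register addresses from cmd_addr; the caller still supplies cmd_addr, ctl_addr/altstatus_addr and bmdma_addr itself. A sketch of how a native-mode PCI driver would fill one port of its probe entry, modeled on the ata_pci_init_one code that follows (BAR numbers are the usual SFF layout and may differ per device):

static void fill_port0(struct ata_probe_ent *probe_ent, struct pci_dev *pdev)
{
	probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
	probe_ent->port[0].altstatus_addr =
	probe_ent->port[0].ctl_addr =
		pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
	probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);

	ata_std_ports(&probe_ent->port[0]);	/* fill data/error/nsect/... */
}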
+ * + * RETURNS: + * + */ + +int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, + unsigned int n_ports) +{ + struct ata_probe_ent *probe_ent, *probe_ent2 = NULL; + struct ata_port_info *port0, *port1; + u8 tmp8, mask; + unsigned int legacy_mode = 0; + int rc; + + DPRINTK("ENTER\n"); + + port0 = port_info[0]; + if (n_ports > 1) + port1 = port_info[1]; + else + port1 = port0; + + if ((port0->host_flags & ATA_FLAG_NO_LEGACY) == 0) { + /* TODO: support transitioning to native mode? */ + pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8); + mask = (1 << 2) | (1 << 0); + if ((tmp8 & mask) != mask) + legacy_mode = (1 << 3); + } + + /* FIXME... */ + if ((!legacy_mode) && (n_ports > 1)) { + printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n"); + return -EINVAL; + } + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + if (legacy_mode) { + if (!request_region(0x1f0, 8, "libata")) { + struct resource *conflict, res; + res.start = 0x1f0; + res.end = 0x1f0 + 8 - 1; + conflict = ____request_resource(&ioport_resource, &res); + if (!strcmp(conflict->name, "libata")) + legacy_mode |= (1 << 0); + else + printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n"); + } else + legacy_mode |= (1 << 0); + + if (!request_region(0x170, 8, "libata")) { + struct resource *conflict, res; + res.start = 0x170; + res.end = 0x170 + 8 - 1; + conflict = ____request_resource(&ioport_resource, &res); + if (!strcmp(conflict->name, "libata")) + legacy_mode |= (1 << 1); + else + printk(KERN_WARNING "ata: 0x170 IDE port busy\n"); + } else + legacy_mode |= (1 << 1); + } + + /* we have legacy mode, but all ports are unavailable */ + if (legacy_mode == (1 << 3)) { + rc = -EBUSY; + goto err_out_regions; + } + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + if (legacy_mode) { + probe_ent2 = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent2) { + rc = -ENOMEM; + goto err_out_free_ent; + } + + memset(probe_ent2, 0, sizeof(*probe_ent)); + probe_ent2->pdev = pdev; + INIT_LIST_HEAD(&probe_ent2->node); + } + + probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); + probe_ent->sht = port0->sht; + probe_ent->host_flags = port0->host_flags; + probe_ent->pio_mask = port0->pio_mask; + probe_ent->udma_mask = port0->udma_mask; + probe_ent->port_ops = port0->port_ops; + + if (legacy_mode) { + probe_ent->port[0].cmd_addr = 0x1f0; + probe_ent->port[0].altstatus_addr = + probe_ent->port[0].ctl_addr = 0x3f6; + probe_ent->n_ports = 1; + probe_ent->irq = 14; + ata_std_ports(&probe_ent->port[0]); + + probe_ent2->port[0].cmd_addr = 0x170; + probe_ent2->port[0].altstatus_addr = + probe_ent2->port[0].ctl_addr = 0x376; + probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8; + probe_ent2->n_ports = 1; + probe_ent2->irq = 15; + ata_std_ports(&probe_ent2->port[0]); + + probe_ent2->sht = port1->sht; + probe_ent2->host_flags = port1->host_flags; + probe_ent2->pio_mask = port1->pio_mask; + probe_ent2->udma_mask = port1->udma_mask; + probe_ent2->port_ops = port1->port_ops; + } else { + probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); + ata_std_ports(&probe_ent->port[0]); + probe_ent->port[0].altstatus_addr = + probe_ent->port[0].ctl_addr = + pci_resource_start(pdev, 1) | 
ATA_PCI_CTL_OFS; + + probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); + ata_std_ports(&probe_ent->port[1]); + probe_ent->port[1].altstatus_addr = + probe_ent->port[1].ctl_addr = + pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; + probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8; + + probe_ent->n_ports = 2; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + } + + pci_set_master(pdev); + + spin_lock(&ata_module_lock); + if (legacy_mode) { + if (legacy_mode & (1 << 0)) + list_add_tail(&probe_ent->node, &ata_probe_list); + else + kfree(probe_ent); + if (legacy_mode & (1 << 1)) + list_add_tail(&probe_ent2->node, &ata_probe_list); + else + kfree(probe_ent2); + } else { + list_add_tail(&probe_ent->node, &ata_probe_list); + } + spin_unlock(&ata_module_lock); + + return 0; + +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + if (legacy_mode & (1 << 0)) + release_region(0x1f0, 8); + if (legacy_mode & (1 << 1)) + release_region(0x170, 8); + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +/** + * ata_pci_remove_one - PCI layer callback for device removal + * @pdev: PCI device that was removed + * + * PCI layer indicates to libata via this hook that + * hot-unplug or module unload event has occured. + * Handle this by unregistering all objects associated + * with this PCI device. Free those objects. Then finally + * release PCI resources and disable device. + * + * LOCKING: + * Inherited from PCI layer (may sleep). + */ + +void ata_pci_remove_one (struct pci_dev *pdev) +{ + struct ata_host_set *host_set = pci_get_drvdata(pdev); + struct ata_port *ap; + unsigned int i; + Scsi_Host_Template *sht; + int rc; + + /* FIXME: this unregisters all ports attached to the + * Scsi_Host_Template given. We _might_ have multiple + * templates (though we don't ATM), so this is ok... for now. + */ + ap = host_set->ports[0]; + sht = ap->host->hostt; + rc = scsi_unregister_module(MODULE_SCSI_HA, sht); + /* FIXME: handle 'rc' failure? */ + + free_irq(host_set->irq, host_set); + if (host_set->mmio_base) + iounmap(host_set->mmio_base); + if (host_set->ports[0]->ops->host_stop) + host_set->ports[0]->ops->host_stop(host_set); + + pci_release_regions(pdev); + + for (i = 0; i < host_set->n_ports; i++) { + struct ata_ioports *ioaddr; + + ap = host_set->ports[i]; + ioaddr = &ap->ioaddr; + + if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) { + if (ioaddr->cmd_addr == 0x1f0) + release_region(0x1f0, 8); + else if (ioaddr->cmd_addr == 0x170) + release_region(0x170, 8); + } + } + + kfree(host_set); + pci_disable_device(pdev); + pci_set_drvdata(pdev, NULL); +} + +/** + * ata_add_to_probe_list - add an entry to the list of things + * to be probed. + * @probe_ent: describes device to be probed. + * + * LOCKING: + */ + +void ata_add_to_probe_list(struct ata_probe_ent *probe_ent) +{ + spin_lock(&ata_module_lock); + list_add_tail(&probe_ent->node, &ata_probe_list); + spin_unlock(&ata_module_lock); +} + +/* move to PCI subsystem */ +int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits) +{ + unsigned long tmp = 0; + + switch (bits->width) { + case 1: { + u8 tmp8 = 0; + pci_read_config_byte(pdev, bits->reg, &tmp8); + tmp = tmp8; + break; + } + case 2: { + u16 tmp16 = 0; + pci_read_config_word(pdev, bits->reg, &tmp16); + tmp = tmp16; + break; + } + case 4: { + u32 tmp32 = 0; + pci_read_config_dword(pdev, bits->reg, &tmp32); + tmp = tmp32; + break; + } + + default: + return -EINVAL; + } + + tmp &= bits->mask; + + return (tmp == bits->val) ? 
1 : 0; +} + + +/** + * ata_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init ata_init(void) +{ + printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); + return 0; +} + +module_init(ata_init); + +/* + * libata is essentially a library of internal helper functions for + * low-level ATA host controller drivers. As such, the API/ABI is + * likely to change as new drivers are added and updated. + * Do not depend on ABI/API stability. + */ + +EXPORT_SYMBOL_GPL(pci_test_config_bits); +EXPORT_SYMBOL_GPL(ata_std_bios_param); +EXPORT_SYMBOL_GPL(ata_std_ports); +EXPORT_SYMBOL_GPL(ata_device_add); +EXPORT_SYMBOL_GPL(ata_qc_complete); +EXPORT_SYMBOL_GPL(ata_eng_timeout); +EXPORT_SYMBOL_GPL(ata_tf_load_pio); +EXPORT_SYMBOL_GPL(ata_tf_load_mmio); +EXPORT_SYMBOL_GPL(ata_tf_read_pio); +EXPORT_SYMBOL_GPL(ata_tf_read_mmio); +EXPORT_SYMBOL_GPL(ata_check_status_pio); +EXPORT_SYMBOL_GPL(ata_check_status_mmio); +EXPORT_SYMBOL_GPL(ata_exec_command_pio); +EXPORT_SYMBOL_GPL(ata_exec_command_mmio); +EXPORT_SYMBOL_GPL(ata_port_start); +EXPORT_SYMBOL_GPL(ata_port_stop); +EXPORT_SYMBOL_GPL(ata_interrupt); +EXPORT_SYMBOL_GPL(ata_fill_sg); +EXPORT_SYMBOL_GPL(ata_bmdma_start_pio); +EXPORT_SYMBOL_GPL(ata_bmdma_start_mmio); +EXPORT_SYMBOL_GPL(ata_port_probe); +EXPORT_SYMBOL_GPL(sata_phy_reset); +EXPORT_SYMBOL_GPL(ata_bus_reset); +EXPORT_SYMBOL_GPL(ata_port_disable); +EXPORT_SYMBOL_GPL(ata_pci_init_one); +EXPORT_SYMBOL_GPL(ata_pci_remove_one); +EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); +EXPORT_SYMBOL_GPL(ata_scsi_error); +EXPORT_SYMBOL_GPL(ata_scsi_detect); +EXPORT_SYMBOL_GPL(ata_add_to_probe_list); +EXPORT_SYMBOL_GPL(ata_scsi_release); +EXPORT_SYMBOL_GPL(ata_host_intr); diff -urN linux-2.4.26-pre6/drivers/scsi/libata-scsi.c linux-2.4.26-pre6-ata14/drivers/scsi/libata-scsi.c --- linux-2.4.26-pre6/drivers/scsi/libata-scsi.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/libata-scsi.c Thu Mar 25 22:45:38 2004 @@ -0,0 +1,1099 @@ +/* + libata-scsi.c - helper library for ATA + + Copyright 2003-2004 Red Hat, Inc. All rights reserved. + Copyright 2003-2004 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include "sd.h" +#include + +#include "libata.h" + + +/** + * ata_std_bios_param - generic bios head/sector/cylinder calculator + * used by sd. Most BIOSes nowadays expect a XXX/255/16 (CHS) + * mapping. Some situations may arise where the disk is not + * bootable if this is not used. 
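The translation implemented just below is heads = 255, sectors = 63, cylinders = capacity / (255 * 63). A worked example in plain C (capacity hypothetical):

#include <stdio.h>

int main(void)
{
	/* roughly a 40 GB disk: 78,125,000 sectors of 512 bytes */
	unsigned long capacity = 78125000UL;
	int heads = 255, sectors = 63;
	unsigned long cylinders = capacity / (heads * sectors);

	/* prints 4863/255/63 */
	printf("%lu sectors -> %lu/%d/%d (C/H/S)\n",
	       capacity, cylinders, heads, sectors);
	return 0;
}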
+ * + * LOCKING: + * + * RETURNS: + * + */ +int ata_std_bios_param(Disk * disk, /* SCSI disk */ + kdev_t dev, /* Device major, minor */ + int *ip /* Heads, sectors, cylinders in that order */ ) +{ + ip[0] = 255; + ip[1] = 63; + ip[2] = disk->capacity / (ip[0] * ip[1]); + + return 0; +} + + +struct ata_queued_cmd *ata_scsi_qc_new(struct ata_port *ap, + struct ata_device *dev, + struct scsi_cmnd *cmd, + void (*done)(struct scsi_cmnd *)) +{ + struct ata_queued_cmd *qc; + + qc = ata_qc_new_init(ap, dev); + if (qc) { + qc->scsicmd = cmd; + qc->scsidone = done; + + if (cmd->use_sg) { + qc->sg = (struct scatterlist *) cmd->request_buffer; + qc->n_elem = cmd->use_sg; + } else { + qc->sg = &qc->sgent; + qc->n_elem = 1; + } + } else { + cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1); + done(cmd); + } + + return qc; +} + +/** + * ata_to_sense_error - + * @qc: + * @cmd: + * + * LOCKING: + */ + +void ata_to_sense_error(struct ata_queued_cmd *qc) +{ + struct scsi_cmnd *cmd = qc->scsicmd; + + cmd->result = SAM_STAT_CHECK_CONDITION; + + cmd->sense_buffer[0] = 0x70; + cmd->sense_buffer[2] = MEDIUM_ERROR; + cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */ + + /* additional-sense-code[-qualifier] */ + if (cmd->sc_data_direction == SCSI_DATA_READ) { + cmd->sense_buffer[12] = 0x11; /* "unrecovered read error" */ + cmd->sense_buffer[13] = 0x04; + } else { + cmd->sense_buffer[12] = 0x0C; /* "write error - */ + cmd->sense_buffer[13] = 0x02; /* auto-reallocation failed" */ + } +} + +/** + * ata_scsi_error - SCSI layer error handler callback + * @host: SCSI host on which error occurred + * + * Handles SCSI-layer-thrown error events. + * + * LOCKING: + * Inherited from SCSI layer (none, can sleep) + * + * RETURNS: + * Zero. + */ + +int ata_scsi_error(struct Scsi_Host *host) +{ + struct ata_port *ap; + + DPRINTK("ENTER\n"); + + ap = (struct ata_port *) &host->hostdata[0]; + ap->ops->eng_timeout(ap); + + DPRINTK("EXIT\n"); + return 0; +} + +/** + * ata_scsi_rw_xlat - + * @qc: + * @scsicmd: + * @cmd_size: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * + */ + +static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, u8 *scsicmd, + unsigned int cmd_size) +{ + struct ata_taskfile *tf = &qc->tf; + unsigned int lba48 = tf->flags & ATA_TFLAG_LBA48; + + qc->cursect = qc->cursg = qc->cursg_ofs = 0; + tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + tf->hob_nsect = 0; + tf->hob_lbal = 0; + tf->hob_lbam = 0; + tf->hob_lbah = 0; + tf->protocol = qc->dev->xfer_protocol; + + if (scsicmd[0] == READ_10 || scsicmd[0] == READ_6 || + scsicmd[0] == READ_16) { + tf->command = qc->dev->read_cmd; + VPRINTK("reading\n"); + } else { + tf->command = qc->dev->write_cmd; + tf->flags |= ATA_TFLAG_WRITE; + VPRINTK("writing\n"); + } + + if (cmd_size == 10) { + if (lba48) { + tf->hob_nsect = scsicmd[7]; + tf->hob_lbal = scsicmd[2]; + + qc->nsect = ((unsigned int)scsicmd[7] << 8) | + scsicmd[8]; + } else { + /* if we don't support LBA48 addressing, the request + * -may- be too large. 
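The ten-byte case handled here unpacks the standard READ(10)/WRITE(10) layout: a big-endian 32-bit LBA in CDB bytes 2-5 and a big-endian 16-bit transfer length in bytes 7-8. A standalone decode of those fields (plain C, example CDB values):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* READ(10), LBA 0x00123456, 8 sectors */
	uint8_t cdb[10] = { 0x28, 0, 0x00, 0x12, 0x34, 0x56, 0, 0x00, 0x08, 0 };

	uint32_t lba = ((uint32_t) cdb[2] << 24) | ((uint32_t) cdb[3] << 16) |
		       ((uint32_t) cdb[4] << 8) | cdb[5];
	uint16_t nsect = ((uint16_t) cdb[7] << 8) | cdb[8];

	printf("LBA %u (0x%x), %u sectors\n", lba, lba, nsect);
	return 0;
}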
*/ + if ((scsicmd[2] & 0xf0) || scsicmd[7]) + return 1; + + /* stores LBA27:24 in lower 4 bits of device reg */ + tf->device |= scsicmd[2]; + + qc->nsect = scsicmd[8]; + } + tf->device |= ATA_LBA; + + tf->nsect = scsicmd[8]; + tf->lbal = scsicmd[5]; + tf->lbam = scsicmd[4]; + tf->lbah = scsicmd[3]; + + VPRINTK("ten-byte command\n"); + return 0; + } + + if (cmd_size == 6) { + qc->nsect = tf->nsect = scsicmd[4]; + tf->lbal = scsicmd[3]; + tf->lbam = scsicmd[2]; + tf->lbah = scsicmd[1] & 0x1f; /* mask out reserved bits */ + + VPRINTK("six-byte command\n"); + return 0; + } + + if (cmd_size == 16) { + /* rule out impossible LBAs and sector counts */ + if (scsicmd[2] || scsicmd[3] || scsicmd[10] || scsicmd[11]) + return 1; + + if (lba48) { + tf->hob_nsect = scsicmd[12]; + tf->hob_lbal = scsicmd[6]; + tf->hob_lbam = scsicmd[5]; + tf->hob_lbah = scsicmd[4]; + + qc->nsect = ((unsigned int)scsicmd[12] << 8) | + scsicmd[13]; + } else { + /* once again, filter out impossible non-zero values */ + if (scsicmd[4] || scsicmd[5] || scsicmd[12] || + (scsicmd[6] & 0xf0)) + return 1; + + /* stores LBA27:24 in lower 4 bits of device reg */ + tf->device |= scsicmd[2]; + + qc->nsect = scsicmd[13]; + } + tf->device |= ATA_LBA; + + tf->nsect = scsicmd[13]; + tf->lbal = scsicmd[9]; + tf->lbam = scsicmd[8]; + tf->lbah = scsicmd[7]; + + VPRINTK("sixteen-byte command\n"); + return 0; + } + + DPRINTK("no-byte command\n"); + return 1; +} + +/** + * ata_scsi_rw_queue - + * @ap: + * @dev: + * @cmd: + * @done: + * @cmd_size: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev, + struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), + unsigned int cmd_size) +{ + struct ata_queued_cmd *qc; + u8 *scsicmd = cmd->cmnd; + + VPRINTK("ENTER\n"); + + if (unlikely(cmd->request_bufflen < 1)) { + printk(KERN_WARNING "ata%u(%u): empty request buffer\n", + ap->id, dev->devno); + goto err_out; + } + + qc = ata_scsi_qc_new(ap, dev, cmd, done); + if (!qc) + return; + + qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */ + + if (ata_scsi_rw_xlat(qc, scsicmd, cmd_size)) + goto err_out; + + /* select device, send command to hardware */ + if (ata_qc_issue(qc)) + goto err_out; + + VPRINTK("EXIT\n"); + return; + +err_out: + ata_bad_cdb(cmd, done); + DPRINTK("EXIT - badcmd\n"); +} + +/** + * ata_scsi_rbuf_get - Map response buffer. + * @cmd: SCSI command containing buffer to be mapped. + * @buf_out: Pointer to mapped area. + * + * Maps buffer contained within SCSI command @cmd. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + * + * RETURNS: + * Length of response buffer. + */ + +static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out) +{ + u8 *buf; + unsigned int buflen; + + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + buf = kmap_atomic(sg->page, KM_USER0) + sg->offset; + buflen = sg->length; + } else { + buf = cmd->request_buffer; + buflen = cmd->request_bufflen; + } + + memset(buf, 0, buflen); + *buf_out = buf; + return buflen; +} + +/** + * ata_scsi_rbuf_put - Unmap response buffer. + * @cmd: SCSI command containing buffer to be unmapped. + * + * Unmaps response buffer contained within @cmd. 
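In the LBA48 branches of ata_scsi_rw_xlat earlier, the 48-bit address is split so that bits 23:0 land in lbal/lbam/lbah and bits 47:24 in their hob_ counterparts. A standalone sketch of that split (plain C):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t lba = 0x123456789aULL;		/* example 48-bit LBA */

	uint8_t lbal     = lba & 0xff;		/* bits  7:0  */
	uint8_t lbam     = (lba >> 8) & 0xff;	/* bits 15:8  */
	uint8_t lbah     = (lba >> 16) & 0xff;	/* bits 23:16 */
	uint8_t hob_lbal = (lba >> 24) & 0xff;	/* bits 31:24 */
	uint8_t hob_lbam = (lba >> 32) & 0xff;	/* bits 39:32 */
	uint8_t hob_lbah = (lba >> 40) & 0xff;	/* bits 47:40 */

	printf("hob %02x %02x %02x, low %02x %02x %02x\n",
	       hob_lbah, hob_lbam, hob_lbal, lbah, lbam, lbal);
	return 0;
}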
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd) +{ + if (cmd->use_sg) { + struct scatterlist *sg; + + sg = (struct scatterlist *) cmd->request_buffer; + kunmap_atomic(sg->page, KM_USER0); + } +} + +/** + * ata_scsi_rbuf_fill - wrapper for SCSI command simulators + * @args: Port / device / SCSI command of interest. + * @actor: Callback hook for desired SCSI command simulator + * + * Takes care of the hard work of simulating a SCSI command... + * Mapping the response buffer, calling the command's handler, + * and handling the handler's return value. This return value + * indicates whether the handler wishes the SCSI command to be + * completed successfully, or not. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_rbuf_fill(struct ata_scsi_args *args, + unsigned int (*actor) (struct ata_scsi_args *args, + u8 *rbuf, unsigned int buflen)) +{ + u8 *rbuf; + unsigned int buflen, rc; + struct scsi_cmnd *cmd = args->cmd; + + buflen = ata_scsi_rbuf_get(cmd, &rbuf); + rc = actor(args, rbuf, buflen); + ata_scsi_rbuf_put(cmd); + + if (rc) + ata_bad_cdb(cmd, args->done); + else { + cmd->result = SAM_STAT_GOOD; + args->done(cmd); + } +} + +/** + * ata_scsiop_inq_std - Simulate INQUIRY command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns standard device identification data associated + * with non-EVPD INQUIRY command output. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 hdr[] = { + TYPE_DISK, + 0, + 0x5, /* claim SPC-3 version compatibility */ + 2, + 96 - 4 + }; + + VPRINTK("ENTER\n"); + + memcpy(rbuf, hdr, sizeof(hdr)); + + if (buflen > 36) { + memcpy(&rbuf[8], args->dev->vendor, 8); + memcpy(&rbuf[16], args->dev->product, 16); + memcpy(&rbuf[32], DRV_VERSION, 4); + } + + if (buflen > 63) { + const u8 versions[] = { + 0x60, /* SAM-3 (no version claimed) */ + + 0x03, + 0x20, /* SBC-2 (no version claimed) */ + + 0x02, + 0x60 /* SPC-3 (no version claimed) */ + }; + + memcpy(rbuf + 59, versions, sizeof(versions)); + } + + return 0; +} + +/** + * ata_scsiop_inq_00 - Simulate INQUIRY EVPD page 0, list of pages + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns list of inquiry EVPD pages available. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 pages[] = { + 0x00, /* page 0x00, this page */ + 0x80, /* page 0x80, unit serial no page */ + 0x83 /* page 0x83, device ident page */ + }; + rbuf[3] = sizeof(pages); /* number of supported EVPD pages */ + + if (buflen > 6) + memcpy(rbuf + 4, pages, sizeof(pages)); + + return 0; +} + +/** + * ata_scsiop_inq_80 - Simulate INQUIRY EVPD page 80, device serial number + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns ATA device serial number. 
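The INQUIRY and MODE SENSE simulators in this file all share the actor shape consumed by ata_scsi_rbuf_fill: they receive an already-mapped, zero-filled response buffer and return zero to complete the CDB successfully or non-zero to fail it. A sketch of a trivial additional actor and its dispatch (the helper name and page contents are hypothetical):

static unsigned int my_scsiop_example(struct ata_scsi_args *args, u8 *rbuf,
				      unsigned int buflen)
{
	if (buflen > 1)
		rbuf[1] = 0xc0;		/* hypothetical vendor-specific page code */
	return 0;			/* 0 = complete the CDB successfully */
}

/* dispatched exactly like the real simulators:
 *	ata_scsi_rbuf_fill(&args, my_scsiop_example);
 */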
+ * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + const u8 hdr[] = { + 0, + 0x80, /* this page code */ + 0, + ATA_SERNO_LEN, /* page len */ + }; + memcpy(rbuf, hdr, sizeof(hdr)); + + if (buflen > (ATA_SERNO_LEN + 4)) + ata_dev_id_string(args->dev, (unsigned char *) &rbuf[4], + ATA_ID_SERNO_OFS, ATA_SERNO_LEN); + + return 0; +} + +static const char *inq_83_str = "Linux ATA-SCSI simulator"; + +/** + * ata_scsiop_inq_83 - Simulate INQUIRY EVPD page 83, device identity + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Returns device identification. Currently hardcoded to + * return "Linux ATA-SCSI simulator". + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + rbuf[1] = 0x83; /* this page code */ + rbuf[3] = 4 + strlen(inq_83_str); /* page len */ + + /* our one and only identification descriptor (vendor-specific) */ + if (buflen > (strlen(inq_83_str) + 4 + 4)) { + rbuf[4 + 0] = 2; /* code set: ASCII */ + rbuf[4 + 3] = strlen(inq_83_str); + memcpy(rbuf + 4 + 4, inq_83_str, strlen(inq_83_str)); + } + + return 0; +} + +/** + * ata_scsiop_noop - + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * No operation. Simply returns success to caller, to indicate + * that the caller should successfully complete this SCSI command. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + return 0; +} + +/** + * ata_scsiop_sync_cache - Simulate SYNCHRONIZE CACHE command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Initiates flush of device's cache. + * + * TODO: + * Actually do this :) + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + + /* FIXME */ + return 1; +} + +/** + * ata_msense_push - Push data onto MODE SENSE data output buffer + * @ptr_io: (input/output) Location to store more output data + * @last: End of output data buffer + * @buf: Pointer to BLOB being added to output buffer + * @buflen: Length of BLOB + * + * Store MODE SENSE data on an output buffer. + * + * LOCKING: + * None. + */ + +static void ata_msense_push(u8 **ptr_io, const u8 *last, + const u8 *buf, unsigned int buflen) +{ + u8 *ptr = *ptr_io; + + if ((ptr + buflen - 1) > last) + return; + + memcpy(ptr, buf, buflen); + + ptr += buflen; + + *ptr_io = ptr; +} + +/** + * ata_msense_caching - Simulate MODE SENSE caching info page + * @dev: + * @ptr_io: + * @last: + * + * Generate a caching info page, which conditionally indicates + * write caching to the SCSI layer, depending on device + * capabilities. + * + * LOCKING: + * None. 
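ata_msense_push() above is a bounds-checked append: it silently drops a page that would overrun the caller's buffer, so each ata_msense_*() helper can unconditionally report the length it would have written. A hypothetical extra mode page would be emitted the same way; the page contents below are placeholders:

/* Sketch: append a vendor-specific mode page using the same
 * ptr_io/last contract as ata_msense_caching() below.
 */
static unsigned int ata_msense_example(u8 **ptr_io, const u8 *last)
{
	/* page code 0x00 (vendor specific), page length 2, two data bytes */
	const u8 page[] = { 0x00, 2, 0x12, 0x34 };

	ata_msense_push(ptr_io, last, page, sizeof(page));
	return sizeof(page);	/* length contributed to the MODE SENSE output */
}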
+ */ + +static unsigned int ata_msense_caching(struct ata_device *dev, u8 **ptr_io, + const u8 *last) +{ + u8 page[7] = { 0xf, 0, 0x10, 0, 0x8, 0xa, 0 }; + if (dev->flags & ATA_DFLAG_WCACHE) + page[6] = 0x4; + + ata_msense_push(ptr_io, last, page, sizeof(page)); + return sizeof(page); +} + +/** + * ata_msense_ctl_mode - Simulate MODE SENSE control mode page + * @dev: + * @ptr_io: + * @last: + * + * Generate a generic MODE SENSE control mode page. + * + * LOCKING: + * None. + */ + +static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last) +{ + const u8 page[] = {0xa, 0xa, 2, 0, 0, 0, 0, 0, 0xff, 0xff, 0, 30}; + + ata_msense_push(ptr_io, last, page, sizeof(page)); + return sizeof(page); +} + +/** + * ata_scsiop_mode_sense - Simulate MODE SENSE 6, 10 commands + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Simulate MODE SENSE commands. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + u8 *scsicmd = args->cmd->cmnd, *p, *last; + struct ata_device *dev = args->dev; + unsigned int page_control, six_byte, output_len; + + VPRINTK("ENTER\n"); + + six_byte = (scsicmd[0] == MODE_SENSE); + + /* we only support saved and current values (which we treat + * in the same manner) + */ + page_control = scsicmd[2] >> 6; + if ((page_control != 0) && (page_control != 3)) + return 1; + + if (six_byte) + output_len = 4; + else + output_len = 8; + + p = rbuf + output_len; + last = rbuf + buflen - 1; + + switch(scsicmd[2] & 0x3f) { + case 0x08: /* caching */ + output_len += ata_msense_caching(dev, &p, last); + break; + + case 0x0a: { /* control mode */ + output_len += ata_msense_ctl_mode(&p, last); + break; + } + + case 0x3f: /* all pages */ + output_len += ata_msense_caching(dev, &p, last); + output_len += ata_msense_ctl_mode(&p, last); + break; + + default: /* invalid page code */ + return 1; + } + + if (six_byte) { + output_len--; + rbuf[0] = output_len; + } else { + output_len -= 2; + rbuf[0] = output_len >> 8; + rbuf[1] = output_len; + } + + return 0; +} + +/** + * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. + * @buflen: Response buffer length. + * + * Simulate READ CAPACITY commands. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + u64 n_sectors = args->dev->n_sectors; + u32 tmp; + + VPRINTK("ENTER\n"); + + n_sectors--; /* one off */ + + tmp = n_sectors; /* note: truncates, if lba48 */ + if (args->cmd->cmnd[0] == READ_CAPACITY) { + rbuf[0] = tmp >> (8 * 3); + rbuf[1] = tmp >> (8 * 2); + rbuf[2] = tmp >> (8 * 1); + rbuf[3] = tmp; + + tmp = ATA_SECT_SIZE; + rbuf[6] = tmp >> 8; + rbuf[7] = tmp; + + } else { + rbuf[2] = n_sectors >> (8 * 7); + rbuf[3] = n_sectors >> (8 * 6); + rbuf[4] = n_sectors >> (8 * 5); + rbuf[5] = n_sectors >> (8 * 4); + rbuf[6] = tmp >> (8 * 3); + rbuf[7] = tmp >> (8 * 2); + rbuf[8] = tmp >> (8 * 1); + rbuf[9] = tmp; + + tmp = ATA_SECT_SIZE; + rbuf[12] = tmp >> 8; + rbuf[13] = tmp; + } + + return 0; +} + +/** + * ata_scsiop_report_luns - Simulate REPORT LUNS command + * @args: Port / device / SCSI command of interest. + * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 
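The READ CAPACITY(10) branch above returns the last LBA (hence the "one off" decrement) and the block length, both big-endian. A standalone sketch of that encoding, using a hypothetical drive size:

#include <stdio.h>

int main(void)
{
	unsigned long long n_sectors = 0x10000ULL;	/* hypothetical 32 MB disk */
	unsigned int last_lba = (unsigned int)(n_sectors - 1);	/* truncates for very large disks, as noted above */
	unsigned int blksz = 512;			/* ATA_SECT_SIZE */
	unsigned char rbuf[8] = { 0 };

	/* bytes 0..3: last LBA, big-endian; bytes 4..7: block length */
	rbuf[0] = last_lba >> 24;
	rbuf[1] = last_lba >> 16;
	rbuf[2] = last_lba >> 8;
	rbuf[3] = last_lba;
	rbuf[6] = blksz >> 8;
	rbuf[7] = blksz;

	printf("%02x %02x %02x %02x  %02x %02x %02x %02x\n",
	       rbuf[0], rbuf[1], rbuf[2], rbuf[3],
	       rbuf[4], rbuf[5], rbuf[6], rbuf[7]);
	return 0;
}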
+ * @buflen: Response buffer length. + * + * Simulate REPORT LUNS command. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen) +{ + VPRINTK("ENTER\n"); + rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ + + return 0; +} + +/** + * ata_scsi_badcmd - + * @cmd: + * @done: + * @asc: + * @ascq: + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) +{ + DPRINTK("ENTER\n"); + cmd->result = SAM_STAT_CHECK_CONDITION; + + cmd->sense_buffer[0] = 0x70; + cmd->sense_buffer[2] = ILLEGAL_REQUEST; + cmd->sense_buffer[7] = 14 - 8; /* addnl. sense len. FIXME: correct? */ + cmd->sense_buffer[12] = asc; + cmd->sense_buffer[13] = ascq; + + done(cmd); +} + +/** + * atapi_scsi_queuecmd - Send CDB to ATAPI device + * @ap: Port to which ATAPI device is attached. + * @dev: Target device for CDB. + * @cmd: SCSI command being sent to device. + * @done: SCSI command completion function. + * + * Sends CDB to ATAPI device. If the Linux SCSI layer sends a + * non-data command, then this function handles the command + * directly, via polling. Otherwise, the bmdma engine is started. + * + * LOCKING: + * spin_lock_irqsave(host_set lock) + */ + +static void atapi_scsi_queuecmd(struct ata_port *ap, struct ata_device *dev, + struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +{ + struct ata_queued_cmd *qc; + u8 *scsicmd = cmd->cmnd, status; + unsigned int doing_dma = 0; + + VPRINTK("ENTER, drv_stat = 0x%x\n", ata_chk_status(ap)); + + if (cmd->sc_data_direction == SCSI_DATA_UNKNOWN) { + DPRINTK("unknown data, scsicmd 0x%x\n", scsicmd[0]); + ata_bad_cdb(cmd, done); + return; + } + + switch(scsicmd[0]) { + case READ_6: + case WRITE_6: + case MODE_SELECT: + case MODE_SENSE: + DPRINTK("read6/write6/modesel/modesense trap\n"); + ata_bad_scsiop(cmd, done); + return; + + default: + /* do nothing */ + break; + } + + qc = ata_scsi_qc_new(ap, dev, cmd, done); + if (!qc) { + printk(KERN_ERR "ata%u: command queue empty\n", ap->id); + return; + } + + qc->flags |= ATA_QCFLAG_ATAPI; + + qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; + if (cmd->sc_data_direction == SCSI_DATA_WRITE) { + qc->tf.flags |= ATA_TFLAG_WRITE; + DPRINTK("direction: write\n"); + } + + qc->tf.command = ATA_CMD_PACKET; + + /* set up SG table */ + if (cmd->sc_data_direction == SCSI_DATA_NONE) { + ap->active_tag = qc->tag; + qc->flags |= ATA_QCFLAG_ACTIVE | ATA_QCFLAG_POLL; + qc->tf.protocol = ATA_PROT_ATAPI; + + ata_dev_select(ap, dev->devno, 1, 0); + + DPRINTK("direction: none\n"); + qc->tf.ctl |= ATA_NIEN; /* disable interrupts */ + ata_tf_to_host_nolock(ap, &qc->tf); + } else { + qc->flags |= ATA_QCFLAG_SG; /* data is present; dma-map it */ + qc->tf.feature = ATAPI_PKT_DMA; + qc->tf.protocol = ATA_PROT_ATAPI_DMA; + + doing_dma = 1; + + /* select device, send command to hardware */ + if (ata_qc_issue(qc)) + goto err_out; + } + + status = ata_busy_wait(ap, ATA_BUSY, 1000); + if (status & ATA_BUSY) { + ata_thread_wake(ap, THR_PACKET); + return; + } + if ((status & ATA_DRQ) == 0) + goto err_out; + + /* FIXME: mmio-ize */ + DPRINTK("writing cdb\n"); + outsl(ap->ioaddr.data_addr, scsicmd, ap->host->max_cmd_len / 4); + + if (!doing_dma) + ata_thread_wake(ap, THR_PACKET); + + VPRINTK("EXIT\n"); + return; + +err_out: + if (!doing_dma) + ata_irq_on(ap); /* re-enable interrupts */ + ata_bad_cdb(cmd, done); + DPRINTK("EXIT - badcmd\n"); +} + +/** + * 
ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device + * @cmd: SCSI command to be sent + * @done: Completion function, called when command is complete + * + * In some cases, this function translates SCSI commands into + * ATA taskfiles, and queues the taskfiles to be sent to + * hardware. In other cases, this function simulates a + * SCSI device by evaluating and responding to certain + * SCSI commands. This creates the overall effect of + * ATA and ATAPI devices appearing as SCSI devices. + * + * LOCKING: + * Releases scsi-layer-held lock, and obtains host_set lock. + * + * RETURNS: + * Zero. + */ + +int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +{ + u8 *scsicmd = cmd->cmnd; + struct ata_port *ap; + struct ata_device *dev; + struct ata_scsi_args args; + const unsigned int atapi_support = +#ifdef ATA_ENABLE_ATAPI + 1; +#else + 0; +#endif + + /* Note: spin_lock_irqsave is held by caller... */ + spin_unlock(&io_request_lock); + + ap = (struct ata_port *) &cmd->host->hostdata[0]; + + DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", + ap->id, + cmd->channel, cmd->target, cmd->lun, + scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], + scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], + scsicmd[8]); + + /* skip commands not addressed to targets we care about */ + if ((cmd->channel != 0) || (cmd->lun != 0) || + (cmd->target >= ATA_MAX_DEVICES)) { + cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */ + done(cmd); + goto out; + } + + spin_lock(&ap->host_set->lock); + + dev = &ap->device[cmd->target]; + + if (!ata_dev_present(dev)) { + DPRINTK("no device\n"); + cmd->result = (DID_BAD_TARGET << 16); /* FIXME: correct? */ + done(cmd); + goto out_unlock; + } + + if (dev->class == ATA_DEV_ATAPI) { + if (atapi_support) + atapi_scsi_queuecmd(ap, dev, cmd, done); + else { + cmd->result = (DID_BAD_TARGET << 16); /* correct? */ + done(cmd); + } + goto out_unlock; + } + + /* fast path */ + switch(scsicmd[0]) { + case READ_6: + case WRITE_6: + ata_scsi_rw_queue(ap, dev, cmd, done, 6); + goto out_unlock; + + case READ_10: + case WRITE_10: + ata_scsi_rw_queue(ap, dev, cmd, done, 10); + goto out_unlock; + + case READ_16: + case WRITE_16: + ata_scsi_rw_queue(ap, dev, cmd, done, 16); + goto out_unlock; + + default: + /* do nothing */ + break; + } + + /* + * slow path + */ + + args.ap = ap; + args.dev = dev; + args.cmd = cmd; + args.done = done; + + switch(scsicmd[0]) { + case TEST_UNIT_READY: /* FIXME: correct? */ + case FORMAT_UNIT: /* FIXME: correct? */ + case SEND_DIAGNOSTIC: /* FIXME: correct? */ + ata_scsi_rbuf_fill(&args, ata_scsiop_noop); + break; + + case INQUIRY: + if (scsicmd[1] & 2) /* is CmdDt set? */ + ata_bad_cdb(cmd, done); + else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? 
*/ + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); + else if (scsicmd[2] == 0x00) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); + else if (scsicmd[2] == 0x80) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); + else if (scsicmd[2] == 0x83) + ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); + else + ata_bad_cdb(cmd, done); + break; + + case MODE_SENSE: + case MODE_SENSE_10: + ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); + break; + + case MODE_SELECT: /* unconditionally return */ + case MODE_SELECT_10: /* bad-field-in-cdb */ + ata_bad_cdb(cmd, done); + break; + + case SYNCHRONIZE_CACHE: + if ((dev->flags & ATA_DFLAG_WCACHE) == 0) + ata_bad_scsiop(cmd, done); + else + ata_scsi_rbuf_fill(&args, ata_scsiop_sync_cache); + break; + + case READ_CAPACITY: + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); + break; + + case SERVICE_ACTION_IN: + if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) + ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); + else + ata_bad_cdb(cmd, done); + break; + + case REPORT_LUNS: + ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); + break; + + /* mandatory commands we haven't implemented yet */ + case REQUEST_SENSE: + + /* all other commands */ + default: + ata_bad_scsiop(cmd, done); + break; + } + +out_unlock: + spin_unlock(&ap->host_set->lock); +out: + spin_lock(&io_request_lock); + return 0; +} + diff -urN linux-2.4.26-pre6/drivers/scsi/libata.h linux-2.4.26-pre6-ata14/drivers/scsi/libata.h --- linux-2.4.26-pre6/drivers/scsi/libata.h Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/libata.h Thu Mar 25 22:45:24 2004 @@ -0,0 +1,94 @@ +/* + libata.h - helper library for ATA + + Copyright 2003-2004 Red Hat, Inc. All rights reserved. + Copyright 2003-2004 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL.
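Both error paths used by the dispatcher above funnel into ata_scsi_badcmd(), which fabricates fixed-format sense data; ata_bad_scsiop() reports ASC/ASCQ 0x20/0x00 (invalid command operation code) and ata_bad_cdb() reports 0x24/0x00 (invalid field in CDB). A standalone sketch of the sense bytes being built, with the same values as in the code above; the helper and buffer names here are illustrative:

#include <stdio.h>
#include <string.h>

/* Build fixed-format sense data the way ata_scsi_badcmd() does:
 * response code 0x70, sense key ILLEGAL REQUEST (0x5), additional
 * sense length 6, and the caller-supplied ASC/ASCQ in bytes 12/13.
 */
static void fill_bad_sense(unsigned char *sense, unsigned char asc,
			   unsigned char ascq)
{
	memset(sense, 0, 18);
	sense[0] = 0x70;	/* current error, fixed format */
	sense[2] = 0x05;	/* ILLEGAL REQUEST */
	sense[7] = 14 - 8;	/* additional sense length */
	sense[12] = asc;
	sense[13] = ascq;
}

int main(void)
{
	unsigned char sense[18];

	fill_bad_sense(sense, 0x24, 0x00);	/* "invalid field in CDB" */
	printf("key=%x asc=%02x ascq=%02x\n", sense[2], sense[12], sense[13]);
	return 0;
}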
+ + */ + +#ifndef __LIBATA_H__ +#define __LIBATA_H__ + +#define DRV_NAME "libata" +#define DRV_VERSION "1.02" /* must be exactly four chars */ + +struct ata_scsi_args { + struct ata_port *ap; + struct ata_device *dev; + struct scsi_cmnd *cmd; + void (*done)(struct scsi_cmnd *); +}; + + +/* libata-core.c */ +extern unsigned int ata_dev_id_string(struct ata_device *dev, unsigned char *s, + unsigned int ofs, unsigned int len); +extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap, + struct ata_device *dev); +extern int ata_qc_issue(struct ata_queued_cmd *qc); +extern void ata_dev_select(struct ata_port *ap, unsigned int device, + unsigned int wait, unsigned int can_sleep); +extern void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_thread_wake(struct ata_port *ap, unsigned int thr_state); + + +/* libata-scsi.c */ +extern void ata_to_sense_error(struct ata_queued_cmd *qc); +extern void ata_scsi_rw_queue(struct ata_port *ap, struct ata_device *dev, + struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), + unsigned int cmd_size); +extern int ata_scsi_error(struct Scsi_Host *host); +extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); + +extern unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); + +extern unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_sync_cache(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, + unsigned int buflen); +extern void ata_scsi_badcmd(struct scsi_cmnd *cmd, + void (*done)(struct scsi_cmnd *), + u8 asc, u8 ascq); +extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args, + unsigned int (*actor) (struct ata_scsi_args *args, + u8 *rbuf, unsigned int buflen)); + +static inline void ata_bad_scsiop(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +{ + ata_scsi_badcmd(cmd, done, 0x20, 0x00); +} + +static inline void ata_bad_cdb(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) +{ + ata_scsi_badcmd(cmd, done, 0x24, 0x00); +} + +#endif /* __LIBATA_H__ */ diff -urN linux-2.4.26-pre6/drivers/scsi/sata_promise.c linux-2.4.26-pre6-ata14/drivers/scsi/sata_promise.c --- linux-2.4.26-pre6/drivers/scsi/sata_promise.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/sata_promise.c Thu Mar 25 22:45:38 2004 @@ -0,0 +1,1837 @@ +/* + * sata_promise.c - Promise SATA + * + * Copyright 2003-2004 Red Hat, Inc. + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. 
If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include +#include + +#define DRV_NAME "sata_promise" +#define DRV_VERSION "0.91" + + +enum { + PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */ + + PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */ + PDC_HDMA_PKT_SUBMIT = 0x100, /* Host DMA packet pointer addr */ + PDC_INT_SEQMASK = 0x40, /* Mask of asserted SEQ INTs */ + PDC_TBG_MODE = 0x41, /* TBG mode */ + PDC_FLASH_CTL = 0x44, /* Flash control register */ + PDC_PCI_CTL = 0x48, /* PCI control and status register */ + PDC_CTLSTAT = 0x60, /* IDE control and status register */ + PDC_SATA_PLUG_CSR = 0x6C, /* SATA Plug control/status reg */ + PDC_SLEW_CTL = 0x470, /* slew rate control reg */ + PDC_HDMA_CTLSTAT = 0x12C, /* Host DMA control / status */ + PDC_20621_SEQCTL = 0x400, + PDC_20621_SEQMASK = 0x480, + PDC_20621_GENERAL_CTL = 0x484, + PDC_20621_PAGE_SIZE = (32 * 1024), + + /* chosen, not constant, values; we design our own DIMM mem map */ + PDC_20621_DIMM_WINDOW = 0x0C, /* page# for 32K DIMM window */ + PDC_20621_DIMM_BASE = 0x00200000, + PDC_20621_DIMM_DATA = (64 * 1024), + PDC_DIMM_DATA_STEP = (256 * 1024), + PDC_DIMM_WINDOW_STEP = (8 * 1024), + PDC_DIMM_HOST_PRD = (6 * 1024), + PDC_DIMM_HOST_PKT = (128 * 0), + PDC_DIMM_HPKT_PRD = (128 * 1), + PDC_DIMM_ATA_PKT = (128 * 2), + PDC_DIMM_APKT_PRD = (128 * 3), + PDC_DIMM_HEADER_SZ = PDC_DIMM_APKT_PRD + 128, + PDC_PAGE_WINDOW = 0x40, + PDC_PAGE_DATA = PDC_PAGE_WINDOW + + (PDC_20621_DIMM_DATA / PDC_20621_PAGE_SIZE), + PDC_PAGE_SET = PDC_DIMM_DATA_STEP / PDC_20621_PAGE_SIZE, + + PDC_CHIP0_OFS = 0xC0000, /* offset of chip #0 */ + + board_2037x = 0, /* FastTrak S150 TX2plus */ + board_20319 = 1, /* FastTrak S150 TX4 */ + board_20621 = 2, /* FastTrak S150 SX4 */ + + PDC_HAS_PATA = (1 << 1), /* PDC20375 has PATA */ + + PDC_FLAG_20621 = (1 << 30), /* we have a 20621 */ + PDC_HDMA_RESET = (1 << 11), /* HDMA reset */ + + PDC_MAX_HDMA = 32, + PDC_HDMA_Q_MASK = (PDC_MAX_HDMA - 1), + + PDC_DIMM0_SPD_DEV_ADDRESS = 0x50, + PDC_DIMM1_SPD_DEV_ADDRESS = 0x51, + PDC_MAX_DIMM_MODULE = 0x02, + PDC_I2C_CONTROL_OFFSET = 0x48, + PDC_I2C_ADDR_DATA_OFFSET = 0x4C, + PDC_DIMM0_CONTROL_OFFSET = 0x80, + PDC_DIMM1_CONTROL_OFFSET = 0x84, + PDC_SDRAM_CONTROL_OFFSET = 0x88, + PDC_I2C_WRITE = 0x00000000, + PDC_I2C_READ = 0x00000040, + PDC_I2C_START = 0x00000080, + PDC_I2C_MASK_INT = 0x00000020, + PDC_I2C_COMPLETE = 0x00010000, + PDC_I2C_NO_ACK = 0x00100000, + PDC_DIMM_SPD_SUBADDRESS_START = 0x00, + PDC_DIMM_SPD_SUBADDRESS_END = 0x7F, + PDC_DIMM_SPD_ROW_NUM = 3, + PDC_DIMM_SPD_COLUMN_NUM = 4, + PDC_DIMM_SPD_MODULE_ROW = 5, + PDC_DIMM_SPD_TYPE = 11, + PDC_DIMM_SPD_FRESH_RATE = 12, + PDC_DIMM_SPD_BANK_NUM = 17, + PDC_DIMM_SPD_CAS_LATENCY = 18, + PDC_DIMM_SPD_ATTRIBUTE = 21, + PDC_DIMM_SPD_ROW_PRE_CHARGE = 27, + PDC_DIMM_SPD_ROW_ACTIVE_DELAY = 28, + PDC_DIMM_SPD_RAS_CAS_DELAY = 29, + PDC_DIMM_SPD_ACTIVE_PRECHARGE = 30, + PDC_DIMM_SPD_SYSTEM_FREQ = 126, + PDC_CTL_STATUS = 0x08, + PDC_DIMM_WINDOW_CTLR = 0x0C, + PDC_TIME_CONTROL = 0x3C, + PDC_TIME_PERIOD = 0x40, + 
PDC_TIME_COUNTER = 0x44, + PDC_GENERAL_CTLR = 0x484, + PCI_PLL_INIT = 0x8A531824, + PCI_X_TCOUNT = 0xEE1E5CFF +}; + + +struct pdc_port_priv { + u8 dimm_buf[(ATA_PRD_SZ * ATA_MAX_PRD) + 512]; + u8 *pkt; + dma_addr_t pkt_dma; +}; + +struct pdc_host_priv { + void *dimm_mmio; + + unsigned int doing_hdma; + unsigned int hdma_prod; + unsigned int hdma_cons; + struct { + struct ata_queued_cmd *qc; + unsigned int seq; + unsigned long pkt_ofs; + } hdma[32]; +}; + + +static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg); +static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void pdc_dma_start(struct ata_queued_cmd *qc); +static void pdc20621_dma_start(struct ata_queued_cmd *qc); +static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs); +static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs); +static void pdc_eng_timeout(struct ata_port *ap); +static void pdc_20621_phy_reset (struct ata_port *ap); +static int pdc_port_start(struct ata_port *ap); +static void pdc_port_stop(struct ata_port *ap); +static void pdc_fill_sg(struct ata_queued_cmd *qc); +static void pdc20621_fill_sg(struct ata_queued_cmd *qc); +static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); +static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); +static void pdc20621_host_stop(struct ata_host_set *host_set); +static inline void pdc_dma_complete (struct ata_port *ap, + struct ata_queued_cmd *qc); +static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe); +static int pdc20621_detect_dimm(struct ata_probe_ent *pe); +static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, + u32 device, u32 subaddr, u32 *pdata); +static int pdc20621_prog_dimm0(struct ata_probe_ent *pe); +static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe); +#ifdef ATA_VERBOSE_DEBUG +static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, + void *psource, u32 offset, u32 size); +#endif +static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, + void *psource, u32 offset, u32 size); + + +static Scsi_Host_Template pdc_sata_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = LIBATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .bios_param = ata_std_bios_param, +}; + +static struct ata_port_operations pdc_sata_ops = { + .port_disable = ata_port_disable, + .tf_load = pdc_tf_load_mmio, + .tf_read = ata_tf_read_mmio, + .check_status = ata_check_status_mmio, + .exec_command = pdc_exec_command_mmio, + .phy_reset = sata_phy_reset, + .bmdma_start = pdc_dma_start, + .fill_sg = pdc_fill_sg, + .eng_timeout = pdc_eng_timeout, + .irq_handler = pdc_interrupt, + .scr_read = pdc_sata_scr_read, + .scr_write = pdc_sata_scr_write, + .port_start = pdc_port_start, + .port_stop = pdc_port_stop, +}; + +static struct ata_port_operations pdc_20621_ops = { + .port_disable = ata_port_disable, + .tf_load = pdc_tf_load_mmio, + .tf_read = ata_tf_read_mmio, + .check_status = ata_check_status_mmio, + .exec_command = 
pdc_exec_command_mmio, + .phy_reset = pdc_20621_phy_reset, + .bmdma_start = pdc20621_dma_start, + .fill_sg = pdc20621_fill_sg, + .eng_timeout = pdc_eng_timeout, + .irq_handler = pdc20621_interrupt, + .port_start = pdc_port_start, + .port_stop = pdc_port_stop, + .host_stop = pdc20621_host_stop, +}; + +static struct ata_port_info pdc_port_info[] = { + /* board_2037x */ + { + .sht = &pdc_sata_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &pdc_sata_ops, + }, + + /* board_20319 */ + { + .sht = &pdc_sata_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &pdc_sata_ops, + }, + + /* board_20621 */ + { + .sht = &pdc_sata_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO | + PDC_FLAG_20621, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x7f, /* udma0-6 ; FIXME */ + .port_ops = &pdc_20621_ops, + }, + +}; + +static struct pci_device_id pdc_sata_pci_tbl[] = { + { PCI_VENDOR_ID_PROMISE, 0x3371, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_2037x }, + { PCI_VENDOR_ID_PROMISE, 0x3373, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_2037x }, + { PCI_VENDOR_ID_PROMISE, 0x3375, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_2037x }, + { PCI_VENDOR_ID_PROMISE, 0x3376, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_2037x }, + { PCI_VENDOR_ID_PROMISE, 0x3318, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_20319 }, + { PCI_VENDOR_ID_PROMISE, 0x3319, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_20319 }, + { PCI_VENDOR_ID_PROMISE, 0x6622, PCI_ANY_ID, PCI_ANY_ID, 0, 0, + board_20621 }, + { } /* terminate list */ +}; + + +static struct pci_driver pdc_sata_pci_driver = { + .name = DRV_NAME, + .id_table = pdc_sata_pci_tbl, + .probe = pdc_sata_init_one, + .remove = ata_pci_remove_one, +}; + + +static void pdc20621_host_stop(struct ata_host_set *host_set) +{ + struct pdc_host_priv *hpriv = host_set->private_data; + void *dimm_mmio = hpriv->dimm_mmio; + + iounmap(dimm_mmio); + kfree(hpriv); +} + +static int pdc_port_start(struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + struct pdc_port_priv *pp; + int rc; + + rc = ata_port_start(ap); + if (rc) + return rc; + + pp = kmalloc(sizeof(*pp), GFP_KERNEL); + if (!pp) { + rc = -ENOMEM; + goto err_out; + } + memset(pp, 0, sizeof(*pp)); + + pp->pkt = pci_alloc_consistent(pdev, 128, &pp->pkt_dma); + if (!pp->pkt) { + rc = -ENOMEM; + goto err_out_kfree; + } + + ap->private_data = pp; + + return 0; + +err_out_kfree: + kfree(pp); +err_out: + ata_port_stop(ap); + return rc; +} + + +static void pdc_port_stop(struct ata_port *ap) +{ + struct pci_dev *pdev = ap->host_set->pdev; + struct pdc_port_priv *pp = ap->private_data; + + ap->private_data = NULL; + pci_free_consistent(pdev, 128, pp->pkt, pp->pkt_dma); + kfree(pp); + ata_port_stop(ap); +} + + +static void pdc_20621_phy_reset (struct ata_port *ap) +{ + VPRINTK("ENTER\n"); + ap->cbl = ATA_CBL_SATA; + ata_port_probe(ap); + ata_bus_reset(ap); +} + +static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg > SCR_CONTROL) + return 0xffffffffU; + return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + + +static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, + u32 val) +{ + if (sc_reg > SCR_CONTROL) + return; + writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + +enum pdc_packet_bits { + PDC_PKT_READ = 
(1 << 2), + PDC_PKT_NODATA = (1 << 3), + + PDC_PKT_SIZEMASK = (1 << 7) | (1 << 6) | (1 << 5), + PDC_PKT_CLEAR_BSY = (1 << 4), + PDC_PKT_WAIT_DRDY = (1 << 3) | (1 << 4), + PDC_LAST_REG = (1 << 3), + + PDC_REG_DEVCTL = (1 << 3) | (1 << 2) | (1 << 1), +}; + +static inline unsigned int pdc_pkt_header(struct ata_taskfile *tf, + dma_addr_t sg_table, + unsigned int devno, u8 *buf) +{ + u8 dev_reg; + u32 *buf32 = (u32 *) buf; + + /* set control bits (byte 0), zero delay seq id (byte 3), + * and seq id (byte 2) + */ + switch (tf->protocol) { + case ATA_PROT_DMA: + if (!(tf->flags & ATA_TFLAG_WRITE)) + buf32[0] = cpu_to_le32(PDC_PKT_READ); + else + buf32[0] = 0; + break; + + case ATA_PROT_NODATA: + buf32[0] = cpu_to_le32(PDC_PKT_NODATA); + break; + + default: + BUG(); + break; + } + + buf32[1] = cpu_to_le32(sg_table); /* S/G table addr */ + buf32[2] = 0; /* no next-packet */ + + if (devno == 0) + dev_reg = ATA_DEVICE_OBS; + else + dev_reg = ATA_DEVICE_OBS | ATA_DEV1; + + /* select device */ + buf[12] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE; + buf[13] = dev_reg; + + /* device control register */ + buf[14] = (1 << 5) | PDC_REG_DEVCTL; + buf[15] = tf->ctl; + + return 16; /* offset of next byte */ +} + +static inline unsigned int pdc_pkt_footer(struct ata_taskfile *tf, u8 *buf, + unsigned int i) +{ + if (tf->flags & ATA_TFLAG_DEVICE) { + buf[i++] = (1 << 5) | ATA_REG_DEVICE; + buf[i++] = tf->device; + } + + /* and finally the command itself; also includes end-of-pkt marker */ + buf[i++] = (1 << 5) | PDC_LAST_REG | ATA_REG_CMD; + buf[i++] = tf->command; + + return i; +} + +static inline unsigned int pdc_prep_lba28(struct ata_taskfile *tf, u8 *buf, unsigned int i) +{ + /* the "(1 << 5)" should be read "(count << 5)" */ + + /* ATA command block registers */ + buf[i++] = (1 << 5) | ATA_REG_FEATURE; + buf[i++] = tf->feature; + + buf[i++] = (1 << 5) | ATA_REG_NSECT; + buf[i++] = tf->nsect; + + buf[i++] = (1 << 5) | ATA_REG_LBAL; + buf[i++] = tf->lbal; + + buf[i++] = (1 << 5) | ATA_REG_LBAM; + buf[i++] = tf->lbam; + + buf[i++] = (1 << 5) | ATA_REG_LBAH; + buf[i++] = tf->lbah; + + return i; +} + +static inline unsigned int pdc_prep_lba48(struct ata_taskfile *tf, u8 *buf, unsigned int i) +{ + /* the "(2 << 5)" should be read "(count << 5)" */ + + /* ATA command block registers */ + buf[i++] = (2 << 5) | ATA_REG_FEATURE; + buf[i++] = tf->hob_feature; + buf[i++] = tf->feature; + + buf[i++] = (2 << 5) | ATA_REG_NSECT; + buf[i++] = tf->hob_nsect; + buf[i++] = tf->nsect; + + buf[i++] = (2 << 5) | ATA_REG_LBAL; + buf[i++] = tf->hob_lbal; + buf[i++] = tf->lbal; + + buf[i++] = (2 << 5) | ATA_REG_LBAM; + buf[i++] = tf->hob_lbam; + buf[i++] = tf->lbam; + + buf[i++] = (2 << 5) | ATA_REG_LBAH; + buf[i++] = tf->hob_lbah; + buf[i++] = tf->lbah; + + return i; +} + +static inline void pdc20621_ata_sg(struct ata_taskfile *tf, u8 *buf, + unsigned int portno, + unsigned int total_len) +{ + u32 addr; + unsigned int dw = PDC_DIMM_APKT_PRD >> 2; + u32 *buf32 = (u32 *) buf; + + /* output ATA packet S/G table */ + addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + + (PDC_DIMM_DATA_STEP * portno); + VPRINTK("ATA sg addr 0x%x, %d\n", addr, addr); + buf32[dw] = cpu_to_le32(addr); + buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); + + VPRINTK("ATA PSG @ %x == (0x%x, 0x%x)\n", + PDC_20621_DIMM_BASE + + (PDC_DIMM_WINDOW_STEP * portno) + + PDC_DIMM_APKT_PRD, + buf32[dw], buf32[dw + 1]); +} + +static inline void pdc20621_host_sg(struct ata_taskfile *tf, u8 *buf, + unsigned int portno, + unsigned int total_len) +{ + u32 addr; + 
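As the "(count << 5)" comments above indicate, each entry in a Promise command packet is a control byte whose upper three bits carry a data count and whose low bits select the target register, with PDC_LAST_REG flagging the final entry; the data bytes follow immediately. A small sketch of appending one such entry, in the style of pdc_prep_lba28()/pdc_pkt_footer() above; the helper name is illustrative:

/* Append a single-register write to a Promise command packet:
 * control byte = (count << 5) | register, then the data byte.
 */
static unsigned int pdc_pkt_add_reg(u8 *buf, unsigned int i,
				    u8 reg, u8 val, int last)
{
	buf[i++] = (1 << 5) | (last ? PDC_LAST_REG : 0) | reg;
	buf[i++] = val;
	return i;		/* offset of the next free byte */
}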
unsigned int dw = PDC_DIMM_HPKT_PRD >> 2; + u32 *buf32 = (u32 *) buf; + + /* output Host DMA packet S/G table */ + addr = PDC_20621_DIMM_BASE + PDC_20621_DIMM_DATA + + (PDC_DIMM_DATA_STEP * portno); + + buf32[dw] = cpu_to_le32(addr); + buf32[dw + 1] = cpu_to_le32(total_len | ATA_PRD_EOT); + + VPRINTK("HOST PSG @ %x == (0x%x, 0x%x)\n", + PDC_20621_DIMM_BASE + + (PDC_DIMM_WINDOW_STEP * portno) + + PDC_DIMM_HPKT_PRD, + buf32[dw], buf32[dw + 1]); +} + +static inline unsigned int pdc20621_ata_pkt(struct ata_taskfile *tf, + unsigned int devno, u8 *buf, + unsigned int portno) +{ + unsigned int i, dw; + u32 *buf32 = (u32 *) buf; + u8 dev_reg; + + unsigned int dimm_sg = PDC_20621_DIMM_BASE + + (PDC_DIMM_WINDOW_STEP * portno) + + PDC_DIMM_APKT_PRD; + VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); + + i = PDC_DIMM_ATA_PKT; + + /* + * Set up ATA packet + */ + if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE))) + buf[i++] = PDC_PKT_READ; + else if (tf->protocol == ATA_PROT_NODATA) + buf[i++] = PDC_PKT_NODATA; + else + buf[i++] = 0; + buf[i++] = 0; /* reserved */ + buf[i++] = portno + 1; /* seq. id */ + buf[i++] = 0xff; /* delay seq. id */ + + /* dimm dma S/G, and next-pkt */ + dw = i >> 2; + buf32[dw] = cpu_to_le32(dimm_sg); + buf32[dw + 1] = 0; + i += 8; + + if (devno == 0) + dev_reg = ATA_DEVICE_OBS; + else + dev_reg = ATA_DEVICE_OBS | ATA_DEV1; + + /* select device */ + buf[i++] = (1 << 5) | PDC_PKT_CLEAR_BSY | ATA_REG_DEVICE; + buf[i++] = dev_reg; + + /* device control register */ + buf[i++] = (1 << 5) | PDC_REG_DEVCTL; + buf[i++] = tf->ctl; + + return i; +} + +static inline void pdc20621_host_pkt(struct ata_taskfile *tf, u8 *buf, + unsigned int portno) +{ + unsigned int dw; + u32 tmp, *buf32 = (u32 *) buf; + + unsigned int host_sg = PDC_20621_DIMM_BASE + + (PDC_DIMM_WINDOW_STEP * portno) + + PDC_DIMM_HOST_PRD; + unsigned int dimm_sg = PDC_20621_DIMM_BASE + + (PDC_DIMM_WINDOW_STEP * portno) + + PDC_DIMM_HPKT_PRD; + VPRINTK("ENTER, dimm_sg == 0x%x, %d\n", dimm_sg, dimm_sg); + VPRINTK("host_sg == 0x%x, %d\n", host_sg, host_sg); + + dw = PDC_DIMM_HOST_PKT >> 2; + + /* + * Set up Host DMA packet + */ + if ((tf->protocol == ATA_PROT_DMA) && (!(tf->flags & ATA_TFLAG_WRITE))) + tmp = PDC_PKT_READ; + else + tmp = 0; + tmp |= ((portno + 1 + 4) << 16); /* seq. id */ + tmp |= (0xff << 24); /* delay seq. 
id */ + buf32[dw + 0] = cpu_to_le32(tmp); + buf32[dw + 1] = cpu_to_le32(host_sg); + buf32[dw + 2] = cpu_to_le32(dimm_sg); + buf32[dw + 3] = 0; + + VPRINTK("HOST PKT @ %x == (0x%x 0x%x 0x%x 0x%x)\n", + PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * portno) + + PDC_DIMM_HOST_PKT, + buf32[dw + 0], + buf32[dw + 1], + buf32[dw + 2], + buf32[dw + 3]); +} + +static void pdc20621_fill_sg(struct ata_queued_cmd *qc) +{ + struct scatterlist *sg = qc->sg; + struct ata_port *ap = qc->ap; + struct pdc_port_priv *pp = ap->private_data; + void *mmio = ap->host_set->mmio_base; + struct pdc_host_priv *hpriv = ap->host_set->private_data; + void *dimm_mmio = hpriv->dimm_mmio; + unsigned int portno = ap->port_no; + unsigned int i, last, idx, total_len = 0, sgt_len; + u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ]; + + VPRINTK("ata%u: ENTER\n", ap->id); + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + /* + * Build S/G table + */ + last = qc->n_elem; + idx = 0; + for (i = 0; i < last; i++) { + buf[idx++] = cpu_to_le32(sg_dma_address(&sg[i])); + buf[idx++] = cpu_to_le32(sg_dma_len(&sg[i])); + total_len += sg[i].length; + } + buf[idx - 1] |= cpu_to_le32(ATA_PRD_EOT); + sgt_len = idx * 4; + + /* + * Build ATA, host DMA packets + */ + pdc20621_host_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); + pdc20621_host_pkt(&qc->tf, &pp->dimm_buf[0], portno); + + pdc20621_ata_sg(&qc->tf, &pp->dimm_buf[0], portno, total_len); + i = pdc20621_ata_pkt(&qc->tf, qc->dev->devno, &pp->dimm_buf[0], portno); + + if (qc->tf.flags & ATA_TFLAG_LBA48) + i = pdc_prep_lba48(&qc->tf, &pp->dimm_buf[0], i); + else + i = pdc_prep_lba28(&qc->tf, &pp->dimm_buf[0], i); + + pdc_pkt_footer(&qc->tf, &pp->dimm_buf[0], i); + + /* copy three S/G tables and two packets to DIMM MMIO window */ + memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP), + &pp->dimm_buf, PDC_DIMM_HEADER_SZ); + memcpy_toio(dimm_mmio + (portno * PDC_DIMM_WINDOW_STEP) + + PDC_DIMM_HOST_PRD, + &pp->dimm_buf[PDC_DIMM_HEADER_SZ], sgt_len); + + /* force host FIFO dump */ + writel(0x00000001, mmio + PDC_20621_GENERAL_CTL); + + readl(dimm_mmio); /* MMIO PCI posting flush */ + + VPRINTK("ata pkt buf ofs %u, prd size %u, mmio copied\n", i, sgt_len); +} + +static void __pdc20621_push_hdma(struct ata_queued_cmd *qc, + unsigned int seq, + u32 pkt_ofs) +{ + struct ata_port *ap = qc->ap; + struct ata_host_set *host_set = ap->host_set; + void *mmio = host_set->mmio_base; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); + readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ + + writel(pkt_ofs, mmio + PDC_HDMA_PKT_SUBMIT); + readl(mmio + PDC_HDMA_PKT_SUBMIT); /* flush */ +} + +static void pdc20621_push_hdma(struct ata_queued_cmd *qc, + unsigned int seq, + u32 pkt_ofs) +{ + struct ata_port *ap = qc->ap; + struct pdc_host_priv *pp = ap->host_set->private_data; + unsigned int idx = pp->hdma_prod & PDC_HDMA_Q_MASK; + + if (!pp->doing_hdma) { + __pdc20621_push_hdma(qc, seq, pkt_ofs); + pp->doing_hdma = 1; + return; + } + + pp->hdma[idx].qc = qc; + pp->hdma[idx].seq = seq; + pp->hdma[idx].pkt_ofs = pkt_ofs; + pp->hdma_prod++; +} + +static void pdc20621_pop_hdma(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct pdc_host_priv *pp = ap->host_set->private_data; + unsigned int idx = pp->hdma_cons & PDC_HDMA_Q_MASK; + + /* if nothing on queue, we're done */ + if (pp->hdma_prod == pp->hdma_cons) { + pp->doing_hdma = 0; + return; + } + + __pdc20621_push_hdma(pp->hdma[idx].qc, pp->hdma[idx].seq, + 
pp->hdma[idx].pkt_ofs); + pp->hdma_cons++; +} + +#ifdef ATA_VERBOSE_DEBUG +static void pdc20621_dump_hdma(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + unsigned int port_no = ap->port_no; + struct pdc_host_priv *hpriv = ap->host_set->private_data; + void *dimm_mmio = hpriv->dimm_mmio; + + dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP); + dimm_mmio += PDC_DIMM_HOST_PKT; + + printk(KERN_ERR "HDMA[0] == 0x%08X\n", readl(dimm_mmio)); + printk(KERN_ERR "HDMA[1] == 0x%08X\n", readl(dimm_mmio + 4)); + printk(KERN_ERR "HDMA[2] == 0x%08X\n", readl(dimm_mmio + 8)); + printk(KERN_ERR "HDMA[3] == 0x%08X\n", readl(dimm_mmio + 12)); +} +#else +static inline void pdc20621_dump_hdma(struct ata_queued_cmd *qc) { } +#endif /* ATA_VERBOSE_DEBUG */ + +static void pdc20621_dma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct ata_host_set *host_set = ap->host_set; + unsigned int port_no = ap->port_no; + void *mmio = host_set->mmio_base; + unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); + u8 seq = (u8) (port_no + 1); + unsigned int doing_hdma = 0, port_ofs; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + VPRINTK("ata%u: ENTER\n", ap->id); + + port_ofs = PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no); + + /* if writing, we (1) DMA to DIMM, then (2) do ATA command */ + if (rw) { + doing_hdma = 1; + seq += 4; + } + + wmb(); /* flush PRD, pkt writes */ + + if (doing_hdma) { + pdc20621_dump_hdma(qc); + pdc20621_push_hdma(qc, seq, port_ofs + PDC_DIMM_HOST_PKT); + VPRINTK("queued ofs 0x%x (%u), seq %u\n", + port_ofs + PDC_DIMM_HOST_PKT, + port_ofs + PDC_DIMM_HOST_PKT, + seq); + } else { + writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); + readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */ + + writel(port_ofs + PDC_DIMM_ATA_PKT, + (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + VPRINTK("submitted ofs 0x%x (%u), seq %u\n", + port_ofs + PDC_DIMM_ATA_PKT, + port_ofs + PDC_DIMM_ATA_PKT, + seq); + } +} + +static inline unsigned int pdc20621_host_intr( struct ata_port *ap, + struct ata_queued_cmd *qc, + unsigned int doing_hdma, + void *mmio) +{ + unsigned int port_no = ap->port_no; + unsigned int port_ofs = + PDC_20621_DIMM_BASE + (PDC_DIMM_WINDOW_STEP * port_no); + u8 status; + unsigned int handled = 0; + + VPRINTK("ENTER\n"); + + if ((qc->tf.protocol == ATA_PROT_DMA) && /* read */ + (!(qc->tf.flags & ATA_TFLAG_WRITE))) { + + /* step two - DMA from DIMM to host */ + if (doing_hdma) { + VPRINTK("ata%u: read hdma, 0x%x 0x%x\n", ap->id, + readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); + pdc_dma_complete(ap, qc); + pdc20621_pop_hdma(qc); + } + + /* step one - exec ATA command */ + else { + u8 seq = (u8) (port_no + 1 + 4); + VPRINTK("ata%u: read ata, 0x%x 0x%x\n", ap->id, + readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); + + /* submit hdma pkt */ + pdc20621_dump_hdma(qc); + pdc20621_push_hdma(qc, seq, + port_ofs + PDC_DIMM_HOST_PKT); + } + handled = 1; + + } else if (qc->tf.protocol == ATA_PROT_DMA) { /* write */ + + /* step one - DMA from host to DIMM */ + if (doing_hdma) { + u8 seq = (u8) (port_no + 1); + VPRINTK("ata%u: write hdma, 0x%x 0x%x\n", ap->id, + readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); + + /* submit ata pkt */ + writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4)); + readl(mmio + PDC_20621_SEQCTL + (seq * 4)); + writel(port_ofs + PDC_DIMM_ATA_PKT, + (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + 
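pdc20621_push_hdma()/pdc20621_pop_hdma() above keep a small software queue of pending host-DMA packets: a 32-entry array indexed by free-running producer/consumer counters masked with PDC_HDMA_Q_MASK, plus a doing_hdma flag so the first packet is issued immediately. The index arithmetic, reduced to a standalone sketch with hypothetical counter values:

#include <stdio.h>

#define PDC_MAX_HDMA	32
#define PDC_HDMA_Q_MASK	(PDC_MAX_HDMA - 1)

int main(void)
{
	/* free-running counters, as kept in struct pdc_host_priv */
	unsigned int prod = 33, cons = 31;	/* hypothetical snapshot */

	printf("entries queued:     %u\n", prod - cons);
	printf("next slot to fill:  %u\n", prod & PDC_HDMA_Q_MASK);
	printf("next slot to drain: %u\n", cons & PDC_HDMA_Q_MASK);
	/* prod == cons means the queue is empty (see pdc20621_pop_hdma) */
	return 0;
}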
} + + /* step two - execute ATA command */ + else { + VPRINTK("ata%u: write ata, 0x%x 0x%x\n", ap->id, + readl(mmio + 0x104), readl(mmio + PDC_HDMA_CTLSTAT)); + pdc_dma_complete(ap, qc); + pdc20621_pop_hdma(qc); + } + handled = 1; + + /* command completion, but no data xfer */ + } else if (qc->tf.protocol == ATA_PROT_NODATA) { + + status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); + ata_qc_complete(qc, status, 0); + handled = 1; + + } else { + ap->stats.idle_irq++; + } + + return handled; +} + +static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + struct ata_port *ap; + u32 mask = 0; + unsigned int i, tmp, port_no; + unsigned int handled = 0; + void *mmio_base; + + VPRINTK("ENTER\n"); + + if (!host_set || !host_set->mmio_base) { + VPRINTK("QUICK EXIT\n"); + return IRQ_NONE; + } + + mmio_base = host_set->mmio_base; + + /* reading should also clear interrupts */ + mmio_base += PDC_CHIP0_OFS; + mask = readl(mmio_base + PDC_20621_SEQMASK); + VPRINTK("mask == 0x%x\n", mask); + + if (mask == 0xffffffff) { + VPRINTK("QUICK EXIT 2\n"); + return IRQ_NONE; + } + mask &= 0xffff; /* only 16 tags possible */ + if (!mask) { + VPRINTK("QUICK EXIT 3\n"); + return IRQ_NONE; + } + + spin_lock_irq(&host_set->lock); + + for (i = 1; i < 9; i++) { + port_no = i - 1; + if (port_no > 3) + port_no -= 4; + if (port_no >= host_set->n_ports) + ap = NULL; + else + ap = host_set->ports[port_no]; + tmp = mask & (1 << i); + VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); + if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0)) + handled += pdc20621_host_intr(ap, qc, (i > 4), + mmio_base); + } + } + + spin_unlock_irq(&host_set->lock); + + VPRINTK("mask == 0x%x\n", mask); + + VPRINTK("EXIT\n"); + + return IRQ_RETVAL(handled); +} + +static void pdc_fill_sg(struct ata_queued_cmd *qc) +{ + struct pdc_port_priv *pp = qc->ap->private_data; + unsigned int i; + + VPRINTK("ENTER\n"); + + ata_fill_sg(qc); + + i = pdc_pkt_header(&qc->tf, qc->ap->prd_dma, qc->dev->devno, pp->pkt); + + if (qc->tf.flags & ATA_TFLAG_LBA48) + i = pdc_prep_lba48(&qc->tf, pp->pkt, i); + else + i = pdc_prep_lba28(&qc->tf, pp->pkt, i); + + pdc_pkt_footer(&qc->tf, pp->pkt, i); +} + +static inline void pdc_dma_complete (struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + /* get drive status; clear intr; complete txn */ + ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag), + ata_wait_idle(ap), 0); +} + +static void pdc_eng_timeout(struct ata_port *ap) +{ + u8 drv_stat; + struct ata_queued_cmd *qc; + + DPRINTK("ENTER\n"); + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (!qc) { + printk(KERN_ERR "ata%u: BUG: timeout without command\n", + ap->id); + goto out; + } + + /* hack alert! We cannot use the supplied completion + * function from inside the ->eh_strategy_handler() thread. + * libata is the only user of ->eh_strategy_handler() in + * any kernel, so the default scsi_done() assumes it is + * not being called from the SCSI EH. 
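In the 20621 interrupt handler above, the asserted sequence number encodes both the port and the stage: pdc20621_dma_start() uses seq = port + 1 for ATA packets and adds 4 for host-DMA packets, so mask bits 1..4 report ATA completions and bits 5..8 report host-DMA completions for ports 0..3. A small standalone decode of that convention:

#include <stdio.h>

int main(void)
{
	unsigned int seq;

	for (seq = 1; seq <= 8; seq++) {
		unsigned int port_no = seq - 1;
		int doing_hdma = (seq > 4);	/* same test as "(i > 4)" above */

		if (port_no > 3)
			port_no -= 4;
		printf("seq %u -> port %u, %s packet\n",
		       seq, port_no, doing_hdma ? "host-DMA" : "ATA");
	}
	return 0;
}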
+ */ + qc->scsidone = scsi_finish_command; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + printk(KERN_ERR "ata%u: DMA timeout\n", ap->id); + ata_qc_complete(ata_qc_from_tag(ap, ap->active_tag), + ata_wait_idle(ap) | ATA_ERR, 0); + break; + + case ATA_PROT_NODATA: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + + default: + drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + printk(KERN_ERR "ata%u: unknown timeout, cmd 0x%x stat 0x%x\n", + ap->id, qc->tf.command, drv_stat); + + ata_qc_complete(qc, drv_stat, 1); + break; + } + +out: + DPRINTK("EXIT\n"); +} + +static inline unsigned int pdc_host_intr( struct ata_port *ap, + struct ata_queued_cmd *qc) +{ + u8 status; + unsigned int handled = 0; + + switch (qc->tf.protocol) { + case ATA_PROT_DMA: + pdc_dma_complete(ap, qc); + handled = 1; + break; + + case ATA_PROT_NODATA: /* command completion, but no data xfer */ + status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + DPRINTK("BUS_NODATA (drv_stat 0x%X)\n", status); + ata_qc_complete(qc, status, 0); + handled = 1; + break; + + default: + ap->stats.idle_irq++; + break; + } + + return handled; +} + +static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + struct ata_port *ap; + u32 mask = 0; + unsigned int i, tmp; + unsigned int handled = 0; + void *mmio_base; + + VPRINTK("ENTER\n"); + + if (!host_set || !host_set->mmio_base) { + VPRINTK("QUICK EXIT\n"); + return IRQ_NONE; + } + + mmio_base = host_set->mmio_base; + + /* reading should also clear interrupts */ + mask = readl(mmio_base + PDC_INT_SEQMASK); + + if (mask == 0xffffffff) { + VPRINTK("QUICK EXIT 2\n"); + return IRQ_NONE; + } + mask &= 0xffff; /* only 16 tags possible */ + if (!mask) { + VPRINTK("QUICK EXIT 3\n"); + return IRQ_NONE; + } + + spin_lock_irq(&host_set->lock); + + for (i = 0; i < host_set->n_ports; i++) { + VPRINTK("port %u\n", i); + ap = host_set->ports[i]; + tmp = mask & (1 << (i + 1)); + if (tmp && ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0)) + handled += pdc_host_intr(ap, qc); + } + } + + spin_unlock_irq(&host_set->lock); + + VPRINTK("EXIT\n"); + + return IRQ_RETVAL(handled); +} + +static void pdc_dma_start(struct ata_queued_cmd *qc) +{ + struct ata_port *ap = qc->ap; + struct pdc_port_priv *pp = ap->private_data; + unsigned int port_no = ap->port_no; + u8 seq = (u8) (port_no + 1); + + VPRINTK("ENTER, ap %p\n", ap); + + writel(0x00000001, ap->host_set->mmio_base + (seq * 4)); + readl(ap->host_set->mmio_base + (seq * 4)); /* flush */ + + pp->pkt[2] = seq; + wmb(); /* flush PRD, pkt writes */ + writel(pp->pkt_dma, (void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); + readl((void *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */ +} + +static void pdc_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + if (tf->protocol != ATA_PROT_DMA) + ata_tf_load_mmio(ap, tf); +} + + +static void pdc_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf) +{ + if (tf->protocol != ATA_PROT_DMA) + ata_exec_command_mmio(ap, tf); +} + + +static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base) +{ + port->cmd_addr = base; + port->data_addr = base; + port->feature_addr = + port->error_addr = base + 0x4; + port->nsect_addr = 
base + 0x8; + port->lbal_addr = base + 0xc; + port->lbam_addr = base + 0x10; + port->lbah_addr = base + 0x14; + port->device_addr = base + 0x18; + port->command_addr = + port->status_addr = base + 0x1c; + port->altstatus_addr = + port->ctl_addr = base + 0x38; +} + + +#ifdef ATA_VERBOSE_DEBUG +static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource, + u32 offset, u32 size) +{ + u32 window_size; + u16 idx; + u8 page_mask; + long dist; + void *mmio = pe->mmio_base; + struct pdc_host_priv *hpriv = pe->private_data; + void *dimm_mmio = hpriv->dimm_mmio; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + page_mask = 0x00; + window_size = 0x2000 * 4; /* 32K byte uchar size */ + idx = (u16) (offset / window_size); + + writel(0x01, mmio + PDC_GENERAL_CTLR); + readl(mmio + PDC_GENERAL_CTLR); + writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); + readl(mmio + PDC_DIMM_WINDOW_CTLR); + + offset -= (idx * window_size); + idx++; + dist = ((long) (window_size - (offset + size))) >= 0 ? size : + (long) (window_size - offset); + memcpy_fromio((char *) psource, (char *) (dimm_mmio + offset / 4), + dist); + + psource += dist; + size -= dist; + for (; (long) size >= (long) window_size ;) { + writel(0x01, mmio + PDC_GENERAL_CTLR); + readl(mmio + PDC_GENERAL_CTLR); + writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); + readl(mmio + PDC_DIMM_WINDOW_CTLR); + memcpy_fromio((char *) psource, (char *) (dimm_mmio), + window_size / 4); + psource += window_size; + size -= window_size; + idx ++; + } + + if (size) { + writel(0x01, mmio + PDC_GENERAL_CTLR); + readl(mmio + PDC_GENERAL_CTLR); + writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); + readl(mmio + PDC_DIMM_WINDOW_CTLR); + memcpy_fromio((char *) psource, (char *) (dimm_mmio), + size / 4); + } +} +#endif + + +static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource, + u32 offset, u32 size) +{ + u32 window_size; + u16 idx; + u8 page_mask; + long dist; + void *mmio = pe->mmio_base; + struct pdc_host_priv *hpriv = pe->private_data; + void *dimm_mmio = hpriv->dimm_mmio; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + page_mask = 0x00; + window_size = 0x2000 * 4; /* 32K byte uchar size */ + idx = (u16) (offset / window_size); + + writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); + readl(mmio + PDC_DIMM_WINDOW_CTLR); + offset -= (idx * window_size); + idx++; + dist = ((long) (window_size - (offset + size))) >= 0 ? 
size : + (long) (window_size - offset); + memcpy_toio((char *) (dimm_mmio + offset / 4), (char *) psource, dist); + writel(0x01, mmio + PDC_GENERAL_CTLR); + readl(mmio + PDC_GENERAL_CTLR); + + psource += dist; + size -= dist; + for (; (long) size >= (long) window_size ;) { + writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); + readl(mmio + PDC_DIMM_WINDOW_CTLR); + memcpy_toio((char *) (dimm_mmio), (char *) psource, + window_size / 4); + writel(0x01, mmio + PDC_GENERAL_CTLR); + readl(mmio + PDC_GENERAL_CTLR); + psource += window_size; + size -= window_size; + idx ++; + } + + if (size) { + writel(((idx) << page_mask), mmio + PDC_DIMM_WINDOW_CTLR); + readl(mmio + PDC_DIMM_WINDOW_CTLR); + memcpy_toio((char *) (dimm_mmio), (char *) psource, size / 4); + writel(0x01, mmio + PDC_GENERAL_CTLR); + readl(mmio + PDC_GENERAL_CTLR); + } +} + + +static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device, + u32 subaddr, u32 *pdata) +{ + void *mmio = pe->mmio_base; + u32 i2creg = 0; + u32 status; + u32 count =0; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + i2creg |= device << 24; + i2creg |= subaddr << 16; + + /* Set the device and subaddress */ + writel(i2creg, mmio + PDC_I2C_ADDR_DATA_OFFSET); + readl(mmio + PDC_I2C_ADDR_DATA_OFFSET); + + /* Write Control to perform read operation, mask int */ + writel(PDC_I2C_READ | PDC_I2C_START | PDC_I2C_MASK_INT, + mmio + PDC_I2C_CONTROL_OFFSET); + + for (count = 0; count <= 1000; count ++) { + status = readl(mmio + PDC_I2C_CONTROL_OFFSET); + if (status & PDC_I2C_COMPLETE) { + status = readl(mmio + PDC_I2C_ADDR_DATA_OFFSET); + break; + } else if (count == 1000) + return 0; + } + + *pdata = (status >> 8) & 0x000000ff; + return 1; +} + + +static int pdc20621_detect_dimm(struct ata_probe_ent *pe) +{ + u32 data=0 ; + if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, + PDC_DIMM_SPD_SYSTEM_FREQ, &data)) { + if (data == 100) + return 100; + } else + return 0; + + if (pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, 9, &data)) { + if(data <= 0x75) + return 133; + } else + return 0; + + return 0; +} + + +static int pdc20621_prog_dimm0(struct ata_probe_ent *pe) +{ + u32 spd0[50]; + u32 data = 0; + int size, i; + u8 bdimmsize; + void *mmio = pe->mmio_base; + static const struct { + unsigned int reg; + unsigned int ofs; + } pdc_i2c_read_data [] = { + { PDC_DIMM_SPD_TYPE, 11 }, + { PDC_DIMM_SPD_FRESH_RATE, 12 }, + { PDC_DIMM_SPD_COLUMN_NUM, 4 }, + { PDC_DIMM_SPD_ATTRIBUTE, 21 }, + { PDC_DIMM_SPD_ROW_NUM, 3 }, + { PDC_DIMM_SPD_BANK_NUM, 17 }, + { PDC_DIMM_SPD_MODULE_ROW, 5 }, + { PDC_DIMM_SPD_ROW_PRE_CHARGE, 27 }, + { PDC_DIMM_SPD_ROW_ACTIVE_DELAY, 28 }, + { PDC_DIMM_SPD_RAS_CAS_DELAY, 29 }, + { PDC_DIMM_SPD_ACTIVE_PRECHARGE, 30 }, + { PDC_DIMM_SPD_CAS_LATENCY, 18 }, + }; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + for(i=0; i spd0[28]) + ? spd0[29] : spd0[28]) + 9) / 10) - 1) << 10; + data |= ((spd0[30] - spd0[29] + 9) / 10 - 2) << 12; + + if (spd0[18] & 0x08) + data |= ((0x03) << 14); + else if (spd0[18] & 0x04) + data |= ((0x02) << 14); + else if (spd0[18] & 0x01) + data |= ((0x01) << 14); + else + data |= (0 << 14); + + /* + Calculate the size of bDIMMSize (power of 2) and + merge the DIMM size by program start/end address. 
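The computation that follows derives the module size from the SPD bytes read above (row and column address bits, module rows, and bank count, per the PDC_DIMM_SPD_* offsets). A worked sketch with typical, purely hypothetical SPD values:

#include <stdio.h>

int main(void)
{
	/* hypothetical SPD readings: 12 row bits, 10 column bits,
	 * 2 module rows, 4 internal banks
	 */
	unsigned int rows = 12, cols = 10, mod_rows = 2, banks = 4;

	/* same formula as the driver code:
	 * bdimmsize = spd0[4] + spd0[5]/2 + spd0[3] + spd0[17]/2 + 3
	 */
	unsigned int bdimmsize = cols + mod_rows / 2 + rows + banks / 2 + 3;
	unsigned int size_mb = (1u << bdimmsize) >> 20;

	printf("bdimmsize=%u -> %u MB\n", bdimmsize, size_mb);	/* 28 -> 256 MB */
	return 0;
}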
+ */ + + bdimmsize = spd0[4] + (spd0[5] / 2) + spd0[3] + (spd0[17] / 2) + 3; + size = (1 << bdimmsize) >> 20; /* size = xxx(MB) */ + data |= (((size / 16) - 1) << 16); + data |= (0 << 23); + data |= 8; + writel(data, mmio + PDC_DIMM0_CONTROL_OFFSET); + readl(mmio + PDC_DIMM0_CONTROL_OFFSET); + return size; +} + + +static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe) +{ + u32 data, spd0; + int error, i; + void *mmio = pe->mmio_base; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + /* + Set To Default : DIMM Module Global Control Register (0x022259F1) + DIMM Arbitration Disable (bit 20) + DIMM Data/Control Output Driving Selection (bit12 - bit15) + Refresh Enable (bit 17) + */ + + data = 0x022259F1; + writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET); + readl(mmio + PDC_SDRAM_CONTROL_OFFSET); + + /* Turn on for ECC */ + pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, + PDC_DIMM_SPD_TYPE, &spd0); + if (spd0 == 0x02) { + data |= (0x01 << 16); + writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET); + readl(mmio + PDC_SDRAM_CONTROL_OFFSET); + printk(KERN_ERR "Local DIMM ECC Enabled\n"); + } + + /* DIMM Initialization Select/Enable (bit 18/19) */ + data &= (~(1<<18)); + data |= (1<<19); + writel(data, mmio + PDC_SDRAM_CONTROL_OFFSET); + + error = 1; + for (i = 1; i <= 10; i++) { /* polling ~5 secs */ + data = readl(mmio + PDC_SDRAM_CONTROL_OFFSET); + if (!(data & (1<<19))) { + error = 0; + break; + } + set_current_state(TASK_INTERRUPTIBLE); + schedule_timeout((i * 100) * HZ / 1000); + } + return error; +} + + +static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe) +{ + int speed, size, length; + u32 addr,spd0,pci_status; + u32 tmp=0; + u32 time_period=0; + u32 tcount=0; + u32 ticks=0; + u32 clock=0; + u32 fparam=0; + void *mmio = pe->mmio_base; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + /* Initialize PLL based upon PCI Bus Frequency */ + + /* Initialize Time Period Register */ + writel(0xffffffff, mmio + PDC_TIME_PERIOD); + time_period = readl(mmio + PDC_TIME_PERIOD); + VPRINTK("Time Period Register (0x40): 0x%x\n", time_period); + + /* Enable timer */ + writel(0x00001a0, mmio + PDC_TIME_CONTROL); + readl(mmio + PDC_TIME_CONTROL); + + /* Wait 3 seconds */ + set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(3 * HZ); + + /* + When timer is enabled, counter is decreased every internal + clock cycle. + */ + + tcount = readl(mmio + PDC_TIME_COUNTER); + VPRINTK("Time Counter Register (0x44): 0x%x\n", tcount); + + /* + If SX4 is on PCI-X bus, after 3 seconds, the timer counter + register should be >= (0xffffffff - 3x10^8). + */ + if(tcount >= PCI_X_TCOUNT) { + ticks = (time_period - tcount); + VPRINTK("Num counters 0x%x (%d)\n", ticks, ticks); + + clock = (ticks / 300000); + VPRINTK("10 * Internal clk = 0x%x (%d)\n", clock, clock); + + clock = (clock * 33); + VPRINTK("10 * Internal clk * 33 = 0x%x (%d)\n", clock, clock); + + /* PLL F Param (bit 22:16) */ + fparam = (1400000 / clock) - 2; + VPRINTK("PLL F Param: 0x%x (%d)\n", fparam, fparam); + + /* OD param = 0x2 (bit 31:30), R param = 0x5 (bit 29:25) */ + pci_status = (0x8a001824 | (fparam << 16)); + } else + pci_status = PCI_PLL_INIT; + + /* Initialize PLL. */ + VPRINTK("pci_status: 0x%x\n", pci_status); + writel(pci_status, mmio + PDC_CTL_STATUS); + readl(mmio + PDC_CTL_STATUS); + + /* + Read SPD of DIMM by I2C interface, + and program the DIMM Module Controller. 
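The PLL setup above measures the PCI bus clock empirically: it lets a free-running counter run for three seconds and, when the count indicates a PCI-X bus, converts the elapsed ticks into an F parameter for the PLL register. A condensed restatement of that arithmetic with a hypothetical counter reading; the constants match the enum above, the input value is invented:

#include <stdio.h>

#define PCI_X_TCOUNT	0xEE1E5CFF
#define PCI_PLL_INIT	0x8A531824

int main(void)
{
	unsigned int time_period = 0xffffffff;	/* initial Time Period value */
	unsigned int tcount = 0xF8000000;	/* hypothetical reading after 3 s */
	unsigned int pci_status;

	if (tcount >= PCI_X_TCOUNT) {		/* PCI-X bus detected */
		unsigned int ticks = time_period - tcount;
		unsigned int clock = (ticks / 300000) * 33;	/* "10 * internal clk * 33" */
		unsigned int fparam = (1400000 / clock) - 2;	/* PLL F param, bits 22:16 */

		pci_status = 0x8a001824 | (fparam << 16);
	} else {
		pci_status = PCI_PLL_INIT;	/* plain PCI default */
	}

	printf("PDC_CTL_STATUS value: 0x%08x\n", pci_status);
	return 0;
}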
+ */ + if (!(speed = pdc20621_detect_dimm(pe))) { + printk(KERN_ERR "Detect Local DIMM Fail\n"); + return 1; /* DIMM error */ + } + VPRINTK("Local DIMM Speed = %d\n", speed); + + /* Programming DIMM0 Module Control Register (index_CID0:80h) */ + size = pdc20621_prog_dimm0(pe); + VPRINTK("Local DIMM Size = %dMB\n",size); + + /* Programming DIMM Module Global Control Register (index_CID0:88h) */ + if (pdc20621_prog_dimm_global(pe)) { + printk(KERN_ERR "Programming DIMM Module Global Control Register Fail\n"); + return 1; + } + +#ifdef ATA_VERBOSE_DEBUG + { + u8 test_parttern1[40] = {0x55,0xAA,'P','r','o','m','i','s','e',' ', + 'N','o','t',' ','Y','e','t',' ','D','e','f','i','n','e','d',' ', + '1','.','1','0', + '9','8','0','3','1','6','1','2',0,0}; + u8 test_parttern2[40] = {0}; + + pdc20621_put_to_dimm(pe, (void *) test_parttern2, 0x10040, 40); + pdc20621_put_to_dimm(pe, (void *) test_parttern2, 0x40, 40); + + pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x10040, 40); + pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40); + printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], + test_parttern2[1], &(test_parttern2[2])); + pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x10040, + 40); + printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], + test_parttern2[1], &(test_parttern2[2])); + + pdc20621_put_to_dimm(pe, (void *) test_parttern1, 0x40, 40); + pdc20621_get_from_dimm(pe, (void *) test_parttern2, 0x40, 40); + printk(KERN_ERR "%x, %x, %s\n", test_parttern2[0], + test_parttern2[1], &(test_parttern2[2])); + } +#endif + + /* ECC initiliazation. */ + + pdc20621_i2c_read(pe, PDC_DIMM0_SPD_DEV_ADDRESS, + PDC_DIMM_SPD_TYPE, &spd0); + if (spd0 == 0x02) { + VPRINTK("Start ECC initialization\n"); + addr = 0; + length = size * 1024 * 1024; + while (addr < length) { + pdc20621_put_to_dimm(pe, (void *) &tmp, addr, + sizeof(u32)); + addr += sizeof(u32); + } + VPRINTK("Finish ECC initialization\n"); + } + return 0; +} + + +static void pdc_20621_init(struct ata_probe_ent *pe) +{ + u32 tmp; + void *mmio = pe->mmio_base; + + /* hard-code chip #0 */ + mmio += PDC_CHIP0_OFS; + + /* + * Select page 0x40 for our 32k DIMM window + */ + tmp = readl(mmio + PDC_20621_DIMM_WINDOW) & 0xffff0000; + tmp |= PDC_PAGE_WINDOW; /* page 40h; arbitrarily selected */ + writel(tmp, mmio + PDC_20621_DIMM_WINDOW); + + /* + * Reset Host DMA + */ + tmp = readl(mmio + PDC_HDMA_CTLSTAT); + tmp |= PDC_HDMA_RESET; + writel(tmp, mmio + PDC_HDMA_CTLSTAT); + readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ + + udelay(10); + + tmp = readl(mmio + PDC_HDMA_CTLSTAT); + tmp &= ~PDC_HDMA_RESET; + writel(tmp, mmio + PDC_HDMA_CTLSTAT); + readl(mmio + PDC_HDMA_CTLSTAT); /* flush */ +} + +static int pdc_pata_possible(struct pci_dev *pdev) +{ + if (pdev->device == 0x3375) + return 1; + return 0; +} + +static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe) +{ + struct pci_dev *pdev = pe->pdev; + void *mmio = pe->mmio_base; + u32 tmp; + + if (chip_id == board_20621) + BUG(); + + /* change FIFO_SHD to 8 dwords. Promise driver does this... + * dunno why. + */ + tmp = readl(mmio + PDC_FLASH_CTL); + if ((tmp & (1 << 16)) == 0) + writel(tmp | (1 << 16), mmio + PDC_FLASH_CTL); + + /* clear plug/unplug flags for all ports */ + tmp = readl(mmio + PDC_SATA_PLUG_CSR); + writel(tmp | 0xff, mmio + PDC_SATA_PLUG_CSR); + + /* mask plug/unplug ints */ + tmp = readl(mmio + PDC_SATA_PLUG_CSR); + writel(tmp | 0xff0000, mmio + PDC_SATA_PLUG_CSR); + + /* reduce TBG clock to 133 Mhz. FIXME: why? 
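	 [Illustrative aside, not part of the original patch: note the
	  recurring MMIO idiom in this file -- a writel() immediately
	  followed by a readl() of the same register, e.g.

	      writel(tmp, mmio + PDC_HDMA_CTLSTAT);
	      readl(mmio + PDC_HDMA_CTLSTAT);	/* flush */

	  The read back flushes the posted write out to the chip before the
	  driver proceeds, which matters for timed sequences such as the
	  HDMA reset pulse in pdc_20621_init() above.]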
*/ + tmp = readl(mmio + PDC_TBG_MODE); + tmp &= ~0x30000; /* clear bit 17, 16*/ + tmp |= 0x10000; /* set bit 17:16 = 0:1 */ + writel(tmp, mmio + PDC_TBG_MODE); + + /* adjust slew rate control register. FIXME: why? */ + tmp = readl(mmio + PDC_SLEW_CTL); + tmp &= 0xFFFFF03F; /* clear bit 11 ~ 6 */ + tmp |= 0x00000900; /* set bit 11-9 = 100b , bit 8-6 = 100 */ + writel(tmp, mmio + PDC_SLEW_CTL); + + /* check for PATA port on PDC20375 */ + if (pdc_pata_possible(pdev)) { + tmp = readl(mmio + PDC_PCI_CTL); + if (tmp & PDC_HAS_PATA) + printk(KERN_INFO DRV_NAME "(%s): sorry, PATA port not supported yet\n", + pci_name(pdev)); + } +} + +static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + unsigned long base; + void *mmio_base, *dimm_mmio = NULL; + struct pdc_host_priv *hpriv = NULL; + unsigned int board_idx = (unsigned int) ent->driver_data; + unsigned int have_20621 = (board_idx == board_20621); + int rc; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* + * If this driver happens to only be useful on Apple's K2, then + * we should check that here as it has a normal Serverworks ID + */ + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + mmio_base = ioremap(pci_resource_start(pdev, 3), + pci_resource_len(pdev, 3)); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_free_ent; + } + base = (unsigned long) mmio_base; + + if (have_20621) { + hpriv = kmalloc(sizeof(*hpriv), GFP_KERNEL); + if (!hpriv) { + rc = -ENOMEM; + goto err_out_iounmap; + } + memset(hpriv, 0, sizeof(*hpriv)); + + dimm_mmio = ioremap(pci_resource_start(pdev, 4), + pci_resource_len(pdev, 4)); + if (!dimm_mmio) { + kfree(hpriv); + rc = -ENOMEM; + goto err_out_iounmap; + } + + hpriv->dimm_mmio = dimm_mmio; + } + + probe_ent->sht = pdc_port_info[board_idx].sht; + probe_ent->host_flags = pdc_port_info[board_idx].host_flags; + probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask; + probe_ent->udma_mask = pdc_port_info[board_idx].udma_mask; + probe_ent->port_ops = pdc_port_info[board_idx].port_ops; + + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->mmio_base = mmio_base; + + if (have_20621) { + probe_ent->private_data = hpriv; + base += PDC_CHIP0_OFS; + } + + pdc_sata_setup_port(&probe_ent->port[0], base + 0x200); + pdc_sata_setup_port(&probe_ent->port[1], base + 0x280); + + if (!have_20621) { + probe_ent->port[0].scr_addr = base + 0x400; + probe_ent->port[1].scr_addr = base + 0x500; + } + + /* notice 4-port boards */ + switch (board_idx) { + case board_20319: + case board_20621: + probe_ent->n_ports = 4; + + pdc_sata_setup_port(&probe_ent->port[2], base + 0x300); + pdc_sata_setup_port(&probe_ent->port[3], base + 0x380); + + if (!have_20621) { + probe_ent->port[2].scr_addr = base + 0x600; + probe_ent->port[3].scr_addr = base + 0x700; + } + break; + case board_2037x: + probe_ent->n_ports = 2; + break; + default: + BUG(); + break; + } + + pci_set_master(pdev); + + /* initialize adapter */ + if (have_20621) { + /* initialize local dimm */ + if 
(pdc20621_dimm_init(probe_ent)) { + rc = -ENOMEM; + goto err_out_iounmap_dimm; + } + pdc_20621_init(probe_ent); + } else + pdc_host_init(board_idx, probe_ent); + + ata_add_to_probe_list(probe_ent); + + return 0; + +err_out_iounmap_dimm: /* only get to this label if 20621 */ + kfree(hpriv); + iounmap(dimm_mmio); +err_out_iounmap: + iounmap(mmio_base); +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + + +static int __init pdc_sata_init(void) +{ + int rc; + + rc = pci_module_init(&pdc_sata_pci_driver); + if (rc) + return rc; + + rc = scsi_register_module(MODULE_SCSI_HA, &pdc_sata_sht); + if (rc) { + rc = -ENODEV; + goto err_out; + } + + return 0; + +err_out: + pci_unregister_driver(&pdc_sata_pci_driver); + return rc; +} + + +static void __exit pdc_sata_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &pdc_sata_sht); + pci_unregister_driver(&pdc_sata_pci_driver); +} + + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("Promise SATA low-level driver"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, pdc_sata_pci_tbl); + +module_init(pdc_sata_init); +module_exit(pdc_sata_exit); diff -urN linux-2.4.26-pre6/drivers/scsi/sata_sil.c linux-2.4.26-pre6-ata14/drivers/scsi/sata_sil.c --- linux-2.4.26-pre6/drivers/scsi/sata_sil.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/sata_sil.c Thu Mar 25 22:45:24 2004 @@ -0,0 +1,444 @@ +/* + * ata_sil.c - Silicon Image SATA + * + * Copyright 2003 Red Hat, Inc. + * Copyright 2003 Benjamin Herrenschmidt + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "sata_sil" +#define DRV_VERSION "0.54" + +enum { + sil_3112 = 0, + sil_3114 = 1, + + SIL_SYSCFG = 0x48, + SIL_MASK_IDE0_INT = (1 << 22), + SIL_MASK_IDE1_INT = (1 << 23), + SIL_MASK_IDE2_INT = (1 << 24), + SIL_MASK_IDE3_INT = (1 << 25), + SIL_MASK_2PORT = SIL_MASK_IDE0_INT | SIL_MASK_IDE1_INT, + SIL_MASK_4PORT = SIL_MASK_2PORT | + SIL_MASK_IDE2_INT | SIL_MASK_IDE3_INT, + + SIL_IDE2_BMDMA = 0x200, + + SIL_INTR_STEERING = (1 << 1), + SIL_QUIRK_MOD15WRITE = (1 << 0), + SIL_QUIRK_UDMA5MAX = (1 << 1), +}; + +static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static void sil_dev_config(struct ata_port *ap, struct ata_device *dev); +static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg); +static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); +static void sil_post_set_mode (struct ata_port *ap); + +static struct pci_device_id sil_pci_tbl[] = { + { 0x1095, 0x3112, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, + { 0x1095, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, + { 0x1095, 0x3512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3112 }, + { 0x1095, 0x3114, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sil_3114 }, + { } /* terminate list */ +}; + + +/* TODO firmware versions should be added - eric */ +struct sil_drivelist { + const char * product; + unsigned int quirk; +} sil_blacklist [] = { + { "ST320012AS", SIL_QUIRK_MOD15WRITE }, + { "ST330013AS", SIL_QUIRK_MOD15WRITE }, + { "ST340017AS", SIL_QUIRK_MOD15WRITE }, + { "ST360015AS", SIL_QUIRK_MOD15WRITE }, + { "ST380023AS", SIL_QUIRK_MOD15WRITE }, + { "ST3120023AS", SIL_QUIRK_MOD15WRITE }, + { "ST340014ASL", SIL_QUIRK_MOD15WRITE }, + { "ST360014ASL", SIL_QUIRK_MOD15WRITE }, + { "ST380011ASL", SIL_QUIRK_MOD15WRITE }, + { "ST3120022ASL", SIL_QUIRK_MOD15WRITE }, + { "ST3160021ASL", SIL_QUIRK_MOD15WRITE }, + { "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX }, + { } +}; + +static struct pci_driver sil_pci_driver = { + .name = DRV_NAME, + .id_table = sil_pci_tbl, + .probe = sil_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template sil_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = LIBATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .bios_param = ata_std_bios_param, +}; + +static struct ata_port_operations sil_ops = { + .port_disable = ata_port_disable, + .dev_config = sil_dev_config, + .tf_load = ata_tf_load_mmio, + .tf_read = ata_tf_read_mmio, + .check_status = ata_check_status_mmio, + .exec_command = ata_exec_command_mmio, + .phy_reset = sata_phy_reset, + .post_set_mode = sil_post_set_mode, + .bmdma_start = ata_bmdma_start_mmio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + .irq_handler = ata_interrupt, + .scr_read = sil_scr_read, + .scr_write = sil_scr_write, + .port_start = ata_port_start, + .port_stop = ata_port_stop, +}; + +static struct ata_port_info sil_port_info[] = { + /* sil_3112 */ + { + .sht = &sil_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask 
= 0x3f, /* udma0-5 */ + .port_ops = &sil_ops, + }, /* sil_3114 */ + { + .sht = &sil_sht, + .host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_SRST | ATA_FLAG_MMIO, + .pio_mask = 0x03, /* pio3-4 */ + .udma_mask = 0x3f, /* udma0-5 */ + .port_ops = &sil_ops, + }, +}; + +/* per-port register offsets */ +/* TODO: we can probably calculate rather than use a table */ +static const struct { + unsigned long tf; /* ATA taskfile register block */ + unsigned long ctl; /* ATA control/altstatus register block */ + unsigned long bmdma; /* DMA register block */ + unsigned long scr; /* SATA control register block */ + unsigned long sien; /* SATA Interrupt Enable register */ + unsigned long xfer_mode;/* data transfer mode register */ +} sil_port[] = { + /* port 0 ... */ + { 0x80, 0x8A, 0x00, 0x100, 0x148, 0xb4 }, + { 0xC0, 0xCA, 0x08, 0x180, 0x1c8, 0xf4 }, + { 0x280, 0x28A, 0x200, 0x300, 0x348, 0x2b4 }, + { 0x2C0, 0x2CA, 0x208, 0x380, 0x3c8, 0x2f4 }, + /* ... port 3 */ +}; + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("low-level driver for Silicon Image SATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, sil_pci_tbl); + +static void sil_post_set_mode (struct ata_port *ap) +{ + struct ata_host_set *host_set = ap->host_set; + struct ata_device *dev; + void *addr = host_set->mmio_base + sil_port[ap->port_no].xfer_mode; + u32 tmp, dev_mode[2]; + unsigned int i; + + for (i = 0; i < 2; i++) { + dev = &ap->device[i]; + if (!ata_dev_present(dev)) + dev_mode[i] = 0; /* PIO0/1/2 */ + else if (dev->flags & ATA_DFLAG_PIO) + dev_mode[i] = 1; /* PIO3/4 */ + else + dev_mode[i] = 3; /* UDMA */ + /* value 2 indicates MDMA */ + } + + tmp = readl(addr); + tmp &= ~((1<<5) | (1<<4) | (1<<1) | (1<<0)); + tmp |= dev_mode[0]; + tmp |= (dev_mode[1] << 4); + writel(tmp, addr); + readl(addr); /* flush */ +} + +static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg) +{ + unsigned long offset = ap->ioaddr.scr_addr; + + switch (sc_reg) { + case SCR_STATUS: + return offset + 4; + case SCR_ERROR: + return offset + 8; + case SCR_CONTROL: + return offset; + default: + /* do nothing */ + break; + } + + return 0; +} + +static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + void *mmio = (void *) sil_scr_addr(ap, sc_reg); + if (mmio) + return readl(mmio); + return 0xffffffffU; +} + +static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +{ + void *mmio = (void *) sil_scr_addr(ap, sc_reg); + if (mmio) + writel(val, mmio); +} + +/** + * sil_dev_config - Apply device/host-specific errata fixups + * @ap: Port containing device to be examined + * @dev: Device to be examined + * + * After the IDENTIFY [PACKET] DEVICE step is complete, and a + * device is known to be present, this function is called. + * We apply two errata fixups which are specific to Silicon Image, + * a Seagate and a Maxtor fixup. + * + * For certain Seagate devices, we must limit the maximum sectors + * to under 8K. + * + * For certain Maxtor devices, we must not program the drive + * beyond udma5. + * + * Both fixups are unfairly pessimistic. As soon as I get more + * information on these errata, I will create a more exhaustive + * list, and apply the fixups to only the specific + * devices/hosts/firmwares that need it. 
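 *
 * [Illustrative example, not part of the original patch: given the
 *  sil_blacklist[] table above, an IDENTIFY product string of
 *  "ST3120023AS" memcmp-matches its entry, quirks becomes
 *  SIL_QUIRK_MOD15WRITE, and the code below clamps max_sectors to 15.
 *  A "Maxtor 4D060H3" match instead sets SIL_QUIRK_UDMA5MAX, and
 *  udma_mask is ANDed with ATA_UDMA5 (0x3f) so the drive is never
 *  programmed past UDMA/100.]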
+ * + * 20040111 - Seagate drives affected by the Mod15Write bug are blacklisted + * The Maxtor quirk is in the blacklist, but I'm keeping the original + * pessimistic fix for the following reasons: + * - There seems to be less info on it, only one device gleaned off the + * Windows driver, maybe only one is affected. More info would be greatly + * appreciated. + * - But then again UDMA5 is hardly anything to complain about + */ +static void sil_dev_config(struct ata_port *ap, struct ata_device *dev) +{ + unsigned int n, quirks = 0; + const char *s = &dev->product[0]; + unsigned int len = strnlen(s, sizeof(dev->product)); + + /* ATAPI specifies that empty space is blank-filled; remove blanks */ + while ((len > 0) && (s[len - 1] == ' ')) + len--; + + for (n = 0; sil_blacklist[n].product; n++) + if (!memcmp(sil_blacklist[n].product, s, + strlen(sil_blacklist[n].product))) { + quirks = sil_blacklist[n].quirk; + break; + } + + /* limit requests to 15 sectors */ + if (quirks & SIL_QUIRK_MOD15WRITE) { + printk(KERN_INFO "ata%u(%u): applying Seagate errata fix\n", + ap->id, dev->devno); + ap->host->max_sectors = 15; + ap->host->hostt->max_sectors = 15; + return; + } + + /* limit to udma5 */ + if (quirks & SIL_QUIRK_UDMA5MAX) { + printk(KERN_INFO "ata%u(%u): applying Maxtor errata fix %s\n", + ap->id, dev->devno, s); + ap->udma_mask &= ATA_UDMA5; + return; + } +} + +static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + unsigned long base; + void *mmio_base; + int rc; + unsigned int i; + u32 tmp, irq_mask; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* + * If this driver happens to only be useful on Apple's K2, then + * we should check that here as it has a normal Serverworks ID + */ + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + INIT_LIST_HEAD(&probe_ent->node); + probe_ent->pdev = pdev; + probe_ent->port_ops = sil_port_info[ent->driver_data].port_ops; + probe_ent->sht = sil_port_info[ent->driver_data].sht; + probe_ent->n_ports = (ent->driver_data == sil_3114) ? 
4 : 2; + probe_ent->pio_mask = sil_port_info[ent->driver_data].pio_mask; + probe_ent->udma_mask = sil_port_info[ent->driver_data].udma_mask; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->host_flags = sil_port_info[ent->driver_data].host_flags; + + mmio_base = ioremap(pci_resource_start(pdev, 5), + pci_resource_len(pdev, 5)); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_free_ent; + } + + probe_ent->mmio_base = mmio_base; + + base = (unsigned long) mmio_base; + + for (i = 0; i < probe_ent->n_ports; i++) { + probe_ent->port[i].cmd_addr = base + sil_port[i].tf; + probe_ent->port[i].altstatus_addr = + probe_ent->port[i].ctl_addr = base + sil_port[i].ctl; + probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma; + probe_ent->port[i].scr_addr = base + sil_port[i].scr; + ata_std_ports(&probe_ent->port[i]); + } + + if (ent->driver_data == sil_3114) { + irq_mask = SIL_MASK_4PORT; + + /* flip the magic "make 4 ports work" bit */ + tmp = readl(mmio_base + SIL_IDE2_BMDMA); + if ((tmp & SIL_INTR_STEERING) == 0) + writel(tmp | SIL_INTR_STEERING, + mmio_base + SIL_IDE2_BMDMA); + + } else { + irq_mask = SIL_MASK_2PORT; + } + + /* make sure IDE0/1/2/3 interrupts are not masked */ + tmp = readl(mmio_base + SIL_SYSCFG); + if (tmp & irq_mask) { + tmp &= ~irq_mask; + writel(tmp, mmio_base + SIL_SYSCFG); + readl(mmio_base + SIL_SYSCFG); /* flush */ + } + + /* mask all SATA phy-related interrupts */ + /* TODO: unmask bit 6 (SError N bit) for hotplug */ + for (i = 0; i < probe_ent->n_ports; i++) + writel(0, mmio_base + sil_port[i].sien); + + pci_set_master(pdev); + + ata_add_to_probe_list(probe_ent); + + return 0; + +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +static int __init sil_init(void) +{ + int rc; + + rc = pci_module_init(&sil_pci_driver); + if (rc) + return rc; + + rc = scsi_register_module(MODULE_SCSI_HA, &sil_sht); + if (rc) { + rc = -ENODEV; + goto err_out; + } + + return 0; + +err_out: + pci_unregister_driver(&sil_pci_driver); + return rc; +} + +static void __exit sil_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &sil_sht); + pci_unregister_driver(&sil_pci_driver); +} + + +module_init(sil_init); +module_exit(sil_exit); diff -urN linux-2.4.26-pre6/drivers/scsi/sata_sis.c linux-2.4.26-pre6-ata14/drivers/scsi/sata_sis.c --- linux-2.4.26-pre6/drivers/scsi/sata_sis.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/sata_sis.c Thu Mar 25 22:45:24 2004 @@ -0,0 +1,221 @@ +/* + * sata_sis.c - Silicon Integrated Systems SATA + * + * Copyright 2004 Uwe Koziolek + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. 
+ * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "sata_sis" +#define DRV_VERSION "0.04" + +enum { + sis_180 = 0, +}; + +static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg); +static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); + +static struct pci_device_id sis_pci_tbl[] = { + { PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, sis_180 }, + { } /* terminate list */ +}; + + +static struct pci_driver sis_pci_driver = { + .name = DRV_NAME, + .id_table = sis_pci_tbl, + .probe = sis_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template sis_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = ATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .bios_param = ata_std_bios_param, +}; + +static struct ata_port_operations sis_ops = { + .port_disable = ata_port_disable, + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .check_status = ata_check_status_pio, + .exec_command = ata_exec_command_pio, + .phy_reset = sata_phy_reset, + .bmdma_start = ata_bmdma_start_pio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + .irq_handler = ata_interrupt, + .scr_read = sis_scr_read, + .scr_write = sis_scr_write, + .port_start = ata_port_start, + .port_stop = ata_port_stop, +}; + + +MODULE_AUTHOR("Uwe Koziolek"); +MODULE_DESCRIPTION("low-level driver for Silicon Integratad Systems SATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, sis_pci_tbl); + + +static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg >= 16) + return 0xffffffffU; + + return inl(ap->ioaddr.scr_addr + (sc_reg * 4)); +} + +static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +{ + if (sc_reg >= 16) + return; + outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)); +} + +/* move to PCI layer, integrate w/ MSI stuff */ +static void pci_enable_intx(struct pci_dev *pdev) +{ + u16 pci_command; + + pci_read_config_word(pdev, PCI_COMMAND, &pci_command); + if (pci_command & PCI_COMMAND_INTX_DISABLE) { + pci_command &= ~PCI_COMMAND_INTX_DISABLE; + pci_write_config_word(pdev, PCI_COMMAND, pci_command); + } +} + +static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + struct ata_probe_ent *probe_ent = NULL; + int rc; + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + probe_ent->sht = &sis_sht; + probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | + ATA_FLAG_NO_LEGACY; + 
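	/*
	 * [Illustrative example, not part of the original patch: with the
	 *  BAR assignments a few lines below, port 0's SCRs start at
	 *  BAR 5 + 0 and port 1's at BAR 5 + 64, and sis_scr_read() above
	 *  indexes them in 4-byte steps.  For an example BAR 5 of 0xd000:
	 *
	 *      sis_scr_read(port0, SCR_STATUS)  -> inl(0xd000)
	 *      sis_scr_read(port0, SCR_CONTROL) -> inl(0xd008)
	 *      sis_scr_read(port1, SCR_ERROR)   -> inl(0xd044)
	 *
	 *  (SCR_STATUS/SCR_ERROR/SCR_CONTROL are 0/1/2 per the
	 *   include/linux/ata.h hunk later in this patch; 0xd000 is only an
	 *   illustrative value.)]
	 */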
probe_ent->pio_mask = 0x03; + probe_ent->udma_mask = 0x7f; + probe_ent->port_ops = &sis_ops; + + probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); + ata_std_ports(&probe_ent->port[0]); + probe_ent->port[0].ctl_addr = + pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; + probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); + probe_ent->port[0].scr_addr = pci_resource_start(pdev, 5); + + probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); + ata_std_ports(&probe_ent->port[1]); + probe_ent->port[1].ctl_addr = + pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; + probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8; + probe_ent->port[1].scr_addr = pci_resource_start(pdev, 5) + 64; + + probe_ent->n_ports = 2; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + + pci_set_master(pdev); + pci_enable_intx(pdev); + + ata_add_to_probe_list(probe_ent); + + return 0; + +err_out_regions: + pci_release_regions(pdev); + +err_out: + pci_disable_device(pdev); + return rc; + +} + +static int __init sis_init(void) +{ + int rc = pci_module_init(&sis_pci_driver); + if (rc) + return rc; + + rc = scsi_register_module(MODULE_SCSI_HA, &sis_sht); + if (rc) { + pci_unregister_driver(&sis_pci_driver); + return -ENODEV; + } + + return 0; +} + +static void __exit sis_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &sis_sht); + pci_unregister_driver(&sis_pci_driver); +} + +module_init(sis_init); +module_exit(sis_exit); + diff -urN linux-2.4.26-pre6/drivers/scsi/sata_svw.c linux-2.4.26-pre6-ata14/drivers/scsi/sata_svw.c --- linux-2.4.26-pre6/drivers/scsi/sata_svw.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/sata_svw.c Thu Mar 25 22:45:24 2004 @@ -0,0 +1,409 @@ +/* + * ata_k2.c - Broadcom (Apple K2) SATA + * + * Copyright 2003 Benjamin Herrenschmidt + * + * Bits from Jeff Garzik, Copyright RedHat, Inc. + * + * This driver probably works with non-Apple versions of the + * Broadcom chipset... + * + * The contents of this file are subject to the Open + * Software License version 1.1 that can be found at + * http://www.opensource.org/licenses/osl-1.1.txt and is included herein + * by reference. + * + * Alternatively, the contents of this file may be used under the terms + * of the GNU General Public License version 2 (the "GPL") as distributed + * in the kernel source COPYING file, in which case the provisions of + * the GPL are applicable instead of the above. If you wish to allow + * the use of your version of this file only under the terms of the + * GPL and not to allow others to use your version of this file under + * the OSL, indicate your decision by deleting the provisions above and + * replace them with the notice and other provisions required by the GPL. + * If you do not delete the provisions above, a recipient may use your + * version of this file under either the OSL or the GPL. 
+ * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#ifdef CONFIG_PPC_OF +#include +#include +#endif /* CONFIG_PPC_OF */ + +#define DRV_NAME "sata_svw" +#define DRV_VERSION "1.04" + +/* Taskfile registers offsets */ +#define K2_SATA_TF_CMD_OFFSET 0x00 +#define K2_SATA_TF_DATA_OFFSET 0x00 +#define K2_SATA_TF_ERROR_OFFSET 0x04 +#define K2_SATA_TF_NSECT_OFFSET 0x08 +#define K2_SATA_TF_LBAL_OFFSET 0x0c +#define K2_SATA_TF_LBAM_OFFSET 0x10 +#define K2_SATA_TF_LBAH_OFFSET 0x14 +#define K2_SATA_TF_DEVICE_OFFSET 0x18 +#define K2_SATA_TF_CMDSTAT_OFFSET 0x1c +#define K2_SATA_TF_CTL_OFFSET 0x20 + +/* DMA base */ +#define K2_SATA_DMA_CMD_OFFSET 0x30 + +/* SCRs base */ +#define K2_SATA_SCR_STATUS_OFFSET 0x40 +#define K2_SATA_SCR_ERROR_OFFSET 0x44 +#define K2_SATA_SCR_CONTROL_OFFSET 0x48 + +/* Others */ +#define K2_SATA_SICR1_OFFSET 0x80 +#define K2_SATA_SICR2_OFFSET 0x84 +#define K2_SATA_SIM_OFFSET 0x88 + +/* Port stride */ +#define K2_SATA_PORT_OFFSET 0x100 + + +static u32 k2_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg > SCR_CONTROL) + return 0xffffffffU; + return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + + +static void k2_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, + u32 val) +{ + if (sc_reg > SCR_CONTROL) + return; + writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + + +static void k2_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + if (tf->ctl != ap->last_ctl) { + writeb(tf->ctl, ioaddr->ctl_addr); + ap->last_ctl = tf->ctl; + ata_wait_idle(ap); + } + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr); + writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr); + writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr); + writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr); + writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr); + } else if (is_addr) { + writew(tf->feature, ioaddr->feature_addr); + writew(tf->nsect, ioaddr->nsect_addr); + writew(tf->lbal, ioaddr->lbal_addr); + writew(tf->lbam, ioaddr->lbam_addr); + writew(tf->lbah, ioaddr->lbah_addr); + } + + if (tf->flags & ATA_TFLAG_DEVICE) + writeb(tf->device, ioaddr->device_addr); + + ata_wait_idle(ap); +} + + +static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u16 nsect, lbal, lbam, lbah; + + nsect = tf->nsect = readw(ioaddr->nsect_addr); + lbal = tf->lbal = readw(ioaddr->lbal_addr); + lbam = tf->lbam = readw(ioaddr->lbam_addr); + lbah = tf->lbah = readw(ioaddr->lbah_addr); + tf->device = readw(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + tf->hob_feature = readw(ioaddr->error_addr) >> 8; + tf->hob_nsect = nsect >> 8; + tf->hob_lbal = lbal >> 8; + tf->hob_lbam = lbam >> 8; + tf->hob_lbah = lbah >> 8; + } +} + + +static u8 k2_stat_check_status(struct ata_port *ap) +{ + return readl((void *) ap->ioaddr.status_addr); +} + +#ifdef CONFIG_PPC_OF +/* + * k2_sata_proc_info + * inout : decides on the direction of the dataflow and the meaning of the + * variables + * buffer: If inout==FALSE data is being written to it else read from it + * *start: If inout==FALSE start of the valid data in the buffer + * offset: If inout==FALSE offset from the beginning of the imaginary file + * 
from which we start writing into the buffer + * length: If inout==FALSE max number of bytes to be written into the buffer + * else number of bytes in the buffer + */ +static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start, + off_t offset, int count, int inout) +{ + struct ata_port *ap; + struct device_node *np; + int len, index; + + /* Find the ata_port */ + ap = (struct ata_port *) &shost->hostdata[0]; + if (ap == NULL) + return 0; + + /* Find the OF node for the PCI device proper */ + np = pci_device_to_OF_node(ap->host_set->pdev); + if (np == NULL) + return 0; + + /* Match it to a port node */ + index = (ap == ap->host_set->ports[0]) ? 0 : 1; + for (np = np->child; np != NULL; np = np->sibling) { + u32 *reg = (u32 *)get_property(np, "reg", NULL); + if (!reg) + continue; + if (index == *reg) + break; + } + if (np == NULL) + return 0; + + len = sprintf(page, "devspec: %s\n", np->full_name); + + return len; +} +#endif /* CONFIG_PPC_OF */ + + +static Scsi_Host_Template k2_sata_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = LIBATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, +#ifdef CONFIG_PPC_OF + .proc_info = k2_sata_proc_info, +#endif + .bios_param = ata_std_bios_param, +}; + + +static struct ata_port_operations k2_sata_ops = { + .port_disable = ata_port_disable, + .tf_load = k2_sata_tf_load, + .tf_read = k2_sata_tf_read, + .check_status = k2_stat_check_status, + .exec_command = ata_exec_command_mmio, + .phy_reset = sata_phy_reset, + .bmdma_start = ata_bmdma_start_mmio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + .irq_handler = ata_interrupt, + .scr_read = k2_sata_scr_read, + .scr_write = k2_sata_scr_write, + .port_start = ata_port_start, + .port_stop = ata_port_stop, +}; + +static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base) +{ + port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET; + port->data_addr = base + K2_SATA_TF_DATA_OFFSET; + port->feature_addr = + port->error_addr = base + K2_SATA_TF_ERROR_OFFSET; + port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET; + port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET; + port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET; + port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET; + port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET; + port->command_addr = + port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET; + port->altstatus_addr = + port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET; + port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET; + port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET; +} + + +static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + unsigned long base; + void *mmio_base; + int rc; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + /* + * If this driver happens to only be useful on Apple's K2, then + * we should check that here as it has a normal Serverworks ID + */ + rc = pci_enable_device(pdev); + if (rc) + return rc; + /* + * Check if we have resources mapped at all (second function may + * have been disabled by firmware) + */ + if 
(pci_resource_len(pdev, 5) == 0) + return -ENODEV; + + /* Request PCI regions */ + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + mmio_base = ioremap(pci_resource_start(pdev, 5), + pci_resource_len(pdev, 5)); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_free_ent; + } + base = (unsigned long) mmio_base; + + /* Clear a magic bit in SCR1 according to Darwin, those help + * some funky seagate drives (though so far, those were already + * set by the firmware on the machines I had access to + */ + writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000, + mmio_base + K2_SATA_SICR1_OFFSET); + + /* Clear SATA error & interrupts we don't use */ + writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET); + writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); + + probe_ent->sht = &k2_sata_sht; + probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_SATA_RESET | + ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO; + probe_ent->port_ops = &k2_sata_ops; + probe_ent->n_ports = 4; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->mmio_base = mmio_base; + + /* We don't care much about the PIO/UDMA masks, but the core won't like us + * if we don't fill these + */ + probe_ent->pio_mask = 0x1f; + probe_ent->udma_mask = 0x7f; + + /* We have 4 ports per PCI function */ + k2_sata_setup_port(&probe_ent->port[0], base + 0 * K2_SATA_PORT_OFFSET); + k2_sata_setup_port(&probe_ent->port[1], base + 1 * K2_SATA_PORT_OFFSET); + k2_sata_setup_port(&probe_ent->port[2], base + 2 * K2_SATA_PORT_OFFSET); + k2_sata_setup_port(&probe_ent->port[3], base + 3 * K2_SATA_PORT_OFFSET); + + pci_set_master(pdev); + + ata_add_to_probe_list(probe_ent); + + return 0; + +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + + +static struct pci_device_id k2_sata_pci_tbl[] = { + { 0x1166, 0x0240, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, + { } +}; + + +static struct pci_driver k2_sata_pci_driver = { + .name = DRV_NAME, + .id_table = k2_sata_pci_tbl, + .probe = k2_sata_init_one, + .remove = ata_pci_remove_one, +}; + + +static int __init k2_sata_init(void) +{ + int rc; + + rc = pci_module_init(&k2_sata_pci_driver); + if (rc) + return rc; + + rc = scsi_register_module(MODULE_SCSI_HA, &k2_sata_sht); + if (rc) { + rc = -ENODEV; + goto err_out; + } + + return 0; + +err_out: + pci_unregister_driver(&k2_sata_pci_driver); + return rc; +} + + +static void __exit k2_sata_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &k2_sata_sht); + pci_unregister_driver(&k2_sata_pci_driver); +} + + +MODULE_AUTHOR("Benjamin Herrenschmidt"); +MODULE_DESCRIPTION("low-level driver for K2 SATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl); + +module_init(k2_sata_init); +module_exit(k2_sata_exit); diff -urN linux-2.4.26-pre6/drivers/scsi/sata_via.c linux-2.4.26-pre6-ata14/drivers/scsi/sata_via.c --- linux-2.4.26-pre6/drivers/scsi/sata_via.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/sata_via.c Thu Mar 25 22:45:38 2004 @@ -0,0 +1,319 @@ +/* + sata_via.c - VIA Serial ATA controllers + + Copyright 2003-2004 Red Hat, Inc. All rights reserved. 
+ Copyright 2003-2004 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include +#include + +#define DRV_NAME "sata_via" +#define DRV_VERSION "0.20" + +enum { + via_sata = 0, + + SATA_CHAN_ENAB = 0x40, + SATA_INT_GATE = 0x41, + SATA_NATIVE_MODE = 0x42, + + PORT0 = (1 << 1), + PORT1 = (1 << 0), + + ENAB_ALL = PORT0 | PORT1, + + INT_GATE_ALL = PORT0 | PORT1, + + NATIVE_MODE_ALL = (1 << 7) | (1 << 6) | (1 << 5) | (1 << 4), +}; + +static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent); +static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg); +static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val); + +static struct pci_device_id svia_pci_tbl[] = { + { 0x1106, 0x3149, PCI_ANY_ID, PCI_ANY_ID, 0, 0, via_sata }, + + { } /* terminate list */ +}; + +static struct pci_driver svia_pci_driver = { + .name = DRV_NAME, + .id_table = svia_pci_tbl, + .probe = svia_init_one, + .remove = ata_pci_remove_one, +}; + +static Scsi_Host_Template svia_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = LIBATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .bios_param = ata_std_bios_param, +}; + +static struct ata_port_operations svia_sata_ops = { + .port_disable = ata_port_disable, + + .tf_load = ata_tf_load_pio, + .tf_read = ata_tf_read_pio, + .check_status = ata_check_status_pio, + .exec_command = ata_exec_command_pio, + + .phy_reset = sata_phy_reset, + + .bmdma_start = ata_bmdma_start_pio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + + .irq_handler = ata_interrupt, + + .scr_read = svia_scr_read, + .scr_write = svia_scr_write, + + .port_start = ata_port_start, + .port_stop = ata_port_stop, +}; + +MODULE_AUTHOR("Jeff Garzik"); +MODULE_DESCRIPTION("SCSI low-level driver for VIA SATA controllers"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, svia_pci_tbl); + +static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg > SCR_CONTROL) + return 0xffffffffU; + return inl(ap->ioaddr.scr_addr + (4 * sc_reg)); +} + +static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val) +{ + if (sc_reg > SCR_CONTROL) + return; + outl(val, ap->ioaddr.scr_addr + (4 * sc_reg)); +} + 
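/*
 * [Illustrative sketch, not part of the original patch: svia_scr_addr()
 *  below carves BAR 5 (at least 256 bytes, per svia_bar_sizes[]) into
 *  64-byte SCR windows, one per port.  An equivalent way to write the
 *  same mapping, kept under #if 0 because it is illustrative only:]
 */
#if 0
static unsigned long svia_scr_addr_example(unsigned long bar5, unsigned int port)
{
	if (port >= 4)
		return 0;				/* invalid port */

	/* clear bits 7:6, then select the 64-byte window for this port */
	return (bar5 & ~0xc0UL) | ((unsigned long) port << 6);
}
#endif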
+static const unsigned int svia_bar_sizes[] = { + 8, 4, 8, 4, 16, 256 +}; + +static unsigned long svia_scr_addr(unsigned long addr, unsigned int port) +{ + if (port >= 4) + return 0; /* invalid port */ + + addr &= ~((1 << 7) | (1 << 6)); + addr |= ((unsigned long)port << 6); + + return addr; +} + +/** + * svia_init_one - + * @pdev: + * @ent: + * + * LOCKING: + * + * RETURNS: + * + */ + +static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + unsigned int i; + int rc; + struct ata_probe_ent *probe_ent; + u8 tmp8; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + + for (i = 0; i < ARRAY_SIZE(svia_bar_sizes); i++) + if ((pci_resource_start(pdev, i) == 0) || + (pci_resource_len(pdev, i) < svia_bar_sizes[i])) { + printk(KERN_ERR DRV_NAME "(%s): invalid PCI BAR %u (sz 0x%lx, val 0x%lx)\n", + pci_name(pdev), i, + pci_resource_start(pdev, i), + pci_resource_len(pdev, i)); + rc = -ENODEV; + goto err_out_regions; + } + + rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (!probe_ent) { + printk(KERN_ERR DRV_NAME "(%s): out of memory\n", + pci_name(pdev)); + rc = -ENOMEM; + goto err_out_regions; + } + memset(probe_ent, 0, sizeof(*probe_ent)); + INIT_LIST_HEAD(&probe_ent->node); + probe_ent->pdev = pdev; + probe_ent->sht = &svia_sht; + probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_SRST | + ATA_FLAG_NO_LEGACY; + probe_ent->port_ops = &svia_sata_ops; + probe_ent->n_ports = 2; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->pio_mask = 0x1f; + probe_ent->udma_mask = 0x7f; + + probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0); + ata_std_ports(&probe_ent->port[0]); + probe_ent->port[0].altstatus_addr = + probe_ent->port[0].ctl_addr = + pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS; + probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4); + probe_ent->port[0].scr_addr = + svia_scr_addr(pci_resource_start(pdev, 5), 0); + + probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2); + ata_std_ports(&probe_ent->port[1]); + probe_ent->port[1].altstatus_addr = + probe_ent->port[1].ctl_addr = + pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS; + probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8; + probe_ent->port[1].scr_addr = + svia_scr_addr(pci_resource_start(pdev, 5), 1); + + pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &tmp8); + printk(KERN_INFO DRV_NAME "(%s): routed to hard irq line %d\n", + pci_name(pdev), + (int) (tmp8 & 0xf0) == 0xf0 ? 
0 : tmp8 & 0x0f); + + /* make sure SATA channels are enabled */ + pci_read_config_byte(pdev, SATA_CHAN_ENAB, &tmp8); + if ((tmp8 & ENAB_ALL) != ENAB_ALL) { + printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channels (0x%x)\n", + pci_name(pdev), (int) tmp8); + tmp8 |= ENAB_ALL; + pci_write_config_byte(pdev, SATA_CHAN_ENAB, tmp8); + } + + /* make sure interrupts for each channel sent to us */ + pci_read_config_byte(pdev, SATA_INT_GATE, &tmp8); + if ((tmp8 & INT_GATE_ALL) != INT_GATE_ALL) { + printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channel interrupts (0x%x)\n", + pci_name(pdev), (int) tmp8); + tmp8 |= INT_GATE_ALL; + pci_write_config_byte(pdev, SATA_INT_GATE, tmp8); + } + + /* make sure native mode is enabled */ + pci_read_config_byte(pdev, SATA_NATIVE_MODE, &tmp8); + if ((tmp8 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL) { + printk(KERN_DEBUG DRV_NAME "(%s): enabling SATA channel native mode (0x%x)\n", + pci_name(pdev), (int) tmp8); + tmp8 |= NATIVE_MODE_ALL; + pci_write_config_byte(pdev, SATA_NATIVE_MODE, tmp8); + } + + pci_set_master(pdev); + + ata_add_to_probe_list(probe_ent); + + return 0; + +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + +/** + * svia_init - + * + * LOCKING: + * + * RETURNS: + * + */ + +static int __init svia_init(void) +{ + int rc; + + rc = pci_module_init(&svia_pci_driver); + if (rc) + return rc; + + rc = scsi_register_module(MODULE_SCSI_HA, &svia_sht); + if (rc) { + pci_unregister_driver(&svia_pci_driver); + /* TODO: does scsi_register_module return errno val? */ + return -ENODEV; + } + + return 0; +} + +/** + * svia_exit - + * + * LOCKING: + * + */ + +static void __exit svia_exit(void) +{ + pci_unregister_driver(&svia_pci_driver); +} + +module_init(svia_init); +module_exit(svia_exit); + diff -urN linux-2.4.26-pre6/drivers/scsi/sata_vsc.c linux-2.4.26-pre6-ata14/drivers/scsi/sata_vsc.c --- linux-2.4.26-pre6/drivers/scsi/sata_vsc.c Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/drivers/scsi/sata_vsc.c Thu Mar 25 22:45:24 2004 @@ -0,0 +1,392 @@ +/* + * sata_vsc.c - Vitesse VSC7174 4 port DPA SATA + * + * Copyright 2004 SGI + * + * Bits from Jeff Garzik, Copyright RedHat, Inc. + * + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
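 *
 * [Illustrative note on the sata_via hunk above, not part of the original
 *  patch: svia_init_one() fixes up three PCI config bytes with the same
 *  read-check-OR-write pattern.  For example, if the native-mode byte at
 *  0x42 reads 0x05, then (0x05 & NATIVE_MODE_ALL) != NATIVE_MODE_ALL
 *  (0xf0), so 0xf5 is written back; likewise a channel-enable byte of
 *  0x02 becomes 0x03 (ENAB_ALL).  Bytes that already have the required
 *  bits set are left untouched.]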
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include "scsi.h" +#include "hosts.h" +#include + +#define DRV_NAME "sata_vsc" +#define DRV_VERSION "0.01" + +/* Interrupt register offsets (from chip base address) */ +#define VSC_SATA_INT_STAT_OFFSET 0x00 +#define VSC_SATA_INT_MASK_OFFSET 0x04 + +/* Taskfile registers offsets */ +#define VSC_SATA_TF_CMD_OFFSET 0x00 +#define VSC_SATA_TF_DATA_OFFSET 0x00 +#define VSC_SATA_TF_ERROR_OFFSET 0x04 +#define VSC_SATA_TF_FEATURE_OFFSET 0x06 +#define VSC_SATA_TF_NSECT_OFFSET 0x08 +#define VSC_SATA_TF_LBAL_OFFSET 0x0c +#define VSC_SATA_TF_LBAM_OFFSET 0x10 +#define VSC_SATA_TF_LBAH_OFFSET 0x14 +#define VSC_SATA_TF_DEVICE_OFFSET 0x18 +#define VSC_SATA_TF_STATUS_OFFSET 0x1c +#define VSC_SATA_TF_COMMAND_OFFSET 0x1d +#define VSC_SATA_TF_ALTSTATUS_OFFSET 0x28 +#define VSC_SATA_TF_CTL_OFFSET 0x29 + +/* DMA base */ +#define VSC_SATA_DMA_CMD_OFFSET 0x70 + +/* SCRs base */ +#define VSC_SATA_SCR_STATUS_OFFSET 0x100 +#define VSC_SATA_SCR_ERROR_OFFSET 0x104 +#define VSC_SATA_SCR_CONTROL_OFFSET 0x108 + +/* Port stride */ +#define VSC_SATA_PORT_OFFSET 0x200 + + +static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg) +{ + if (sc_reg > SCR_CONTROL) + return 0xffffffffU; + return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + + +static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg, + u32 val) +{ + if (sc_reg > SCR_CONTROL) + return; + writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4)); +} + + +static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl) +{ + unsigned long mask_addr; + u8 mask; + + mask_addr = (unsigned long) ap->host_set->mmio_base + + VSC_SATA_INT_MASK_OFFSET + ap->port_no; + mask = readb(mask_addr); + if (ctl & ATA_NIEN) + mask |= 0x80; + else + mask &= 0x7F; + writeb(mask, mask_addr); +} + + +static void vsc_sata_tf_load(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; + + /* + * The only thing the ctl register is used for is SRST. + * That is not enabled or disabled via tf_load. + * However, if ATA_NIEN is changed, then we need to change the interrupt register. 
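 *
 * [Illustrative example, not part of the original patch:
 *  vsc_intr_mask_update() above keeps one mask byte per port at
 *  mmio_base + VSC_SATA_INT_MASK_OFFSET + port_no.  Setting ATA_NIEN sets
 *  bit 7 of that byte (port interrupt masked) and clearing ATA_NIEN
 *  clears it; e.g. port 2's mask byte sits at offset 0x04 + 2 = 0x06.]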
+ */ + if ((tf->ctl & ATA_NIEN) != (ap->last_ctl & ATA_NIEN)) { + ap->last_ctl = tf->ctl; + vsc_intr_mask_update(ap, tf->ctl & ATA_NIEN); + } + if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { + writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr); + writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr); + writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr); + writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr); + writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr); + } else if (is_addr) { + writew(tf->feature, ioaddr->feature_addr); + writew(tf->nsect, ioaddr->nsect_addr); + writew(tf->lbal, ioaddr->lbal_addr); + writew(tf->lbam, ioaddr->lbam_addr); + writew(tf->lbah, ioaddr->lbah_addr); + } + + if (tf->flags & ATA_TFLAG_DEVICE) + writeb(tf->device, ioaddr->device_addr); + + ata_wait_idle(ap); +} + + +static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + u16 nsect, lbal, lbam, lbah; + + nsect = tf->nsect = readw(ioaddr->nsect_addr); + lbal = tf->lbal = readw(ioaddr->lbal_addr); + lbam = tf->lbam = readw(ioaddr->lbam_addr); + lbah = tf->lbah = readw(ioaddr->lbah_addr); + tf->device = readw(ioaddr->device_addr); + + if (tf->flags & ATA_TFLAG_LBA48) { + tf->hob_feature = readb(ioaddr->error_addr); + tf->hob_nsect = nsect >> 8; + tf->hob_lbal = lbal >> 8; + tf->hob_lbam = lbam >> 8; + tf->hob_lbah = lbah >> 8; + } +} + + +/* + * vsc_sata_interrupt + * + * Read the interrupt register and process for the devices that have them pending. + */ +irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) +{ + struct ata_host_set *host_set = dev_instance; + unsigned int i; + unsigned int handled = 0; + u32 int_status; + + spin_lock(&host_set->lock); + + int_status = readl(host_set->mmio_base + VSC_SATA_INT_STAT_OFFSET); + + for (i = 0; i < host_set->n_ports; i++) { + if (int_status & ((u32) 0xFF << (8 * i))) { + struct ata_port *ap; + + ap = host_set->ports[i]; + if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) { + struct ata_queued_cmd *qc; + + qc = ata_qc_from_tag(ap, ap->active_tag); + if (qc && ((qc->flags & ATA_QCFLAG_POLL) == 0)) + handled += ata_host_intr(ap, qc); + } + } + } + + spin_unlock(&host_set->lock); + + return IRQ_RETVAL(handled); +} + + +static Scsi_Host_Template vsc_sata_sht = { + .module = THIS_MODULE, + .name = DRV_NAME, + .detect = ata_scsi_detect, + .release = ata_scsi_release, + .queuecommand = ata_scsi_queuecmd, + .eh_strategy_handler = ata_scsi_error, + .can_queue = ATA_DEF_QUEUE, + .this_id = ATA_SHT_THIS_ID, + .sg_tablesize = LIBATA_MAX_PRD, + .max_sectors = ATA_MAX_SECTORS, + .cmd_per_lun = ATA_SHT_CMD_PER_LUN, + .use_new_eh_code = ATA_SHT_NEW_EH_CODE, + .emulated = ATA_SHT_EMULATED, + .use_clustering = ATA_SHT_USE_CLUSTERING, + .proc_name = DRV_NAME, + .bios_param = ata_std_bios_param, +}; + + +static struct ata_port_operations vsc_sata_ops = { + .port_disable = ata_port_disable, + .tf_load = vsc_sata_tf_load, + .tf_read = vsc_sata_tf_read, + .exec_command = ata_exec_command_mmio, + .check_status = ata_check_status_mmio, + .phy_reset = sata_phy_reset, + .bmdma_start = ata_bmdma_start_mmio, + .fill_sg = ata_fill_sg, + .eng_timeout = ata_eng_timeout, + .irq_handler = vsc_sata_interrupt, + .scr_read = vsc_sata_scr_read, + .scr_write = vsc_sata_scr_write, + .port_start = ata_port_start, + .port_stop = ata_port_stop, +}; + +static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base) 
+{ + port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET; + port->data_addr = base + VSC_SATA_TF_DATA_OFFSET; + port->error_addr = base + VSC_SATA_TF_ERROR_OFFSET; + port->feature_addr = base + VSC_SATA_TF_FEATURE_OFFSET; + port->nsect_addr = base + VSC_SATA_TF_NSECT_OFFSET; + port->lbal_addr = base + VSC_SATA_TF_LBAL_OFFSET; + port->lbam_addr = base + VSC_SATA_TF_LBAM_OFFSET; + port->lbah_addr = base + VSC_SATA_TF_LBAH_OFFSET; + port->device_addr = base + VSC_SATA_TF_DEVICE_OFFSET; + port->status_addr = base + VSC_SATA_TF_STATUS_OFFSET; + port->command_addr = base + VSC_SATA_TF_COMMAND_OFFSET; + port->altstatus_addr = base + VSC_SATA_TF_ALTSTATUS_OFFSET; + port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET; + port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET; + port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET; +} + + +static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) +{ + static int printed_version; + struct ata_probe_ent *probe_ent = NULL; + unsigned long base; + void *mmio_base; + int rc; + + if (!printed_version++) + printk(KERN_DEBUG DRV_NAME " version " DRV_VERSION "\n"); + + rc = pci_enable_device(pdev); + if (rc) + return rc; + + /* + * Check if we have needed resource mapped. + */ + if (pci_resource_len(pdev, 0) == 0) { + rc = -ENODEV; + goto err_out; + } + + rc = pci_request_regions(pdev, DRV_NAME); + if (rc) + goto err_out; + + /* + * Use 32 bit DMA mask, because 64 bit address support is poor. + */ + rc = pci_set_dma_mask(pdev, 0xFFFFFFFFULL); + if (rc) + goto err_out_regions; + + probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL); + if (probe_ent == NULL) { + rc = -ENOMEM; + goto err_out_regions; + } + memset(probe_ent, 0, sizeof(*probe_ent)); + probe_ent->pdev = pdev; + INIT_LIST_HEAD(&probe_ent->node); + + mmio_base = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); + if (mmio_base == NULL) { + rc = -ENOMEM; + goto err_out_free_ent; + } + base = (unsigned long) mmio_base; + + /* + * Due to a bug in the chip, the default cache line size can't be used + */ + pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x80); + + probe_ent->sht = &vsc_sata_sht; + probe_ent->host_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | + ATA_FLAG_MMIO | ATA_FLAG_SATA_RESET; + probe_ent->port_ops = &vsc_sata_ops; + probe_ent->n_ports = 4; + probe_ent->irq = pdev->irq; + probe_ent->irq_flags = SA_SHIRQ; + probe_ent->mmio_base = mmio_base; + + /* We don't care much about the PIO/UDMA masks, but the core won't like us + * if we don't fill these + */ + probe_ent->pio_mask = 0x1f; + probe_ent->udma_mask = 0x7f; + + /* We have 4 ports per PCI function */ + vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET); + vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET); + vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET); + vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET); + + pci_set_master(pdev); + + ata_add_to_probe_list(probe_ent); + + return 0; + +err_out_free_ent: + kfree(probe_ent); +err_out_regions: + pci_release_regions(pdev); +err_out: + pci_disable_device(pdev); + return rc; +} + + +/* + * 0x1725/0x7174 is the Vitesse VSC-7174 + * 0x8086/0x3200 is the Intel 31244, which is supposed to be identical + * compatibility is untested as of yet + */ +static struct pci_device_id vsc_sata_pci_tbl[] = { + { 0x1725, 0x7174, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, + { 0x8086, 0x3200, PCI_ANY_ID, PCI_ANY_ID, 0x10600, 0xFFFFFF, 0 }, + { } +}; + + +static struct 
pci_driver vsc_sata_pci_driver = { + .name = DRV_NAME, + .id_table = vsc_sata_pci_tbl, + .probe = vsc_sata_init_one, + .remove = ata_pci_remove_one, +}; + + +static int __init vsc_sata_init(void) +{ + int rc; + + DPRINTK("pci_module_init\n"); + rc = pci_module_init(&vsc_sata_pci_driver); + if (rc) + return rc; + + DPRINTK("scsi_register_host\n"); + rc = scsi_register_module(MODULE_SCSI_HA, &vsc_sata_sht); + if (rc) { + rc = -ENODEV; + goto err_out; + } + + DPRINTK("done\n"); + return 0; + +err_out: + pci_unregister_driver(&vsc_sata_pci_driver); + return rc; +} + + +static void __exit vsc_sata_exit(void) +{ + scsi_unregister_module(MODULE_SCSI_HA, &vsc_sata_sht); + pci_unregister_driver(&vsc_sata_pci_driver); +} + + +MODULE_AUTHOR("Jeremy Higdon"); +MODULE_DESCRIPTION("low-level driver for Vitesse VSC7174 SATA controller"); +MODULE_LICENSE("GPL"); +MODULE_DEVICE_TABLE(pci, vsc_sata_pci_tbl); + +module_init(vsc_sata_init); +module_exit(vsc_sata_exit); diff -urN linux-2.4.26-pre6/include/linux/ata.h linux-2.4.26-pre6-ata14/include/linux/ata.h --- linux-2.4.26-pre6/include/linux/ata.h Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/include/linux/ata.h Thu Mar 25 22:45:38 2004 @@ -0,0 +1,210 @@ + +/* + Copyright 2003-2004 Red Hat, Inc. All rights reserved. + Copyright 2003-2004 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#ifndef __LINUX_ATA_H__ +#define __LINUX_ATA_H__ + +/* defines only for the constants which don't work well as enums */ +#define ATA_DMA_BOUNDARY 0xffffUL +#define ATA_DMA_MASK 0xffffffffULL + +enum { + /* various global constants */ + ATA_MAX_DEVICES = 2, /* per bus/port */ + ATA_MAX_PRD = 256, /* we could make these 256/256 */ + ATA_SECT_SIZE = 512, + ATA_SECT_SIZE_MASK = (ATA_SECT_SIZE - 1), + ATA_SECT_DWORDS = ATA_SECT_SIZE / sizeof(u32), + + ATA_ID_WORDS = 256, + ATA_ID_PROD_OFS = 27, + ATA_ID_SERNO_OFS = 10, + ATA_ID_MAJOR_VER = 80, + ATA_ID_PIO_MODES = 64, + ATA_ID_UDMA_MODES = 88, + ATA_ID_PIO4 = (1 << 1), + + ATA_PCI_CTL_OFS = 2, + ATA_SERNO_LEN = 20, + ATA_UDMA0 = (1 << 0), + ATA_UDMA1 = ATA_UDMA0 | (1 << 1), + ATA_UDMA2 = ATA_UDMA1 | (1 << 2), + ATA_UDMA3 = ATA_UDMA2 | (1 << 3), + ATA_UDMA4 = ATA_UDMA3 | (1 << 4), + ATA_UDMA5 = ATA_UDMA4 | (1 << 5), + ATA_UDMA6 = ATA_UDMA5 | (1 << 6), + ATA_UDMA7 = ATA_UDMA6 | (1 << 7), + /* ATA_UDMA7 is just for completeness... doesn't exist (yet?). 
*/ + + ATA_UDMA_MASK_40C = ATA_UDMA2, /* udma0-2 */ + + /* DMA-related */ + ATA_PRD_SZ = 8, + ATA_PRD_TBL_SZ = (ATA_MAX_PRD * ATA_PRD_SZ), + ATA_PRD_EOT = (1 << 31), /* end-of-table flag */ + + ATA_DMA_TABLE_OFS = 4, + ATA_DMA_STATUS = 2, + ATA_DMA_CMD = 0, + ATA_DMA_WR = (1 << 3), + ATA_DMA_START = (1 << 0), + ATA_DMA_INTR = (1 << 2), + ATA_DMA_ERR = (1 << 1), + ATA_DMA_ACTIVE = (1 << 0), + + /* bits in ATA command block registers */ + ATA_HOB = (1 << 7), /* LBA48 selector */ + ATA_NIEN = (1 << 1), /* disable-irq flag */ + ATA_LBA = (1 << 6), /* LBA28 selector */ + ATA_DEV1 = (1 << 4), /* Select Device 1 (slave) */ + ATA_BUSY = (1 << 7), /* BSY status bit */ + ATA_DEVICE_OBS = (1 << 7) | (1 << 5), /* obs bits in dev reg */ + ATA_DEVCTL_OBS = (1 << 3), /* obsolete bit in devctl reg */ + ATA_DRQ = (1 << 3), /* data request i/o */ + ATA_ERR = (1 << 0), /* have an error */ + ATA_SRST = (1 << 2), /* software reset */ + ATA_ABORTED = (1 << 2), /* command aborted */ + + /* ATA command block registers */ + ATA_REG_DATA = 0x00, + ATA_REG_ERR = 0x01, + ATA_REG_NSECT = 0x02, + ATA_REG_LBAL = 0x03, + ATA_REG_LBAM = 0x04, + ATA_REG_LBAH = 0x05, + ATA_REG_DEVICE = 0x06, + ATA_REG_STATUS = 0x07, + + ATA_REG_FEATURE = ATA_REG_ERR, /* and their aliases */ + ATA_REG_CMD = ATA_REG_STATUS, + ATA_REG_BYTEL = ATA_REG_LBAM, + ATA_REG_BYTEH = ATA_REG_LBAH, + ATA_REG_DEVSEL = ATA_REG_DEVICE, + ATA_REG_IRQ = ATA_REG_NSECT, + + /* ATA device commands */ + ATA_CMD_EDD = 0x90, /* execute device diagnostic */ + ATA_CMD_ID_ATA = 0xEC, + ATA_CMD_ID_ATAPI = 0xA1, + ATA_CMD_READ = 0xC8, + ATA_CMD_READ_EXT = 0x25, + ATA_CMD_WRITE = 0xCA, + ATA_CMD_WRITE_EXT = 0x35, + ATA_CMD_PIO_READ = 0x20, + ATA_CMD_PIO_READ_EXT = 0x24, + ATA_CMD_PIO_WRITE = 0x30, + ATA_CMD_PIO_WRITE_EXT = 0x34, + ATA_CMD_SET_FEATURES = 0xEF, + ATA_CMD_PACKET = 0xA0, + + /* SETFEATURES stuff */ + SETFEATURES_XFER = 0x03, + XFER_UDMA_7 = 0x47, + XFER_UDMA_6 = 0x46, + XFER_UDMA_5 = 0x45, + XFER_UDMA_4 = 0x44, + XFER_UDMA_3 = 0x43, + XFER_UDMA_2 = 0x42, + XFER_UDMA_1 = 0x41, + XFER_UDMA_0 = 0x40, + XFER_PIO_4 = 0x0C, + XFER_PIO_3 = 0x0B, + + /* ATAPI stuff */ + ATAPI_PKT_DMA = (1 << 0), + + /* cable types */ + ATA_CBL_NONE = 0, + ATA_CBL_PATA40 = 1, + ATA_CBL_PATA80 = 2, + ATA_CBL_PATA_UNK = 3, + ATA_CBL_SATA = 4, + + /* SATA Status and Control Registers */ + SCR_STATUS = 0, + SCR_ERROR = 1, + SCR_CONTROL = 2, + SCR_ACTIVE = 3, + SCR_NOTIFICATION = 4, + + /* struct ata_taskfile flags */ + ATA_TFLAG_LBA48 = (1 << 0), /* enable 48-bit LBA and "HOB" */ + ATA_TFLAG_ISADDR = (1 << 1), /* enable r/w to nsect/lba regs */ + ATA_TFLAG_DEVICE = (1 << 2), /* enable r/w to device reg */ + ATA_TFLAG_WRITE = (1 << 3), /* data dir: host->dev==1 (write) */ +}; + +enum ata_tf_protocols { + /* ATA taskfile protocols */ + ATA_PROT_UNKNOWN, /* unknown/invalid */ + ATA_PROT_NODATA, /* no data */ + ATA_PROT_PIO, /* PIO single sector */ + ATA_PROT_PIO_MULT, /* PIO multiple sector */ + ATA_PROT_DMA, /* DMA */ + ATA_PROT_ATAPI, /* packet command */ + ATA_PROT_ATAPI_DMA, /* packet command with special DMA sauce */ +}; + +/* core structures */ + +struct ata_prd { + u32 addr; + u32 flags_len; +} __attribute__((packed)); + +struct ata_taskfile { + unsigned long flags; /* ATA_TFLAG_xxx */ + u8 protocol; /* ATA_PROT_xxx */ + + u8 ctl; /* control reg */ + + u8 hob_feature; /* additional data */ + u8 hob_nsect; /* to support LBA48 */ + u8 hob_lbal; + u8 hob_lbam; + u8 hob_lbah; + + u8 feature; + u8 nsect; + u8 lbal; + u8 lbam; + u8 lbah; + + u8 device; + + u8 command; /* IO operation */ 
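+
+	/* A driver's tf_load hook consults ->flags to see which of these
+	 * registers it must actually write: ATA_TFLAG_ISADDR covers the
+	 * feature/nsect/lba block, ATA_TFLAG_DEVICE the device register,
+	 * and ATA_TFLAG_LBA48 additionally sends the hob_* (high order
+	 * byte) fields of a 48-bit command. */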
+}; + +#define ata_id_is_ata(dev) (((dev)->id[0] & (1 << 15)) == 0) +#define ata_id_has_lba48(dev) ((dev)->id[83] & (1 << 10)) +#define ata_id_has_lba(dev) ((dev)->id[49] & (1 << 8)) +#define ata_id_has_dma(dev) ((dev)->id[49] & (1 << 9)) +#define ata_id_u32(dev,n) \ + (((u32) (dev)->id[(n) + 1] << 16) | ((u32) (dev)->id[(n)])) +#define ata_id_u64(dev,n) \ + ( ((u64) dev->id[(n) + 3] << 48) | \ + ((u64) dev->id[(n) + 2] << 32) | \ + ((u64) dev->id[(n) + 1] << 16) | \ + ((u64) dev->id[(n) + 0]) ) + +#endif /* __LINUX_ATA_H__ */ diff -urN linux-2.4.26-pre6/include/linux/ioport.h linux-2.4.26-pre6-ata14/include/linux/ioport.h --- linux-2.4.26-pre6/include/linux/ioport.h Fri Oct 10 08:47:15 2003 +++ linux-2.4.26-pre6-ata14/include/linux/ioport.h Thu Mar 25 22:45:24 2004 @@ -85,6 +85,7 @@ extern int check_resource(struct resource *root, unsigned long, unsigned long); extern int request_resource(struct resource *root, struct resource *new); +extern struct resource * ____request_resource(struct resource *root, struct resource *new); extern int release_resource(struct resource *new); extern int allocate_resource(struct resource *root, struct resource *new, unsigned long size, diff -urN linux-2.4.26-pre6/include/linux/libata.h linux-2.4.26-pre6-ata14/include/linux/libata.h --- linux-2.4.26-pre6/include/linux/libata.h Thu Jan 1 01:00:00 1970 +++ linux-2.4.26-pre6-ata14/include/linux/libata.h Thu Mar 25 22:45:38 2004 @@ -0,0 +1,561 @@ +/* + Copyright 2003-2004 Red Hat, Inc. All rights reserved. + Copyright 2003-2004 Jeff Garzik + + The contents of this file are subject to the Open + Software License version 1.1 that can be found at + http://www.opensource.org/licenses/osl-1.1.txt and is included herein + by reference. + + Alternatively, the contents of this file may be used under the terms + of the GNU General Public License version 2 (the "GPL") as distributed + in the kernel source COPYING file, in which case the provisions of + the GPL are applicable instead of the above. If you wish to allow + the use of your version of this file only under the terms of the + GPL and not to allow others to use your version of this file under + the OSL, indicate your decision by deleting the provisions above and + replace them with the notice and other provisions required by the GPL. + If you do not delete the provisions above, a recipient may use your + version of this file under either the OSL or the GPL. + + */ + +#ifndef __LINUX_LIBATA_H__ +#define __LINUX_LIBATA_H__ + +#include +#include +#include +#include + +/* + * compile-time options + */ +#undef ATA_FORCE_PIO /* do not configure or use DMA */ +#undef ATA_DEBUG /* debugging output */ +#undef ATA_VERBOSE_DEBUG /* yet more debugging output */ +#undef ATA_IRQ_TRAP /* define to ack screaming irqs */ +#undef ATA_NDEBUG /* define to disable quick runtime checks */ +#undef ATA_ENABLE_ATAPI /* define to enable ATAPI support */ +#undef ATA_ENABLE_PATA /* define to enable PATA support in some + * low-level drivers */ + + +/* note: prints function name for you */ +#ifdef ATA_DEBUG +#define DPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#ifdef ATA_VERBOSE_DEBUG +#define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __FUNCTION__, ## args) +#else +#define VPRINTK(fmt, args...) +#endif /* ATA_VERBOSE_DEBUG */ +#else +#define DPRINTK(fmt, args...) +#define VPRINTK(fmt, args...) +#endif /* ATA_DEBUG */ + +#ifdef ATA_NDEBUG +#define assert(expr) +#else +#define assert(expr) \ + if(unlikely(!(expr))) { \ + printk(KERN_ERR "Assertion failed! 
%s,%s,%s,line=%d\n", \ + #expr,__FILE__,__FUNCTION__,__LINE__); \ + } +#endif + +/* defines only for the constants which don't work well as enums */ +#define ATA_TAG_POISON 0xfafbfcfdU + +enum { + /* various global constants */ + LIBATA_MAX_PRD = ATA_MAX_PRD / 2, + ATA_MAX_PORTS = 8, + ATA_DEF_QUEUE = 1, + ATA_MAX_QUEUE = 1, + ATA_MAX_SECTORS = 200, /* FIXME */ + ATA_MAX_BUS = 2, + ATA_DEF_BUSY_WAIT = 10000, + ATA_SHORT_PAUSE = (HZ >> 6) + 1, + + ATA_SHT_EMULATED = 1, + ATA_SHT_NEW_EH_CODE = 1, + ATA_SHT_CMD_PER_LUN = 1, + ATA_SHT_THIS_ID = -1, + ATA_SHT_USE_CLUSTERING = 0, + + /* struct ata_device stuff */ + ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ + ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ + ATA_DFLAG_MASTER = (1 << 2), /* is device 0? */ + ATA_DFLAG_WCACHE = (1 << 3), /* has write cache we can + * (hopefully) flush? */ + + ATA_DEV_UNKNOWN = 0, /* unknown device */ + ATA_DEV_ATA = 1, /* ATA device */ + ATA_DEV_ATA_UNSUP = 2, /* ATA device (unsupported) */ + ATA_DEV_ATAPI = 3, /* ATAPI device */ + ATA_DEV_ATAPI_UNSUP = 4, /* ATAPI device (unsupported) */ + ATA_DEV_NONE = 5, /* no device */ + + /* struct ata_port flags */ + ATA_FLAG_SLAVE_POSS = (1 << 1), /* host supports slave dev */ + /* (doesn't imply presence) */ + ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ + ATA_FLAG_SATA = (1 << 3), + ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ + ATA_FLAG_SRST = (1 << 5), /* use ATA SRST, not E.D.D. */ + ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ + ATA_FLAG_SATA_RESET = (1 << 7), /* use COMRESET */ + + ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi lyer */ + ATA_QCFLAG_DMA = (1 << 2), /* data delivered via DMA */ + ATA_QCFLAG_ATAPI = (1 << 3), /* is ATAPI packet command? */ + ATA_QCFLAG_SG = (1 << 4), /* have s/g table? */ + ATA_QCFLAG_POLL = (1 << 5), /* polling, no interrupts */ + + /* struct ata_engine atomic flags (use test_bit, etc.) 
*/ + ATA_EFLG_ACTIVE = 0, /* engine is active */ + + /* various lengths of time */ + ATA_TMOUT_EDD = 5 * HZ, /* hueristic */ + ATA_TMOUT_PIO = 30 * HZ, + ATA_TMOUT_BOOT = 30 * HZ, /* hueristic */ + ATA_TMOUT_BOOT_QUICK = 7 * HZ, /* hueristic */ + ATA_TMOUT_CDB = 30 * HZ, + ATA_TMOUT_CDB_QUICK = 5 * HZ, + + /* ATA bus states */ + BUS_UNKNOWN = 0, + BUS_DMA = 1, + BUS_IDLE = 2, + BUS_NOINTR = 3, + BUS_NODATA = 4, + BUS_TIMER = 5, + BUS_PIO = 6, + BUS_EDD = 7, + BUS_IDENTIFY = 8, + BUS_PACKET = 9, + + /* thread states */ + THR_UNKNOWN = 0, + THR_PORT_RESET = (THR_UNKNOWN + 1), + THR_AWAIT_DEATH = (THR_PORT_RESET + 1), + THR_PROBE_FAILED = (THR_AWAIT_DEATH + 1), + THR_IDLE = (THR_PROBE_FAILED + 1), + THR_PROBE_SUCCESS = (THR_IDLE + 1), + THR_PROBE_START = (THR_PROBE_SUCCESS + 1), + THR_PIO_POLL = (THR_PROBE_START + 1), + THR_PIO_TMOUT = (THR_PIO_POLL + 1), + THR_PIO = (THR_PIO_TMOUT + 1), + THR_PIO_LAST = (THR_PIO + 1), + THR_PIO_LAST_POLL = (THR_PIO_LAST + 1), + THR_PIO_ERR = (THR_PIO_LAST_POLL + 1), + THR_PACKET = (THR_PIO_ERR + 1), + + /* SATA port states */ + PORT_UNKNOWN = 0, + PORT_ENABLED = 1, + PORT_DISABLED = 2, + + /* ata_qc_cb_t flags - note uses above ATA_QCFLAG_xxx namespace, + * but not numberspace + */ + ATA_QCFLAG_TIMEOUT = (1 << 0), +}; + +/* forward declarations */ +struct ata_port_operations; +struct ata_port; +struct ata_queued_cmd; + +/* typedefs */ +typedef void (*ata_qc_cb_t) (struct ata_queued_cmd *qc, unsigned int flags); + +struct ata_ioports { + unsigned long cmd_addr; + unsigned long data_addr; + unsigned long error_addr; + unsigned long feature_addr; + unsigned long nsect_addr; + unsigned long lbal_addr; + unsigned long lbam_addr; + unsigned long lbah_addr; + unsigned long device_addr; + unsigned long status_addr; + unsigned long command_addr; + unsigned long altstatus_addr; + unsigned long ctl_addr; + unsigned long bmdma_addr; + unsigned long scr_addr; +}; + +struct ata_probe_ent { + struct list_head node; + struct pci_dev *pdev; + struct ata_port_operations *port_ops; + Scsi_Host_Template *sht; + struct ata_ioports port[ATA_MAX_PORTS]; + unsigned int n_ports; + unsigned int pio_mask; + unsigned int udma_mask; + unsigned int legacy_mode; + unsigned long irq; + unsigned int irq_flags; + unsigned long host_flags; + void *mmio_base; + void *private_data; +}; + +struct ata_host_set { + spinlock_t lock; + struct pci_dev *pdev; + unsigned long irq; + void *mmio_base; + unsigned int n_ports; + void *private_data; + struct ata_port * ports[0]; +}; + +struct ata_queued_cmd { + struct ata_port *ap; + struct ata_device *dev; + + struct scsi_cmnd *scsicmd; + void (*scsidone)(struct scsi_cmnd *); + + struct list_head node; + unsigned long flags; /* ATA_QCFLAG_xxx */ + unsigned int tag; + unsigned int n_elem; + unsigned int nsect; + unsigned int cursect; + unsigned int cursg; + unsigned int cursg_ofs; + struct ata_taskfile tf; + struct scatterlist sgent; + + struct scatterlist *sg; + + ata_qc_cb_t callback; + + struct semaphore sem; + + void *private_data; +}; + +struct ata_host_stats { + unsigned long unhandled_irq; + unsigned long idle_irq; + unsigned long rw_reqbuf; +}; + +struct ata_device { + u64 n_sectors; /* size of device, if ATA */ + unsigned long flags; /* ATA_DFLAG_xxx */ + unsigned int class; /* ATA_DEV_xxx */ + unsigned int devno; /* 0 or 1 */ + u16 id[ATA_ID_WORDS]; /* IDENTIFY xxx DEVICE data */ + unsigned int pio_mode; + unsigned int udma_mode; + + unsigned char vendor[8]; /* space-padded, not ASCIIZ */ + unsigned char product[32]; /* WARNING: shorter than + * 
ATAPI7 spec size, 40 ASCII + * characters + */ + + /* cache info about current transfer mode */ + u8 xfer_protocol; /* taskfile xfer protocol */ + u8 read_cmd; /* opcode to use on read */ + u8 write_cmd; /* opcode to use on write */ +}; + +struct ata_engine { + unsigned long flags; + struct list_head q; +}; + +struct ata_port { + struct Scsi_Host *host; /* our co-allocated scsi host */ + struct ata_port_operations *ops; + unsigned long flags; /* ATA_FLAG_xxx */ + unsigned int id; /* unique id req'd by scsi midlyr */ + unsigned int port_no; /* unique port #; from zero */ + + struct ata_prd *prd; /* our SG list */ + dma_addr_t prd_dma; /* and its DMA mapping */ + + struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ + + u8 ctl; /* cache of ATA control register */ + u8 last_ctl; /* Cache last written value */ + unsigned int bus_state; + unsigned int port_state; + unsigned int pio_mask; + unsigned int udma_mask; + unsigned int cbl; /* cable type; ATA_CBL_xxx */ + + struct ata_engine eng; + + struct ata_device device[ATA_MAX_DEVICES]; + + struct ata_queued_cmd qcmd[ATA_MAX_QUEUE]; + unsigned long qactive; + unsigned int active_tag; + + struct ata_host_stats stats; + struct ata_host_set *host_set; + + struct semaphore sem; + struct semaphore probe_sem; + + unsigned int thr_state; + int time_to_die; + pid_t thr_pid; + struct completion thr_exited; + struct semaphore thr_sem; + struct timer_list thr_timer; + unsigned long thr_timeout; + + void *private_data; +}; + +struct ata_port_operations { + void (*port_disable) (struct ata_port *); + + void (*dev_config) (struct ata_port *, struct ata_device *); + + void (*set_piomode) (struct ata_port *, struct ata_device *, + unsigned int); + void (*set_udmamode) (struct ata_port *, struct ata_device *, + unsigned int); + + void (*tf_load) (struct ata_port *ap, struct ata_taskfile *tf); + void (*tf_read) (struct ata_port *ap, struct ata_taskfile *tf); + + void (*exec_command)(struct ata_port *ap, struct ata_taskfile *tf); + u8 (*check_status)(struct ata_port *ap); + + void (*phy_reset) (struct ata_port *ap); + void (*post_set_mode) (struct ata_port *ap); + + void (*bmdma_start) (struct ata_queued_cmd *qc); + void (*fill_sg) (struct ata_queued_cmd *qc); + void (*eng_timeout) (struct ata_port *ap); + + irqreturn_t (*irq_handler)(int, void *, struct pt_regs *); + + u32 (*scr_read) (struct ata_port *ap, unsigned int sc_reg); + void (*scr_write) (struct ata_port *ap, unsigned int sc_reg, + u32 val); + + int (*port_start) (struct ata_port *ap); + void (*port_stop) (struct ata_port *ap); + + void (*host_stop) (struct ata_host_set *host_set); +}; + +struct ata_port_info { + Scsi_Host_Template *sht; + unsigned long host_flags; + unsigned long pio_mask; + unsigned long udma_mask; + struct ata_port_operations *port_ops; +}; + +struct pci_bits { + unsigned int reg; /* PCI config register to read */ + unsigned int width; /* 1 (8 bit), 2 (16 bit), 4 (32 bit) */ + unsigned long mask; + unsigned long val; +}; + +extern void ata_port_probe(struct ata_port *); +extern void sata_phy_reset(struct ata_port *ap); +extern void ata_bus_reset(struct ata_port *ap); +extern void ata_port_disable(struct ata_port *); +extern void ata_std_ports(struct ata_ioports *ioaddr); +extern int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info, + unsigned int n_ports); +extern void ata_pci_remove_one (struct pci_dev *pdev); +extern int ata_device_add(struct ata_probe_ent *ent); +extern int ata_scsi_detect(Scsi_Host_Template *sht); +extern int 
ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); +extern int ata_scsi_error(struct Scsi_Host *host); +extern int ata_scsi_release(struct Scsi_Host *host); +extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); +/* + * Default driver ops implementations + */ +extern void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf); +extern u8 ata_check_status_pio(struct ata_port *ap); +extern u8 ata_check_status_mmio(struct ata_port *ap); +extern void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf); +extern void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf); +extern int ata_port_start (struct ata_port *ap); +extern void ata_port_stop (struct ata_port *ap); +extern irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs); +extern void ata_fill_sg(struct ata_queued_cmd *qc); +extern void ata_bmdma_start_mmio (struct ata_queued_cmd *qc); +extern void ata_bmdma_start_pio (struct ata_queued_cmd *qc); +extern int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits); +extern void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat, unsigned int done_late); +extern void ata_eng_timeout(struct ata_port *ap); +extern void ata_add_to_probe_list (struct ata_probe_ent *probe_ent); +extern int ata_std_bios_param(Disk * disk, kdev_t dev, int *ip); + + +static inline unsigned long msecs_to_jiffies(unsigned long msecs) +{ + return ((HZ * msecs + 999) / 1000); +} + +static inline unsigned int ata_tag_valid(unsigned int tag) +{ + return (tag < ATA_MAX_QUEUE) ? 
1 : 0; +} + +static inline unsigned int ata_dev_present(struct ata_device *dev) +{ + return ((dev->class == ATA_DEV_ATA) || + (dev->class == ATA_DEV_ATAPI)); +} + +static inline u8 ata_chk_err(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_MMIO) { + return readb((void *) ap->ioaddr.error_addr); + } + return inb(ap->ioaddr.error_addr); +} + +static inline u8 ata_chk_status(struct ata_port *ap) +{ + return ap->ops->check_status(ap); +} + +static inline u8 ata_altstatus(struct ata_port *ap) +{ + if (ap->flags & ATA_FLAG_MMIO) + return readb(ap->ioaddr.altstatus_addr); + return inb(ap->ioaddr.altstatus_addr); +} + +static inline void ata_pause(struct ata_port *ap) +{ + ata_altstatus(ap); + ndelay(400); +} + +static inline u8 ata_busy_wait(struct ata_port *ap, unsigned int bits, + unsigned int max) +{ + u8 status; + + do { + udelay(10); + status = ata_chk_status(ap); + max--; + } while ((status & bits) && (max > 0)); + + return status; +} + +static inline u8 ata_wait_idle(struct ata_port *ap) +{ + u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000); + + if (status & (ATA_BUSY | ATA_DRQ)) { + unsigned long l = ap->ioaddr.status_addr; + printk(KERN_WARNING + "ATA: abnormal status 0x%X on port 0x%lX\n", + status, l); + } + + return status; +} + +static inline struct ata_queued_cmd *ata_qc_from_tag (struct ata_port *ap, + unsigned int tag) +{ + if (likely(ata_tag_valid(tag))) + return &ap->qcmd[tag]; + return NULL; +} + +static inline void ata_tf_init(struct ata_port *ap, struct ata_taskfile *tf, unsigned int device) +{ + memset(tf, 0, sizeof(*tf)); + + tf->ctl = ap->ctl; + if (device == 0) + tf->device = ATA_DEVICE_OBS; + else + tf->device = ATA_DEVICE_OBS | ATA_DEV1; +} + +static inline u8 ata_irq_on(struct ata_port *ap) +{ + struct ata_ioports *ioaddr = &ap->ioaddr; + + ap->ctl &= ~ATA_NIEN; + ap->last_ctl = ap->ctl; + + if (ap->flags & ATA_FLAG_MMIO) + writeb(ap->ctl, ioaddr->ctl_addr); + else + outb(ap->ctl, ioaddr->ctl_addr); + return ata_wait_idle(ap); +} + +static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq) +{ + unsigned int bits = chk_drq ? ATA_BUSY | ATA_DRQ : ATA_BUSY; + u8 host_stat, post_stat, status; + + status = ata_busy_wait(ap, bits, 1000); + if (status & bits) + DPRINTK("abnormal status 0x%X\n", status); + + /* get controller status; clear intr, err bits */ + if (ap->flags & ATA_FLAG_MMIO) { + void *mmio = (void *) ap->ioaddr.bmdma_addr; + host_stat = readb(mmio + ATA_DMA_STATUS); + writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + mmio + ATA_DMA_STATUS); + + post_stat = readb(mmio + ATA_DMA_STATUS); + } else { + host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR, + ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + + post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS); + } + + VPRINTK("irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n", + host_stat, post_stat, status); + + return status; +} + +static inline u32 scr_read(struct ata_port *ap, unsigned int reg) +{ + return ap->ops->scr_read(ap, reg); +} + +static inline void scr_write(struct ata_port *ap, unsigned int reg, u32 val) +{ + ap->ops->scr_write(ap, reg, val); +} + +static inline unsigned int sata_dev_present(struct ata_port *ap) +{ + return ((scr_read(ap, SCR_STATUS) & 0xf) == 0x3) ? 
1 : 0; +} + +#endif /* __LINUX_LIBATA_H__ */ diff -urN linux-2.4.26-pre6/kernel/ksyms.c linux-2.4.26-pre6-ata14/kernel/ksyms.c --- linux-2.4.26-pre6/kernel/ksyms.c Fri Jan 16 11:26:27 2004 +++ linux-2.4.26-pre6-ata14/kernel/ksyms.c Thu Mar 25 22:45:24 2004 @@ -448,6 +448,7 @@ #endif /* resource handling */ +EXPORT_SYMBOL_GPL(____request_resource); /* may disappear in a few months */ EXPORT_SYMBOL(request_resource); EXPORT_SYMBOL(release_resource); EXPORT_SYMBOL(allocate_resource); diff -urN linux-2.4.26-pre6/kernel/resource.c linux-2.4.26-pre6-ata14/kernel/resource.c --- linux-2.4.26-pre6/kernel/resource.c Fri Oct 10 08:47:16 2003 +++ linux-2.4.26-pre6-ata14/kernel/resource.c Thu Mar 25 22:45:24 2004 @@ -166,6 +166,16 @@ return conflict ? -EBUSY : 0; } +struct resource *____request_resource(struct resource *root, struct resource *new) +{ + struct resource *conflict; + + write_lock(&resource_lock); + conflict = __request_resource(root, new); + write_unlock(&resource_lock); + return conflict; +} + int release_resource(struct resource *old) { int retval;
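
Taken together, the declarations above are everything a low-level driver must supply: a Scsi_Host_Template wired to the ata_scsi_* entry points, an ata_port_operations table, and an ata_probe_ent describing the ports. The sketch below is not part of the patch; the toy_* names, the single-port device behind PCI BAR 0, and the 4-byte SCR register stride are invented for illustration, but every libata symbol it uses is declared in the hunks above, and the registration sequence mirrors the vsc_sata driver.

/* Hypothetical glue: SCSI template, SCR accessors and port operations. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "scsi.h"
#include "hosts.h"
#include <linux/libata.h>

static Scsi_Host_Template toy_sht = {
	.module			= THIS_MODULE,
	.name			= "toy_sata",
	.detect			= ata_scsi_detect,
	.release		= ata_scsi_release,
	.queuecommand		= ata_scsi_queuecmd,
	.eh_strategy_handler	= ata_scsi_error,
	.can_queue		= ATA_DEF_QUEUE,
	.this_id		= ATA_SHT_THIS_ID,
	.sg_tablesize		= LIBATA_MAX_PRD,
	.max_sectors		= ATA_MAX_SECTORS,
	.cmd_per_lun		= ATA_SHT_CMD_PER_LUN,
	.use_new_eh_code	= ATA_SHT_NEW_EH_CODE,
	.emulated		= ATA_SHT_EMULATED,
	.use_clustering		= ATA_SHT_USE_CLUSTERING,
	.proc_name		= "toy_sata",
	.bios_param		= ata_std_bios_param,
};

/* Assumed layout: SStatus/SError/SControl at scr_addr + 4 * sc_reg. */
static u32 toy_scr_read(struct ata_port *ap, unsigned int sc_reg)
{
	return readl((void *) ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void toy_scr_write(struct ata_port *ap, unsigned int sc_reg, u32 val)
{
	writel(val, (void *) ap->ioaddr.scr_addr + (sc_reg * 4));
}

static struct ata_port_operations toy_ops = {
	.port_disable	= ata_port_disable,
	.tf_load	= ata_tf_load_mmio,
	.tf_read	= ata_tf_read_mmio,
	.check_status	= ata_check_status_mmio,
	.exec_command	= ata_exec_command_mmio,
	.phy_reset	= sata_phy_reset,
	.bmdma_start	= ata_bmdma_start_mmio,
	.fill_sg	= ata_fill_sg,
	.eng_timeout	= ata_eng_timeout,
	.irq_handler	= ata_interrupt,	/* generic handler, unlike vsc_sata */
	.scr_read	= toy_scr_read,
	.scr_write	= toy_scr_write,
	.port_start	= ata_port_start,
	.port_stop	= ata_port_stop,
};

The PCI probe routine then only has to describe the hardware in an ata_probe_ent and queue it for ata_scsi_detect():

/* Hypothetical probe: fill an ata_probe_ent and hand it to libata. */
static int __devinit toy_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct ata_probe_ent *probe_ent;
	void *mmio;
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;
	rc = pci_request_regions(pdev, "toy_sata");
	if (rc)
		goto err_disable;
	rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
	if (rc)
		goto err_regions;

	probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		rc = -ENOMEM;
		goto err_regions;
	}
	memset(probe_ent, 0, sizeof(*probe_ent));
	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->pdev = pdev;

	mmio = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
	if (!mmio) {
		rc = -ENOMEM;
		goto err_free;
	}

	probe_ent->sht		= &toy_sht;
	probe_ent->port_ops	= &toy_ops;
	probe_ent->host_flags	= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
				  ATA_FLAG_MMIO | ATA_FLAG_SATA_RESET;
	probe_ent->n_ports	= 1;
	probe_ent->irq		= pdev->irq;
	probe_ent->irq_flags	= SA_SHIRQ;
	probe_ent->mmio_base	= mmio;
	probe_ent->pio_mask	= 0x1f;		/* PIO modes 0-4 */
	probe_ent->udma_mask	= 0x7f;		/* UDMA modes 0-6 */

	/* probe_ent->port[0].cmd_addr, ctl_addr, bmdma_addr, scr_addr and
	 * friends are filled in here from the device's register layout,
	 * the way vsc_sata_setup_port() does above. */

	pci_set_master(pdev);
	ata_add_to_probe_list(probe_ent);	/* consumed by ata_scsi_detect() */
	return 0;

err_free:
	kfree(probe_ent);
err_regions:
	pci_release_regions(pdev);
err_disable:
	pci_disable_device(pdev);
	return rc;
}

The module init and exit paths would mirror vsc_sata_init() and vsc_sata_exit(): pci_module_init() on a pci_driver whose .probe is toy_init_one and whose .remove is ata_pci_remove_one, followed by scsi_register_module(MODULE_SCSI_HA, &toy_sht), and the reverse on unload.

Finally, the ____request_resource() helper added to kernel/resource.c (and exported in ksyms.c) differs from request_resource() only in its return convention: it returns the conflicting resource, or NULL on success, instead of folding the answer into -EBUSY, so a caller can report who already owns a range. A hypothetical caller, with an invented resource and message:

#include <linux/kernel.h>
#include <linux/ioport.h>

static struct resource toy_ports = {
	.name	= "toy legacy ports",
	.start	= 0x170,
	.end	= 0x177,
	.flags	= IORESOURCE_IO,
};

static void toy_claim_ports(void)
{
	struct resource *conflict;

	conflict = ____request_resource(&ioport_resource, &toy_ports);
	if (conflict)
		printk(KERN_INFO "toy: ports 0x170-0x177 already claimed by %s\n",
		       conflict->name);
}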