libata: convert to iomap
Convert the libata core layer and LLDs to use iomap.

* Managed iomap is used.  The pointer returned by pcim_iomap_table() is
  cached at host->iomap and used throughout the LLDs.  This basically
  replaces host->mmio_base.

* Where possible, pcim_iomap_regions() is used.

Most iomap operation conversions are taken from Jeff Garzik
<jgarzik@pobox.com>'s iomap branch.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
commit 0d5ff56677
parent 1a68ff13c8
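The managed-iomap pattern the message above describes (request and map a BAR once with pcim_iomap_regions(), then fetch the cached mapping table with pcim_iomap_table()) is sketched below. This is an illustration only, not code taken from the patch; the probe routine, the "example" region name, and the BAR index are assumptions made for the sketch.

/*
 * Illustrative sketch only, not part of the patch.  pcim_iomap_regions()
 * requests and ioremaps the BAR under devres control, and
 * pcim_iomap_table() returns the per-device table whose entry the
 * converted drivers cache (e.g. in probe_ent->iomap / host->iomap).
 */
#include <linux/pci.h>
#include <linux/io.h>

#define EXAMPLE_PCI_BAR 5	/* assumed BAR index for the sketch */

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* request + iomap the BAR; released automatically on driver detach */
	rc = pcim_iomap_regions(pdev, 1 << EXAMPLE_PCI_BAR, "example");
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	/* cached table; an LLD would stash this pointer for later use */
	iomap = pcim_iomap_table(pdev);

	/* iomap accessors work directly on the returned cookies */
	if (ioread32(iomap[EXAMPLE_PCI_BAR]) == 0xffffffff)
		return -ENODEV;

	return 0;
}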
@@ -446,16 +446,12 @@ static inline int ahci_nr_ports(u32 cap)
         return (cap & 0x1f) + 1;
 }
 
-static inline unsigned long ahci_port_base_ul (unsigned long base, unsigned int port)
+static inline void __iomem *ahci_port_base(void __iomem *base,
+                                           unsigned int port)
 {
         return base + 0x100 + (port * 0x80);
 }
 
-static inline void __iomem *ahci_port_base (void __iomem *base, unsigned int port)
-{
-        return (void __iomem *) ahci_port_base_ul((unsigned long)base, port);
-}
-
 static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
 {
         unsigned int sc_reg;
@@ -469,7 +465,7 @@ static u32 ahci_scr_read (struct ata_port *ap, unsigned int sc_reg_in)
                 return 0xffffffffU;
         }
 
-        return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+        return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 
@@ -487,7 +483,7 @@ static void ahci_scr_write (struct ata_port *ap, unsigned int sc_reg_in,
                 return;
         }
 
-        writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
+        writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
 }
 
 static void ahci_start_engine(void __iomem *port_mmio)
@@ -729,7 +725,7 @@ static void ahci_init_controller(void __iomem *mmio, struct pci_dev *pdev,
 
 static unsigned int ahci_dev_classify(struct ata_port *ap)
 {
-        void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+        void __iomem *port_mmio = ap->ioaddr.cmd_addr;
         struct ata_taskfile tf;
         u32 tmp;
 
@@ -757,7 +753,7 @@ static void ahci_fill_cmd_slot(struct ahci_port_priv *pp, unsigned int tag,
 
 static int ahci_clo(struct ata_port *ap)
 {
-        void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+        void __iomem *port_mmio = ap->ioaddr.cmd_addr;
         struct ahci_host_priv *hpriv = ap->host->private_data;
         u32 tmp;
 
@@ -779,7 +775,7 @@ static int ahci_clo(struct ata_port *ap)
 static int ahci_softreset(struct ata_port *ap, unsigned int *class)
 {
         struct ahci_port_priv *pp = ap->private_data;
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
         const u32 cmd_fis_len = 5; /* five dwords */
         const char *reason = NULL;
@@ -887,7 +883,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
         struct ahci_port_priv *pp = ap->private_data;
         u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
         struct ata_taskfile tf;
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
         int rc;
 
@@ -915,7 +911,7 @@ static int ahci_hardreset(struct ata_port *ap, unsigned int *class)
 
 static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
 {
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
         int rc;
 
@@ -940,7 +936,7 @@ static int ahci_vt8251_hardreset(struct ata_port *ap, unsigned int *class)
 
 static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 {
-        void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+        void __iomem *port_mmio = ap->ioaddr.cmd_addr;
         u32 new_tmp, tmp;
 
         ata_std_postreset(ap, class);
@@ -959,7 +955,7 @@ static void ahci_postreset(struct ata_port *ap, unsigned int *class)
 
 static u8 ahci_check_status(struct ata_port *ap)
 {
-        void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+        void __iomem *mmio = ap->ioaddr.cmd_addr;
 
         return readl(mmio + PORT_TFDATA) & 0xFF;
 }
@@ -1105,7 +1101,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat)
 
 static void ahci_host_intr(struct ata_port *ap)
 {
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
         struct ata_eh_info *ehi = &ap->eh_info;
         struct ahci_port_priv *pp = ap->private_data;
@@ -1203,7 +1199,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
         VPRINTK("ENTER\n");
 
         hpriv = host->private_data;
-        mmio = host->mmio_base;
+        mmio = host->iomap[AHCI_PCI_BAR];
 
         /* sigh. 0xffffffff is a valid return from h/w */
         irq_stat = readl(mmio + HOST_IRQ_STAT);
@@ -1248,7 +1244,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
-        void __iomem *port_mmio = (void __iomem *) ap->ioaddr.cmd_addr;
+        void __iomem *port_mmio = ap->ioaddr.cmd_addr;
 
         if (qc->tf.protocol == ATA_PROT_NCQ)
                 writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
@@ -1260,7 +1256,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 
 static void ahci_freeze(struct ata_port *ap)
 {
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 
         /* turn IRQ off */
@@ -1269,7 +1265,7 @@ static void ahci_freeze(struct ata_port *ap)
 
 static void ahci_thaw(struct ata_port *ap)
 {
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
         u32 tmp;
 
@@ -1284,7 +1280,7 @@ static void ahci_thaw(struct ata_port *ap)
 
 static void ahci_error_handler(struct ata_port *ap)
 {
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 
         if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
@@ -1300,7 +1296,7 @@ static void ahci_error_handler(struct ata_port *ap)
 
 static void ahci_vt8251_error_handler(struct ata_port *ap)
 {
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 
         if (!(ap->pflags & ATA_PFLAG_FROZEN)) {
@@ -1317,7 +1313,7 @@ static void ahci_vt8251_error_handler(struct ata_port *ap)
 static void ahci_post_internal_cmd(struct ata_queued_cmd *qc)
 {
         struct ata_port *ap = qc->ap;
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 
         if (qc->flags & ATA_QCFLAG_FAILED)
@@ -1334,7 +1330,7 @@ static int ahci_port_suspend(struct ata_port *ap, pm_message_t mesg)
 {
         struct ahci_host_priv *hpriv = ap->host->private_data;
         struct ahci_port_priv *pp = ap->private_data;
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
         const char *emsg = NULL;
         int rc;
@@ -1355,7 +1351,7 @@ static int ahci_port_resume(struct ata_port *ap)
 {
         struct ahci_port_priv *pp = ap->private_data;
         struct ahci_host_priv *hpriv = ap->host->private_data;
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
 
         ahci_power_up(port_mmio, hpriv->cap);
@@ -1367,7 +1363,7 @@ static int ahci_port_resume(struct ata_port *ap)
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
         struct ata_host *host = dev_get_drvdata(&pdev->dev);
-        void __iomem *mmio = host->mmio_base;
+        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
         u32 ctl;
 
         if (mesg.event == PM_EVENT_SUSPEND) {
@@ -1388,7 +1384,7 @@ static int ahci_pci_device_resume(struct pci_dev *pdev)
 {
         struct ata_host *host = dev_get_drvdata(&pdev->dev);
         struct ahci_host_priv *hpriv = host->private_data;
-        void __iomem *mmio = host->mmio_base;
+        void __iomem *mmio = host->iomap[AHCI_PCI_BAR];
         int rc;
 
         rc = ata_pci_device_do_resume(pdev);
@@ -1414,7 +1410,7 @@ static int ahci_port_start(struct ata_port *ap)
         struct device *dev = ap->host->dev;
         struct ahci_host_priv *hpriv = ap->host->private_data;
         struct ahci_port_priv *pp;
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
         void *mem;
         dma_addr_t mem_dma;
@@ -1474,7 +1470,7 @@ static int ahci_port_start(struct ata_port *ap)
 static void ahci_port_stop(struct ata_port *ap)
 {
         struct ahci_host_priv *hpriv = ap->host->private_data;
-        void __iomem *mmio = ap->host->mmio_base;
+        void __iomem *mmio = ap->host->iomap[AHCI_PCI_BAR];
         void __iomem *port_mmio = ahci_port_base(mmio, ap->port_no);
         const char *emsg = NULL;
         int rc;
@@ -1485,11 +1481,11 @@ static void ahci_port_stop(struct ata_port *ap)
                 ata_port_printk(ap, KERN_WARNING, "%s (%d)\n", emsg, rc);
 }
 
-static void ahci_setup_port(struct ata_ioports *port, unsigned long base,
+static void ahci_setup_port(struct ata_ioports *port, void __iomem *base,
                             unsigned int port_idx)
 {
         VPRINTK("ENTER, base==0x%lx, port_idx %u\n", base, port_idx);
-        base = ahci_port_base_ul(base, port_idx);
+        base = ahci_port_base(base, port_idx);
         VPRINTK("base now==0x%lx\n", base);
 
         port->cmd_addr = base;
@@ -1502,7 +1498,7 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
 {
         struct ahci_host_priv *hpriv = probe_ent->private_data;
         struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-        void __iomem *mmio = probe_ent->mmio_base;
+        void __iomem *mmio = probe_ent->iomap[AHCI_PCI_BAR];
         unsigned int i, cap_n_ports, using_dac;
         int rc;
 
@@ -1569,7 +1565,7 @@ static int ahci_host_init(struct ata_probe_ent *probe_ent)
         }
 
         for (i = 0; i < probe_ent->n_ports; i++)
-                ahci_setup_port(&probe_ent->port[i], (unsigned long) mmio, i);
+                ahci_setup_port(&probe_ent->port[i], mmio, i);
 
         ahci_init_controller(mmio, pdev, probe_ent->n_ports,
                              probe_ent->port_flags, hpriv);
@@ -1583,7 +1579,7 @@ static void ahci_print_info(struct ata_probe_ent *probe_ent)
 {
         struct ahci_host_priv *hpriv = probe_ent->private_data;
         struct pci_dev *pdev = to_pci_dev(probe_ent->dev);
-        void __iomem *mmio = probe_ent->mmio_base;
+        void __iomem *mmio = probe_ent->iomap[AHCI_PCI_BAR];
         u32 vers, cap, impl, speed;
         const char *speed_s;
         u16 cc;
@@ -1657,8 +1653,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         struct device *dev = &pdev->dev;
         struct ata_probe_ent *probe_ent;
         struct ahci_host_priv *hpriv;
-        unsigned long base;
-        void __iomem *mmio_base;
         int rc;
 
         VPRINTK("ENTER\n");
@@ -1679,11 +1673,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         if (rc)
                 return rc;
 
-        rc = pci_request_regions(pdev, DRV_NAME);
-        if (rc) {
+        rc = pcim_iomap_regions(pdev, 1 << AHCI_PCI_BAR, DRV_NAME);
+        if (rc == -EBUSY)
                 pcim_pin_device(pdev);
+        if (rc)
                 return rc;
-        }
 
         if (pci_enable_msi(pdev))
                 pci_intx(pdev, 1);
@@ -1695,11 +1689,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
         probe_ent->dev = pci_dev_to_dev(pdev);
         INIT_LIST_HEAD(&probe_ent->node);
 
-        mmio_base = pcim_iomap(pdev, AHCI_PCI_BAR, 0);
-        if (mmio_base == NULL)
-                return -ENOMEM;
-        base = (unsigned long) mmio_base;
-
         hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
         if (!hpriv)
                 return -ENOMEM;
@@ -1712,7 +1701,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
         probe_ent->irq = pdev->irq;
         probe_ent->irq_flags = IRQF_SHARED;
-        probe_ent->mmio_base = mmio_base;
+        probe_ent->iomap = pcim_iomap_table(pdev);
         probe_ent->private_data = hpriv;
 
         /* initialize adapter */
@@ -79,7 +79,7 @@ static int generic_set_mode(struct ata_port *ap, struct ata_device **unused)
 
         /* Bits 5 and 6 indicate if DMA is active on master/slave */
         if (ap->ioaddr.bmdma_addr)
-                dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
+                dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
 
         for (i = 0; i < ATA_MAX_DEVICES; i++) {
                 struct ata_device *dev = &ap->device[i];
@@ -138,7 +138,7 @@ static struct ata_port_operations generic_port_ops = {
         .bmdma_stop = ata_bmdma_stop,
         .bmdma_status = ata_bmdma_status,
 
-        .data_xfer = ata_pio_data_xfer,
+        .data_xfer = ata_data_xfer,
 
         .freeze = ata_bmdma_freeze,
         .thaw = ata_bmdma_thaw,
@@ -299,7 +299,7 @@ static const struct ata_port_operations piix_pata_ops = {
         .bmdma_status = ata_bmdma_status,
         .qc_prep = ata_qc_prep,
         .qc_issue = ata_qc_issue_prot,
-        .data_xfer = ata_pio_data_xfer,
+        .data_xfer = ata_data_xfer,
 
         .freeze = ata_bmdma_freeze,
         .thaw = ata_bmdma_thaw,
@@ -330,7 +330,7 @@ static const struct ata_port_operations ich_pata_ops = {
         .bmdma_status = ata_bmdma_status,
         .qc_prep = ata_qc_prep,
         .qc_issue = ata_qc_issue_prot,
-        .data_xfer = ata_pio_data_xfer,
+        .data_xfer = ata_data_xfer,
 
         .freeze = ata_bmdma_freeze,
         .thaw = ata_bmdma_thaw,
@@ -358,7 +358,7 @@ static const struct ata_port_operations piix_sata_ops = {
         .bmdma_status = ata_bmdma_status,
         .qc_prep = ata_qc_prep,
         .qc_issue = ata_qc_issue_prot,
-        .data_xfer = ata_pio_data_xfer,
+        .data_xfer = ata_data_xfer,
 
         .freeze = ata_bmdma_freeze,
         .thaw = ata_bmdma_thaw,
@ -600,113 +600,47 @@ void ata_dev_disable(struct ata_device *dev)
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_pio_devchk - PATA device presence detection
|
||||
* @ap: ATA channel to examine
|
||||
* @device: Device to examine (starting at zero)
|
||||
*
|
||||
* This technique was originally described in
|
||||
* Hale Landis's ATADRVR (www.ata-atapi.com), and
|
||||
* later found its way into the ATA/ATAPI spec.
|
||||
*
|
||||
* Write a pattern to the ATA shadow registers,
|
||||
* and if a device is present, it will respond by
|
||||
* correctly storing and echoing back the
|
||||
* ATA shadow register contents.
|
||||
*
|
||||
* LOCKING:
|
||||
* caller.
|
||||
*/
|
||||
|
||||
static unsigned int ata_pio_devchk(struct ata_port *ap,
|
||||
unsigned int device)
|
||||
{
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
u8 nsect, lbal;
|
||||
|
||||
ap->ops->dev_select(ap, device);
|
||||
|
||||
outb(0x55, ioaddr->nsect_addr);
|
||||
outb(0xaa, ioaddr->lbal_addr);
|
||||
|
||||
outb(0xaa, ioaddr->nsect_addr);
|
||||
outb(0x55, ioaddr->lbal_addr);
|
||||
|
||||
outb(0x55, ioaddr->nsect_addr);
|
||||
outb(0xaa, ioaddr->lbal_addr);
|
||||
|
||||
nsect = inb(ioaddr->nsect_addr);
|
||||
lbal = inb(ioaddr->lbal_addr);
|
||||
|
||||
if ((nsect == 0x55) && (lbal == 0xaa))
|
||||
return 1; /* we found a device */
|
||||
|
||||
return 0; /* nothing found */
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_mmio_devchk - PATA device presence detection
|
||||
* @ap: ATA channel to examine
|
||||
* @device: Device to examine (starting at zero)
|
||||
*
|
||||
* This technique was originally described in
|
||||
* Hale Landis's ATADRVR (www.ata-atapi.com), and
|
||||
* later found its way into the ATA/ATAPI spec.
|
||||
*
|
||||
* Write a pattern to the ATA shadow registers,
|
||||
* and if a device is present, it will respond by
|
||||
* correctly storing and echoing back the
|
||||
* ATA shadow register contents.
|
||||
*
|
||||
* LOCKING:
|
||||
* caller.
|
||||
*/
|
||||
|
||||
static unsigned int ata_mmio_devchk(struct ata_port *ap,
|
||||
unsigned int device)
|
||||
{
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
u8 nsect, lbal;
|
||||
|
||||
ap->ops->dev_select(ap, device);
|
||||
|
||||
writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
|
||||
writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
|
||||
|
||||
writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
|
||||
writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
|
||||
|
||||
writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
|
||||
writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
|
||||
|
||||
nsect = readb((void __iomem *) ioaddr->nsect_addr);
|
||||
lbal = readb((void __iomem *) ioaddr->lbal_addr);
|
||||
|
||||
if ((nsect == 0x55) && (lbal == 0xaa))
|
||||
return 1; /* we found a device */
|
||||
|
||||
return 0; /* nothing found */
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_devchk - PATA device presence detection
|
||||
* @ap: ATA channel to examine
|
||||
* @device: Device to examine (starting at zero)
|
||||
*
|
||||
* Dispatch ATA device presence detection, depending
|
||||
* on whether we are using PIO or MMIO to talk to the
|
||||
* ATA shadow registers.
|
||||
* This technique was originally described in
|
||||
* Hale Landis's ATADRVR (www.ata-atapi.com), and
|
||||
* later found its way into the ATA/ATAPI spec.
|
||||
*
|
||||
* Write a pattern to the ATA shadow registers,
|
||||
* and if a device is present, it will respond by
|
||||
* correctly storing and echoing back the
|
||||
* ATA shadow register contents.
|
||||
*
|
||||
* LOCKING:
|
||||
* caller.
|
||||
*/
|
||||
|
||||
static unsigned int ata_devchk(struct ata_port *ap,
|
||||
unsigned int device)
|
||||
static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
|
||||
{
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
return ata_mmio_devchk(ap, device);
|
||||
return ata_pio_devchk(ap, device);
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
u8 nsect, lbal;
|
||||
|
||||
ap->ops->dev_select(ap, device);
|
||||
|
||||
iowrite8(0x55, ioaddr->nsect_addr);
|
||||
iowrite8(0xaa, ioaddr->lbal_addr);
|
||||
|
||||
iowrite8(0xaa, ioaddr->nsect_addr);
|
||||
iowrite8(0x55, ioaddr->lbal_addr);
|
||||
|
||||
iowrite8(0x55, ioaddr->nsect_addr);
|
||||
iowrite8(0xaa, ioaddr->lbal_addr);
|
||||
|
||||
nsect = ioread8(ioaddr->nsect_addr);
|
||||
lbal = ioread8(ioaddr->lbal_addr);
|
||||
|
||||
if ((nsect == 0x55) && (lbal == 0xaa))
|
||||
return 1; /* we found a device */
|
||||
|
||||
return 0; /* nothing found */
|
||||
}
|
||||
|
||||
/**
|
||||
@ -926,11 +860,7 @@ void ata_std_dev_select (struct ata_port *ap, unsigned int device)
|
||||
else
|
||||
tmp = ATA_DEVICE_OBS | ATA_DEV1;
|
||||
|
||||
if (ap->flags & ATA_FLAG_MMIO) {
|
||||
writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
|
||||
} else {
|
||||
outb(tmp, ap->ioaddr.device_addr);
|
||||
}
|
||||
iowrite8(tmp, ap->ioaddr.device_addr);
|
||||
ata_pause(ap); /* needed; also flushes, for mmio */
|
||||
}
|
||||
|
||||
@ -2616,13 +2546,8 @@ static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
|
||||
u8 nsect, lbal;
|
||||
|
||||
ap->ops->dev_select(ap, 1);
|
||||
if (ap->flags & ATA_FLAG_MMIO) {
|
||||
nsect = readb((void __iomem *) ioaddr->nsect_addr);
|
||||
lbal = readb((void __iomem *) ioaddr->lbal_addr);
|
||||
} else {
|
||||
nsect = inb(ioaddr->nsect_addr);
|
||||
lbal = inb(ioaddr->lbal_addr);
|
||||
}
|
||||
nsect = ioread8(ioaddr->nsect_addr);
|
||||
lbal = ioread8(ioaddr->lbal_addr);
|
||||
if ((nsect == 1) && (lbal == 1))
|
||||
break;
|
||||
if (time_after(jiffies, timeout)) {
|
||||
@ -2650,19 +2575,11 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
|
||||
DPRINTK("ata%u: bus reset via SRST\n", ap->id);
|
||||
|
||||
/* software reset. causes dev0 to be selected */
|
||||
if (ap->flags & ATA_FLAG_MMIO) {
|
||||
writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
|
||||
udelay(20); /* FIXME: flush */
|
||||
writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
|
||||
udelay(20); /* FIXME: flush */
|
||||
writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
|
||||
} else {
|
||||
outb(ap->ctl, ioaddr->ctl_addr);
|
||||
udelay(10);
|
||||
outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
|
||||
udelay(10);
|
||||
outb(ap->ctl, ioaddr->ctl_addr);
|
||||
}
|
||||
iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||||
udelay(20); /* FIXME: flush */
|
||||
iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
|
||||
udelay(20); /* FIXME: flush */
|
||||
iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||||
|
||||
/* spec mandates ">= 2ms" before checking status.
|
||||
* We wait 150ms, because that was the magic delay used for
|
||||
@ -2763,10 +2680,7 @@ void ata_bus_reset(struct ata_port *ap)
|
||||
|
||||
if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
|
||||
/* set up device control for ATA_FLAG_SATA_RESET */
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
|
||||
else
|
||||
outb(ap->ctl, ioaddr->ctl_addr);
|
||||
iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||||
}
|
||||
|
||||
DPRINTK("EXIT\n");
|
||||
@ -3159,12 +3073,8 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
|
||||
}
|
||||
|
||||
/* set up device control */
|
||||
if (ap->ioaddr.ctl_addr) {
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
|
||||
else
|
||||
outb(ap->ctl, ap->ioaddr.ctl_addr);
|
||||
}
|
||||
if (ap->ioaddr.ctl_addr)
|
||||
iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
|
||||
|
||||
DPRINTK("EXIT\n");
|
||||
}
|
||||
@ -3880,53 +3790,7 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_mmio_data_xfer - Transfer data by MMIO
|
||||
* @adev: device for this I/O
|
||||
* @buf: data buffer
|
||||
* @buflen: buffer length
|
||||
* @write_data: read/write
|
||||
*
|
||||
* Transfer data from/to the device data register by MMIO.
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
|
||||
void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
|
||||
unsigned int buflen, int write_data)
|
||||
{
|
||||
struct ata_port *ap = adev->ap;
|
||||
unsigned int i;
|
||||
unsigned int words = buflen >> 1;
|
||||
u16 *buf16 = (u16 *) buf;
|
||||
void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
|
||||
|
||||
/* Transfer multiple of 2 bytes */
|
||||
if (write_data) {
|
||||
for (i = 0; i < words; i++)
|
||||
writew(le16_to_cpu(buf16[i]), mmio);
|
||||
} else {
|
||||
for (i = 0; i < words; i++)
|
||||
buf16[i] = cpu_to_le16(readw(mmio));
|
||||
}
|
||||
|
||||
/* Transfer trailing 1 byte, if any. */
|
||||
if (unlikely(buflen & 0x01)) {
|
||||
u16 align_buf[1] = { 0 };
|
||||
unsigned char *trailing_buf = buf + buflen - 1;
|
||||
|
||||
if (write_data) {
|
||||
memcpy(align_buf, trailing_buf, 1);
|
||||
writew(le16_to_cpu(align_buf[0]), mmio);
|
||||
} else {
|
||||
align_buf[0] = cpu_to_le16(readw(mmio));
|
||||
memcpy(trailing_buf, align_buf, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_pio_data_xfer - Transfer data by PIO
|
||||
* ata_data_xfer - Transfer data by PIO
|
||||
* @adev: device to target
|
||||
* @buf: data buffer
|
||||
* @buflen: buffer length
|
||||
@ -3937,18 +3801,17 @@ void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
|
||||
void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
|
||||
unsigned int buflen, int write_data)
|
||||
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
|
||||
unsigned int buflen, int write_data)
|
||||
{
|
||||
struct ata_port *ap = adev->ap;
|
||||
unsigned int words = buflen >> 1;
|
||||
|
||||
/* Transfer multiple of 2 bytes */
|
||||
if (write_data)
|
||||
outsw(ap->ioaddr.data_addr, buf, words);
|
||||
iowrite16_rep(ap->ioaddr.data_addr, buf, words);
|
||||
else
|
||||
insw(ap->ioaddr.data_addr, buf, words);
|
||||
ioread16_rep(ap->ioaddr.data_addr, buf, words);
|
||||
|
||||
/* Transfer trailing 1 byte, if any. */
|
||||
if (unlikely(buflen & 0x01)) {
|
||||
@ -3957,16 +3820,16 @@ void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
|
||||
|
||||
if (write_data) {
|
||||
memcpy(align_buf, trailing_buf, 1);
|
||||
outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
|
||||
iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
|
||||
} else {
|
||||
align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
|
||||
align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
|
||||
memcpy(trailing_buf, align_buf, 1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_pio_data_xfer_noirq - Transfer data by PIO
|
||||
* ata_data_xfer_noirq - Transfer data by PIO
|
||||
* @adev: device to target
|
||||
* @buf: data buffer
|
||||
* @buflen: buffer length
|
||||
@ -3978,13 +3841,12 @@ void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
|
||||
void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
|
||||
unsigned int buflen, int write_data)
|
||||
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
|
||||
unsigned int buflen, int write_data)
|
||||
{
|
||||
unsigned long flags;
|
||||
local_irq_save(flags);
|
||||
ata_pio_data_xfer(adev, buf, buflen, write_data);
|
||||
ata_data_xfer(adev, buf, buflen, write_data);
|
||||
local_irq_restore(flags);
|
||||
}
|
||||
|
||||
@ -5770,7 +5632,7 @@ int ata_device_add(const struct ata_probe_ent *ent)
|
||||
host->n_ports = ent->n_ports;
|
||||
host->irq = ent->irq;
|
||||
host->irq2 = ent->irq2;
|
||||
host->mmio_base = ent->mmio_base;
|
||||
host->iomap = ent->iomap;
|
||||
host->private_data = ent->private_data;
|
||||
|
||||
/* register each port bound to this device */
|
||||
@ -5808,8 +5670,8 @@ int ata_device_add(const struct ata_probe_ent *ent)
|
||||
(ap->pio_mask << ATA_SHIFT_PIO);
|
||||
|
||||
/* print per-port info to dmesg */
|
||||
ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
|
||||
"ctl 0x%lX bmdma 0x%lX irq %d\n",
|
||||
ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
|
||||
"ctl 0x%p bmdma 0x%p irq %d\n",
|
||||
ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
|
||||
ata_mode_string(xfer_mode_mask),
|
||||
ap->ioaddr.cmd_addr,
|
||||
@ -6328,9 +6190,8 @@ EXPORT_SYMBOL_GPL(ata_altstatus);
|
||||
EXPORT_SYMBOL_GPL(ata_exec_command);
|
||||
EXPORT_SYMBOL_GPL(ata_port_start);
|
||||
EXPORT_SYMBOL_GPL(ata_interrupt);
|
||||
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
|
||||
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
|
||||
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
|
||||
EXPORT_SYMBOL_GPL(ata_data_xfer);
|
||||
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
|
||||
EXPORT_SYMBOL_GPL(ata_qc_prep);
|
||||
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
|
||||
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
|
||||
|
@ -56,10 +56,7 @@ u8 ata_irq_on(struct ata_port *ap)
|
||||
ap->ctl &= ~ATA_NIEN;
|
||||
ap->last_ctl = ap->ctl;
|
||||
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
|
||||
else
|
||||
outb(ap->ctl, ioaddr->ctl_addr);
|
||||
iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||||
tmp = ata_wait_idle(ap);
|
||||
|
||||
ap->ops->irq_clear(ap);
|
||||
@ -68,7 +65,7 @@ u8 ata_irq_on(struct ata_port *ap)
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_tf_load_pio - send taskfile registers to host controller
|
||||
* ata_tf_load - send taskfile registers to host controller
|
||||
* @ap: Port to which output is sent
|
||||
* @tf: ATA taskfile register set
|
||||
*
|
||||
@ -78,273 +75,72 @@ u8 ata_irq_on(struct ata_port *ap)
|
||||
* Inherited from caller.
|
||||
*/
|
||||
|
||||
static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
{
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
|
||||
|
||||
if (tf->ctl != ap->last_ctl) {
|
||||
outb(tf->ctl, ioaddr->ctl_addr);
|
||||
ap->last_ctl = tf->ctl;
|
||||
ata_wait_idle(ap);
|
||||
}
|
||||
|
||||
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
|
||||
outb(tf->hob_feature, ioaddr->feature_addr);
|
||||
outb(tf->hob_nsect, ioaddr->nsect_addr);
|
||||
outb(tf->hob_lbal, ioaddr->lbal_addr);
|
||||
outb(tf->hob_lbam, ioaddr->lbam_addr);
|
||||
outb(tf->hob_lbah, ioaddr->lbah_addr);
|
||||
VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
|
||||
tf->hob_feature,
|
||||
tf->hob_nsect,
|
||||
tf->hob_lbal,
|
||||
tf->hob_lbam,
|
||||
tf->hob_lbah);
|
||||
}
|
||||
|
||||
if (is_addr) {
|
||||
outb(tf->feature, ioaddr->feature_addr);
|
||||
outb(tf->nsect, ioaddr->nsect_addr);
|
||||
outb(tf->lbal, ioaddr->lbal_addr);
|
||||
outb(tf->lbam, ioaddr->lbam_addr);
|
||||
outb(tf->lbah, ioaddr->lbah_addr);
|
||||
VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
|
||||
tf->feature,
|
||||
tf->nsect,
|
||||
tf->lbal,
|
||||
tf->lbam,
|
||||
tf->lbah);
|
||||
}
|
||||
|
||||
if (tf->flags & ATA_TFLAG_DEVICE) {
|
||||
outb(tf->device, ioaddr->device_addr);
|
||||
VPRINTK("device 0x%X\n", tf->device);
|
||||
}
|
||||
|
||||
ata_wait_idle(ap);
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_tf_load_mmio - send taskfile registers to host controller
|
||||
* @ap: Port to which output is sent
|
||||
* @tf: ATA taskfile register set
|
||||
*
|
||||
* Outputs ATA taskfile to standard ATA host controller using MMIO.
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
|
||||
static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
{
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
|
||||
|
||||
if (tf->ctl != ap->last_ctl) {
|
||||
writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
|
||||
ap->last_ctl = tf->ctl;
|
||||
ata_wait_idle(ap);
|
||||
}
|
||||
|
||||
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
|
||||
writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
|
||||
writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
|
||||
writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
|
||||
writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
|
||||
writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
|
||||
VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
|
||||
tf->hob_feature,
|
||||
tf->hob_nsect,
|
||||
tf->hob_lbal,
|
||||
tf->hob_lbam,
|
||||
tf->hob_lbah);
|
||||
}
|
||||
|
||||
if (is_addr) {
|
||||
writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
|
||||
writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
|
||||
writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
|
||||
writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
|
||||
writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
|
||||
VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
|
||||
tf->feature,
|
||||
tf->nsect,
|
||||
tf->lbal,
|
||||
tf->lbam,
|
||||
tf->lbah);
|
||||
}
|
||||
|
||||
if (tf->flags & ATA_TFLAG_DEVICE) {
|
||||
writeb(tf->device, (void __iomem *) ioaddr->device_addr);
|
||||
VPRINTK("device 0x%X\n", tf->device);
|
||||
}
|
||||
|
||||
ata_wait_idle(ap);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_tf_load - send taskfile registers to host controller
|
||||
* @ap: Port to which output is sent
|
||||
* @tf: ATA taskfile register set
|
||||
*
|
||||
* Outputs ATA taskfile to standard ATA host controller using MMIO
|
||||
* or PIO as indicated by the ATA_FLAG_MMIO flag.
|
||||
* Writes the control, feature, nsect, lbal, lbam, and lbah registers.
|
||||
* Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
|
||||
* hob_lbal, hob_lbam, and hob_lbah.
|
||||
*
|
||||
* This function waits for idle (!BUSY and !DRQ) after writing
|
||||
* registers. If the control register has a new value, this
|
||||
* function also waits for idle after writing control and before
|
||||
* writing the remaining registers.
|
||||
*
|
||||
* May be used as the tf_load() entry in ata_port_operations.
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
{
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
ata_tf_load_mmio(ap, tf);
|
||||
else
|
||||
ata_tf_load_pio(ap, tf);
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
|
||||
|
||||
if (tf->ctl != ap->last_ctl) {
|
||||
iowrite8(tf->ctl, ioaddr->ctl_addr);
|
||||
ap->last_ctl = tf->ctl;
|
||||
ata_wait_idle(ap);
|
||||
}
|
||||
|
||||
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
|
||||
iowrite8(tf->hob_feature, ioaddr->feature_addr);
|
||||
iowrite8(tf->hob_nsect, ioaddr->nsect_addr);
|
||||
iowrite8(tf->hob_lbal, ioaddr->lbal_addr);
|
||||
iowrite8(tf->hob_lbam, ioaddr->lbam_addr);
|
||||
iowrite8(tf->hob_lbah, ioaddr->lbah_addr);
|
||||
VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
|
||||
tf->hob_feature,
|
||||
tf->hob_nsect,
|
||||
tf->hob_lbal,
|
||||
tf->hob_lbam,
|
||||
tf->hob_lbah);
|
||||
}
|
||||
|
||||
if (is_addr) {
|
||||
iowrite8(tf->feature, ioaddr->feature_addr);
|
||||
iowrite8(tf->nsect, ioaddr->nsect_addr);
|
||||
iowrite8(tf->lbal, ioaddr->lbal_addr);
|
||||
iowrite8(tf->lbam, ioaddr->lbam_addr);
|
||||
iowrite8(tf->lbah, ioaddr->lbah_addr);
|
||||
VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
|
||||
tf->feature,
|
||||
tf->nsect,
|
||||
tf->lbal,
|
||||
tf->lbam,
|
||||
tf->lbah);
|
||||
}
|
||||
|
||||
if (tf->flags & ATA_TFLAG_DEVICE) {
|
||||
iowrite8(tf->device, ioaddr->device_addr);
|
||||
VPRINTK("device 0x%X\n", tf->device);
|
||||
}
|
||||
|
||||
ata_wait_idle(ap);
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_exec_command_pio - issue ATA command to host controller
|
||||
* @ap: port to which command is being issued
|
||||
* @tf: ATA taskfile register set
|
||||
*
|
||||
* Issues PIO write to ATA command register, with proper
|
||||
* synchronization with interrupt handler / other threads.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
{
|
||||
DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
|
||||
|
||||
outb(tf->command, ap->ioaddr.command_addr);
|
||||
ata_pause(ap);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_exec_command_mmio - issue ATA command to host controller
|
||||
* @ap: port to which command is being issued
|
||||
* @tf: ATA taskfile register set
|
||||
*
|
||||
* Issues MMIO write to ATA command register, with proper
|
||||
* synchronization with interrupt handler / other threads.
|
||||
*
|
||||
* FIXME: missing write posting for 400nS delay enforcement
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
{
|
||||
DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
|
||||
|
||||
writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
|
||||
ata_pause(ap);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_exec_command - issue ATA command to host controller
|
||||
* @ap: port to which command is being issued
|
||||
* @tf: ATA taskfile register set
|
||||
*
|
||||
* Issues PIO/MMIO write to ATA command register, with proper
|
||||
* synchronization with interrupt handler / other threads.
|
||||
* Issues ATA command, with proper synchronization with interrupt
|
||||
* handler / other threads.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
|
||||
{
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
ata_exec_command_mmio(ap, tf);
|
||||
else
|
||||
ata_exec_command_pio(ap, tf);
|
||||
DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
|
||||
|
||||
iowrite8(tf->command, ap->ioaddr.command_addr);
|
||||
ata_pause(ap);
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_tf_read_pio - input device's ATA taskfile shadow registers
|
||||
* @ap: Port from which input is read
|
||||
* @tf: ATA taskfile register set for storing input
|
||||
*
|
||||
* Reads ATA taskfile registers for currently-selected device
|
||||
* into @tf.
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
|
||||
static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
|
||||
{
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
|
||||
tf->command = ata_check_status(ap);
|
||||
tf->feature = inb(ioaddr->error_addr);
|
||||
tf->nsect = inb(ioaddr->nsect_addr);
|
||||
tf->lbal = inb(ioaddr->lbal_addr);
|
||||
tf->lbam = inb(ioaddr->lbam_addr);
|
||||
tf->lbah = inb(ioaddr->lbah_addr);
|
||||
tf->device = inb(ioaddr->device_addr);
|
||||
|
||||
if (tf->flags & ATA_TFLAG_LBA48) {
|
||||
outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
|
||||
tf->hob_feature = inb(ioaddr->error_addr);
|
||||
tf->hob_nsect = inb(ioaddr->nsect_addr);
|
||||
tf->hob_lbal = inb(ioaddr->lbal_addr);
|
||||
tf->hob_lbam = inb(ioaddr->lbam_addr);
|
||||
tf->hob_lbah = inb(ioaddr->lbah_addr);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_tf_read_mmio - input device's ATA taskfile shadow registers
|
||||
* @ap: Port from which input is read
|
||||
* @tf: ATA taskfile register set for storing input
|
||||
*
|
||||
* Reads ATA taskfile registers for currently-selected device
|
||||
* into @tf via MMIO.
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
|
||||
static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
|
||||
{
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
|
||||
tf->command = ata_check_status(ap);
|
||||
tf->feature = readb((void __iomem *)ioaddr->error_addr);
|
||||
tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
|
||||
tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
|
||||
tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
|
||||
tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
|
||||
tf->device = readb((void __iomem *)ioaddr->device_addr);
|
||||
|
||||
if (tf->flags & ATA_TFLAG_LBA48) {
|
||||
writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
|
||||
tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
|
||||
tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
|
||||
tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
|
||||
tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
|
||||
tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_tf_read - input device's ATA taskfile shadow registers
|
||||
* @ap: Port from which input is read
|
||||
@ -353,55 +149,31 @@ static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
|
||||
* Reads ATA taskfile registers for currently-selected device
|
||||
* into @tf.
|
||||
*
|
||||
* Reads nsect, lbal, lbam, lbah, and device. If ATA_TFLAG_LBA48
|
||||
* is set, also reads the hob registers.
|
||||
*
|
||||
* May be used as the tf_read() entry in ata_port_operations.
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
|
||||
{
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
ata_tf_read_mmio(ap, tf);
|
||||
else
|
||||
ata_tf_read_pio(ap, tf);
|
||||
}
|
||||
struct ata_ioports *ioaddr = &ap->ioaddr;
|
||||
|
||||
/**
|
||||
* ata_check_status_pio - Read device status reg & clear interrupt
|
||||
* @ap: port where the device is
|
||||
*
|
||||
* Reads ATA taskfile status register for currently-selected device
|
||||
* and return its value. This also clears pending interrupts
|
||||
* from this device
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
static u8 ata_check_status_pio(struct ata_port *ap)
|
||||
{
|
||||
return inb(ap->ioaddr.status_addr);
|
||||
}
|
||||
tf->command = ata_check_status(ap);
|
||||
tf->feature = ioread8(ioaddr->error_addr);
|
||||
tf->nsect = ioread8(ioaddr->nsect_addr);
|
||||
tf->lbal = ioread8(ioaddr->lbal_addr);
|
||||
tf->lbam = ioread8(ioaddr->lbam_addr);
|
||||
tf->lbah = ioread8(ioaddr->lbah_addr);
|
||||
tf->device = ioread8(ioaddr->device_addr);
|
||||
|
||||
/**
|
||||
* ata_check_status_mmio - Read device status reg & clear interrupt
|
||||
* @ap: port where the device is
|
||||
*
|
||||
* Reads ATA taskfile status register for currently-selected device
|
||||
* via MMIO and return its value. This also clears pending interrupts
|
||||
* from this device
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
static u8 ata_check_status_mmio(struct ata_port *ap)
|
||||
{
|
||||
return readb((void __iomem *) ap->ioaddr.status_addr);
|
||||
if (tf->flags & ATA_TFLAG_LBA48) {
|
||||
iowrite8(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
|
||||
tf->hob_feature = ioread8(ioaddr->error_addr);
|
||||
tf->hob_nsect = ioread8(ioaddr->nsect_addr);
|
||||
tf->hob_lbal = ioread8(ioaddr->lbal_addr);
|
||||
tf->hob_lbam = ioread8(ioaddr->lbam_addr);
|
||||
tf->hob_lbah = ioread8(ioaddr->lbah_addr);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_check_status - Read device status reg & clear interrupt
|
||||
* @ap: port where the device is
|
||||
@ -410,19 +182,14 @@ static u8 ata_check_status_mmio(struct ata_port *ap)
|
||||
* and return its value. This also clears pending interrupts
|
||||
* from this device
|
||||
*
|
||||
* May be used as the check_status() entry in ata_port_operations.
|
||||
*
|
||||
* LOCKING:
|
||||
* Inherited from caller.
|
||||
*/
|
||||
u8 ata_check_status(struct ata_port *ap)
|
||||
{
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
return ata_check_status_mmio(ap);
|
||||
return ata_check_status_pio(ap);
|
||||
return ioread8(ap->ioaddr.status_addr);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_altstatus - Read device alternate status reg
|
||||
* @ap: port where the device is
|
||||
@ -441,58 +208,52 @@ u8 ata_altstatus(struct ata_port *ap)
|
||||
if (ap->ops->check_altstatus)
|
||||
return ap->ops->check_altstatus(ap);
|
||||
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
return readb((void __iomem *)ap->ioaddr.altstatus_addr);
|
||||
return inb(ap->ioaddr.altstatus_addr);
|
||||
return ioread8(ap->ioaddr.altstatus_addr);
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
|
||||
* ata_bmdma_setup - Set up PCI IDE BMDMA transaction
|
||||
* @qc: Info associated with this ATA transaction.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
|
||||
void ata_bmdma_setup(struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
|
||||
u8 dmactl;
|
||||
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
|
||||
|
||||
/* load PRD table addr. */
|
||||
mb(); /* make sure PRD table writes are visible to controller */
|
||||
writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
|
||||
iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
|
||||
|
||||
/* specify data direction, triple-check start bit is clear */
|
||||
dmactl = readb(mmio + ATA_DMA_CMD);
|
||||
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
|
||||
if (!rw)
|
||||
dmactl |= ATA_DMA_WR;
|
||||
writeb(dmactl, mmio + ATA_DMA_CMD);
|
||||
iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
|
||||
/* issue r/w command */
|
||||
ap->ops->exec_command(ap, &qc->tf);
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
|
||||
* ata_bmdma_start - Start a PCI IDE BMDMA transaction
|
||||
* @qc: Info associated with this ATA transaction.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
|
||||
void ata_bmdma_start (struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
|
||||
u8 dmactl;
|
||||
|
||||
/* start host DMA transaction */
|
||||
dmactl = readb(mmio + ATA_DMA_CMD);
|
||||
writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
|
||||
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
iowrite8(dmactl | ATA_DMA_START, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
|
||||
/* Strictly, one may wish to issue a readb() here, to
|
||||
* flush the mmio write. However, control also passes
|
||||
@ -507,96 +268,6 @@ static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
|
||||
*/
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
|
||||
* @qc: Info associated with this ATA transaction.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
|
||||
u8 dmactl;
|
||||
|
||||
/* load PRD table addr. */
|
||||
outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
|
||||
|
||||
/* specify data direction, triple-check start bit is clear */
|
||||
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
|
||||
if (!rw)
|
||||
dmactl |= ATA_DMA_WR;
|
||||
outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
|
||||
/* issue r/w command */
|
||||
ap->ops->exec_command(ap, &qc->tf);
|
||||
}
|
||||
|
||||
/**
|
||||
* ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
|
||||
* @qc: Info associated with this ATA transaction.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
u8 dmactl;
|
||||
|
||||
/* start host DMA transaction */
|
||||
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
outb(dmactl | ATA_DMA_START,
|
||||
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_bmdma_start - Start a PCI IDE BMDMA transaction
|
||||
* @qc: Info associated with this ATA transaction.
|
||||
*
|
||||
* Writes the ATA_DMA_START flag to the DMA command register.
|
||||
*
|
||||
* May be used as the bmdma_start() entry in ata_port_operations.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
void ata_bmdma_start(struct ata_queued_cmd *qc)
|
||||
{
|
||||
if (qc->ap->flags & ATA_FLAG_MMIO)
|
||||
ata_bmdma_start_mmio(qc);
|
||||
else
|
||||
ata_bmdma_start_pio(qc);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_bmdma_setup - Set up PCI IDE BMDMA transaction
|
||||
* @qc: Info associated with this ATA transaction.
|
||||
*
|
||||
* Writes address of PRD table to device's PRD Table Address
|
||||
* register, sets the DMA control register, and calls
|
||||
* ops->exec_command() to start the transfer.
|
||||
*
|
||||
* May be used as the bmdma_setup() entry in ata_port_operations.
|
||||
*
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
void ata_bmdma_setup(struct ata_queued_cmd *qc)
|
||||
{
|
||||
if (qc->ap->flags & ATA_FLAG_MMIO)
|
||||
ata_bmdma_setup_mmio(qc);
|
||||
else
|
||||
ata_bmdma_setup_pio(qc);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
|
||||
* @ap: Port associated with this ATA transaction.
|
||||
@ -608,23 +279,16 @@ void ata_bmdma_setup(struct ata_queued_cmd *qc)
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
void ata_bmdma_irq_clear(struct ata_port *ap)
|
||||
{
|
||||
if (!ap->ioaddr.bmdma_addr)
|
||||
void __iomem *mmio = ap->ioaddr.bmdma_addr;
|
||||
|
||||
if (!mmio)
|
||||
return;
|
||||
|
||||
if (ap->flags & ATA_FLAG_MMIO) {
|
||||
void __iomem *mmio =
|
||||
((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
|
||||
writeb(readb(mmio), mmio);
|
||||
} else {
|
||||
unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
|
||||
outb(inb(addr), addr);
|
||||
}
|
||||
iowrite8(ioread8(mmio + ATA_DMA_STATUS), mmio + ATA_DMA_STATUS);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_bmdma_status - Read PCI IDE BMDMA status
|
||||
* @ap: Port associated with this ATA transaction.
|
||||
@ -636,19 +300,11 @@ void ata_bmdma_irq_clear(struct ata_port *ap)
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
u8 ata_bmdma_status(struct ata_port *ap)
|
||||
{
|
||||
u8 host_stat;
|
||||
if (ap->flags & ATA_FLAG_MMIO) {
|
||||
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
|
||||
host_stat = readb(mmio + ATA_DMA_STATUS);
|
||||
} else
|
||||
host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
|
||||
return host_stat;
|
||||
return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* ata_bmdma_stop - Stop PCI IDE BMDMA transfer
|
||||
* @qc: Command we are ending DMA for
|
||||
@ -660,21 +316,14 @@ u8 ata_bmdma_status(struct ata_port *ap)
|
||||
* LOCKING:
|
||||
* spin_lock_irqsave(host lock)
|
||||
*/
|
||||
|
||||
void ata_bmdma_stop(struct ata_queued_cmd *qc)
|
||||
{
|
||||
struct ata_port *ap = qc->ap;
|
||||
if (ap->flags & ATA_FLAG_MMIO) {
|
||||
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
|
||||
void __iomem *mmio = ap->ioaddr.bmdma_addr;
|
||||
|
||||
/* clear start/stop bit */
|
||||
writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
|
||||
mmio + ATA_DMA_CMD);
|
||||
} else {
|
||||
/* clear start/stop bit */
|
||||
outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
|
||||
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
|
||||
}
|
||||
/* clear start/stop bit */
|
||||
iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
|
||||
mmio + ATA_DMA_CMD);
|
||||
|
||||
/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
|
||||
ata_altstatus(ap); /* dummy read */
|
||||
@ -696,10 +345,7 @@ void ata_bmdma_freeze(struct ata_port *ap)
|
||||
ap->ctl |= ATA_NIEN;
|
||||
ap->last_ctl = ap->ctl;
|
||||
|
||||
if (ap->flags & ATA_FLAG_MMIO)
|
||||
writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
|
||||
else
|
||||
outb(ap->ctl, ioaddr->ctl_addr);
|
||||
iowrite8(ap->ctl, ioaddr->ctl_addr);
|
||||
|
||||
/* Under certain circumstances, some controllers raise IRQ on
|
||||
* ATA_NIEN manipulation. Also, many controllers fail to mask
|
||||
@ -868,11 +514,24 @@ static int ata_resources_present(struct pci_dev *pdev, int port)
|
||||
struct ata_probe_ent *
|
||||
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
|
||||
{
|
||||
struct ata_probe_ent *probe_ent =
|
||||
ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
|
||||
int p = 0;
|
||||
unsigned long bmdma;
|
||||
struct ata_probe_ent *probe_ent;
|
||||
int i, p = 0;
|
||||
void __iomem * const *iomap;
|
||||
|
||||
/* iomap BARs */
|
||||
for (i = 0; i < 4; i++) {
|
||||
if (pcim_iomap(pdev, i, 0) == NULL) {
|
||||
dev_printk(KERN_ERR, &pdev->dev,
|
||||
"failed to iomap PCI BAR %d\n", i);
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
pcim_iomap(pdev, 4, 0); /* may fail */
|
||||
iomap = pcim_iomap_table(pdev);
|
||||
|
||||
/* alloc and init probe_ent */
|
||||
probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
|
||||
if (!probe_ent)
|
||||
return NULL;
|
||||
|
||||
@ -887,33 +546,30 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
|
||||
ports &= ~ATA_PORT_SECONDARY;
|
||||
|
||||
if (ports & ATA_PORT_PRIMARY) {
|
||||
probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
|
||||
probe_ent->port[p].cmd_addr = iomap[0];
|
||||
probe_ent->port[p].altstatus_addr =
|
||||
probe_ent->port[p].ctl_addr =
|
||||
pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
|
||||
bmdma = pci_resource_start(pdev, 4);
|
||||
if (bmdma) {
|
||||
probe_ent->port[p].ctl_addr = (void __iomem *)
|
||||
((unsigned long)iomap[1] | ATA_PCI_CTL_OFS);
if (iomap[4]) {
if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
(inb(bmdma + 2) & 0x80))
(ioread8(iomap[4] + 2) & 0x80))
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
probe_ent->port[p].bmdma_addr = bmdma;
probe_ent->port[p].bmdma_addr = iomap[4];
}
ata_std_ports(&probe_ent->port[p]);
p++;
}

if (ports & ATA_PORT_SECONDARY) {
probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
probe_ent->port[p].cmd_addr = iomap[2];
probe_ent->port[p].altstatus_addr =
probe_ent->port[p].ctl_addr =
pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
bmdma = pci_resource_start(pdev, 4);
if (bmdma) {
bmdma += 8;
probe_ent->port[p].ctl_addr = (void __iomem *)
((unsigned long)iomap[3] | ATA_PCI_CTL_OFS);
if (iomap[4]) {
if ((!(port[p]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
(inb(bmdma + 2) & 0x80))
(ioread8(iomap[4] + 10) & 0x80))
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
probe_ent->port[p].bmdma_addr = bmdma;
probe_ent->port[p].bmdma_addr = iomap[4] + 8;
}
ata_std_ports(&probe_ent->port[p]);
probe_ent->pinfo2 = port[1];
@ -924,13 +580,29 @@ ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int
return probe_ent;
}

static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
struct ata_port_info **port, int port_mask)
{
struct ata_probe_ent *probe_ent;
unsigned long bmdma = pci_resource_start(pdev, 4);
void __iomem *iomap[5] = { }, *bmdma;

if (port_mask & ATA_PORT_PRIMARY) {
iomap[0] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CMD, 8);
iomap[1] = devm_ioport_map(&pdev->dev, ATA_PRIMARY_CTL, 1);
if (!iomap[0] || !iomap[1])
return NULL;
}

if (port_mask & ATA_PORT_SECONDARY) {
iomap[2] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CMD, 8);
iomap[3] = devm_ioport_map(&pdev->dev, ATA_SECONDARY_CTL, 1);
if (!iomap[2] || !iomap[3])
return NULL;
}

bmdma = pcim_iomap(pdev, 4, 16); /* may fail */

/* alloc and init probe_ent */
probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
if (!probe_ent)
return NULL;
@ -940,13 +612,13 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,

if (port_mask & ATA_PORT_PRIMARY) {
probe_ent->irq = ATA_PRIMARY_IRQ(pdev);
probe_ent->port[0].cmd_addr = ATA_PRIMARY_CMD;
probe_ent->port[0].cmd_addr = iomap[0];
probe_ent->port[0].altstatus_addr =
probe_ent->port[0].ctl_addr = ATA_PRIMARY_CTL;
probe_ent->port[0].ctl_addr = iomap[1];
if (bmdma) {
probe_ent->port[0].bmdma_addr = bmdma;
if ((!(port[0]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
(inb(bmdma + 2) & 0x80))
(ioread8(bmdma + 2) & 0x80))
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
}
ata_std_ports(&probe_ent->port[0]);
@ -958,13 +630,13 @@ static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
probe_ent->irq2 = ATA_SECONDARY_IRQ(pdev);
else
probe_ent->irq = ATA_SECONDARY_IRQ(pdev);
probe_ent->port[1].cmd_addr = ATA_SECONDARY_CMD;
probe_ent->port[1].cmd_addr = iomap[2];
probe_ent->port[1].altstatus_addr =
probe_ent->port[1].ctl_addr = ATA_SECONDARY_CTL;
probe_ent->port[1].ctl_addr = iomap[3];
if (bmdma) {
probe_ent->port[1].bmdma_addr = bmdma + 8;
if ((!(port[1]->flags & ATA_FLAG_IGN_SIMPLEX)) &&
(inb(bmdma + 10) & 0x80))
(ioread8(bmdma + 10) & 0x80))
probe_ent->_host_flags |= ATA_HOST_SIMPLEX;
}
ata_std_ports(&probe_ent->port[1]);
@ -370,7 +370,7 @@ static struct ata_port_operations ali_early_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -409,7 +409,7 @@ static struct ata_port_operations ali_20_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -445,7 +445,7 @@ static struct ata_port_operations ali_c2_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -480,7 +480,7 @@ static struct ata_port_operations ali_c5_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -362,7 +362,7 @@ static struct ata_port_operations amd33_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -394,7 +394,7 @@ static struct ata_port_operations amd66_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -426,7 +426,7 @@ static struct ata_port_operations amd100_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -458,7 +458,7 @@ static struct ata_port_operations amd133_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -490,7 +490,7 @@ static struct ata_port_operations nv100_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -522,7 +522,7 @@ static struct ata_port_operations nv133_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -341,7 +341,7 @@ static const struct ata_port_operations artop6210_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -371,7 +371,7 @@ static const struct ata_port_operations artop6260_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -252,7 +252,7 @@ static struct ata_port_operations atiixp_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -313,7 +313,7 @@ static struct ata_port_operations cmd64x_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -345,7 +345,7 @@ static struct ata_port_operations cmd646r1_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -377,7 +377,7 @@ static struct ata_port_operations cmd648_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -99,9 +99,9 @@ static void cs5520_set_timings(struct ata_port *ap, struct ata_device *adev, int
static void cs5520_enable_dma(struct ata_port *ap, struct ata_device *adev)
{
/* Set the DMA enable/disable flag */
u8 reg = inb(ap->ioaddr.bmdma_addr + 0x02);
u8 reg = ioread8(ap->ioaddr.bmdma_addr + 0x02);
reg |= 1<<(adev->devno + 5);
outb(reg, ap->ioaddr.bmdma_addr + 0x02);
iowrite8(reg, ap->ioaddr.bmdma_addr + 0x02);
}

/**
@ -193,7 +193,7 @@ static struct ata_port_operations cs5520_port_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -204,6 +204,7 @@ static struct ata_port_operations cs5520_port_ops = {
static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
u8 pcicfg;
void *iomap[5];
static struct ata_probe_ent probe[2];
int ports = 0;

@ -234,6 +235,16 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
return -ENODEV;
}

/* Map IO ports */
iomap[0] = devm_ioport_map(&dev->dev, 0x1F0, 8);
iomap[1] = devm_ioport_map(&dev->dev, 0x3F6, 1);
iomap[2] = devm_ioport_map(&dev->dev, 0x170, 8);
iomap[3] = devm_ioport_map(&dev->dev, 0x376, 1);
iomap[4] = pcim_iomap(dev, 2, 0);

if (!iomap[0] || !iomap[1] || !iomap[2] || !iomap[3] || !iomap[4])
return -ENOMEM;

/* We have to do our own plumbing as the PCI setup for this
chipset is non-standard so we can't punt to the libata code */

@ -247,10 +258,10 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
probe[0].irq_flags = 0;
probe[0].port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
probe[0].n_ports = 1;
probe[0].port[0].cmd_addr = 0x1F0;
probe[0].port[0].ctl_addr = 0x3F6;
probe[0].port[0].altstatus_addr = 0x3F6;
probe[0].port[0].bmdma_addr = pci_resource_start(dev, 2);
probe[0].port[0].cmd_addr = iomap[0];
probe[0].port[0].ctl_addr = iomap[1];
probe[0].port[0].altstatus_addr = iomap[1];
probe[0].port[0].bmdma_addr = iomap[4];

/* The secondary lurks at different addresses but is otherwise
the same beastie */
@ -258,10 +269,10 @@ static int __devinit cs5520_init_one(struct pci_dev *dev, const struct pci_devic
probe[1] = probe[0];
INIT_LIST_HEAD(&probe[1].node);
probe[1].irq = 15;
probe[1].port[0].cmd_addr = 0x170;
probe[1].port[0].ctl_addr = 0x376;
probe[1].port[0].altstatus_addr = 0x376;
probe[1].port[0].bmdma_addr = pci_resource_start(dev, 2) + 8;
probe[1].port[0].cmd_addr = iomap[2];
probe[1].port[0].ctl_addr = iomap[3];
probe[1].port[0].altstatus_addr = iomap[3];
probe[1].port[0].bmdma_addr = iomap[4] + 8;

/* Let libata fill in the port details */
ata_std_ports(&probe[0].port[0]);
@ -37,6 +37,13 @@
#define DRV_NAME "pata_cs5530"
#define DRV_VERSION "0.7.1"

static void __iomem *cs5530_port_base(struct ata_port *ap)
{
unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr;

return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no);
}

/**
* cs5530_set_piomode - PIO setup
* @ap: ATA interface
@ -52,19 +59,19 @@ static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
};
unsigned long base = ( ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no;
void __iomem *base = cs5530_port_base(ap);
u32 tuning;
int format;

/* Find out which table to use */
tuning = inl(base + 0x04);
tuning = ioread32(base + 0x04);
format = (tuning & 0x80000000UL) ? 1 : 0;

/* Now load the right timing register */
if (adev->devno)
base += 0x08;

outl(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0], base);
}

/**
@ -79,12 +86,12 @@ static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)

static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
unsigned long base = ( ap->ioaddr.bmdma_addr & ~0x0F) + 0x20 + 0x10 * ap->port_no;
void __iomem *base = cs5530_port_base(ap);
u32 tuning, timing = 0;
u8 reg;

/* Find out which table to use */
tuning = inl(base + 0x04);
tuning = ioread32(base + 0x04);

switch(adev->dma_mode) {
case XFER_UDMA_0:
@ -105,20 +112,20 @@ static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
/* Merge in the PIO format bit */
timing |= (tuning & 0x80000000UL);
if (adev->devno == 0) /* Master */
outl(timing, base + 0x04);
iowrite32(timing, base + 0x04);
else {
if (timing & 0x00100000)
tuning |= 0x00100000; /* UDMA for both */
else
tuning &= ~0x00100000; /* MWDMA for both */
outl(tuning, base + 0x04);
outl(timing, base + 0x0C);
iowrite32(tuning, base + 0x04);
iowrite32(timing, base + 0x0C);
}

/* Set the DMA capable bit in the BMDMA area */
reg = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
reg |= (1 << (5 + adev->devno));
outb(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

/* Remember the last DMA setup we did */

@ -210,7 +217,7 @@ static struct ata_port_operations cs5530_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = cs5530_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -214,7 +214,7 @@ static struct ata_port_operations cs5535_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -165,7 +165,7 @@ static struct ata_port_operations cy82c693_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -261,7 +261,7 @@ static const struct ata_port_operations efar_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -361,7 +361,7 @@ static struct ata_port_operations hpt366_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -634,24 +634,24 @@ static void hpt370_bmdma_stop(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
u8 dma_stat = inb(ap->ioaddr.bmdma_addr + 2);
u8 dma_stat = ioread8(ap->ioaddr.bmdma_addr + 2);
u8 dma_cmd;
unsigned long bmdma = ap->ioaddr.bmdma_addr;
void __iomem *bmdma = ap->ioaddr.bmdma_addr;

if (dma_stat & 0x01) {
udelay(20);
dma_stat = inb(bmdma + 2);
dma_stat = ioread8(bmdma + 2);
}
if (dma_stat & 0x01) {
/* Clear the engine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(10);
/* Stop DMA */
dma_cmd = inb(bmdma );
outb(dma_cmd & 0xFE, bmdma);
dma_cmd = ioread8(bmdma );
iowrite8(dma_cmd & 0xFE, bmdma);
/* Clear Error */
dma_stat = inb(bmdma + 2);
outb(dma_stat | 0x06 , bmdma + 2);
dma_stat = ioread8(bmdma + 2);
iowrite8(dma_stat | 0x06 , bmdma + 2);
/* Clear the engine */
pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
udelay(10);
@ -796,7 +796,7 @@ static struct ata_port_operations hpt370_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -833,7 +833,7 @@ static struct ata_port_operations hpt370a_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -871,7 +871,7 @@ static struct ata_port_operations hpt372_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -909,7 +909,7 @@ static struct ata_port_operations hpt374_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -263,26 +263,26 @@ static void hpt3x2n_bmdma_stop(struct ata_queued_cmd *qc)

static void hpt3x2n_set_clock(struct ata_port *ap, int source)
{
unsigned long bmdma = ap->ioaddr.bmdma_addr;
void __iomem *bmdma = ap->ioaddr.bmdma_addr;

/* Tristate the bus */
outb(0x80, bmdma+0x73);
outb(0x80, bmdma+0x77);
iowrite8(0x80, bmdma+0x73);
iowrite8(0x80, bmdma+0x77);

/* Switch clock and reset channels */
outb(source, bmdma+0x7B);
outb(0xC0, bmdma+0x79);
iowrite8(source, bmdma+0x7B);
iowrite8(0xC0, bmdma+0x79);

/* Reset state machines */
outb(0x37, bmdma+0x70);
outb(0x37, bmdma+0x74);
iowrite8(0x37, bmdma+0x70);
iowrite8(0x37, bmdma+0x74);

/* Complete reset */
outb(0x00, bmdma+0x79);
iowrite8(0x00, bmdma+0x79);

/* Reconnect channels to bus */
outb(0x00, bmdma+0x73);
outb(0x00, bmdma+0x77);
iowrite8(0x00, bmdma+0x73);
iowrite8(0x00, bmdma+0x77);
}

/* Check if our partner interface is busy */
@ -373,7 +373,7 @@ static struct ata_port_operations hpt3x2n_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = hpt3x2n_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -148,7 +148,7 @@ static struct ata_port_operations hpt3x3_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -53,7 +53,7 @@ static struct ata_port_operations isapnp_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -73,6 +73,7 @@ static struct ata_port_operations isapnp_port_ops = {
static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev_id)
{
struct ata_probe_ent ae;
void __iomem *cmd_addr, *ctl_addr;

if (pnp_port_valid(idev, 0) == 0)
return -ENODEV;
@ -81,6 +82,10 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
if (pnp_irq_valid(idev, 0) == 0)
return -ENODEV;

cmd_addr = devm_ioport_map(&idev->dev, pnp_port_start(idev, 0), 8);
if (!cmd_addr)
return -ENOMEM;

memset(&ae, 0, sizeof(struct ata_probe_ent));
INIT_LIST_HEAD(&ae.node);
ae.dev = &idev->dev;
@ -91,11 +96,13 @@ static int isapnp_init_one(struct pnp_dev *idev, const struct pnp_device_id *dev
ae.irq = pnp_irq(idev, 0);
ae.irq_flags = 0;
ae.port_flags = ATA_FLAG_SLAVE_POSS;
ae.port[0].cmd_addr = pnp_port_start(idev, 0);
ae.port[0].cmd_addr = cmd_addr;

if (pnp_port_valid(idev, 1) == 0) {
ae.port[0].altstatus_addr = pnp_port_start(idev, 1);
ae.port[0].ctl_addr = pnp_port_start(idev, 1);
ctl_addr = devm_ioport_map(&idev->dev,
pnp_port_start(idev, 1), 1);
ae.port[0].altstatus_addr = ctl_addr;
ae.port[0].ctl_addr = ctl_addr;
ae.port_flags |= ATA_FLAG_SRST;
}
ata_std_ports(&ae.port[0]);

@ -273,7 +273,7 @@ static const struct ata_port_operations it8213_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -492,7 +492,7 @@ static int it821x_smart_set_mode(struct ata_port *ap, struct ata_device **unused
/* Bits 5 and 6 indicate if DMA is active on master/slave */
/* It is possible that BMDMA isn't allocated */
if (ap->ioaddr.bmdma_addr)
dma_enabled = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dma_enabled = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

for (i = 0; i < ATA_MAX_DEVICES; i++) {
struct ata_device *dev = &ap->device[i];
@ -674,7 +674,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = it821x_smart_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -708,7 +708,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = it821x_passthru_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_clear = ata_bmdma_irq_clear,
.irq_handler = ata_interrupt,

@ -140,9 +140,9 @@ static struct ata_port_operations ixp4xx_port_ops = {
static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
struct ixp4xx_pata_data *data)
{
ioaddr->cmd_addr = (unsigned long) data->cs0;
ioaddr->altstatus_addr = (unsigned long) data->cs1 + 0x06;
ioaddr->ctl_addr = (unsigned long) data->cs1 + 0x06;
ioaddr->cmd_addr = data->cs0;
ioaddr->altstatus_addr = data->cs1 + 0x06;
ioaddr->ctl_addr = data->cs1 + 0x06;

ata_std_ports(ioaddr);

@ -152,19 +152,19 @@ static void ixp4xx_setup_port(struct ata_ioports *ioaddr,
* ixp4xx in little endian mode.
*/

ioaddr->data_addr ^= 0x02;
ioaddr->cmd_addr ^= 0x03;
ioaddr->altstatus_addr ^= 0x03;
ioaddr->ctl_addr ^= 0x03;
ioaddr->error_addr ^= 0x03;
ioaddr->feature_addr ^= 0x03;
ioaddr->nsect_addr ^= 0x03;
ioaddr->lbal_addr ^= 0x03;
ioaddr->lbam_addr ^= 0x03;
ioaddr->lbah_addr ^= 0x03;
ioaddr->device_addr ^= 0x03;
ioaddr->status_addr ^= 0x03;
ioaddr->command_addr ^= 0x03;
*(unsigned long *)&ioaddr->data_addr ^= 0x02;
*(unsigned long *)&ioaddr->cmd_addr ^= 0x03;
*(unsigned long *)&ioaddr->altstatus_addr ^= 0x03;
*(unsigned long *)&ioaddr->ctl_addr ^= 0x03;
*(unsigned long *)&ioaddr->error_addr ^= 0x03;
*(unsigned long *)&ioaddr->feature_addr ^= 0x03;
*(unsigned long *)&ioaddr->nsect_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbal_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbam_addr ^= 0x03;
*(unsigned long *)&ioaddr->lbah_addr ^= 0x03;
*(unsigned long *)&ioaddr->device_addr ^= 0x03;
*(unsigned long *)&ioaddr->status_addr ^= 0x03;
*(unsigned long *)&ioaddr->command_addr ^= 0x03;
#endif
}

@ -161,7 +161,7 @@ static const struct ata_port_operations jmicron_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

/* IRQ-related hooks */
.irq_handler = ata_interrupt,
@ -164,7 +164,7 @@ static struct ata_port_operations simple_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer_noirq,
.data_xfer = ata_data_xfer_noirq,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -187,7 +187,7 @@ static struct ata_port_operations legacy_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer_noirq,
.data_xfer = ata_data_xfer_noirq,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -253,31 +253,33 @@ static void pdc_data_xfer_vlb(struct ata_device *adev, unsigned char *buf, unsig
local_irq_save(flags);

/* Perform the 32bit I/O synchronization sequence */
inb(ap->ioaddr.nsect_addr);
inb(ap->ioaddr.nsect_addr);
inb(ap->ioaddr.nsect_addr);
ioread8(ap->ioaddr.nsect_addr);
ioread8(ap->ioaddr.nsect_addr);
ioread8(ap->ioaddr.nsect_addr);

/* Now the data */

if (write_data)
outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
else
insl(ap->ioaddr.data_addr, buf, buflen >> 2);
ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);

if (unlikely(slop)) {
u32 pad;
if (write_data) {
memcpy(&pad, buf + buflen - slop, slop);
outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
pad = le32_to_cpu(pad);
iowrite32(pad, ap->ioaddr.data_addr);
} else {
pad = cpu_to_le16(inl(ap->ioaddr.data_addr));
pad = ioread32(ap->ioaddr.data_addr);
pad = cpu_to_le16(pad);
memcpy(buf + buflen - slop, &pad, slop);
}
}
local_irq_restore(flags);
}
else
ata_pio_data_xfer_noirq(adev, buf, buflen, write_data);
ata_data_xfer_noirq(adev, buf, buflen, write_data);
}

static struct ata_port_operations pdc20230_port_ops = {
@ -326,8 +328,8 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
inb(0x3E6);
inb(0x3E6);

outb(recover << 4 | active, ap->ioaddr.device_addr);
inb(ap->ioaddr.status_addr);
iowrite8(recover << 4 | active, ap->ioaddr.device_addr);
ioread8(ap->ioaddr.status_addr);
}

static struct ata_port_operations ht6560a_port_ops = {
@ -345,7 +347,7 @@ static struct ata_port_operations ht6560a_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer, /* Check vlb/noirq */
.data_xfer = ata_data_xfer, /* Check vlb/noirq */

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -379,7 +381,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
inb(0x3E6);
inb(0x3E6);

outb(recover << 4 | active, ap->ioaddr.device_addr);
iowrite8(recover << 4 | active, ap->ioaddr.device_addr);

if (adev->class != ATA_DEV_ATA) {
u8 rconf = inb(0x3E6);
@ -388,7 +390,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
outb(rconf, 0x3E6);
}
}
inb(ap->ioaddr.status_addr);
ioread8(ap->ioaddr.status_addr);
}

static struct ata_port_operations ht6560b_port_ops = {
@ -406,7 +408,7 @@ static struct ata_port_operations ht6560b_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer, /* FIXME: Check 32bit and noirq */
.data_xfer = ata_data_xfer, /* FIXME: Check 32bit and noirq */

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -454,12 +456,12 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev
u8 rc;

/* Enter configuration mode */
inw(ap->ioaddr.error_addr);
inw(ap->ioaddr.error_addr);
outb(3, ap->ioaddr.nsect_addr);
ioread16(ap->ioaddr.error_addr);
ioread16(ap->ioaddr.error_addr);
iowrite8(3, ap->ioaddr.nsect_addr);

/* Read VLB clock strapping */
clock = 1000000000 / khz[inb(ap->ioaddr.lbah_addr) & 0x03];
clock = 1000000000 / khz[ioread8(ap->ioaddr.lbah_addr) & 0x03];

/* Get the timing data in cycles */
ata_timing_compute(adev, adev->pio_mode, &t, clock, 1000);
@ -477,33 +479,33 @@ static void opti82c611a_set_piomode(struct ata_port *ap, struct ata_device *adev
setup = FIT(t.setup, 1, 4) - 1;

/* Select the right timing bank for write timing */
rc = inb(ap->ioaddr.lbal_addr);
rc = ioread8(ap->ioaddr.lbal_addr);
rc &= 0x7F;
rc |= (adev->devno << 7);
outb(rc, ap->ioaddr.lbal_addr);
iowrite8(rc, ap->ioaddr.lbal_addr);

/* Write the timings */
outb(active << 4 | recover, ap->ioaddr.error_addr);
iowrite8(active << 4 | recover, ap->ioaddr.error_addr);

/* Select the right bank for read timings, also
load the shared timings for address */
rc = inb(ap->ioaddr.device_addr);
rc = ioread8(ap->ioaddr.device_addr);
rc &= 0xC0;
rc |= adev->devno; /* Index select */
rc |= (setup << 4) | 0x04;
outb(rc, ap->ioaddr.device_addr);
iowrite8(rc, ap->ioaddr.device_addr);

/* Load the read timings */
outb(active << 4 | recover, ap->ioaddr.data_addr);
iowrite8(active << 4 | recover, ap->ioaddr.data_addr);

/* Ensure the timing register mode is right */
rc = inb (ap->ioaddr.lbal_addr);
rc = ioread8(ap->ioaddr.lbal_addr);
rc &= 0x73;
rc |= 0x84;
outb(rc, ap->ioaddr.lbal_addr);
iowrite8(rc, ap->ioaddr.lbal_addr);

/* Exit command mode */
outb(0x83, ap->ioaddr.nsect_addr);
iowrite8(0x83, ap->ioaddr.nsect_addr);
}

@ -522,7 +524,7 @@ static struct ata_port_operations opti82c611a_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -551,9 +553,9 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
sysclk = opti_syscfg(0xAC) & 0xC0; /* BIOS set */

/* Enter configuration mode */
inw(ap->ioaddr.error_addr);
inw(ap->ioaddr.error_addr);
outb(3, ap->ioaddr.nsect_addr);
ioread16(ap->ioaddr.error_addr);
ioread16(ap->ioaddr.error_addr);
iowrite8(3, ap->ioaddr.nsect_addr);

/* Read VLB clock strapping */
clock = 1000000000 / khz[sysclk];
@ -574,33 +576,33 @@ static void opti82c46x_set_piomode(struct ata_port *ap, struct ata_device *adev)
setup = FIT(t.setup, 1, 4) - 1;

/* Select the right timing bank for write timing */
rc = inb(ap->ioaddr.lbal_addr);
rc = ioread8(ap->ioaddr.lbal_addr);
rc &= 0x7F;
rc |= (adev->devno << 7);
outb(rc, ap->ioaddr.lbal_addr);
iowrite8(rc, ap->ioaddr.lbal_addr);

/* Write the timings */
outb(active << 4 | recover, ap->ioaddr.error_addr);
iowrite8(active << 4 | recover, ap->ioaddr.error_addr);

/* Select the right bank for read timings, also
load the shared timings for address */
rc = inb(ap->ioaddr.device_addr);
rc = ioread8(ap->ioaddr.device_addr);
rc &= 0xC0;
rc |= adev->devno; /* Index select */
rc |= (setup << 4) | 0x04;
outb(rc, ap->ioaddr.device_addr);
iowrite8(rc, ap->ioaddr.device_addr);

/* Load the read timings */
outb(active << 4 | recover, ap->ioaddr.data_addr);
iowrite8(active << 4 | recover, ap->ioaddr.data_addr);

/* Ensure the timing register mode is right */
rc = inb (ap->ioaddr.lbal_addr);
rc = ioread8(ap->ioaddr.lbal_addr);
rc &= 0x73;
rc |= 0x84;
outb(rc, ap->ioaddr.lbal_addr);
iowrite8(rc, ap->ioaddr.lbal_addr);

/* Exit command mode */
outb(0x83, ap->ioaddr.nsect_addr);
iowrite8(0x83, ap->ioaddr.nsect_addr);

/* We need to know this for quad device on the MVB */
ap->host->private_data = ap;
@ -650,7 +652,7 @@ static struct ata_port_operations opti82c46x_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = opti82c46x_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -676,6 +678,7 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
struct ata_probe_ent ae;
struct platform_device *pdev;
struct ata_port_operations *ops = &legacy_port_ops;
void __iomem *io_addr, *ctrl_addr;
int pio_modes = pio_mask;
u32 mask = (1 << port);
int ret;
@ -689,6 +692,12 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
devm_request_region(&pdev->dev, ctrl, 1, "pata_legacy") == NULL)
goto fail;

ret = -ENOMEM;
io_addr = devm_ioport_map(&pdev->dev, io, 8);
ctrl_addr = devm_ioport_map(&pdev->dev, ctrl, 1);
if (!io_addr || !ctrl_addr)
goto fail;

if (ht6560a & mask) {
ops = &ht6560a_port_ops;
pio_modes = 0x07;
@ -754,9 +763,9 @@ static __init int legacy_init_one(int port, unsigned long io, unsigned long ctrl
ae.irq = irq;
ae.irq_flags = 0;
ae.port_flags = ATA_FLAG_SLAVE_POSS|ATA_FLAG_SRST;
ae.port[0].cmd_addr = io;
ae.port[0].altstatus_addr = ctrl;
ae.port[0].ctl_addr = ctrl;
ae.port[0].cmd_addr = io_addr;
ae.port[0].altstatus_addr = ctrl_addr;
ae.port[0].ctl_addr = ctrl_addr;
ata_std_ports(&ae.port[0]);
ae.private_data = ld;
@ -57,7 +57,7 @@ static int marvell_pre_reset(struct ata_port *ap)
switch(ap->port_no)
{
case 0:
if (inb(ap->ioaddr.bmdma_addr + 1) & 1)
if (ioread8(ap->ioaddr.bmdma_addr + 1) & 1)
ap->cbl = ATA_CBL_PATA40;
else
ap->cbl = ATA_CBL_PATA80;
@ -129,7 +129,7 @@ static const struct ata_port_operations marvell_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

/* Timeout handling */
.irq_handler = ata_interrupt,

@ -295,7 +295,7 @@ static struct ata_port_operations mpc52xx_ata_port_ops = {
.error_handler = mpc52xx_ata_error_handler,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.port_start = ata_port_start,
@ -308,7 +308,7 @@ static struct ata_probe_ent mpc52xx_ata_probe_ent = {
.pio_mask = 0x1f, /* Up to PIO4 */
.mwdma_mask = 0x00, /* No MWDMA */
.udma_mask = 0x00, /* No UDMA */
.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST | ATA_FLAG_MMIO,
.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST,
.irq_flags = 0,
};

@ -324,18 +324,18 @@ mpc52xx_ata_init_one(struct device *dev, struct mpc52xx_ata_priv *priv)
ae->irq = priv->ata_irq;

aio->cmd_addr = 0; /* Don't have a classic reg block */
aio->altstatus_addr = (unsigned long)&priv->ata_regs->tf_control;
aio->ctl_addr = (unsigned long)&priv->ata_regs->tf_control;
aio->data_addr = (unsigned long)&priv->ata_regs->tf_data;
aio->error_addr = (unsigned long)&priv->ata_regs->tf_features;
aio->feature_addr = (unsigned long)&priv->ata_regs->tf_features;
aio->nsect_addr = (unsigned long)&priv->ata_regs->tf_sec_count;
aio->lbal_addr = (unsigned long)&priv->ata_regs->tf_sec_num;
aio->lbam_addr = (unsigned long)&priv->ata_regs->tf_cyl_low;
aio->lbah_addr = (unsigned long)&priv->ata_regs->tf_cyl_high;
aio->device_addr = (unsigned long)&priv->ata_regs->tf_dev_head;
aio->status_addr = (unsigned long)&priv->ata_regs->tf_command;
aio->command_addr = (unsigned long)&priv->ata_regs->tf_command;
aio->altstatus_addr = &priv->ata_regs->tf_control;
aio->ctl_addr = &priv->ata_regs->tf_control;
aio->data_addr = &priv->ata_regs->tf_data;
aio->error_addr = &priv->ata_regs->tf_features;
aio->feature_addr = &priv->ata_regs->tf_features;
aio->nsect_addr = &priv->ata_regs->tf_sec_count;
aio->lbal_addr = &priv->ata_regs->tf_sec_num;
aio->lbam_addr = &priv->ata_regs->tf_cyl_low;
aio->lbah_addr = &priv->ata_regs->tf_cyl_high;
aio->device_addr = &priv->ata_regs->tf_dev_head;
aio->status_addr = &priv->ata_regs->tf_command;
aio->command_addr = &priv->ata_regs->tf_command;

ae->private_data = priv;

@ -188,7 +188,7 @@ static struct ata_port_operations mpiix_port_ops = {

.qc_prep = ata_qc_prep,
.qc_issue = mpiix_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -199,10 +199,11 @@ static struct ata_port_operations mpiix_port_ops = {
static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
{
/* Single threaded by the PCI probe logic */
static struct ata_probe_ent probe[2];
static struct ata_probe_ent probe;
static int printed_version;
void __iomem *cmd_addr, *ctl_addr;
u16 idetim;
int enabled;
int irq;

if (!printed_version++)
dev_printk(KERN_DEBUG, &dev->dev, "version " DRV_VERSION "\n");
@ -215,43 +216,43 @@ static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id)
if (!(idetim & ENABLED))
return -ENODEV;

if (!(idetim & SECONDARY)) {
irq = 14;
cmd_addr = devm_ioport_map(&dev->dev, 0x1F0, 8);
ctl_addr = devm_ioport_map(&dev->dev, 0x3F6, 1);
} else {
irq = 15;
cmd_addr = devm_ioport_map(&dev->dev, 0x170, 8);
ctl_addr = devm_ioport_map(&dev->dev, 0x376, 1);
}

if (!cmd_addr || !ctl_addr)
return -ENOMEM;

/* We do our own plumbing to avoid leaking special cases for whacko
ancient hardware into the core code. There are two issues to
worry about. #1 The chip is a bridge so if in legacy mode and
without BARs set fools the setup. #2 If you pci_disable_device
the MPIIX your box goes castors up */

INIT_LIST_HEAD(&probe[0].node);
probe[0].dev = pci_dev_to_dev(dev);
probe[0].port_ops = &mpiix_port_ops;
probe[0].sht = &mpiix_sht;
probe[0].pio_mask = 0x1F;
probe[0].irq = 14;
probe[0].irq_flags = SA_SHIRQ;
probe[0].port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
probe[0].n_ports = 1;
probe[0].port[0].cmd_addr = 0x1F0;
probe[0].port[0].ctl_addr = 0x3F6;
probe[0].port[0].altstatus_addr = 0x3F6;

/* The secondary lurks at different addresses but is otherwise
the same beastie */

INIT_LIST_HEAD(&probe[1].node);
probe[1] = probe[0];
probe[1].irq = 15;
probe[1].port[0].cmd_addr = 0x170;
probe[1].port[0].ctl_addr = 0x376;
probe[1].port[0].altstatus_addr = 0x376;
INIT_LIST_HEAD(&probe.node);
probe.dev = pci_dev_to_dev(dev);
probe.port_ops = &mpiix_port_ops;
probe.sht = &mpiix_sht;
probe.pio_mask = 0x1F;
probe.irq = irq;
probe.irq_flags = SA_SHIRQ;
probe.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
probe.n_ports = 1;
probe.port[0].cmd_addr = cmd_addr;
probe.port[0].ctl_addr = ctl_addr;
probe.port[0].altstatus_addr = ctl_addr;

/* Let libata fill in the port details */
ata_std_ports(&probe[0].port[0]);
ata_std_ports(&probe[1].port[0]);
ata_std_ports(&probe.port[0]);

/* Now add the port that is active */
enabled = (idetim & SECONDARY) ? 1 : 0;

if (ata_device_add(&probe[enabled]))
if (ata_device_add(&probe))
return 0;
return -ENODEV;
}
@ -89,7 +89,7 @@ static const struct ata_port_operations netcell_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

/* IRQ-related hooks */
.irq_handler = ata_interrupt,

@ -179,7 +179,7 @@ static struct ata_port_operations ns87410_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ns87410_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -259,7 +259,7 @@ static const struct ata_port_operations oldpiix_pata_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = oldpiix_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -95,18 +95,18 @@ static void opti_error_handler(struct ata_port *ap)

static void opti_write_reg(struct ata_port *ap, u8 val, int reg)
{
unsigned long regio = ap->ioaddr.cmd_addr;
void __iomem *regio = ap->ioaddr.cmd_addr;

/* These 3 unlock the control register access */
inw(regio + 1);
inw(regio + 1);
outb(3, regio + 2);
ioread16(regio + 1);
ioread16(regio + 1);
iowrite8(3, regio + 2);

/* Do the I/O */
outb(val, regio + reg);
iowrite8(val, regio + reg);

/* Relock */
outb(0x83, regio + 2);
iowrite8(0x83, regio + 2);
}

/**
@ -124,7 +124,7 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
struct ata_device *pair = ata_dev_pair(adev);
int clock;
int pio = adev->pio_mode - XFER_PIO_0;
unsigned long regio = ap->ioaddr.cmd_addr;
void __iomem *regio = ap->ioaddr.cmd_addr;
u8 addr;

/* Address table precomputed with prefetch off and a DCLK of 2 */
@ -137,8 +137,8 @@ static void opti_set_piomode(struct ata_port *ap, struct ata_device *adev)
{ 0x58, 0x44, 0x32, 0x22, 0x21 }
};

outb(0xff, regio + 5);
clock = inw(regio + 5) & 1;
iowrite8(0xff, regio + 5);
clock = ioread16(regio + 5) & 1;

/*
* As with many controllers the address setup time is shared
@ -205,7 +205,7 @@ static struct ata_port_operations opti_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,

@ -91,12 +91,12 @@ static void optidma_error_handler(struct ata_port *ap)

static void optidma_unlock(struct ata_port *ap)
{
unsigned long regio = ap->ioaddr.cmd_addr;
void __iomem *regio = ap->ioaddr.cmd_addr;

/* These 3 unlock the control register access */
inw(regio + 1);
inw(regio + 1);
outb(3, regio + 2);
ioread16(regio + 1);
ioread16(regio + 1);
iowrite8(3, regio + 2);
}

/**
@ -108,10 +108,10 @@ static void optidma_unlock(struct ata_port *ap)

static void optidma_lock(struct ata_port *ap)
{
unsigned long regio = ap->ioaddr.cmd_addr;
void __iomem *regio = ap->ioaddr.cmd_addr;

/* Relock */
outb(0x83, regio + 2);
iowrite8(0x83, regio + 2);
}

/**
@ -133,7 +133,7 @@ static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo
struct ata_device *pair = ata_dev_pair(adev);
int pio = adev->pio_mode - XFER_PIO_0;
int dma = adev->dma_mode - XFER_MW_DMA_0;
unsigned long regio = ap->ioaddr.cmd_addr;
void __iomem *regio = ap->ioaddr.cmd_addr;
u8 addr;

/* Address table precomputed with a DCLK of 2 */
@ -178,20 +178,20 @@ static void optidma_set_mode(struct ata_port *ap, struct ata_device *adev, u8 mo

/* Commence primary programming sequence */
/* First we load the device number into the timing select */
outb(adev->devno, regio + MISC_REG);
iowrite8(adev->devno, regio + MISC_REG);
/* Now we load the data timings into read data/write data */
if (mode < XFER_MW_DMA_0) {
outb(data_rec_timing[pci_clock][pio], regio + READ_REG);
outb(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
iowrite8(data_rec_timing[pci_clock][pio], regio + READ_REG);
iowrite8(data_rec_timing[pci_clock][pio], regio + WRITE_REG);
} else if (mode < XFER_UDMA_0) {
outb(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
outb(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
iowrite8(dma_data_rec_timing[pci_clock][dma], regio + READ_REG);
iowrite8(dma_data_rec_timing[pci_clock][dma], regio + WRITE_REG);
}
/* Finally we load the address setup into the misc register */
outb(addr | adev->devno, regio + MISC_REG);
iowrite8(addr | adev->devno, regio + MISC_REG);

/* Programming sequence complete, timing 0 dev 0, timing 1 dev 1 */
outb(0x85, regio + CNTRL_REG);
iowrite8(0x85, regio + CNTRL_REG);

/* Switch back to IDE mode */
optidma_lock(ap);
@ -389,7 +389,7 @@ static struct ata_port_operations optidma_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -422,7 +422,7 @@ static struct ata_port_operations optiplus_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -88,7 +88,7 @@ static struct ata_port_operations pcmcia_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer_noirq,
.data_xfer = ata_data_xfer_noirq,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -121,6 +121,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
cistpl_cftable_entry_t *cfg;
int pass, last_ret = 0, last_fn = 0, is_kme = 0, ret = -ENOMEM;
unsigned long io_base, ctl_base;
void __iomem *io_addr, *ctl_addr;

info = kzalloc(sizeof(*info), GFP_KERNEL);
if (info == NULL)
@ -231,10 +232,17 @@ next_entry:
CS_CHECK(RequestIRQ, pcmcia_request_irq(pdev, &pdev->irq));
CS_CHECK(RequestConfiguration, pcmcia_request_configuration(pdev, &pdev->conf));

/* iomap */
ret = -ENOMEM;
io_addr = devm_ioport_map(&pdev->dev, io_base, 8);
ctl_addr = devm_ioport_map(&pdev->dev, ctl_base, 1);
if (!io_addr || !ctl_addr)
goto failed;

/* Success. Disable the IRQ nIEN line, do quirks */
outb(0x02, ctl_base);
iowrite8(0x02, ctl_addr);
if (is_kme)
outb(0x81, ctl_base + 0x01);
iowrite8(0x81, ctl_addr + 0x01);

/* FIXME: Could be more ports at base + 0x10 but we only deal with
one right now */
@ -256,11 +264,12 @@ next_entry:
ae.irq = pdev->irq.AssignedIRQ;
ae.irq_flags = SA_SHIRQ;
ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
ae.port[0].cmd_addr = io_base;
ae.port[0].altstatus_addr = ctl_base;
ae.port[0].ctl_addr = ctl_base;
ae.port[0].cmd_addr = io_addr;
ae.port[0].altstatus_addr = ctl_addr;
ae.port[0].ctl_addr = ctl_addr;
ata_std_ports(&ae.port[0]);

ret = -ENODEV;
if (ata_device_add(&ae) == 0)
goto failed;

@ -45,6 +45,8 @@
#endif

enum {
PDC_MMIO_BAR = 5,

PDC_UDMA_100 = 0,
PDC_UDMA_133 = 1,

@ -158,7 +160,7 @@ static struct ata_port_operations pdc2027x_pata100_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,

.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
@ -190,7 +192,7 @@ static struct ata_port_operations pdc2027x_pata133_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,

.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
@ -239,7 +241,7 @@ MODULE_DEVICE_TABLE(pci, pdc2027x_pci_tbl);
*/
static inline void __iomem *port_mmio(struct ata_port *ap, unsigned int offset)
{
return ap->host->mmio_base + ap->port_no * 0x100 + offset;
return ap->host->iomap[PDC_MMIO_BAR] + ap->port_no * 0x100 + offset;
}

/**
@ -520,18 +522,19 @@ static int pdc2027x_check_atapi_dma(struct ata_queued_cmd *qc)

static long pdc_read_counter(struct ata_probe_ent *probe_ent)
{
void __iomem *mmio_base = probe_ent->iomap[PDC_MMIO_BAR];
long counter;
int retry = 1;
u32 bccrl, bccrh, bccrlv, bccrhv;

retry:
bccrl = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff;
bccrh = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
bccrl = readl(mmio_base + PDC_BYTE_COUNT) & 0xffff;
bccrh = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
rmb();

/* Read the counter values again for verification */
bccrlv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT) & 0xffff;
bccrhv = readl(probe_ent->mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
bccrlv = readl(mmio_base + PDC_BYTE_COUNT) & 0xffff;
bccrhv = readl(mmio_base + PDC_BYTE_COUNT + 0x100) & 0xffff;
rmb();

counter = (bccrh << 15) | bccrl;
@ -562,7 +565,7 @@ retry:
*/
static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsigned int board_idx)
{

void __iomem *mmio_base = probe_ent->iomap[PDC_MMIO_BAR];
u16 pll_ctl;
long pll_clock_khz = pll_clock / 1000;
long pout_required = board_idx? PDC_133_MHZ:PDC_100_MHZ;
@ -581,7 +584,7 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi
/* Show the current clock value of PLL control register
* (maybe already configured by the firmware)
*/
pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL);
pll_ctl = readw(mmio_base + PDC_PLL_CTL);

PDPRINTK("pll_ctl[%X]\n", pll_ctl);
#endif
@ -621,8 +624,8 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi

PDPRINTK("Writing pll_ctl[%X]\n", pll_ctl);

writew(pll_ctl, probe_ent->mmio_base + PDC_PLL_CTL);
readw(probe_ent->mmio_base + PDC_PLL_CTL); /* flush */
writew(pll_ctl, mmio_base + PDC_PLL_CTL);
readw(mmio_base + PDC_PLL_CTL); /* flush */

/* Wait the PLL circuit to be stable */
mdelay(30);
@ -632,7 +635,7 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi
* Show the current clock value of PLL control register
* (maybe configured by the firmware)
*/
pll_ctl = readw(probe_ent->mmio_base + PDC_PLL_CTL);
pll_ctl = readw(mmio_base + PDC_PLL_CTL);

PDPRINTK("pll_ctl[%X]\n", pll_ctl);
#endif
@ -648,6 +651,7 @@ static void pdc_adjust_pll(struct ata_probe_ent *probe_ent, long pll_clock, unsi
*/
static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
{
void __iomem *mmio_base = probe_ent->iomap[PDC_MMIO_BAR];
u32 scr;
long start_count, end_count;
long pll_clock;
@ -656,10 +660,10 @@ static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
start_count = pdc_read_counter(probe_ent);

/* Start the test mode */
scr = readl(probe_ent->mmio_base + PDC_SYS_CTL);
scr = readl(mmio_base + PDC_SYS_CTL);
PDPRINTK("scr[%X]\n", scr);
writel(scr | (0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL);
readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */
writel(scr | (0x01 << 14), mmio_base + PDC_SYS_CTL);
readl(mmio_base + PDC_SYS_CTL); /* flush */

/* Let the counter run for 100 ms. */
mdelay(100);
@ -668,10 +672,10 @@ static long pdc_detect_pll_input_clock(struct ata_probe_ent *probe_ent)
end_count = pdc_read_counter(probe_ent);

/* Stop the test mode */
scr = readl(probe_ent->mmio_base + PDC_SYS_CTL);
scr = readl(mmio_base + PDC_SYS_CTL);
PDPRINTK("scr[%X]\n", scr);
writel(scr & ~(0x01 << 14), probe_ent->mmio_base + PDC_SYS_CTL);
readl(probe_ent->mmio_base + PDC_SYS_CTL); /* flush */
writel(scr & ~(0x01 << 14), mmio_base + PDC_SYS_CTL);
readl(mmio_base + PDC_SYS_CTL); /* flush */

/* calculate the input clock in Hz */
pll_clock = (start_count - end_count) * 10;
@ -716,7 +720,7 @@ static int pdc_hardware_init(struct pci_dev *pdev, struct ata_probe_ent *pe, uns
* @port: ata ioports to setup
* @base: base address
*/
static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr =
port->data_addr = base;
@ -750,7 +754,6 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
unsigned int board_idx = (unsigned int) ent->driver_data;

struct ata_probe_ent *probe_ent;
unsigned long base;
void __iomem *mmio_base;
int rc;

@ -761,7 +764,7 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
if (rc)
return rc;

rc = pci_request_regions(pdev, DRV_NAME);
rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
if (rc)
return rc;

@ -781,12 +784,6 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de
probe_ent->dev = pci_dev_to_dev(pdev);
INIT_LIST_HEAD(&probe_ent->node);

mmio_base = pcim_iomap(pdev, 5, 0);
if (!mmio_base)
return -ENOMEM;

base = (unsigned long) mmio_base;

probe_ent->sht = pdc2027x_port_info[board_idx].sht;
probe_ent->port_flags = pdc2027x_port_info[board_idx].flags;
probe_ent->pio_mask = pdc2027x_port_info[board_idx].pio_mask;
@ -796,12 +793,14 @@ static int __devinit pdc2027x_init_one(struct pci_dev *pdev, const struct pci_de

probe_ent->irq = pdev->irq;
probe_ent->irq_flags = SA_SHIRQ;
probe_ent->mmio_base = mmio_base;
probe_ent->iomap = pcim_iomap_table(pdev);

pdc_ata_setup_port(&probe_ent->port[0], base + 0x17c0);
probe_ent->port[0].bmdma_addr = base + 0x1000;
pdc_ata_setup_port(&probe_ent->port[1], base + 0x15c0);
probe_ent->port[1].bmdma_addr = base + 0x1008;
mmio_base = probe_ent->iomap[PDC_MMIO_BAR];

pdc_ata_setup_port(&probe_ent->port[0], mmio_base + 0x17c0);
probe_ent->port[0].bmdma_addr = mmio_base + 0x1000;
pdc_ata_setup_port(&probe_ent->port[1], mmio_base + 0x15c0);
probe_ent->port[1].bmdma_addr = mmio_base + 0x1008;

probe_ent->n_ports = 2;
@ -170,17 +170,17 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
struct ata_taskfile *tf = &qc->tf;
int sel66 = ap->port_no ? 0x08: 0x02;

unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
unsigned long clock = master + 0x11;
unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);
void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
void __iomem *clock = master + 0x11;
void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

u32 len;

/* Check we keep host level locking here */
if (adev->dma_mode >= XFER_UDMA_2)
outb(inb(clock) | sel66, clock);
iowrite8(ioread8(clock) | sel66, clock);
else
outb(inb(clock) & ~sel66, clock);
iowrite8(ioread8(clock) & ~sel66, clock);

/* The DMA clocks may have been trashed by a reset. FIXME: make conditional
and move to qc_issue ? */
@ -196,7 +196,7 @@ static void pdc2026x_bmdma_start(struct ata_queued_cmd *qc)
else
len |= 0x05000000;

outl(len, atapi_reg);
iowrite32(len, atapi_reg);
}

/* Activate DMA */
@ -219,19 +219,19 @@ static void pdc2026x_bmdma_stop(struct ata_queued_cmd *qc)

int sel66 = ap->port_no ? 0x08: 0x02;
/* The clock bits are in the same register for both channels */
unsigned long master = ap->host->ports[0]->ioaddr.bmdma_addr;
unsigned long clock = master + 0x11;
unsigned long atapi_reg = master + 0x20 + (4 * ap->port_no);
void __iomem *master = ap->host->ports[0]->ioaddr.bmdma_addr;
void __iomem *clock = master + 0x11;
void __iomem *atapi_reg = master + 0x20 + (4 * ap->port_no);

/* Cases the state machine will not complete correctly */
if (tf->protocol == ATA_PROT_ATAPI_DMA || ( tf->flags & ATA_TFLAG_LBA48)) {
outl(0, atapi_reg);
outb(inb(clock) & ~sel66, clock);
iowrite32(0, atapi_reg);
iowrite8(ioread8(clock) & ~sel66, clock);
}
/* Check we keep host level locking here */
/* Flip back to 33Mhz for PIO */
if (adev->dma_mode >= XFER_UDMA_2)
outb(inb(clock) & ~sel66, clock);
iowrite8(ioread8(clock) & ~sel66, clock);

ata_bmdma_stop(qc);
}
@ -294,7 +294,7 @@ static struct ata_port_operations pdc2024x_port_ops = {

.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -326,7 +326,7 @@ static struct ata_port_operations pdc2026x_port_ops = {

.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -83,7 +83,7 @@ static struct ata_port_operations pata_platform_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer_noirq,
.data_xfer = ata_data_xfer_noirq,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -134,7 +134,6 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
struct resource *io_res, *ctl_res;
struct ata_probe_ent ae;
unsigned int mmio;
int ret;

/*
* Simple resource validation ..
@ -188,48 +187,29 @@ static int __devinit pata_platform_probe(struct platform_device *pdev)
* Handle the MMIO case
*/
if (mmio) {
ae.port_flags |= ATA_FLAG_MMIO;

ae.port[0].cmd_addr = (unsigned long)
devm_ioremap(&pdev->dev, io_res->start,
io_res->end - io_res->start + 1);
if (unlikely(!ae.port[0].cmd_addr)) {
dev_err(&pdev->dev, "failed to remap IO base\n");
return -ENXIO;
}

ae.port[0].ctl_addr = (unsigned long)
devm_ioremap(&pdev->dev, ctl_res->start,
ctl_res->end - ctl_res->start + 1);
if (unlikely(!ae.port[0].ctl_addr)) {
dev_err(&pdev->dev, "failed to remap CTL base\n");
ret = -ENXIO;
goto bad_remap;
}
ae.port[0].cmd_addr = devm_ioremap(&pdev->dev, io_res->start,
io_res->end - io_res->start + 1);
ae.port[0].ctl_addr = devm_ioremap(&pdev->dev, ctl_res->start,
ctl_res->end - ctl_res->start + 1);
} else {
ae.port[0].cmd_addr = io_res->start;
ae.port[0].ctl_addr = ctl_res->start;
ae.port[0].cmd_addr = devm_ioport_map(&pdev->dev, io_res->start,
io_res->end - io_res->start + 1);
ae.port[0].ctl_addr = devm_ioport_map(&pdev->dev, ctl_res->start,
ctl_res->end - ctl_res->start + 1);
}
if (!ae.port[0].cmd_addr || !ae.port[0].ctl_addr) {
dev_err(&pdev->dev, "failed to map IO/CTL base\n");
return -ENOMEM;
}

ae.port[0].altstatus_addr = ae.port[0].ctl_addr;

pata_platform_setup_port(&ae.port[0], pdev->dev.platform_data);

if (unlikely(ata_device_add(&ae) == 0)) {
ret = -ENODEV;
goto add_failed;
}
if (unlikely(ata_device_add(&ae) == 0))
return -ENODEV;

return 0;

add_failed:
if (ae.port[0].ctl_addr && mmio)
iounmap((void __iomem *)ae.port[0].ctl_addr);
bad_remap:
if (ae.port[0].cmd_addr && mmio)
iounmap((void __iomem *)ae.port[0].cmd_addr);

return ret;
}

/**
@ -131,22 +131,24 @@ static void qdi_data_xfer(struct ata_device *adev, unsigned char *buf, unsigned

if (ata_id_has_dword_io(adev->id)) {
if (write_data)
outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
else
insl(ap->ioaddr.data_addr, buf, buflen >> 2);
ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);

if (unlikely(slop)) {
u32 pad;
if (write_data) {
memcpy(&pad, buf + buflen - slop, slop);
outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
pad = le32_to_cpu(pad);
iowrite32(pad, ap->ioaddr.data_addr);
} else {
pad = cpu_to_le32(inl(ap->ioaddr.data_addr));
pad = ioread32(ap->ioaddr.data_addr);
pad = cpu_to_le32(pad);
memcpy(buf + buflen - slop, &pad, slop);
}
}
} else
ata_pio_data_xfer(adev, buf, buflen, write_data);
ata_data_xfer(adev, buf, buflen, write_data);
}

static struct scsi_host_template qdi_sht = {
@ -234,10 +236,9 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
{
struct ata_probe_ent ae;
struct platform_device *pdev;
void __iomem *io_addr, *ctl_addr;
int ret;

unsigned long ctrl = io + 0x206;

/*
* Fill in a probe structure first of all
*/
@ -246,6 +247,12 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
if (IS_ERR(pdev))
return PTR_ERR(pdev);

ret = -ENOMEM;
io_addr = devm_ioport_map(&pdev->dev, io, 8);
ctl_addr = devm_ioport_map(&pdev->dev, io + 0x206, 1);
if (!io_addr || !ctl_addr)
goto fail;

memset(&ae, 0, sizeof(struct ata_probe_ent));
INIT_LIST_HEAD(&ae.node);
ae.dev = &pdev->dev;
@ -263,9 +270,9 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
ae.irq = irq;
ae.irq_flags = 0;
ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
ae.port[0].cmd_addr = io;
ae.port[0].altstatus_addr = ctrl;
ae.port[0].ctl_addr = ctrl;
ae.port[0].cmd_addr = io_addr;
ae.port[0].altstatus_addr = ctl_addr;
ae.port[0].ctl_addr = ctl_addr;
ata_std_ports(&ae.port[0]);

/*
@ -278,14 +285,17 @@ static __init int qdi_init_one(unsigned long port, int type, unsigned long io, i
qdi_data[nr_qdi_host].platform_dev = pdev;

printk(KERN_INFO DRV_NAME": qd%d at 0x%lx.\n", type, io);
ret = ata_device_add(&ae);
if (ret == 0) {
platform_device_unregister(pdev);
return -ENODEV;
}

ret = -ENODEV;
if (!ata_device_add(&ae))
goto fail;

qdi_host[nr_qdi_host++] = dev_get_drvdata(&pdev->dev);
return 0;

fail:
platform_device_unregister(pdev);
return ret;
}

/**
@ -255,7 +255,7 @@ static const struct ata_port_operations radisys_pata_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = radisys_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -115,7 +115,7 @@ static struct ata_port_operations rz1000_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
@ -220,7 +220,7 @@ static struct ata_port_operations sc1200_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = sc1200_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -348,7 +348,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -381,7 +381,7 @@ static struct ata_port_operations serverworks_csb_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -252,7 +252,7 @@ static struct ata_port_operations sil680_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -603,7 +603,7 @@ static const struct ata_port_operations sis_133_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -634,7 +634,7 @@ static const struct ata_port_operations sis_133_early_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -666,7 +666,7 @@ static const struct ata_port_operations sis_100_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -697,7 +697,7 @@ static const struct ata_port_operations sis_66_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -728,7 +728,7 @@ static const struct ata_port_operations sis_old_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -262,7 +262,7 @@ static struct ata_port_operations sl82c105_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -221,7 +221,7 @@ static struct ata_port_operations triflex_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -334,7 +334,7 @@ static struct ata_port_operations via_port_ops = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -367,7 +367,7 @@ static struct ata_port_operations via_port_ops_noirq = {
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,

.data_xfer = ata_pio_data_xfer_noirq,
.data_xfer = ata_data_xfer_noirq,

.irq_handler = ata_interrupt,
.irq_clear = ata_bmdma_irq_clear,
@ -100,22 +100,24 @@ static void winbond_data_xfer(struct ata_device *adev, unsigned char *buf, unsig

if (ata_id_has_dword_io(adev->id)) {
if (write_data)
outsl(ap->ioaddr.data_addr, buf, buflen >> 2);
iowrite32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);
else
insl(ap->ioaddr.data_addr, buf, buflen >> 2);
ioread32_rep(ap->ioaddr.data_addr, buf, buflen >> 2);

if (unlikely(slop)) {
u32 pad;
if (write_data) {
memcpy(&pad, buf + buflen - slop, slop);
outl(le32_to_cpu(pad), ap->ioaddr.data_addr);
pad = le32_to_cpu(pad);
iowrite32(pad, ap->ioaddr.data_addr);
} else {
pad = cpu_to_le16(inl(ap->ioaddr.data_addr));
pad = ioread32(ap->ioaddr.data_addr);
pad = cpu_to_le16(pad);
memcpy(buf + buflen - slop, &pad, slop);
}
}
} else
ata_pio_data_xfer(adev, buf, buflen, write_data);
ata_data_xfer(adev, buf, buflen, write_data);
}

static struct scsi_host_template winbond_sht = {
@ -197,6 +199,8 @@ static __init int winbond_init_one(unsigned long port)
return 0;

for (i = 0; i < 2 ; i ++) {
unsigned long cmd_port = 0x1F0 - (0x80 * i);
void __iomem *cmd_addr, *ctl_addr;

if (reg & (1 << i)) {
/*
@ -207,6 +211,13 @@ static __init int winbond_init_one(unsigned long port)
if (IS_ERR(pdev))
return PTR_ERR(pdev);

cmd_addr = devm_ioport_map(&pdev->dev, cmd_port, 8);
ctl_addr = devm_ioport_map(&pdev->dev, cmd_port + 0x0206, 1);
if (!cmd_addr || !ctl_addr) {
platform_device_unregister(pdev);
return -ENOMEM;
}

memset(&ae, 0, sizeof(struct ata_probe_ent));
INIT_LIST_HEAD(&ae.node);
ae.dev = &pdev->dev;
@ -220,9 +231,9 @@ static __init int winbond_init_one(unsigned long port)
ae.irq = 14 + i;
ae.irq_flags = 0;
ae.port_flags = ATA_FLAG_SLAVE_POSS | ATA_FLAG_SRST;
ae.port[0].cmd_addr = 0x1F0 - (0x80 * i);
ae.port[0].altstatus_addr = ae.port[0].cmd_addr + 0x0206;
ae.port[0].ctl_addr = ae.port[0].altstatus_addr;
ae.port[0].cmd_addr = cmd_addr;
ae.port[0].altstatus_addr = ctl_addr;
ae.port[0].ctl_addr = ctl_addr;
ata_std_ports(&ae.port[0]);
/*
* Hook in a private data structure per channel
@ -51,9 +51,15 @@
#define ADMA_ATA_REGS(base,port_no) ((base) + ((port_no) * 0x40))

/* macro to calculate base address for ADMA regs */
#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))
#define ADMA_REGS(base,port_no) ((base) + 0x80 + ((port_no) * 0x20))

/* macro to obtain addresses from ata_host */
#define ADMA_HOST_REGS(host,port_no) \
ADMA_REGS((host)->iomap[ADMA_MMIO_BAR], port_no)

enum {
ADMA_MMIO_BAR = 4,

ADMA_PORTS = 2,
ADMA_CPB_BYTES = 40,
ADMA_PRD_BYTES = LIBATA_MAX_PRD * 16,
@ -166,7 +172,7 @@ static const struct ata_port_operations adma_ata_ops = {
.qc_prep = adma_qc_prep,
.qc_issue = adma_qc_issue,
.eng_timeout = adma_eng_timeout,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.irq_handler = adma_intr,
.irq_clear = adma_irq_clear,
.port_start = adma_port_start,
@ -234,11 +240,10 @@ static void adma_reset_engine(void __iomem *chan)
static void adma_reinit_engine(struct ata_port *ap)
{
struct adma_port_priv *pp = ap->private_data;
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *chan = ADMA_REGS(mmio_base, ap->port_no);
void __iomem *chan = ADMA_HOST_REGS(ap->host, ap->port_no);

/* mask/clear ATA interrupts */
writeb(ATA_NIEN, (void __iomem *)ap->ioaddr.ctl_addr);
writeb(ATA_NIEN, ap->ioaddr.ctl_addr);
ata_check_status(ap);

/* reset the ADMA engine */
@ -262,7 +267,7 @@ static void adma_reinit_engine(struct ata_port *ap)

static inline void adma_enter_reg_mode(struct ata_port *ap)
{
void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
void __iomem *chan = ADMA_HOST_REGS(ap->host, ap->port_no);

writew(aPIOMD4, chan + ADMA_CONTROL);
readb(chan + ADMA_STATUS); /* flush */
@ -409,7 +414,7 @@ static void adma_qc_prep(struct ata_queued_cmd *qc)
static inline void adma_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
void __iomem *chan = ADMA_REGS(ap->host->mmio_base, ap->port_no);
void __iomem *chan = ADMA_HOST_REGS(ap->host, ap->port_no);

VPRINTK("ENTER, ap %p\n", ap);

@ -442,13 +447,12 @@ static unsigned int adma_qc_issue(struct ata_queued_cmd *qc)
static inline unsigned int adma_intr_pkt(struct ata_host *host)
{
unsigned int handled = 0, port_no;
u8 __iomem *mmio_base = host->mmio_base;

for (port_no = 0; port_no < host->n_ports; ++port_no) {
struct ata_port *ap = host->ports[port_no];
struct adma_port_priv *pp;
struct ata_queued_cmd *qc;
void __iomem *chan = ADMA_REGS(mmio_base, port_no);
void __iomem *chan = ADMA_HOST_REGS(host, port_no);
u8 status = readb(chan + ADMA_STATUS);

if (status == 0)
@ -522,7 +526,7 @@ static irqreturn_t adma_intr(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}

static void adma_ata_setup_port(struct ata_ioports *port, unsigned long base)
static void adma_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr =
port->data_addr = base + 0x000;
@ -570,7 +574,7 @@ static int adma_port_start(struct ata_port *ap)

static void adma_port_stop(struct ata_port *ap)
{
adma_reset_engine(ADMA_REGS(ap->host->mmio_base, ap->port_no));
adma_reset_engine(ADMA_HOST_REGS(ap->host, ap->port_no));
}

static void adma_host_stop(struct ata_host *host)
@ -578,14 +582,14 @@ static void adma_host_stop(struct ata_host *host)
unsigned int port_no;

for (port_no = 0; port_no < ADMA_PORTS; ++port_no)
adma_reset_engine(ADMA_REGS(host->mmio_base, port_no));
adma_reset_engine(ADMA_HOST_REGS(host, port_no));
}

static void adma_host_init(unsigned int chip_id,
struct ata_probe_ent *probe_ent)
{
unsigned int port_no;
void __iomem *mmio_base = probe_ent->mmio_base;
void __iomem *mmio_base = probe_ent->iomap[ADMA_MMIO_BAR];

/* enable/lock aGO operation */
writeb(7, mmio_base + ADMA_MODE_LOCK);
@ -615,7 +619,7 @@ static int adma_set_dma_masks(struct pci_dev *pdev, void __iomem *mmio_base)
}

static int adma_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
const struct pci_device_id *ent)
{
static int printed_version;
struct ata_probe_ent *probe_ent = NULL;
@ -630,16 +634,13 @@ static int adma_ata_init_one(struct pci_dev *pdev,
if (rc)
return rc;

rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
return rc;

if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
return -ENODEV;

mmio_base = pcim_iomap(pdev, 4, 0);
if (mmio_base == NULL)
return -ENOMEM;
rc = pcim_iomap_regions(pdev, 1 << ADMA_MMIO_BAR, DRV_NAME);
if (rc)
return rc;
mmio_base = pcim_iomap_table(pdev)[ADMA_MMIO_BAR];

rc = adma_set_dma_masks(pdev, mmio_base);
if (rc)
@ -661,12 +662,12 @@ static int adma_ata_init_one(struct pci_dev *pdev,

probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->mmio_base = mmio_base;
probe_ent->n_ports = ADMA_PORTS;
probe_ent->iomap = pcim_iomap_table(pdev);

for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
adma_ata_setup_port(&probe_ent->port[port_no],
ADMA_ATA_REGS((unsigned long)mmio_base, port_no));
ADMA_ATA_REGS(mmio_base, port_no));
}

pci_set_master(pdev);
@ -147,7 +147,7 @@ static const int scr_map[] = {

static void __iomem * inic_port_base(struct ata_port *ap)
{
return ap->host->mmio_base + ap->port_no * PORT_SIZE;
return ap->host->iomap[MMIO_BAR] + ap->port_no * PORT_SIZE;
}

static void __inic_set_pirq_mask(struct ata_port *ap, u8 mask)
@ -324,7 +324,7 @@ static void inic_host_intr(struct ata_port *ap)
static irqreturn_t inic_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
void __iomem *mmio_base = host->mmio_base;
void __iomem *mmio_base = host->iomap[MMIO_BAR];
u16 host_irq_stat;
int i, handled = 0;;

@ -566,7 +566,7 @@ static struct ata_port_operations inic_port_ops = {

.qc_prep = ata_qc_prep,
.qc_issue = inic_qc_issue,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.freeze = inic_freeze,
.thaw = inic_thaw,
@ -638,7 +638,7 @@ static int inic_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct inic_host_priv *hpriv = host->private_data;
void __iomem *mmio_base = host->mmio_base;
void __iomem *mmio_base = host->iomap[MMIO_BAR];
int rc;

ata_pci_device_do_resume(pdev);
@ -661,7 +661,7 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_port_info *pinfo = &inic_port_info;
struct ata_probe_ent *probe_ent;
struct inic_host_priv *hpriv;
void __iomem *mmio_base;
void __iomem * const *iomap;
int i, rc;

if (!printed_version++)
@ -675,9 +675,10 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;

mmio_base = pci_iomap(pdev, MMIO_BAR, 0);
if (!mmio_base)
return -ENOMEM;
rc = pcim_iomap_regions(pdev, 0x3f, DRV_NAME);
if (rc)
return rc;
iomap = pcim_iomap_table(pdev);

/* Set dma_mask. This devices doesn't support 64bit addressing. */
rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
@ -713,26 +714,25 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = SA_SHIRQ;

probe_ent->mmio_base = mmio_base;
probe_ent->iomap = iomap;

for (i = 0; i < NR_PORTS; i++) {
struct ata_ioports *port = &probe_ent->port[i];
unsigned long port_base =
(unsigned long)mmio_base + i * PORT_SIZE;
void __iomem *port_base = iomap[MMIO_BAR] + i * PORT_SIZE;

port->cmd_addr = pci_resource_start(pdev, 2 * i);
port->cmd_addr = iomap[2 * i];
port->altstatus_addr =
port->ctl_addr =
pci_resource_start(pdev, 2 * i + 1) | ATA_PCI_CTL_OFS;
port->ctl_addr = (void __iomem *)
((unsigned long)iomap[2 * i + 1] | ATA_PCI_CTL_OFS);
port->scr_addr = port_base + PORT_SCR;

ata_std_ports(port);
}

probe_ent->private_data = hpriv;
hpriv->cached_hctl = readw(mmio_base + HOST_CTL);
hpriv->cached_hctl = readw(iomap[MMIO_BAR] + HOST_CTL);

rc = init_controller(mmio_base, hpriv->cached_hctl);
rc = init_controller(iomap[MMIO_BAR], hpriv->cached_hctl);
if (rc) {
dev_printk(KERN_ERR, &pdev->dev,
"failed to initialize controller\n");
@ -404,7 +404,7 @@ static const struct ata_port_operations mv5_ops = {

.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,

.eng_timeout = mv_eng_timeout,

@ -431,7 +431,7 @@ static const struct ata_port_operations mv6_ops = {

.qc_prep = mv_qc_prep,
.qc_issue = mv_qc_issue,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,

.eng_timeout = mv_eng_timeout,

@ -458,7 +458,7 @@ static const struct ata_port_operations mv_iie_ops = {

.qc_prep = mv_qc_prep_iie,
.qc_issue = mv_qc_issue,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,

.eng_timeout = mv_eng_timeout,

@ -615,7 +615,7 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)

static inline void __iomem *mv_ap_base(struct ata_port *ap)
{
return mv_port_base(ap->host->mmio_base, ap->port_no);
return mv_port_base(ap->host->iomap[MV_PRIMARY_BAR], ap->port_no);
}

static inline int mv_get_hc_count(unsigned long port_flags)
@ -1299,7 +1299,7 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
*/
static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
{
void __iomem *mmio = host->mmio_base;
void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
void __iomem *hc_mmio = mv_hc_base(mmio, hc);
struct ata_queued_cmd *qc;
u32 hc_irq_cause;
@ -1342,8 +1342,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
} else {
/* PIO: check for device (drive) interrupt */
if ((DEV_IRQ << hard_port) & hc_irq_cause) {
ata_status = readb((void __iomem *)
ap->ioaddr.status_addr);
ata_status = readb(ap->ioaddr.status_addr);
handled = 1;
/* ignore spurious intr if drive still BUSY */
if (ata_status & ATA_BUSY) {
@ -1403,7 +1402,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
unsigned int hc, handled = 0, n_hcs;
void __iomem *mmio = host->mmio_base;
void __iomem *mmio = host->iomap[MV_PRIMARY_BAR];
struct mv_host_priv *hpriv;
u32 irq_stat;

@ -1479,22 +1478,24 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)

static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
{
void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);

if (ofs != 0xffffffffU)
return readl(mmio + ofs);
return readl(addr + ofs);
else
return (u32) ofs;
}

static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
{
void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
void __iomem *addr = mv5_phy_base(mmio, ap->port_no);
unsigned int ofs = mv5_scr_offset(sc_reg_in);

if (ofs != 0xffffffffU)
writelfl(val, mmio + ofs);
writelfl(val, addr + ofs);
}

static void mv5_reset_bus(struct pci_dev *pdev, void __iomem *mmio)
@ -1856,7 +1857,7 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
static void mv_stop_and_reset(struct ata_port *ap)
{
struct mv_host_priv *hpriv = ap->host->private_data;
void __iomem *mmio = ap->host->mmio_base;
void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];

mv_stop_dma(ap);

@ -1954,10 +1955,10 @@ comreset_retry:
break;
}

tf.lbah = readb((void __iomem *) ap->ioaddr.lbah_addr);
tf.lbam = readb((void __iomem *) ap->ioaddr.lbam_addr);
tf.lbal = readb((void __iomem *) ap->ioaddr.lbal_addr);
tf.nsect = readb((void __iomem *) ap->ioaddr.nsect_addr);
tf.lbah = readb(ap->ioaddr.lbah_addr);
tf.lbam = readb(ap->ioaddr.lbam_addr);
tf.lbal = readb(ap->ioaddr.lbal_addr);
tf.nsect = readb(ap->ioaddr.nsect_addr);

dev->class = ata_dev_classify(&tf);
if (!ata_dev_enabled(dev)) {
@ -1989,17 +1990,17 @@ static void mv_phy_reset(struct ata_port *ap)
*/
static void mv_eng_timeout(struct ata_port *ap)
{
void __iomem *mmio = ap->host->iomap[MV_PRIMARY_BAR];
struct ata_queued_cmd *qc;
unsigned long flags;

ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
DPRINTK("All regs @ start of eng_timeout\n");
mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
to_pci_dev(ap->host->dev));
mv_dump_all_regs(mmio, ap->port_no, to_pci_dev(ap->host->dev));

qc = ata_qc_from_tag(ap, ap->active_tag);
printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
mmio, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);

spin_lock_irqsave(&ap->host->lock, flags);
mv_err_intr(ap, 0);
@ -2027,7 +2028,7 @@ static void mv_eng_timeout(struct ata_port *ap)
*/
static void mv_port_init(struct ata_ioports *port, void __iomem *port_mmio)
{
unsigned long shd_base = (unsigned long) port_mmio + SHD_BLK_OFS;
void __iomem *shd_base = port_mmio + SHD_BLK_OFS;
unsigned serr_ofs;

/* PIO related setup
@ -2175,7 +2176,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
unsigned int board_idx)
{
int rc = 0, n_hc, port, hc;
void __iomem *mmio = probe_ent->mmio_base;
void __iomem *mmio = probe_ent->iomap[MV_PRIMARY_BAR];
struct mv_host_priv *hpriv = probe_ent->private_data;

/* global interrupt mask */
@ -2297,7 +2298,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
struct ata_probe_ent *probe_ent;
struct mv_host_priv *hpriv;
unsigned int board_idx = (unsigned int)ent->driver_data;
void __iomem *mmio_base;
int rc;

if (!printed_version++)
@ -2308,11 +2308,11 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
pci_set_master(pdev);

rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
rc = pcim_iomap_regions(pdev, 1 << MV_PRIMARY_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
}

probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
if (probe_ent == NULL)
@ -2321,10 +2321,6 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->dev = pci_dev_to_dev(pdev);
INIT_LIST_HEAD(&probe_ent->node);

mmio_base = pcim_iomap(pdev, MV_PRIMARY_BAR, 0);
if (mmio_base == NULL)
return -ENOMEM;

hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
@ -2337,7 +2333,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->mmio_base = mmio_base;
probe_ent->iomap = pcim_iomap_table(pdev);
probe_ent->private_data = hpriv;

/* initialize adapter */
@ -54,6 +54,8 @@
#define NV_ADMA_DMA_BOUNDARY 0xffffffffUL

enum {
NV_MMIO_BAR = 5,

NV_PORTS = 2,
NV_PIO_MASK = 0x1f,
NV_MWDMA_MASK = 0x07,
@ -357,7 +359,7 @@ static const struct ata_port_operations nv_generic_ops = {
.thaw = ata_bmdma_thaw,
.error_handler = nv_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,
.irq_handler = nv_generic_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = nv_scr_read,
@ -382,7 +384,7 @@ static const struct ata_port_operations nv_nf2_ops = {
.thaw = nv_nf2_thaw,
.error_handler = nv_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,
.irq_handler = nv_nf2_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = nv_scr_read,
@ -407,7 +409,7 @@ static const struct ata_port_operations nv_ck804_ops = {
.thaw = nv_ck804_thaw,
.error_handler = nv_error_handler,
.post_internal_cmd = ata_bmdma_post_internal_cmd,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,
.irq_handler = nv_ck804_interrupt,
.irq_clear = ata_bmdma_irq_clear,
.scr_read = nv_scr_read,
@ -434,7 +436,7 @@ static const struct ata_port_operations nv_adma_ops = {
.thaw = nv_ck804_thaw,
.error_handler = nv_adma_error_handler,
.post_internal_cmd = nv_adma_bmdma_stop,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.irq_handler = nv_adma_interrupt,
.irq_clear = nv_adma_irq_clear,
.scr_read = nv_scr_read,
@ -736,7 +738,7 @@ static irqreturn_t nv_adma_interrupt(int irq, void *dev_instance)

/* if in ATA register mode, use standard ata interrupt handler */
if (pp->flags & NV_ADMA_PORT_REGISTER_MODE) {
u8 irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804)
u8 irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804)
>> (NV_INT_PORT_SHIFT * i);
if(ata_tag_valid(ap->active_tag))
/** NV_INT_DEV indication seems unreliable at times
@ -827,7 +829,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
u16 status = readw(mmio + NV_ADMA_STAT);
u32 notifier = readl(mmio + NV_ADMA_NOTIFIER);
u32 notifier_error = readl(mmio + NV_ADMA_NOTIFIER_ERROR);
unsigned long dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
void __iomem *dma_stat_addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;

/* clear ADMA status */
writew(status, mmio + NV_ADMA_STAT);
@ -835,7 +837,7 @@ static void nv_adma_irq_clear(struct ata_port *ap)
pp->notifier_clear_block);

/** clear legacy status */
outb(inb(dma_stat_addr), dma_stat_addr);
iowrite8(ioread8(dma_stat_addr), dma_stat_addr);
}

static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
@ -851,15 +853,15 @@ static void nv_adma_bmdma_setup(struct ata_queued_cmd *qc)
}

/* load PRD table addr. */
outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
iowrite32(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

/* specify data direction, triple-check start bit is clear */
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
if (!rw)
dmactl |= ATA_DMA_WR;

outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
iowrite8(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

/* issue r/w command */
ata_exec_command(ap, &qc->tf);
@ -877,9 +879,9 @@ static void nv_adma_bmdma_start(struct ata_queued_cmd *qc)
}

/* start host DMA transaction */
dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
outb(dmactl | ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
dmactl = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
iowrite8(dmactl | ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}

static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
@ -891,8 +893,8 @@ static void nv_adma_bmdma_stop(struct ata_queued_cmd *qc)
return;

/* clear start/stop bit */
outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
iowrite8(ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
ata_altstatus(ap); /* dummy read */
@ -904,7 +906,7 @@ static u8 nv_adma_bmdma_status(struct ata_port *ap)

WARN_ON(!(pp->flags & NV_ADMA_PORT_REGISTER_MODE));

return inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
return ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}

static int nv_adma_port_start(struct ata_port *ap)
@ -927,10 +929,10 @@ static int nv_adma_port_start(struct ata_port *ap)
if (!pp)
return -ENOMEM;

mmio = ap->host->mmio_base + NV_ADMA_PORT +
mmio = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_PORT +
ap->port_no * NV_ADMA_PORT_SIZE;
pp->ctl_block = mmio;
pp->gen_block = ap->host->mmio_base + NV_ADMA_GEN;
pp->gen_block = ap->host->iomap[NV_MMIO_BAR] + NV_ADMA_GEN;
pp->notifier_clear_block = pp->gen_block +
NV_ADMA_NOTIFIER_CLEAR + (4 * ap->port_no);

@ -1046,26 +1048,26 @@ static int nv_adma_port_resume(struct ata_port *ap)

static void nv_adma_setup_port(struct ata_probe_ent *probe_ent, unsigned int port)
{
void __iomem *mmio = probe_ent->mmio_base;
void __iomem *mmio = probe_ent->iomap[NV_MMIO_BAR];
struct ata_ioports *ioport = &probe_ent->port[port];

VPRINTK("ENTER\n");

mmio += NV_ADMA_PORT + port * NV_ADMA_PORT_SIZE;

ioport->cmd_addr = (unsigned long) mmio;
ioport->data_addr = (unsigned long) mmio + (ATA_REG_DATA * 4);
ioport->cmd_addr = mmio;
ioport->data_addr = mmio + (ATA_REG_DATA * 4);
ioport->error_addr =
ioport->feature_addr = (unsigned long) mmio + (ATA_REG_ERR * 4);
ioport->nsect_addr = (unsigned long) mmio + (ATA_REG_NSECT * 4);
ioport->lbal_addr = (unsigned long) mmio + (ATA_REG_LBAL * 4);
ioport->lbam_addr = (unsigned long) mmio + (ATA_REG_LBAM * 4);
ioport->lbah_addr = (unsigned long) mmio + (ATA_REG_LBAH * 4);
ioport->device_addr = (unsigned long) mmio + (ATA_REG_DEVICE * 4);
ioport->feature_addr = mmio + (ATA_REG_ERR * 4);
ioport->nsect_addr = mmio + (ATA_REG_NSECT * 4);
ioport->lbal_addr = mmio + (ATA_REG_LBAL * 4);
ioport->lbam_addr = mmio + (ATA_REG_LBAM * 4);
ioport->lbah_addr = mmio + (ATA_REG_LBAH * 4);
ioport->device_addr = mmio + (ATA_REG_DEVICE * 4);
ioport->status_addr =
ioport->command_addr = (unsigned long) mmio + (ATA_REG_STATUS * 4);
ioport->command_addr = mmio + (ATA_REG_STATUS * 4);
ioport->altstatus_addr =
ioport->ctl_addr = (unsigned long) mmio + 0x20;
ioport->ctl_addr = mmio + 0x20;
}

static int nv_adma_host_init(struct ata_probe_ent *probe_ent)
@ -1252,7 +1254,7 @@ static irqreturn_t nv_nf2_interrupt(int irq, void *dev_instance)
irqreturn_t ret;

spin_lock(&host->lock);
irq_stat = inb(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
irq_stat = ioread8(host->ports[0]->ioaddr.scr_addr + NV_INT_STATUS);
ret = nv_do_interrupt(host, irq_stat);
spin_unlock(&host->lock);

@ -1266,7 +1268,7 @@ static irqreturn_t nv_ck804_interrupt(int irq, void *dev_instance)
irqreturn_t ret;

spin_lock(&host->lock);
irq_stat = readb(host->mmio_base + NV_INT_STATUS_CK804);
irq_stat = readb(host->iomap[NV_MMIO_BAR] + NV_INT_STATUS_CK804);
ret = nv_do_interrupt(host, irq_stat);
spin_unlock(&host->lock);

@ -1278,7 +1280,7 @@ static u32 nv_scr_read (struct ata_port *ap, unsigned int sc_reg)
if (sc_reg > SCR_CONTROL)
return 0xffffffffU;

return ioread32((void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
return ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
@ -1286,36 +1288,36 @@ static void nv_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
if (sc_reg > SCR_CONTROL)
return;

iowrite32(val, (void __iomem *)ap->ioaddr.scr_addr + (sc_reg * 4));
iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void nv_nf2_freeze(struct ata_port *ap)
{
unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;

mask = inb(scr_addr + NV_INT_ENABLE);
mask = ioread8(scr_addr + NV_INT_ENABLE);
mask &= ~(NV_INT_ALL << shift);
outb(mask, scr_addr + NV_INT_ENABLE);
iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_nf2_thaw(struct ata_port *ap)
{
unsigned long scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
void __iomem *scr_addr = ap->host->ports[0]->ioaddr.scr_addr;
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;

outb(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);
iowrite8(NV_INT_ALL << shift, scr_addr + NV_INT_STATUS);

mask = inb(scr_addr + NV_INT_ENABLE);
mask = ioread8(scr_addr + NV_INT_ENABLE);
mask |= (NV_INT_MASK << shift);
outb(mask, scr_addr + NV_INT_ENABLE);
iowrite8(mask, scr_addr + NV_INT_ENABLE);
}

static void nv_ck804_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;

@ -1326,7 +1328,7 @@ static void nv_ck804_freeze(struct ata_port *ap)

static void nv_ck804_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *mmio_base = ap->host->iomap[NV_MMIO_BAR];
int shift = ap->port_no * NV_INT_PORT_SHIFT;
u8 mask;

@ -1412,7 +1414,7 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
struct nv_host_priv *hpriv;
int rc;
u32 bar;
unsigned long base;
void __iomem *base;
unsigned long type = ent->driver_data;
int mask_set = 0;

@ -1464,15 +1466,14 @@ static int nv_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (!probe_ent)
return -ENOMEM;

probe_ent->mmio_base = pcim_iomap(pdev, 5, 0);
if (!probe_ent->mmio_base)
if (!pcim_iomap(pdev, NV_MMIO_BAR, 0))
return -EIO;
probe_ent->iomap = pcim_iomap_table(pdev);

probe_ent->private_data = hpriv;
hpriv->type = type;

base = (unsigned long)probe_ent->mmio_base;

base = probe_ent->iomap[NV_MMIO_BAR];
probe_ent->port[0].scr_addr = base + NV_PORT0_SCR_REG_OFFSET;
probe_ent->port[1].scr_addr = base + NV_PORT1_SCR_REG_OFFSET;
@ -50,6 +50,8 @@

enum {
PDC_MMIO_BAR = 3,

/* register offsets */
PDC_FEATURE = 0x04, /* Feature/Error reg (per port) */
PDC_SECTOR_COUNT = 0x08, /* Sector count reg (per port) */
@ -167,7 +169,7 @@ static const struct ata_port_operations pdc_sata_ops = {
.thaw = pdc_thaw,
.error_handler = pdc_error_handler,
.post_internal_cmd = pdc_post_internal_cmd,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.irq_handler = pdc_interrupt,
.irq_clear = pdc_irq_clear,

@ -192,7 +194,7 @@ static const struct ata_port_operations pdc_old_sata_ops = {
.thaw = pdc_thaw,
.error_handler = pdc_error_handler,
.post_internal_cmd = pdc_post_internal_cmd,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.irq_handler = pdc_interrupt,
.irq_clear = pdc_irq_clear,

@ -214,7 +216,7 @@ static const struct ata_port_operations pdc_pata_ops = {

.qc_prep = pdc_qc_prep,
.qc_issue = pdc_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.eng_timeout = pdc_eng_timeout,
.irq_handler = pdc_interrupt,
.irq_clear = pdc_irq_clear,
@ -348,7 +350,7 @@ static int pdc_port_start(struct ata_port *ap)

static void pdc_reset_port(struct ata_port *ap)
{
void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_CTLSTAT;
void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_CTLSTAT;
unsigned int i;
u32 tmp;

@ -394,7 +396,7 @@ static u32 pdc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
if (sc_reg > SCR_CONTROL || ap->cbl != ATA_CBL_SATA)
return 0xffffffffU;
return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
}

@ -403,7 +405,7 @@ static void pdc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
{
if (sc_reg > SCR_CONTROL || ap->cbl != ATA_CBL_SATA)
return;
writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}

static void pdc_atapi_pkt(struct ata_queued_cmd *qc)
@ -627,7 +629,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
{
unsigned int handled = 0;
u32 tmp;
void __iomem *mmio = (void __iomem *) ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;
void __iomem *mmio = ap->ioaddr.cmd_addr + PDC_GLOBAL_CTL;

tmp = readl(mmio);
if (tmp & PDC_ERR_MASK) {
@ -656,7 +658,7 @@ static inline unsigned int pdc_host_intr( struct ata_port *ap,
static void pdc_irq_clear(struct ata_port *ap)
{
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

readl(mmio + PDC_INT_SEQMASK);
}
@ -672,12 +674,12 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance)

VPRINTK("ENTER\n");

if (!host || !host->mmio_base) {
if (!host || !host->iomap[PDC_MMIO_BAR]) {
VPRINTK("QUICK EXIT\n");
return IRQ_NONE;
}

mmio_base = host->mmio_base;
mmio_base = host->iomap[PDC_MMIO_BAR];

/* reading should also clear interrupts */
mask = readl(mmio_base + PDC_INT_SEQMASK);
@ -722,18 +724,19 @@ static inline void pdc_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
unsigned int port_no = ap->port_no;
u8 seq = (u8) (port_no + 1);

VPRINTK("ENTER, ap %p\n", ap);

writel(0x00000001, ap->host->mmio_base + (seq * 4));
readl(ap->host->mmio_base + (seq * 4)); /* flush */
writel(0x00000001, mmio + (seq * 4));
readl(mmio + (seq * 4)); /* flush */

pp->pkt[2] = seq;
wmb(); /* flush PRD, pkt writes */
writel(pp->pkt_dma, (void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
writel(pp->pkt_dma, ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT); /* flush */
}

static unsigned int pdc_qc_issue_prot(struct ata_queued_cmd *qc)
@ -808,7 +811,7 @@ static int pdc_old_check_atapi_dma(struct ata_queued_cmd *qc)
return pdc_check_atapi_dma(qc);
}

static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)
static void pdc_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base;
port->data_addr = base;
@ -828,7 +831,7 @@ static void pdc_ata_setup_port(struct ata_ioports *port, unsigned long base)

static void pdc_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
{
void __iomem *mmio = pe->mmio_base;
void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
struct pdc_host_priv *hp = pe->private_data;
int hotplug_offset;
u32 tmp;
@ -884,8 +887,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
static int printed_version;
struct ata_probe_ent *probe_ent;
struct pdc_host_priv *hp;
unsigned long base;
void __iomem *mmio_base;
void __iomem *base;
unsigned int board_idx = (unsigned int) ent->driver_data;
int rc;
u8 tmp;
@ -897,11 +899,11 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
if (rc)
return rc;

rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
rc = pcim_iomap_regions(pdev, 1 << PDC_MMIO_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
}

rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
@ -917,11 +919,6 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
probe_ent->dev = pci_dev_to_dev(pdev);
INIT_LIST_HEAD(&probe_ent->node);

mmio_base = pcim_iomap(pdev, 3, 0);
if (mmio_base == NULL)
return -ENOMEM;
base = (unsigned long) mmio_base;

hp = devm_kzalloc(&pdev->dev, sizeof(*hp), GFP_KERNEL);
if (hp == NULL)
return -ENOMEM;
@ -937,7 +934,9 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e

probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->mmio_base = mmio_base;
probe_ent->iomap = pcim_iomap_table(pdev);

base = probe_ent->iomap[PDC_MMIO_BAR];

pdc_ata_setup_port(&probe_ent->port[0], base + 0x200);
pdc_ata_setup_port(&probe_ent->port[1], base + 0x280);
@ -964,7 +963,7 @@ static int pdc_ata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
/* Fall through */
case board_2037x:
/* TX2plus boards also have a PATA port */
tmp = readb(mmio_base + PDC_FLASH_CTL+1);
tmp = readb(base + PDC_FLASH_CTL+1);
if (!(tmp & 0x80)) {
probe_ent->n_ports = 3;
pdc_ata_setup_port(&probe_ent->port[2], base + 0x300);
@ -43,6 +43,8 @@
#define DRV_VERSION "0.06"

enum {
QS_MMIO_BAR = 4,

QS_PORTS = 4,
QS_MAX_PRD = LIBATA_MAX_PRD,
QS_CPB_ORDER = 6,
@ -155,7 +157,7 @@ static const struct ata_port_operations qs_ata_ops = {
.phy_reset = qs_phy_reset,
.qc_prep = qs_qc_prep,
.qc_issue = qs_qc_issue,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.eng_timeout = qs_eng_timeout,
.irq_handler = qs_intr,
.irq_clear = qs_irq_clear,
@ -194,6 +196,11 @@ static struct pci_driver qs_ata_pci_driver = {
.remove = ata_pci_remove_one,
};

static void __iomem *qs_mmio_base(struct ata_host *host)
{
return host->iomap[QS_MMIO_BAR];
}

static int qs_check_atapi_dma(struct ata_queued_cmd *qc)
{
return 1; /* ATAPI DMA not supported */
@ -216,7 +223,7 @@ static void qs_irq_clear(struct ata_port *ap)

static inline void qs_enter_reg_mode(struct ata_port *ap)
{
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

writeb(QS_CTR0_REG, chan + QS_CCT_CTR0);
readb(chan + QS_CCT_CTR0); /* flush */
@ -224,7 +231,7 @@ static inline void qs_enter_reg_mode(struct ata_port *ap)

static inline void qs_reset_channel_logic(struct ata_port *ap)
{
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

writeb(QS_CTR1_RCHN, chan + QS_CCT_CTR1);
readb(chan + QS_CCT_CTR0); /* flush */
@ -254,14 +261,14 @@ static u32 qs_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
if (sc_reg > SCR_CONTROL)
return ~0U;
return readl((void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
return readl(ap->ioaddr.scr_addr + (sc_reg * 8));
}

static void qs_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return;
writel(val, (void __iomem *)(ap->ioaddr.scr_addr + (sc_reg * 8)));
writel(val, ap->ioaddr.scr_addr + (sc_reg * 8));
}

static unsigned int qs_fill_sg(struct ata_queued_cmd *qc)
@ -338,7 +345,7 @@ static void qs_qc_prep(struct ata_queued_cmd *qc)
static inline void qs_packet_start(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
u8 __iomem *chan = ap->host->mmio_base + (ap->port_no * 0x4000);
u8 __iomem *chan = qs_mmio_base(ap->host) + (ap->port_no * 0x4000);

VPRINTK("ENTER, ap %p\n", ap);

@ -375,7 +382,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host *host)
{
unsigned int handled = 0;
u8 sFFE;
u8 __iomem *mmio_base = host->mmio_base;
u8 __iomem *mmio_base = qs_mmio_base(host);

do {
u32 sff0 = readl(mmio_base + QS_HST_SFF);
@ -467,7 +474,7 @@ static irqreturn_t qs_intr(int irq, void *dev_instance)
return IRQ_RETVAL(handled);
}

static void qs_ata_setup_port(struct ata_ioports *port, unsigned long base)
static void qs_ata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr =
port->data_addr = base + 0x400;
@ -489,7 +496,7 @@ static int qs_port_start(struct ata_port *ap)
{
struct device *dev = ap->host->dev;
struct qs_port_priv *pp;
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *mmio_base = qs_mmio_base(ap->host);
void __iomem *chan = mmio_base + (ap->port_no * 0x4000);
u64 addr;
int rc;
@ -516,7 +523,7 @@ static int qs_port_start(struct ata_port *ap)

static void qs_host_stop(struct ata_host *host)
{
void __iomem *mmio_base = host->mmio_base;
void __iomem *mmio_base = qs_mmio_base(host);

writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
writeb(QS_CNFG3_GSRST, mmio_base + QS_HCF_CNFG3); /* global reset */
@ -524,7 +531,7 @@ static void qs_host_stop(struct ata_host *host)

static void qs_host_init(unsigned int chip_id, struct ata_probe_ent *pe)
{
void __iomem *mmio_base = pe->mmio_base;
void __iomem *mmio_base = pe->iomap[QS_MMIO_BAR];
unsigned int port_no;

writeb(0, mmio_base + QS_HCT_CTRL); /* disable host interrupts */
@ -599,8 +606,8 @@ static int qs_ata_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
static int printed_version;
struct ata_probe_ent *probe_ent = NULL;
void __iomem *mmio_base;
struct ata_probe_ent *probe_ent;
void __iomem * const *iomap;
unsigned int board_idx = (unsigned int) ent->driver_data;
int rc, port_no;

@ -611,18 +618,15 @@ static int qs_ata_init_one(struct pci_dev *pdev,
if (rc)
return rc;

rc = pci_request_regions(pdev, DRV_NAME);
if (rc)
return rc;

if ((pci_resource_flags(pdev, 4) & IORESOURCE_MEM) == 0)
if ((pci_resource_flags(pdev, QS_MMIO_BAR) & IORESOURCE_MEM) == 0)
return -ENODEV;

mmio_base = pcim_iomap(pdev, 4, 0);
if (mmio_base == NULL)
return -ENOMEM;
rc = pcim_iomap_regions(pdev, 1 << QS_MMIO_BAR, DRV_NAME);
if (rc)
return rc;
iomap = pcim_iomap_table(pdev);

rc = qs_set_dma_masks(pdev, mmio_base);
rc = qs_set_dma_masks(pdev, iomap[QS_MMIO_BAR]);
if (rc)
return rc;

@ -642,12 +646,12 @@ static int qs_ata_init_one(struct pci_dev *pdev,

probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->mmio_base = mmio_base;
probe_ent->iomap = iomap;
probe_ent->n_ports = QS_PORTS;

for (port_no = 0; port_no < probe_ent->n_ports; ++port_no) {
unsigned long chan = (unsigned long)mmio_base +
(port_no * 0x4000);
void __iomem *chan =
probe_ent->iomap[QS_MMIO_BAR] + (port_no * 0x4000);
qs_ata_setup_port(&probe_ent->port[port_no], chan);
}
@ -49,6 +49,8 @@
#define DRV_VERSION "2.0"

enum {
SIL_MMIO_BAR = 5,

/*
* host flags
*/
@ -200,7 +202,7 @@ static const struct ata_port_operations sil_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.freeze = sil_freeze,
.thaw = sil_thaw,
.error_handler = ata_bmdma_error_handler,
@ -295,7 +297,8 @@ static void sil_post_set_mode (struct ata_port *ap)
{
struct ata_host *host = ap->host;
struct ata_device *dev;
void __iomem *addr = host->mmio_base + sil_port[ap->port_no].xfer_mode;
void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
void __iomem *addr = mmio_base + sil_port[ap->port_no].xfer_mode;
u32 tmp, dev_mode[2];
unsigned int i;

@ -318,9 +321,9 @@ static void sil_post_set_mode (struct ata_port *ap)
readl(addr); /* flush */
}

static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
static inline void __iomem *sil_scr_addr(struct ata_port *ap, unsigned int sc_reg)
{
unsigned long offset = ap->ioaddr.scr_addr;
void __iomem *offset = ap->ioaddr.scr_addr;

switch (sc_reg) {
case SCR_STATUS:
@ -339,7 +342,7 @@ static inline unsigned long sil_scr_addr(struct ata_port *ap, unsigned int sc_re

static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
void __iomem *mmio = sil_scr_addr(ap, sc_reg);
if (mmio)
return readl(mmio);
return 0xffffffffU;
@ -347,7 +350,7 @@ static u32 sil_scr_read (struct ata_port *ap, unsigned int sc_reg)

static void sil_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
void __iomem *mmio = (void __iomem *) sil_scr_addr(ap, sc_reg);
void __iomem *mmio = sil_scr_addr(ap, sc_reg);
if (mmio)
writel(val, mmio);
}
@ -442,7 +445,7 @@ static void sil_host_intr(struct ata_port *ap, u32 bmdma2)
static irqreturn_t sil_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
void __iomem *mmio_base = host->mmio_base;
void __iomem *mmio_base = host->iomap[SIL_MMIO_BAR];
int handled = 0;
int i;

@ -474,7 +477,7 @@ static irqreturn_t sil_interrupt(int irq, void *dev_instance)

static void sil_freeze(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
u32 tmp;

/* global IRQ mask doesn't block SATA IRQ, turn off explicitly */
@ -489,7 +492,7 @@ static void sil_freeze(struct ata_port *ap)

static void sil_thaw(struct ata_port *ap)
{
void __iomem *mmio_base = ap->host->mmio_base;
void __iomem *mmio_base = ap->host->iomap[SIL_MMIO_BAR];
u32 tmp;

/* clear IRQ */
@ -621,7 +624,6 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
static int printed_version;
struct device *dev = &pdev->dev;
struct ata_probe_ent *probe_ent;
unsigned long base;
void __iomem *mmio_base;
int rc;
unsigned int i;
@ -633,11 +635,11 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;

rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
rc = pcim_iomap_regions(pdev, 1 << SIL_MMIO_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
}

rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
@ -662,20 +664,16 @@ static int sil_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->port_flags = sil_port_info[ent->driver_data].flags;

mmio_base = pcim_iomap(pdev, 5, 0);
if (mmio_base == NULL)
return -ENOMEM;
probe_ent->iomap = pcim_iomap_table(pdev);

probe_ent->mmio_base = mmio_base;

base = (unsigned long) mmio_base;
mmio_base = probe_ent->iomap[SIL_MMIO_BAR];

for (i = 0; i < probe_ent->n_ports; i++) {
probe_ent->port[i].cmd_addr = base + sil_port[i].tf;
probe_ent->port[i].cmd_addr = mmio_base + sil_port[i].tf;
probe_ent->port[i].altstatus_addr =
probe_ent->port[i].ctl_addr = base + sil_port[i].ctl;
probe_ent->port[i].bmdma_addr = base + sil_port[i].bmdma;
probe_ent->port[i].scr_addr = base + sil_port[i].scr;
probe_ent->port[i].ctl_addr = mmio_base + sil_port[i].ctl;
probe_ent->port[i].bmdma_addr = mmio_base + sil_port[i].bmdma;
probe_ent->port[i].scr_addr = mmio_base + sil_port[i].scr;
ata_std_ports(&probe_ent->port[i]);
}

@ -702,7 +700,7 @@ static int sil_pci_device_resume(struct pci_dev *pdev)
return rc;

sil_init_controller(pdev, host->n_ports, host->ports[0]->flags,
host->mmio_base);
host->iomap[SIL_MMIO_BAR]);
ata_host_resume(host);

return 0;

@ -60,6 +60,9 @@ struct sil24_port_multiplier {
};

enum {
SIL24_HOST_BAR = 0,
SIL24_PORT_BAR = 2,

/*
* Global controller registers (128 bytes @ BAR0)
*/
@ -320,12 +323,6 @@ struct sil24_port_priv {
struct ata_taskfile tf; /* Cached taskfile registers */
};

/* ap->host->private_data */
struct sil24_host_priv {
void __iomem *host_base; /* global controller control (128 bytes @BAR0) */
void __iomem *port_base; /* port registers (4 * 8192 bytes @BAR2) */
};

static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev);
static u8 sil24_check_status(struct ata_port *ap);
static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg);
@ -462,7 +459,7 @@ static int sil24_tag(int tag)

static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;

if (dev->cdb_len == 16)
writel(PORT_CS_CDB16, port + PORT_CTRL_STAT);
@ -473,7 +470,7 @@ static void sil24_dev_config(struct ata_port *ap, struct ata_device *dev)
static inline void sil24_update_tf(struct ata_port *ap)
{
struct sil24_port_priv *pp = ap->private_data;
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;
struct sil24_prb __iomem *prb = port;
u8 fis[6 * 4];

@ -496,7 +493,7 @@ static int sil24_scr_map[] = {

static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)
{
void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
void __iomem *scr_addr = ap->ioaddr.scr_addr;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
void __iomem *addr;
addr = scr_addr + sil24_scr_map[sc_reg] * 4;
@ -507,7 +504,7 @@ static u32 sil24_scr_read(struct ata_port *ap, unsigned sc_reg)

static void sil24_scr_write(struct ata_port *ap, unsigned sc_reg, u32 val)
{
void __iomem *scr_addr = (void __iomem *)ap->ioaddr.scr_addr;
void __iomem *scr_addr = ap->ioaddr.scr_addr;
if (sc_reg < ARRAY_SIZE(sil24_scr_map)) {
void __iomem *addr;
addr = scr_addr + sil24_scr_map[sc_reg] * 4;
@ -523,7 +520,7 @@ static void sil24_tf_read(struct ata_port *ap, struct ata_taskfile *tf)

static int sil24_init_port(struct ata_port *ap)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;
u32 tmp;

writel(PORT_CS_INIT, port + PORT_CTRL_STAT);
@ -539,7 +536,7 @@ static int sil24_init_port(struct ata_port *ap)

static int sil24_softreset(struct ata_port *ap, unsigned int *class)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;
struct sil24_port_priv *pp = ap->private_data;
struct sil24_prb *prb = &pp->cmd_block[0].ata.prb;
dma_addr_t paddr = pp->cmd_block_dma;
@ -599,7 +596,7 @@ static int sil24_softreset(struct ata_port *ap, unsigned int *class)

static int sil24_hardreset(struct ata_port *ap, unsigned int *class)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;
const char *reason;
int tout_msec, rc;
u32 tmp;
@ -716,7 +713,7 @@ static unsigned int sil24_qc_issue(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct sil24_port_priv *pp = ap->private_data;
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;
unsigned int tag = sil24_tag(qc->tag);
dma_addr_t paddr;
void __iomem *activate;
@ -737,7 +734,7 @@ static void sil24_irq_clear(struct ata_port *ap)

static void sil24_freeze(struct ata_port *ap)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;

/* Port-wide IRQ mask in HOST_CTRL doesn't really work, clear
* PORT_IRQ_ENABLE instead.
@ -747,7 +744,7 @@ static void sil24_freeze(struct ata_port *ap)

static void sil24_thaw(struct ata_port *ap)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;
u32 tmp;

/* clear IRQ */
@ -760,7 +757,7 @@ static void sil24_thaw(struct ata_port *ap)

static void sil24_error_intr(struct ata_port *ap)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;
struct ata_eh_info *ehi = &ap->eh_info;
int freeze = 0;
u32 irq_stat;
@ -838,7 +835,7 @@ static void sil24_finish_qc(struct ata_queued_cmd *qc)

static inline void sil24_host_intr(struct ata_port *ap)
{
void __iomem *port = (void __iomem *)ap->ioaddr.cmd_addr;
void __iomem *port = ap->ioaddr.cmd_addr;
u32 slot_stat, qc_active;
int rc;

@ -873,12 +870,12 @@ static inline void sil24_host_intr(struct ata_port *ap)
static irqreturn_t sil24_interrupt(int irq, void *dev_instance)
{
struct ata_host *host = dev_instance;
struct sil24_host_priv *hpriv = host->private_data;
void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
unsigned handled = 0;
u32 status;
int i;

status = readl(hpriv->host_base + HOST_IRQ_STAT);
status = readl(host_base + HOST_IRQ_STAT);

if (status == 0xffffffff) {
printk(KERN_ERR DRV_NAME ": IRQ status == 0xffffffff, "
@ -1031,7 +1028,6 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
unsigned int board_id = (unsigned int)ent->driver_data;
struct ata_port_info *pinfo = &sil24_port_info[board_id];
struct ata_probe_ent *probe_ent;
struct sil24_host_priv *hpriv;
void __iomem *host_base;
void __iomem *port_base;
int i, rc;
@ -1044,20 +1040,15 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;

rc = pci_request_regions(pdev, DRV_NAME);
rc = pcim_iomap_regions(pdev,
(1 << SIL24_HOST_BAR) | (1 << SIL24_PORT_BAR),
DRV_NAME);
if (rc)
return rc;

/* map mmio registers */
host_base = pcim_iomap(pdev, 0, 0);
port_base = pcim_iomap(pdev, 2, 0);
if (!host_base || !port_base)
return -ENOMEM;

/* allocate & init probe_ent and hpriv */
/* allocate & init probe_ent */
probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!probe_ent || !hpriv)
if (!probe_ent)
return -ENOMEM;

probe_ent->dev = pci_dev_to_dev(pdev);
@ -1073,10 +1064,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)

probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->private_data = hpriv;
probe_ent->iomap = pcim_iomap_table(pdev);

hpriv->host_base = host_base;
hpriv->port_base = port_base;
host_base = probe_ent->iomap[SIL24_HOST_BAR];
port_base = probe_ent->iomap[SIL24_PORT_BAR];

/*
* Configure the device
@ -1118,11 +1109,10 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}

for (i = 0; i < probe_ent->n_ports; i++) {
unsigned long portu =
(unsigned long)port_base + i * PORT_REGS_SIZE;
void __iomem *port = port_base + i * PORT_REGS_SIZE;

probe_ent->port[i].cmd_addr = portu;
probe_ent->port[i].scr_addr = portu + PORT_SCONTROL;
probe_ent->port[i].cmd_addr = port;
probe_ent->port[i].scr_addr = port + PORT_SCONTROL;

ata_std_ports(&probe_ent->port[i]);
}
@ -1143,7 +1133,8 @@ static int sil24_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
static int sil24_pci_device_resume(struct pci_dev *pdev)
{
struct ata_host *host = dev_get_drvdata(&pdev->dev);
struct sil24_host_priv *hpriv = host->private_data;
void __iomem *host_base = host->iomap[SIL24_HOST_BAR];
void __iomem *port_base = host->iomap[SIL24_PORT_BAR];
int rc;

rc = ata_pci_device_do_resume(pdev);
@ -1151,10 +1142,10 @@ static int sil24_pci_device_resume(struct pci_dev *pdev)
return rc;

if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND)
writel(HOST_CTRL_GLOBAL_RST, hpriv->host_base + HOST_CTRL);
writel(HOST_CTRL_GLOBAL_RST, host_base + HOST_CTRL);

sil24_init_controller(pdev, host->n_ports, host->ports[0]->flags,
hpriv->host_base, hpriv->port_base);
host_base, port_base);

ata_host_resume(host);

@ -117,7 +117,7 @@ static const struct ata_port_operations sis_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
@ -223,11 +223,11 @@ static u32 sis_scr_read (struct ata_port *ap, unsigned int sc_reg)

pci_read_config_byte(pdev, SIS_PMR, &pmr);

val = inl(ap->ioaddr.scr_addr + (sc_reg * 4));
val = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4));

if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
(pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
val2 = inl(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);
val2 = ioread32(ap->ioaddr.scr_addr + (sc_reg * 4) + 0x10);

return (val | val2) & 0xfffffffb;
}
@ -245,10 +245,10 @@ static void sis_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
if (ap->flags & SIS_FLAG_CFGSCR)
sis_scr_cfg_write(ap, sc_reg, val);
else {
outl(val, ap->ioaddr.scr_addr + (sc_reg * 4));
iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4));
if ((pdev->device == 0x0182) || (pdev->device == 0x0183) || (pdev->device == 0x1182) ||
(pdev->device == 0x1183) || (pmr & SIS_PMR_COMBINED))
outl(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
iowrite32(val, ap->ioaddr.scr_addr + (sc_reg * 4)+0x10);
}
}

@ -353,10 +353,14 @@ static int sis_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
return -ENOMEM;

if (!(probe_ent->port_flags & SIS_FLAG_CFGSCR)) {
probe_ent->port[0].scr_addr =
pci_resource_start(pdev, SIS_SCR_PCI_BAR);
probe_ent->port[1].scr_addr =
pci_resource_start(pdev, SIS_SCR_PCI_BAR) + port2_start;
void *mmio;

mmio = pcim_iomap(pdev, SIS_SCR_PCI_BAR, 0);
if (!mmio)
return -ENOMEM;

probe_ent->port[0].scr_addr = mmio;
probe_ent->port[1].scr_addr = mmio + port2_start;
}

pci_set_master(pdev);

@ -135,31 +135,31 @@ static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

if (tf->ctl != ap->last_ctl) {
writeb(tf->ctl, (void __iomem *) ioaddr->ctl_addr);
writeb(tf->ctl, ioaddr->ctl_addr);
ap->last_ctl = tf->ctl;
ata_wait_idle(ap);
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
writew(tf->feature | (((u16)tf->hob_feature) << 8),
(void __iomem *) ioaddr->feature_addr);
ioaddr->feature_addr);
writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
(void __iomem *) ioaddr->nsect_addr);
ioaddr->nsect_addr);
writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
(void __iomem *) ioaddr->lbal_addr);
ioaddr->lbal_addr);
writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
(void __iomem *) ioaddr->lbam_addr);
ioaddr->lbam_addr);
writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
(void __iomem *) ioaddr->lbah_addr);
ioaddr->lbah_addr);
} else if (is_addr) {
writew(tf->feature, (void __iomem *) ioaddr->feature_addr);
writew(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
writew(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
writew(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
writew(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
writew(tf->feature, ioaddr->feature_addr);
writew(tf->nsect, ioaddr->nsect_addr);
writew(tf->lbal, ioaddr->lbal_addr);
writew(tf->lbam, ioaddr->lbam_addr);
writew(tf->lbah, ioaddr->lbah_addr);
}

if (tf->flags & ATA_TFLAG_DEVICE)
writeb(tf->device, (void __iomem *) ioaddr->device_addr);
writeb(tf->device, ioaddr->device_addr);

ata_wait_idle(ap);
}
@ -171,12 +171,12 @@ static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
u16 nsect, lbal, lbam, lbah, feature;

tf->command = k2_stat_check_status(ap);
tf->device = readw((void __iomem *)ioaddr->device_addr);
feature = readw((void __iomem *)ioaddr->error_addr);
nsect = readw((void __iomem *)ioaddr->nsect_addr);
lbal = readw((void __iomem *)ioaddr->lbal_addr);
lbam = readw((void __iomem *)ioaddr->lbam_addr);
lbah = readw((void __iomem *)ioaddr->lbah_addr);
tf->device = readw(ioaddr->device_addr);
feature = readw(ioaddr->error_addr);
nsect = readw(ioaddr->nsect_addr);
lbal = readw(ioaddr->lbal_addr);
lbam = readw(ioaddr->lbam_addr);
lbah = readw(ioaddr->lbah_addr);

tf->feature = feature;
tf->nsect = nsect;
@ -349,7 +349,7 @@ static const struct ata_port_operations k2_sata_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
@ -361,7 +361,7 @@ static const struct ata_port_operations k2_sata_ops = {
.port_start = ata_port_start,
};

static void k2_sata_setup_port(struct ata_ioports *port, unsigned long base)
static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET;
port->data_addr = base + K2_SATA_TF_DATA_OFFSET;
@ -386,7 +386,6 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
static int printed_version;
struct device *dev = &pdev->dev;
struct ata_probe_ent *probe_ent;
unsigned long base;
void __iomem *mmio_base;
const struct k2_board_info *board_info =
&k2_board_info[ent->driver_data];
@ -410,12 +409,12 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
if (pci_resource_len(pdev, 5) == 0)
return -ENODEV;

/* Request PCI regions */
rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
/* Request and iomap PCI regions */
rc = pcim_iomap_regions(pdev, 1 << 5, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
}

rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
@ -431,10 +430,30 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
probe_ent->dev = pci_dev_to_dev(pdev);
INIT_LIST_HEAD(&probe_ent->node);

mmio_base = pcim_iomap(pdev, 5, 0);
if (mmio_base == NULL)
return -ENOMEM;
base = (unsigned long) mmio_base;
probe_ent->sht = &k2_sata_sht;
probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | board_info->port_flags;
probe_ent->port_ops = &k2_sata_ops;
probe_ent->n_ports = 4;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->iomap = pcim_iomap_table(pdev);

/* We don't care much about the PIO/UDMA masks, but the core won't like us
* if we don't fill these
*/
probe_ent->pio_mask = 0x1f;
probe_ent->mwdma_mask = 0x7;
probe_ent->udma_mask = 0x7f;

mmio_base = probe_ent->iomap[5];

/* different controllers have different number of ports - currently 4 or 8 */
/* All ports are on the same function. Multi-function device is no
* longer available. This should not be seen in any system. */
for (i = 0; i < board_info->n_ports; i++)
k2_sata_setup_port(&probe_ent->port[i],
mmio_base + i * K2_SATA_PORT_OFFSET);

/* Clear a magic bit in SCR1 according to Darwin, those help
* some funky seagate drives (though so far, those were already
@ -447,28 +466,6 @@ static int k2_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *e
writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET);
writel(0x0, mmio_base + K2_SATA_SIM_OFFSET);

probe_ent->sht = &k2_sata_sht;
probe_ent->port_flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
ATA_FLAG_MMIO | board_info->port_flags;
probe_ent->port_ops = &k2_sata_ops;
probe_ent->n_ports = 4;
probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->mmio_base = mmio_base;

/* We don't care much about the PIO/UDMA masks, but the core won't like us
* if we don't fill these
*/
probe_ent->pio_mask = 0x1f;
probe_ent->mwdma_mask = 0x7;
probe_ent->udma_mask = 0x7f;

/* different controllers have different number of ports - currently 4 or 8 */
/* All ports are on the same function. Multi-function device is no
* longer available. This should not be seen in any system. */
for (i = 0; i < board_info->n_ports; i++)
k2_sata_setup_port(&probe_ent->port[i], base + i * K2_SATA_PORT_OFFSET);

pci_set_master(pdev);

if (!ata_device_add(probe_ent))

@ -49,6 +49,9 @@


enum {
PDC_MMIO_BAR = 3,
PDC_DIMM_BAR = 4,

PDC_PRD_TBL = 0x44, /* Direct command DMA table addr */

PDC_PKT_SUBMIT = 0x40, /* Command packet pointer addr */
@ -137,8 +140,6 @@ struct pdc_port_priv {
};

struct pdc_host_priv {
void __iomem *dimm_mmio;

unsigned int doing_hdma;
unsigned int hdma_prod;
unsigned int hdma_cons;
@ -202,7 +203,7 @@ static const struct ata_port_operations pdc_20621_ops = {
.phy_reset = pdc_20621_phy_reset,
.qc_prep = pdc20621_qc_prep,
.qc_issue = pdc20621_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.eng_timeout = pdc_eng_timeout,
.irq_handler = pdc20621_interrupt,
.irq_clear = pdc20621_irq_clear,
@ -411,9 +412,8 @@ static void pdc20621_dma_prep(struct ata_queued_cmd *qc)
struct scatterlist *sg;
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host->mmio_base;
struct pdc_host_priv *hpriv = ap->host->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
unsigned int portno = ap->port_no;
unsigned int i, idx, total_len = 0, sgt_len;
u32 *buf = (u32 *) &pp->dimm_buf[PDC_DIMM_HEADER_SZ];
@ -472,9 +472,8 @@ static void pdc20621_nodata_prep(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
struct pdc_port_priv *pp = ap->private_data;
void __iomem *mmio = ap->host->mmio_base;
struct pdc_host_priv *hpriv = ap->host->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
void __iomem *mmio = ap->host->iomap[PDC_MMIO_BAR];
void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];
unsigned int portno = ap->port_no;
unsigned int i;

@ -524,7 +523,7 @@ static void __pdc20621_push_hdma(struct ata_queued_cmd *qc,
{
struct ata_port *ap = qc->ap;
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@ -578,8 +577,7 @@ static void pdc20621_dump_hdma(struct ata_queued_cmd *qc)
{
struct ata_port *ap = qc->ap;
unsigned int port_no = ap->port_no;
struct pdc_host_priv *hpriv = ap->host->private_data;
void *dimm_mmio = hpriv->dimm_mmio;
void __iomem *dimm_mmio = ap->host->iomap[PDC_DIMM_BAR];

dimm_mmio += (port_no * PDC_DIMM_WINDOW_STEP);
dimm_mmio += PDC_DIMM_HOST_PKT;
@ -598,7 +596,7 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
struct ata_port *ap = qc->ap;
struct ata_host *host = ap->host;
unsigned int port_no = ap->port_no;
void __iomem *mmio = host->mmio_base;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];
unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
u8 seq = (u8) (port_no + 1);
unsigned int port_ofs;
@ -627,8 +625,8 @@ static void pdc20621_packet_start(struct ata_queued_cmd *qc)
readl(mmio + PDC_20621_SEQCTL + (seq * 4)); /* flush */

writel(port_ofs + PDC_DIMM_ATA_PKT,
(void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
VPRINTK("submitted ofs 0x%x (%u), seq %u\n",
port_ofs + PDC_DIMM_ATA_PKT,
port_ofs + PDC_DIMM_ATA_PKT,
@ -706,8 +704,8 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
writel(0x00000001, mmio + PDC_20621_SEQCTL + (seq * 4));
readl(mmio + PDC_20621_SEQCTL + (seq * 4));
writel(port_ofs + PDC_DIMM_ATA_PKT,
(void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl((void __iomem *) ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
readl(ap->ioaddr.cmd_addr + PDC_PKT_SUBMIT);
}

/* step two - execute ATA command */
@ -740,7 +738,7 @@ static inline unsigned int pdc20621_host_intr( struct ata_port *ap,
static void pdc20621_irq_clear(struct ata_port *ap)
{
struct ata_host *host = ap->host;
void __iomem *mmio = host->mmio_base;
void __iomem *mmio = host->iomap[PDC_MMIO_BAR];

mmio += PDC_CHIP0_OFS;

@ -758,12 +756,12 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance)

VPRINTK("ENTER\n");

if (!host || !host->mmio_base) {
if (!host || !host->iomap[PDC_MMIO_BAR]) {
VPRINTK("QUICK EXIT\n");
return IRQ_NONE;
}

mmio_base = host->mmio_base;
mmio_base = host->iomap[PDC_MMIO_BAR];

/* reading should also clear interrupts */
mmio_base += PDC_CHIP0_OFS;
@ -864,7 +862,7 @@ static void pdc_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile
}


static void pdc_sata_setup_port(struct ata_ioports *port, unsigned long base)
static void pdc_sata_setup_port(struct ata_ioports *port, void __iomem *base)
{
port->cmd_addr = base;
port->data_addr = base;
@ -890,9 +888,8 @@ static void pdc20621_get_from_dimm(struct ata_probe_ent *pe, void *psource,
u16 idx;
u8 page_mask;
long dist;
void __iomem *mmio = pe->mmio_base;
struct pdc_host_priv *hpriv = pe->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
void __iomem *dimm_mmio = pe->iomap[PDC_DIMM_BAR];

/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@ -946,9 +943,8 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
u16 idx;
u8 page_mask;
long dist;
void __iomem *mmio = pe->mmio_base;
struct pdc_host_priv *hpriv = pe->private_data;
void __iomem *dimm_mmio = hpriv->dimm_mmio;
void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
void __iomem *dimm_mmio = pe->iomap[PDC_DIMM_BAR];

/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@ -993,7 +989,7 @@ static void pdc20621_put_to_dimm(struct ata_probe_ent *pe, void *psource,
static unsigned int pdc20621_i2c_read(struct ata_probe_ent *pe, u32 device,
u32 subaddr, u32 *pdata)
{
void __iomem *mmio = pe->mmio_base;
void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
u32 i2creg = 0;
u32 status;
u32 count =0;
@ -1052,7 +1048,7 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
u32 data = 0;
int size, i;
u8 bdimmsize;
void __iomem *mmio = pe->mmio_base;
void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];
static const struct {
unsigned int reg;
unsigned int ofs;
@ -1114,8 +1110,8 @@ static int pdc20621_prog_dimm0(struct ata_probe_ent *pe)
static unsigned int pdc20621_prog_dimm_global(struct ata_probe_ent *pe)
{
u32 data, spd0;
int error, i;
void __iomem *mmio = pe->mmio_base;
int error, i;
void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];

/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@ -1169,7 +1165,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
u32 ticks=0;
u32 clock=0;
u32 fparam=0;
void __iomem *mmio = pe->mmio_base;
void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];

/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@ -1293,7 +1289,7 @@ static unsigned int pdc20621_dimm_init(struct ata_probe_ent *pe)
static void pdc_20621_init(struct ata_probe_ent *pe)
{
u32 tmp;
void __iomem *mmio = pe->mmio_base;
void __iomem *mmio = pe->iomap[PDC_MMIO_BAR];

/* hard-code chip #0 */
mmio += PDC_CHIP0_OFS;
@ -1325,9 +1321,7 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
{
static int printed_version;
struct ata_probe_ent *probe_ent;
unsigned long base;
void __iomem *mmio_base;
void __iomem *dimm_mmio;
void __iomem *base;
struct pdc_host_priv *hpriv;
unsigned int board_idx = (unsigned int) ent->driver_data;
int rc;
@ -1339,11 +1333,12 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
if (rc)
return rc;

rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
rc = pcim_iomap_regions(pdev, (1 << PDC_MMIO_BAR) | (1 << PDC_DIMM_BAR),
DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
}

rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
if (rc)
@ -1359,21 +1354,10 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *
probe_ent->dev = pci_dev_to_dev(pdev);
INIT_LIST_HEAD(&probe_ent->node);

mmio_base = pcim_iomap(pdev, 3, 0);
if (mmio_base == NULL)
return -ENOMEM;
base = (unsigned long) mmio_base;

hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;

dimm_mmio = pcim_iomap(pdev, 4, 0);
if (!dimm_mmio)
return -ENOMEM;

hpriv->dimm_mmio = dimm_mmio;

probe_ent->sht = pdc_port_info[board_idx].sht;
probe_ent->port_flags = pdc_port_info[board_idx].flags;
probe_ent->pio_mask = pdc_port_info[board_idx].pio_mask;
@ -1383,10 +1367,10 @@ static int pdc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *

probe_ent->irq = pdev->irq;
probe_ent->irq_flags = IRQF_SHARED;
probe_ent->mmio_base = mmio_base;
probe_ent->iomap = pcim_iomap_table(pdev);

probe_ent->private_data = hpriv;
base += PDC_CHIP0_OFS;
base = probe_ent->iomap[PDC_MMIO_BAR] + PDC_CHIP0_OFS;

probe_ent->n_ports = 4;
pdc_sata_setup_port(&probe_ent->port[0], base + 0x200);

@ -108,7 +108,7 @@ static const struct ata_port_operations uli_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
@ -188,6 +188,7 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
int rc;
unsigned int board_idx = (unsigned int) ent->driver_data;
struct uli_priv *hpriv;
void __iomem * const *iomap;

if (!printed_version++)
dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
@ -220,24 +221,26 @@ static int uli_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)

probe_ent->private_data = hpriv;

iomap = pcim_iomap_table(pdev);

switch (board_idx) {
case uli_5287:
hpriv->scr_cfg_addr[0] = ULI5287_BASE;
hpriv->scr_cfg_addr[1] = ULI5287_BASE + ULI5287_OFFS;
probe_ent->n_ports = 4;

probe_ent->port[2].cmd_addr = pci_resource_start(pdev, 0) + 8;
probe_ent->port[2].cmd_addr = iomap[0] + 8;
probe_ent->port[2].altstatus_addr =
probe_ent->port[2].ctl_addr =
(pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS) + 4;
probe_ent->port[2].bmdma_addr = pci_resource_start(pdev, 4) + 16;
probe_ent->port[2].ctl_addr = (void __iomem *)
((unsigned long)iomap[1] | ATA_PCI_CTL_OFS) + 4;
probe_ent->port[2].bmdma_addr = iomap[4] + 16;
hpriv->scr_cfg_addr[2] = ULI5287_BASE + ULI5287_OFFS*4;

probe_ent->port[3].cmd_addr = pci_resource_start(pdev, 2) + 8;
probe_ent->port[3].cmd_addr = iomap[2] + 8;
probe_ent->port[3].altstatus_addr =
probe_ent->port[3].ctl_addr =
(pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS) + 4;
probe_ent->port[3].bmdma_addr = pci_resource_start(pdev, 4) + 24;
probe_ent->port[3].ctl_addr = (void __iomem *)
((unsigned long)iomap[3] | ATA_PCI_CTL_OFS) + 4;
probe_ent->port[3].bmdma_addr = iomap[4] + 24;
hpriv->scr_cfg_addr[3] = ULI5287_BASE + ULI5287_OFFS*5;

ata_std_ports(&probe_ent->port[2]);

@ -134,7 +134,7 @@ static const struct ata_port_operations vt6420_sata_ops = {

.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.freeze = svia_noop_freeze,
.thaw = ata_bmdma_thaw,
@ -166,7 +166,7 @@ static const struct ata_port_operations vt6421_pata_ops = {

.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
@ -195,7 +195,7 @@ static const struct ata_port_operations vt6421_sata_ops = {

.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_pio_data_xfer,
.data_xfer = ata_data_xfer,

.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
@ -230,14 +230,14 @@ static u32 svia_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
if (sc_reg > SCR_CONTROL)
return 0xffffffffU;
return inl(ap->ioaddr.scr_addr + (4 * sc_reg));
return ioread32(ap->ioaddr.scr_addr + (4 * sc_reg));
}

static void svia_scr_write (struct ata_port *ap, unsigned int sc_reg, u32 val)
{
if (sc_reg > SCR_CONTROL)
return;
outl(val, ap->ioaddr.scr_addr + (4 * sc_reg));
iowrite32(val, ap->ioaddr.scr_addr + (4 * sc_reg));
}

static void svia_noop_freeze(struct ata_port *ap)
@ -387,31 +387,28 @@ static const unsigned int vt6421_bar_sizes[] = {
16, 16, 16, 16, 32, 128
};

static unsigned long svia_scr_addr(unsigned long addr, unsigned int port)
static void __iomem * svia_scr_addr(void __iomem *addr, unsigned int port)
{
return addr + (port * 128);
}

static unsigned long vt6421_scr_addr(unsigned long addr, unsigned int port)
static void __iomem * vt6421_scr_addr(void __iomem *addr, unsigned int port)
{
return addr + (port * 64);
}

static void vt6421_init_addrs(struct ata_probe_ent *probe_ent,
struct pci_dev *pdev,
unsigned int port)
void __iomem * const *iomap, unsigned int port)
{
unsigned long reg_addr = pci_resource_start(pdev, port);
unsigned long bmdma_addr = pci_resource_start(pdev, 4) + (port * 8);
unsigned long scr_addr;
void __iomem *reg_addr = iomap[port];
void __iomem *bmdma_addr = iomap[4] + (port * 8);

probe_ent->port[port].cmd_addr = reg_addr;
probe_ent->port[port].altstatus_addr =
probe_ent->port[port].ctl_addr = (reg_addr + 8) | ATA_PCI_CTL_OFS;
probe_ent->port[port].ctl_addr = (void __iomem *)
((unsigned long)(reg_addr + 8) | ATA_PCI_CTL_OFS);
probe_ent->port[port].bmdma_addr = bmdma_addr;

scr_addr = vt6421_scr_addr(pci_resource_start(pdev, 5), port);
probe_ent->port[port].scr_addr = scr_addr;
probe_ent->port[port].scr_addr = vt6421_scr_addr(iomap[5], port);

ata_std_ports(&probe_ent->port[port]);
}
@ -420,16 +417,16 @@ static struct ata_probe_ent *vt6420_init_probe_ent(struct pci_dev *pdev)
{
struct ata_probe_ent *probe_ent;
struct ata_port_info *ppi[2];
void __iomem * const *iomap;

ppi[0] = ppi[1] = &vt6420_port_info;
probe_ent = ata_pci_init_native_mode(pdev, ppi, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
if (!probe_ent)
return NULL;

probe_ent->port[0].scr_addr =
svia_scr_addr(pci_resource_start(pdev, 5), 0);
probe_ent->port[1].scr_addr =
svia_scr_addr(pci_resource_start(pdev, 5), 1);
iomap = pcim_iomap_table(pdev);
probe_ent->port[0].scr_addr = svia_scr_addr(iomap[5], 0);
probe_ent->port[1].scr_addr = svia_scr_addr(iomap[5], 1);

return probe_ent;
}
@ -458,7 +455,7 @@ static struct ata_probe_ent *vt6421_init_probe_ent(struct pci_dev *pdev)
probe_ent->udma_mask = 0x7f;

for (i = 0; i < N_PORTS; i++)
vt6421_init_addrs(probe_ent, pdev, i);
vt6421_init_addrs(probe_ent, pcim_iomap_table(pdev), i);

return probe_ent;
}
@ -519,7 +516,7 @@ static int svia_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;

rc = pci_request_regions(pdev, DRV_NAME);
rc = pcim_iomap_regions(pdev, 0x1f, DRV_NAME);
if (rc) {
pcim_pin_device(pdev);
return rc;

@ -50,6 +50,8 @@
#define DRV_VERSION "2.0"

enum {
VSC_MMIO_BAR = 0,

/* Interrupt register offsets (from chip base address) */
VSC_SATA_INT_STAT_OFFSET = 0x00,
VSC_SATA_INT_MASK_OFFSET = 0x04,
@ -104,7 +106,7 @@ static u32 vsc_sata_scr_read (struct ata_port *ap, unsigned int sc_reg)
{
if (sc_reg > SCR_CONTROL)
return 0xffffffffU;
return readl((void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
return readl(ap->ioaddr.scr_addr + (sc_reg * 4));
}


@ -113,7 +115,7 @@ static void vsc_sata_scr_write (struct ata_port *ap, unsigned int sc_reg,
{
if (sc_reg > SCR_CONTROL)
return;
writel(val, (void __iomem *) ap->ioaddr.scr_addr + (sc_reg * 4));
writel(val, ap->ioaddr.scr_addr + (sc_reg * 4));
}


@ -122,7 +124,7 @@ static void vsc_intr_mask_update(struct ata_port *ap, u8 ctl)
void __iomem *mask_addr;
u8 mask;

mask_addr = ap->host->mmio_base +
mask_addr = ap->host->iomap[VSC_MMIO_BAR] +
VSC_SATA_INT_MASK_OFFSET + ap->port_no;
mask = readb(mask_addr);
if (ctl & ATA_NIEN)
@ -149,25 +151,25 @@ static void vsc_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
}
if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
writew(tf->feature | (((u16)tf->hob_feature) << 8),
(void __iomem *) ioaddr->feature_addr);
ioaddr->feature_addr);
writew(tf->nsect | (((u16)tf->hob_nsect) << 8),
(void __iomem *) ioaddr->nsect_addr);
ioaddr->nsect_addr);
writew(tf->lbal | (((u16)tf->hob_lbal) << 8),
(void __iomem *) ioaddr->lbal_addr);
ioaddr->lbal_addr);
writew(tf->lbam | (((u16)tf->hob_lbam) << 8),
(void __iomem *) ioaddr->lbam_addr);
ioaddr->lbam_addr);
writew(tf->lbah | (((u16)tf->hob_lbah) << 8),
(void __iomem *) ioaddr->lbah_addr);
ioaddr->lbah_addr);
} else if (is_addr) {
writew(tf->feature, (void __iomem *) ioaddr->feature_addr);
writew(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
writew(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
writew(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
writew(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
writew(tf->feature, ioaddr->feature_addr);
writew(tf->nsect, ioaddr->nsect_addr);
writew(tf->lbal, ioaddr->lbal_addr);
writew(tf->lbam, ioaddr->lbam_addr);
writew(tf->lbah, ioaddr->lbah_addr);
}

if (tf->flags & ATA_TFLAG_DEVICE)
writeb(tf->device, (void __iomem *) ioaddr->device_addr);
writeb(tf->device, ioaddr->device_addr);

ata_wait_idle(ap);
}
@ -179,12 +181,12 @@ static void vsc_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
u16 nsect, lbal, lbam, lbah, feature;

tf->command = ata_check_status(ap);
tf->device = readw((void __iomem *) ioaddr->device_addr);
feature = readw((void __iomem *) ioaddr->error_addr);
nsect = readw((void __iomem *) ioaddr->nsect_addr);
lbal = readw((void __iomem *) ioaddr->lbal_addr);
lbam = readw((void __iomem *) ioaddr->lbam_addr);
lbah = readw((void __iomem *) ioaddr->lbah_addr);
tf->device = readw(ioaddr->device_addr);
feature = readw(ioaddr->error_addr);
nsect = readw(ioaddr->nsect_addr);
lbal = readw(ioaddr->lbal_addr);
lbam = readw(ioaddr->lbam_addr);
lbah = readw(ioaddr->lbah_addr);

tf->feature = feature;
tf->nsect = nsect;
@ -216,7 +218,8 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance)

spin_lock(&host->lock);

int_status = readl(host->mmio_base + VSC_SATA_INT_STAT_OFFSET);
int_status = readl(host->iomap[VSC_MMIO_BAR] +
VSC_SATA_INT_STAT_OFFSET);

for (i = 0; i < host->n_ports; i++) {
if (int_status & ((u32) 0xFF << (8 * i))) {
@ -300,7 +303,7 @@ static const struct ata_port_operations vsc_sata_ops = {
.bmdma_status = ata_bmdma_status,
.qc_prep = ata_qc_prep,
.qc_issue = ata_qc_issue_prot,
.data_xfer = ata_mmio_data_xfer,
.data_xfer = ata_data_xfer,
.freeze = ata_bmdma_freeze,
.thaw = ata_bmdma_thaw,
.error_handler = ata_bmdma_error_handler,
@ -312,7 +315,8 @@ static const struct ata_port_operations vsc_sata_ops = {
.port_start = ata_port_start,
};

static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned long base)
static void __devinit vsc_sata_setup_port(struct ata_ioports *port,
void __iomem *base)
{
port->cmd_addr = base + VSC_SATA_TF_CMD_OFFSET;
port->data_addr = base + VSC_SATA_TF_DATA_OFFSET;
@ -329,16 +333,15 @@ static void __devinit vsc_sata_setup_port(struct ata_ioports *port, unsigned lon
port->ctl_addr = base + VSC_SATA_TF_CTL_OFFSET;
port->bmdma_addr = base + VSC_SATA_DMA_CMD_OFFSET;
port->scr_addr = base + VSC_SATA_SCR_STATUS_OFFSET;
writel(0, (void __iomem *) base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
writel(0, (void __iomem *) base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
writel(0, base + VSC_SATA_UP_DESCRIPTOR_OFFSET);
writel(0, base + VSC_SATA_UP_DATA_BUFFER_OFFSET);
}


static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int printed_version;
struct ata_probe_ent *probe_ent = NULL;
unsigned long base;
struct ata_probe_ent *probe_ent;
void __iomem *mmio_base;
int rc;

@ -355,11 +358,11 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
if (pci_resource_len(pdev, 0) == 0)
return -ENODEV;

rc = pci_request_regions(pdev, DRV_NAME);
if (rc) {
rc = pcim_iomap_regions(pdev, 1 << VSC_MMIO_BAR, DRV_NAME);
if (rc == -EBUSY)
pcim_pin_device(pdev);
if (rc)
return rc;
}

/*
* Use 32 bit DMA mask, because 64 bit address support is poor.
@ -377,11 +380,6 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
probe_ent->dev = pci_dev_to_dev(pdev);
INIT_LIST_HEAD(&probe_ent->node);

mmio_base = pcim_iomap(pdev, 0, 0);
if (mmio_base == NULL)
return -ENOMEM;
base = (unsigned long) mmio_base;

/*
* Due to a bug in the chip, the default cache line size can't be used
*/
@ -398,7 +396,7 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
probe_ent->port_ops = &vsc_sata_ops;
probe_ent->n_ports = 4;
probe_ent->irq = pdev->irq;
probe_ent->mmio_base = mmio_base;
probe_ent->iomap = pcim_iomap_table(pdev);

/* We don't care much about the PIO/UDMA masks, but the core won't like us
* if we don't fill these
@ -407,11 +405,13 @@ static int __devinit vsc_sata_init_one (struct pci_dev *pdev, const struct pci_d
probe_ent->mwdma_mask = 0x07;
probe_ent->udma_mask = 0x7f;

mmio_base = probe_ent->iomap[VSC_MMIO_BAR];

/* We have 4 ports per PCI function */
vsc_sata_setup_port(&probe_ent->port[0], base + 1 * VSC_SATA_PORT_OFFSET);
vsc_sata_setup_port(&probe_ent->port[1], base + 2 * VSC_SATA_PORT_OFFSET);
vsc_sata_setup_port(&probe_ent->port[2], base + 3 * VSC_SATA_PORT_OFFSET);
vsc_sata_setup_port(&probe_ent->port[3], base + 4 * VSC_SATA_PORT_OFFSET);
vsc_sata_setup_port(&probe_ent->port[0], mmio_base + 1 * VSC_SATA_PORT_OFFSET);
vsc_sata_setup_port(&probe_ent->port[1], mmio_base + 2 * VSC_SATA_PORT_OFFSET);
vsc_sata_setup_port(&probe_ent->port[2], mmio_base + 3 * VSC_SATA_PORT_OFFSET);
vsc_sata_setup_port(&probe_ent->port[3], mmio_base + 4 * VSC_SATA_PORT_OFFSET);

pci_set_master(pdev);

@ -348,21 +348,21 @@ typedef int (*ata_reset_fn_t)(struct ata_port *ap, unsigned int *classes);
typedef void (*ata_postreset_fn_t)(struct ata_port *ap, unsigned int *classes);

struct ata_ioports {
unsigned long cmd_addr;
unsigned long data_addr;
unsigned long error_addr;
unsigned long feature_addr;
unsigned long nsect_addr;
unsigned long lbal_addr;
unsigned long lbam_addr;
unsigned long lbah_addr;
unsigned long device_addr;
unsigned long status_addr;
unsigned long command_addr;
unsigned long altstatus_addr;
unsigned long ctl_addr;
unsigned long bmdma_addr;
unsigned long scr_addr;
void __iomem *cmd_addr;
void __iomem *data_addr;
void __iomem *error_addr;
void __iomem *feature_addr;
void __iomem *nsect_addr;
void __iomem *lbal_addr;
void __iomem *lbam_addr;
void __iomem *lbah_addr;
void __iomem *device_addr;
void __iomem *status_addr;
void __iomem *command_addr;
void __iomem *altstatus_addr;
void __iomem *ctl_addr;
void __iomem *bmdma_addr;
void __iomem *scr_addr;
};

struct ata_probe_ent {
@ -381,7 +381,7 @@ struct ata_probe_ent {
unsigned int irq_flags;
unsigned long port_flags;
unsigned long _host_flags;
void __iomem *mmio_base;
void __iomem * const *iomap;
void *private_data;

/* port_info for the secondary port. Together with irq2, it's
@ -398,7 +398,7 @@ struct ata_host {
struct device *dev;
unsigned long irq;
unsigned long irq2;
void __iomem *mmio_base;
void __iomem * const *iomap;
unsigned int n_ports;
void *private_data;
const struct ata_port_operations *ops;
@ -768,12 +768,10 @@ extern u8 ata_altstatus(struct ata_port *ap);
extern void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf);
extern int ata_port_start (struct ata_port *ap);
extern irqreturn_t ata_interrupt (int irq, void *dev_instance);
extern void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
extern void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
extern void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
extern void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
extern void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
unsigned int buflen, int write_data);
extern void ata_qc_prep(struct ata_queued_cmd *qc);
extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc);
@ -1084,10 +1082,9 @@ static inline u8 ata_wait_idle(struct ata_port *ap)
u8 status = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 1000);

if (status != 0xff && (status & (ATA_BUSY | ATA_DRQ))) {
unsigned long l = ap->ioaddr.status_addr;
if (ata_msg_warn(ap))
printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%lX\n",
status, l);
printk(KERN_WARNING "ATA: abnormal status 0x%X on port 0x%p\n",
status, ap->ioaddr.status_addr);
}

return status;
@ -1172,20 +1169,11 @@ static inline u8 ata_irq_ack(struct ata_port *ap, unsigned int chk_drq)
printk(KERN_ERR "abnormal status 0x%X\n", status);

/* get controller status; clear intr, err bits */
if (ap->flags & ATA_FLAG_MMIO) {
void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
host_stat = readb(mmio + ATA_DMA_STATUS);
writeb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
mmio + ATA_DMA_STATUS);
host_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
iowrite8(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

post_stat = readb(mmio + ATA_DMA_STATUS);
} else {
host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
outb(host_stat | ATA_DMA_INTR | ATA_DMA_ERR,
ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

post_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
}
post_stat = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

if (ata_msg_intr(ap))
printk(KERN_INFO "%s: irq ack: host_stat 0x%X, new host_stat 0x%X, drv_stat 0x%X\n",
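
Taken together, the per-driver hunks above apply one probe-time pattern: request and ioremap the BARs through the managed pcim_* helpers shown in the diff, then read addresses out of the cached table that the core later exposes as host->iomap. Below is a minimal sketch of that pattern for a hypothetical LLD; MYDRV_MMIO_BAR, mydrv_init_one and DRV_NAME are illustrative placeholders, not names taken from this patch.

static int mydrv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	void __iomem * const *iomap;
	int rc;

	rc = pcim_enable_device(pdev);	/* managed enable, no explicit teardown */
	if (rc)
		return rc;

	/* request + ioremap the MMIO BAR in one managed call */
	rc = pcim_iomap_regions(pdev, 1 << MYDRV_MMIO_BAR, DRV_NAME);
	if (rc == -EBUSY)
		pcim_pin_device(pdev);
	if (rc)
		return rc;

	/* cached mapping table, indexed by BAR number */
	iomap = pcim_iomap_table(pdev);

	/* ... fill probe_ent, set probe_ent->iomap = iomap, set up ports
	 * from iomap[MYDRV_MMIO_BAR] just as the drivers above do ... */
	return 0;
}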