VDMA patch
This patch uses the PXA DMA engine to transfer data from the IDE data register.
It results in faster transfer rates and less CPU load. DMA can be
enabled/disabled
in the usual way (hdparm -d or the ide=nodma boot parameter). The current version is for
2.6.21;
I didn't port it to later kernels.
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/drivers/ide/arm/ide-vdma-pxa2xx.c linux-2.6.21/drivers/ide/arm/ide-vdma-pxa2xx.c
--- linux-2.6.21-orig/drivers/ide/arm/ide-vdma-pxa2xx.c 1970-01-01 03:00:00.000000000 +0300
+++ linux-2.6.21/drivers/ide/arm/ide-vdma-pxa2xx.c 2010-09-22 19:46:44.000000000 +0400
@@ -0,0 +1,430 @@
+/*
+ * Support for "virtual" DMA transfers using the DMA controller of pxa2xx processors.
+ * Currently this code is only tested on PXA270 processor.
+ *
+ * Copyright 2006 Kirill Kornilov
+ *
+ */
+
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/major.h>
+#include <linux/errno.h>
+#include <linux/genhd.h>
+#include <linux/blkpg.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/hdreg.h>
+#include <linux/ide.h>
+#include <linux/bitops.h>
+
+#include <asm/byteorder.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#include <asm/arch/system.h>
+#include <asm/arch/dma.h>
+
+#include "ide-vdma.h"
+
+struct ide_vdma_state {
+ int dmach;
+};
+
+unsigned ide_vdma_pxa27x_burst_size = DCMD_BURST8;
+EXPORT_SYMBOL(ide_vdma_pxa27x_burst_size);
+
+static ide_startstop_t ide_vdma_in_intr (ide_drive_t *drive);
+static ide_startstop_t ide_vdma_out_intr (ide_drive_t *drive);
+static int ide_vdma_end (ide_drive_t *drive);
+
+static ide_startstop_t ide_vdma_error(ide_drive_t *drive, struct request *rq,
+ const char *s, u8 stat)
+{
+ ide_hwif_t *hwif = drive->hwif;
+
+ if(drive->waiting_for_dma) ide_vdma_end(drive);
+ if (rq->bio) {
+ int sectors = hwif->nsect - hwif->nleft - 1;
+
+ if (sectors > 0) {
+ ide_driver_t *drv;
+
+ drv = *(ide_driver_t **)rq->rq_disk->private_data;
+ drv->end_request(drive, 1, sectors);
+ }
+ }
+ return ide_error(drive, s, stat);
+}
+
+static void ide_vdma_end_request(ide_drive_t *drive, struct request *rq, u8 stat)
+{
+	if (rq->rq_disk) {
+		ide_driver_t *drv;
+
+		drv = *(ide_driver_t **)rq->rq_disk->private_data;
+		drv->end_request(drive, 1, rq->hard_nr_sectors);
+	} else
+		ide_end_request(drive, 1, rq->hard_nr_sectors);
+}
+
+static u8 ide_vdma_wait_drive_not_busy(ide_drive_t *drive)
+{
+	ide_hwif_t *hwif = HWIF(drive);
+	int retries = 100;
+	u8 stat;
+
+	/*
+	 * Last sector was transferred, wait until drive is ready.
+	 * This can take up to 10 usec, but we will wait max 1 ms
+	 * (drive_cmd_intr() waits that long).
+	 */
+	while (((stat = hwif->INB(IDE_STATUS_REG)) & BUSY_STAT) && retries--)
+		udelay(10);
+
+	if (retries < 0)
+		printk(KERN_ERR "%s: drive still BUSY!\n", drive->name);
+
+	return stat;
+}
+
+static void ide_vdma_intr(int channel, void *data)
+{
+	unsigned long flags;
+	unsigned dmach;
+	unsigned dcsr;
+	ide_hwif_t *hwif;
+	ide_hwgroup_t *hwgroup = (ide_hwgroup_t *)data;
+
+	spin_lock_irqsave(&ide_lock, flags);
+	hwif = hwgroup->hwif;
+	dmach = hwif->hw.dma;
+
+	dcsr = DCSR(dmach);
+
+	if(hwif->nleft || hwgroup->handler != ide_vdma_in_intr
+			|| !(dcsr&DCSR_ENDINTR)) {
+		/* stop dma, clear interrupt status */
+		DCSR(dmach) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
+		spin_unlock_irqrestore(&ide_lock, flags);
+		printk(KERN_ERR "ide_vdma_intr: unexpected interrupt! DCSR=0x%x, DTADR=0x%x, DCMD=0x%x\n",
+				dcsr, DTADR(dmach), DCMD(dmach));
+		return;
+	}
+
+	disable_irq_nosync(hwif->irq);
+	spin_unlock_irqrestore(&ide_lock, flags);
+	ide_intr(hwif->irq, data);
+	enable_irq(hwif->irq);
+}
+
+
+static void ide_vdma_setup_in(ide_hwif_t *hwif)
+{
+ unsigned dmach = hwif->hw.dma;
+ struct scatterlist *sg = hwif->sg_table;
+ unsigned int nleft = hwif->nleft;
+
+ DCSR(dmach) = DCSR_NODESC;
+ DSADR(dmach) = hwif->dma_base;
+ DTADR(dmach) = sg_dma_address(&sg[hwif->cursg]) + hwif->cursg_ofs*SECTOR_SIZE;
+ if(nleft == 1) DCMD(dmach) = DCMD_ENDIRQEN | DCMD_INCTRGADDR | ide_vdma_pxa27x_burst_size | SECTOR_SIZE;
+ else DCMD(dmach) = DCMD_INCTRGADDR | ide_vdma_pxa27x_burst_size | SECTOR_SIZE;
+
+ hwif->nleft = nleft-1; /* we don't support multisector transfers... */
+ hwif->cursg_ofs++;
+ if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
+ hwif->cursg++;
+ hwif->cursg_ofs = 0;
+ }
+
+ DCSR(dmach) |= DCSR_RUN;
+}
+
+static void ide_vdma_setup_out(ide_hwif_t *hwif)
+{
+ unsigned dmach = hwif->hw.dma;
+ struct scatterlist *sg = hwif->sg_table;
+ unsigned int nleft = hwif->nleft;
+
+ DCSR(dmach) = DCSR_NODESC;
+ DTADR(dmach) = hwif->dma_base;
+ DSADR(dmach) = sg_dma_address(&sg[hwif->cursg]) + hwif->cursg_ofs*SECTOR_SIZE;
+ DCMD(dmach) = DCMD_INCSRCADDR | ide_vdma_pxa27x_burst_size | SECTOR_SIZE;
+
+ hwif->nleft = nleft-1; /* we don't support multisector transfers... */
+ hwif->cursg_ofs++;
+ if ((hwif->cursg_ofs * SECTOR_SIZE) == sg[hwif->cursg].length) {
+ hwif->cursg++;
+ hwif->cursg_ofs = 0;
+ }
+
+ DCSR(dmach) |= DCSR_RUN;
+}
+
+static int ide_vdma_on(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ struct ide_vdma_state *st = hwif->hwif_data;
+
+ if(st == NULL) return -EINVAL;
+ if(st->dmach >= 0) return 0; /* already enabled */
+ hwif->hw.dma = pxa_request_dma("ide vdma", DMA_PRIO_LOW,
+ ide_vdma_intr, hwif->hwgroup);
+ if(hwif->hw.dma < 0) return -EBUSY;
+ st->dmach = hwif->hw.dma;
+ drive->using_dma = 1;
+ return 0;
+}
+
+static ide_startstop_t ide_vdma_in_intr (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = HWGROUP(drive)->rq;
+ u8 stat = hwif->INB(IDE_STATUS_REG);
+
+ if(hwif->nleft) {
+ if(!OK_STAT(stat, DATA_READY, BAD_R_STAT)) {
+ if (stat & (ERR_STAT | DRQ_STAT))
+ return ide_vdma_error(drive, rq, __FUNCTION__, stat);
+ /* No data yet, so wait for another IRQ. */
+ ide_set_handler(drive, &ide_vdma_in_intr, WAIT_WORSTCASE, NULL);
+ return ide_started;
+ }
+
+ ide_set_handler(drive, &ide_vdma_in_intr, WAIT_WORSTCASE, NULL);
+ ide_vdma_setup_in(hwif);
+ return ide_started;
+ }
+
+ ide_vdma_end(drive);
+
+ /* If it was the last datablock check status and finish transfer. */
+ stat = ide_vdma_wait_drive_not_busy(drive);
+ if (!OK_STAT(stat, 0, BAD_R_STAT))
+ return ide_vdma_error(drive, rq, __FUNCTION__, stat);
+ ide_vdma_end_request(drive, rq, stat);
+ return ide_stopped;
+}
+
+
+static ide_startstop_t ide_vdma_out_intr (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct request *rq = HWGROUP(drive)->rq;
+ u8 stat = hwif->INB(IDE_STATUS_REG);
+
+ if (!OK_STAT(stat, DRIVE_READY, drive->bad_wstat)) goto error;
+
+ /* Deal with unexpected ATA data phase. */
+ if (((stat & DRQ_STAT) == 0) ^ !hwif->nleft) goto error;
+
+
+ if (!hwif->nleft) {
+ ide_vdma_end(drive);
+ ide_vdma_end_request(drive, rq, stat);
+ return ide_stopped;
+ }
+
+ /* Still data left to transfer. */
+ ide_vdma_setup_out(hwif);
+ ide_set_handler(drive, &ide_vdma_out_intr, WAIT_WORSTCASE, NULL);
+
+ return ide_started;
+
+error:
+ return ide_vdma_error(drive, rq, __FUNCTION__, stat);
+}
+
+
+static void ide_vdma_off_quietly(ide_drive_t *drive)
+{
+ drive->using_dma = 0;
+}
+
+static void ide_vdma_host_on(ide_drive_t *drive)
+{
+}
+
+static void ide_vdma_host_off(ide_drive_t *drive)
+{
+}
+
+static int ide_vdma_check(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ struct ide_vdma_state *st = hwif->hwif_data;
+
+ if(st == NULL) return -EINVAL;
+ drive->vdma = 1;
+ if(st->dmach >= 0) return 0; /* already enabled */
+
+ if(hwif->autodma) return ide_vdma_on(drive);
+ return 0;
+}
+
+static int ide_vdma_setup(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ struct request *rq = hwif->hwgroup->rq;
+ struct scatterlist *sg = hwif->sg_table;
+
+ ide_init_sg_cmd(drive,rq);
+ ide_map_sg(drive, rq);
+
+ hwif->sg_dma_direction = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+
+ hwif->sg_nents = dma_map_sg(hwif->hw.dev, sg, hwif->sg_nents,
+ hwif->sg_dma_direction);
+ /*
+ * This doesn't mean we're actually waiting for dma, but
+ * we need it to enable dma error recovery.
+ */
+ drive->waiting_for_dma = 1;
+ return 0;
+}
+
+static int ide_vdma_test_irq(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ unsigned dcsr;
+ int dmach = hwif->hw.dma;
+
+
+ dcsr = DCSR(dmach);
+
+ /* We don't expect any interrupt, if DMA is running. */
+ if(dcsr & DCSR_RUN) return 0;
+
+ if(hwif->nleft == 0 && hwif->sg_dma_direction == DMA_FROM_DEVICE) {
+ /* Yes, we're expecting DMA interrupt... */
+ return (dcsr&DCSR_ENDINTR) != 0;
+ }
+
+ /*
+ * We expect normal IDE interrupt,
+ * if the drive is not busy, let the handler decide.
+ */
+ return((hwif->INB(IDE_STATUS_REG) & BUSY_STAT) == 0);
+}
+
+static void ide_vdma_exec_cmd(ide_drive_t *drive, u8 cmd)
+{
+ ide_handler_t *handler;
+
+ switch(cmd) {
+ case WIN_READ:
+ case WIN_READ_EXT:
+ handler = ide_vdma_in_intr;
+ break;
+ case WIN_WRITE:
+ case WIN_WRITE_EXT:
+ handler = ide_vdma_out_intr;
+ break;
+ default:
+ BUG();
+ }
+
+ ide_execute_command(drive, cmd, handler, WAIT_CMD, NULL);
+}
+
+int ide_vdma_lostirq (ide_drive_t *drive)
+{
+	printk(KERN_ERR "%s: VDMA: lost interrupt\n", drive->name);
+	return 1;
+}
+
+
+static void ide_vdma_start(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ ide_startstop_t startstop;
+
+ if(hwif->sg_dma_direction == DMA_TO_DEVICE) {
+ if (ide_wait_stat(&startstop, drive, DATA_READY,
+ drive->bad_wstat, WAIT_DRQ)) {
+ printk(KERN_ERR "%s: no DRQ after issuing WRITE%s\n",
+ drive->name,
+ drive->addressing ? "_EXT" : "");
+ return;
+ }
+ ide_vdma_setup_out(drive->hwif);
+ }
+}
+
+static int ide_vdma_end (ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = HWIF(drive);
+ int dmach = hwif->hw.dma;
+ unsigned dcsr;
+
+ drive->waiting_for_dma = 0;
+
+ dcsr = DCSR(dmach);
+ /* disable dma, clear interrupts */
+ DCSR(dmach) = DCSR_STARTINTR|DCSR_ENDINTR|DCSR_BUSERR;
+
+ dma_unmap_sg(hwif->hw.dev, hwif->sg_table, hwif->sg_nents,
+ hwif->sg_dma_direction);
+
+ return (dcsr & (DCSR_RUN|DCSR_BUSERR)) != 0;
+}
+
+
+static int ide_vdma_timeout(ide_drive_t *drive)
+{
+ printk(KERN_ERR "%s: timeout waiting for VDMA\n", drive->name);
+ if (ide_vdma_test_irq(drive)) return 0;
+ return ide_vdma_end(drive);
+}
+
+struct ide_vdma_state* ide_vdma_init(ide_hwif_t *hwif, unsigned long dma_base)
+{
+ struct ide_vdma_state *st;
+
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if(st == NULL) return NULL;
+ st->dmach = -1;
+
+ printk(" %s: VDMA capable%s\n", hwif->name,
+ noautodma ? "" : ", auto-enable");
+
+ hwif->dmatable_cpu = NULL;
+ hwif->dmatable_dma = 0;
+ hwif->autodma = !noautodma;
+
+ hwif->ide_dma_check = ide_vdma_check;
+ hwif->dma_host_off = ide_vdma_host_off;
+ hwif->dma_off_quietly = ide_vdma_off_quietly;
+ hwif->dma_host_on = ide_vdma_host_on;
+ hwif->ide_dma_on = ide_vdma_on;
+ hwif->dma_setup = ide_vdma_setup;
+ hwif->dma_exec_cmd = ide_vdma_exec_cmd;
+ hwif->ide_dma_lostirq = ide_vdma_lostirq;
+ hwif->ide_dma_test_irq = ide_vdma_test_irq;
+ hwif->dma_start = ide_vdma_start;
+ hwif->ide_dma_end = ide_vdma_end;
+ hwif->ide_dma_timeout = ide_vdma_timeout;
+
+ hwif->drives[0].autodma = hwif->autodma;
+
+ hwif->hwif_data = st;
+ hwif->dma_base = dma_base;
+
+ return st;
+}
+EXPORT_SYMBOL(ide_vdma_init);
+
+void ide_vdma_fini(struct ide_vdma_state* st)
+{
+ if(!st) return;
+
+ if(st->dmach != -1) pxa_free_dma(st->dmach);
+ kfree(st);
+}
+EXPORT_SYMBOL(ide_vdma_fini);
+/*
+ * vim:ts=8 cino=>8
+ */
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/drivers/ide/ide.c linux-2.6.21/drivers/ide/ide.c
--- linux-2.6.21-orig/drivers/ide/ide.c 2010-09-22 18:54:38.000000000 +0400
+++ linux-2.6.21/drivers/ide/ide.c 2010-09-22 19:34:51.000000000 +0400
@@ -1127,7 +1127,7 @@
ide_hwif_t *hwif = drive->hwif;
int err = -EPERM;
- if (!drive->id || !(drive->id->capability & 1))
+ if ((!drive->id || !(drive->id->capability & 1)) && !drive->vdma)
goto out;
if (hwif->ide_dma_check == NULL)
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/drivers/ide/ide-io.c linux-2.6.21/drivers/ide/ide-io.c
--- linux-2.6.21-orig/drivers/ide/ide-io.c 2010-09-22 18:54:38.000000000 +0400
+++ linux-2.6.21/drivers/ide/ide-io.c 2010-09-22 18:55:13.000000000 +0400
@@ -222,7 +222,7 @@
* we could be smarter and check for current xfer_speed
* in struct drive etc...
*/
- if ((drive->id->capability & 1) == 0)
+ if ((drive->id->capability & 1) == 0 && drive->vdma == 0)
break;
if (drive->hwif->ide_dma_check == NULL)
break;
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/drivers/ide/ide-vdma.h linux-2.6.21/drivers/ide/ide-vdma.h
--- linux-2.6.21-orig/drivers/ide/ide-vdma.h 1970-01-01 03:00:00.000000000 +0300
+++ linux-2.6.21/drivers/ide/ide-vdma.h 2006-06-13 19:38:15.000000000 +0400
@@ -0,0 +1,19 @@
+/*
+ * Virtual DMA (PIO over DMA) handlers
+ *
+ * Copyright 2006 Kirill Kornilov
+ *
+ */
+
+#ifndef _VDMA_H
+#define _VDMA_H
+
+struct ide_vdma_state;
+
+/* initialize hwif for virtual DMA */
+extern struct ide_vdma_state *ide_vdma_init(ide_hwif_t *hwif, unsigned long dma_base);
+
+/* release resources */
+extern void ide_vdma_fini(struct ide_vdma_state* s);
+
+#endif
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/drivers/ide/Kconfig linux-2.6.21/drivers/ide/Kconfig
--- linux-2.6.21-orig/drivers/ide/Kconfig 2010-09-22 18:54:38.000000000 +0400
+++ linux-2.6.21/drivers/ide/Kconfig 2010-09-22 19:05:56.000000000 +0400
@@ -170,6 +170,10 @@
Support for Compact Flash cards, outboard IDE disks, tape drives,
and CD-ROM drives connected through a PCMCIA card.
+config IDECS_VDMA
+ bool "Support PIO over DMA transfers (EXPERIMENTAL)"
+ depends on BLK_DEV_IDECS && IDE_VDMA && EXPERIMENTAL
+
config BLK_DEV_DELKIN
tristate "Cardbus IDE support (Delkin/ASKA/Workbit)"
depends on CARDBUS && PCI
@@ -1042,7 +1046,7 @@
endif
config BLK_DEV_IDEDMA
- def_bool BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA
+ def_bool BLK_DEV_IDEDMA_PCI || BLK_DEV_IDEDMA_PMAC || BLK_DEV_IDEDMA_ICS || BLK_DEV_IDE_AU1XXX_MDMA2_DBDMA || IDE_VDMA
config IDEDMA_IVB
bool "IGNORE word93 Validation BITS"
@@ -1060,6 +1064,13 @@
It is normally safe to answer Y; however, the default is N.
+config IDE_VDMA_PXA2xx
+ bool "Support PIO over DMA transfers with PXA2xx DMA controller (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && PXA27x
+
+config IDE_VDMA
+ def_bool IDE_VDMA_PXA2xx
+
endif
config BLK_DEV_HD_ONLY
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/drivers/ide/legacy/ide-cs.c linux-2.6.21/drivers/ide/legacy/ide-cs.c
--- linux-2.6.21-orig/drivers/ide/legacy/ide-cs.c 2010-09-22 18:54:38.000000000 +0400
+++ linux-2.6.21/drivers/ide/legacy/ide-cs.c 2010-09-27 17:47:02.000000000 +0400
@@ -53,6 +53,10 @@
#include <pcmcia/cisreg.h>
#include <pcmcia/ciscode.h>
+#ifdef CONFIG_IDECS_VDMA
+#include "ide-vdma.h"
+#endif
+
/*====================================================================*/
/* Module parameters */
@@ -84,6 +88,9 @@
int ndev;
dev_node_t node;
int hd;
+#ifdef CONFIG_IDECS_VDMA
+ void *vdma_data;
+#endif
} ide_info_t;
static void ide_release(struct pcmcia_device *);
@@ -106,6 +113,7 @@
{
ide_info_t *info;
+
DEBUG(0, "ide_attach()\n");
/* Create new ide device */
@@ -145,15 +153,16 @@
kfree(link->priv);
} /* ide_detach */
-static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle)
+static int idecs_register(ide_hwif_t **hwif, unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle)
{
hw_regs_t hw;
+
memset(&hw, 0, sizeof(hw));
ide_init_hwif_ports(&hw, io, ctl, NULL);
hw.irq = irq;
hw.chipset = ide_pci;
hw.dev = &handle->dev;
- return ide_register_hw_with_fixup(&hw, NULL, ide_undecoded_slave);
+ return ide_register_hw_with_fixup(&hw, hwif, ide_undecoded_slave);
}
/*======================================================================
@@ -180,6 +189,10 @@
cistpl_cftable_entry_t *cfg;
int i, pass, last_ret = 0, last_fn = 0, hd, is_kme = 0;
unsigned long io_base, ctl_base;
+ ide_hwif_t *hwif;
+#ifdef CONFIG_IDECS_VDMA
+ unsigned long dma_base;
+#endif
DEBUG(0, "ide_config(0x%p)\n", link);
@@ -276,11 +289,11 @@
/* retry registration in case device is still spinning up */
for (hd = -1, i = 0; i < 10; i++) {
- hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, link);
+ hd = idecs_register(&hwif, io_base, ctl_base, link->irq.AssignedIRQ, link);
if (hd >= 0) break;
if (link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
- hd = idecs_register(io_base + 0x10, ctl_base + 0x10,
+ hd = idecs_register(&hwif, io_base + 0x10, ctl_base + 0x10,
link->irq.AssignedIRQ, link);
if (hd >= 0) {
io_base += 0x10;
@@ -298,6 +311,24 @@
goto failed;
}
+#ifdef CONFIG_IDECS_VDMA
+ if (pcmcia_get_phys_ioaddr(link, io_base, &dma_base) == CS_SUCCESS) {
+ info->vdma_data = ide_vdma_init(hwif, dma_base);
+ if (hwif->ide_dma_check) {
+ /*
+ * ide_dma_check is normally called from probe_hwif,
+ * but since dma hooks are installed after the ide_register_hw_with_fixup,
+ * we have to call dma_check here.
+ */
+ for (i = 0; i < MAX_DRIVES; i++) {
+ ide_drive_t *drive = &hwif->drives[i];
+ if (drive->present)
+ hwif->ide_dma_check(drive);
+ }
+ }
+ }
+#endif
+
info->ndev = 1;
sprintf(info->node.dev_name, "hd%c", 'a' + (hd * 2));
info->node.major = ide_major[hd];
@@ -342,11 +373,13 @@
ide_unregister(info->hd);
}
info->ndev = 0;
-
+
+#ifdef CONFIG_IDECS_VDMA
+ ide_vdma_fini(info->vdma_data);
+#endif
pcmcia_disable_device(link);
} /* ide_release */
-
/*======================================================================
The card status event handler. Mostly, this schedules other
@@ -407,6 +440,8 @@
PCMCIA_DEVICE_PROD_ID1("STI Flash", 0xe4a13209),
PCMCIA_DEVICE_PROD_ID12("STI", "Flash 5.0", 0xbf2df18d, 0x8cb57a0e),
PCMCIA_MFC_DEVICE_PROD_ID12(1, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6),
+ PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "04/05/06", 0x43d74cb4, 0x6a22777d),
+ PCMCIA_DEVICE_PROD_ID12("KINGSTON", "ELITE PRO CF CARD 32GB", 0x2e6d1829, 0x3526fdb3),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, ide_ids);
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/drivers/ide/Makefile linux-2.6.21/drivers/ide/Makefile
--- linux-2.6.21-orig/drivers/ide/Makefile 2010-09-22 18:54:38.000000000 +0400
+++ linux-2.6.21/drivers/ide/Makefile 2010-09-22 18:55:13.000000000 +0400
@@ -26,6 +26,7 @@
# built-in only drivers from arm/
ide-core-$(CONFIG_IDE_ARM) += arm/ide_arm.o
+ide-core-$(CONFIG_IDE_VDMA_PXA2xx) += arm/ide-vdma-pxa2xx.o
# built-in only drivers from legacy/
ide-core-$(CONFIG_BLK_DEV_BUDDHA) += legacy/buddha.o
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/drivers/pcmcia/soc_common.c linux-2.6.21/drivers/pcmcia/soc_common.c
--- linux-2.6.21-orig/drivers/pcmcia/soc_common.c 2010-09-22 18:54:39.000000000 +0400
+++ linux-2.6.21/drivers/pcmcia/soc_common.c 2010-09-27 17:58:14.000000000 +0400
@@ -48,6 +48,8 @@
#include "soc_common.h"
+#include <pcmcia/ds.h>
+
/* FIXME: platform dependent resource declaration has to move out of this file */
#ifdef CONFIG_ARCH_PXA
#include <asm/arch/pxa-regs.h>
@@ -378,7 +380,6 @@
return 0;
}
-
/*
* Implements the set_mem_map() operation for the in-kernel PCMCIA
* service (formerly SS_SetMemMap in Card Services). We configure
@@ -433,6 +434,17 @@
return 0;
}
+int pcmcia_get_phys_ioaddr(struct pcmcia_device *p_dev, ioaddr_t addr, unsigned long *phys_addr)
+{
+ struct pcmcia_socket *sock = p_dev->socket;
+ struct soc_pcmcia_socket *skt = to_soc_pcmcia_socket(sock);
+
+ *phys_addr = (unsigned long)addr - (unsigned long)skt->virt_io
+ + skt->res_io.start;
+ return CS_SUCCESS;
+}
+EXPORT_SYMBOL(pcmcia_get_phys_ioaddr);
+
struct bittbl {
unsigned int mask;
const char *name;
diff -ruN -X ../../new8/kernel/diffignore.txt linux-2.6.21-orig/include/pcmcia/cs.h linux-2.6.21/include/pcmcia/cs.h
--- linux-2.6.21-orig/include/pcmcia/cs.h 2006-06-18 05:49:35.000000000 +0400
+++ linux-2.6.21/include/pcmcia/cs.h 2010-09-27 17:51:23.000000000 +0400
@@ -384,6 +384,7 @@
int pcmcia_request_io(struct pcmcia_device *p_dev, io_req_t *req);
int pcmcia_request_irq(struct pcmcia_device *p_dev, irq_req_t *req);
int pcmcia_request_window(struct pcmcia_device **p_dev, win_req_t *req, window_handle_t *wh);
+int pcmcia_get_phys_ioaddr(struct pcmcia_device *p_dev, ioaddr_t addr, unsigned long *phys_addr);
int pcmcia_suspend_card(struct pcmcia_socket *skt);
int pcmcia_resume_card(struct pcmcia_socket *skt);
int pcmcia_eject_card(struct pcmcia_socket *skt);
_______________________________________________
Zaurus-devel mailing list
[email protected]
http://lists.linuxtogo.org/cgi-bin/mailman/listinfo/zaurus-devel