Resent with minor fixes from Kevin.
This is a forward port of the SD/MMC driver from the latest LSP,
melded into linux-2.6.23-git. I've tried to keep the style updates
that were merged recently. I've fixed the exit code (in
davinci_mmcsd_exit) to release clocks, and some failure paths in
davinci_mmcsd_init.
The driver as is performs ~6MB/s reads and ~1MB/s writes with a
block size of 512k (i.e. still needs help!). This patch just makes
the driver usable again.
Signed-off-by: Bernard Blackham <[EMAIL PROTECTED]>
davinci_mmc.c | 743 ++++++++++++++++++++++++++++++++++++++--------------------
davinci_mmc.h | 23 +
2 files changed, 514 insertions(+), 252 deletions(-)
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 5d8e79f..e988370 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -24,13 +24,13 @@
Modifications:
ver. 1.0: Oct 2005, Purushotam Kumar Initial version
ver 1.1: Nov 2005, Purushotam Kumar Solved bugs
- ver 1.2: Jan 2066, Purushotam Kumar Added card remove insert support
+ ver 1.2: Jan 2006, Purushotam Kumar Added card remove insert support
-
*
-
*/
#include <linux/module.h>
+#include <linux/version.h>
#include <linux/tty.h>
#include <linux/ioport.h>
#include <linux/init.h>
@@ -41,35 +41,25 @@ #include <linux/clk.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/mmc.h>
-
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/hardware.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
#include <asm/arch/irqs.h>
#include <asm/arch/hardware.h>
#include "davinci_mmc.h"
#include <asm/arch/edma.h>
-/* FIXME: old defines from old mmc.h */
-/* #define MMC_RSP_NONE (0 << 0) */
-/* #define MMC_RSP_SHORT (1 << 0) */
-/* #define MMC_RSP_LONG (2 << 0) */
-/* #define MMC_RSP_MASK (3 << 0) */
-/* #define MMC_RSP_CRC (1 << 3) /\* expect valid crc *\/ */
-/* #define MMC_RSP_BUSY (1 << 4) /\* card may send busy *\/ */
-#define MMC_RSP_SHORT MMC_RSP_PRESENT
-#define MMC_RSP_LONG MMC_RSP_136
-#define MMC_RSP_MASK (MMC_RSP_PRESENT | MMC_RSP_136)
-
extern void davinci_clean_channel(int ch_no);
/* MMCSD Init clock in Hz in opendain mode */
-#define MMCSD_INIT_CLOCK 200000
-#define DRIVER_NAME "mmc0"
-#define MMCINT_INTERRUPT IRQ_MMCINT
-#define MMCSD_REGS_BASE_ADDR DAVINCI_MMC_SD_BASE
-#define TCINTEN (0x1<<20)
+#define MMCSD_INIT_CLOCK 200000
+#define DRIVER_NAME "mmc0"
+#define MMCINT_INTERRUPT IRQ_MMCINT
+#define MMCSD_REGS_BASE_ADDR DAVINCI_MMC_SD_BASE
+#define TCINTEN (0x1<<20)
/* This macro could not be defined to 0 (ZERO) or -ve value.
* This value is multiplied to "HZ"
@@ -83,6 +73,8 @@ mmcsd_config_def mmcsd_cfg = {
/* read write thresholds (in bytes) can be any power of 2 from 2 to 64 */
32,
/* To use the DMA or not-- 1- Use DMA, 0-Interrupt mode */
+ 1,
+/* flag Indicates 1bit/4bit mode */
1
};
@@ -107,54 +99,71 @@ static struct mmc_request *que_mmc_reque
/* tells whether card is initizlzed or not */
static unsigned int is_card_initialized = 0;
static unsigned int new_card_state = 0; /* tells current state of card */
+static unsigned int is_card_removed = 0;
static DEFINE_SPINLOCK(mmc_lock);
+#define RSP_TYPE(x) ((x) & ~(MMC_RSP_BUSY|MMC_RSP_OPCODE))
+#define MMCST1_BUSY (1 << 0)
+
+static inline void wait_on_data(void)
+{
+ int cnt = 900000;
+ while (((mmcsd_regs->mmc_st1) & MMCST1_BUSY) && cnt) {
+ cnt--;
+ udelay(1);
+ }
+ if (!cnt) {
+ dev_warn(&mmc_dev, "ERROR: TOUT waiting for BUSY\n");
+ }
+}
+
static void mmc_davinci_start_command(struct mmc_davinci_host *host,
struct mmc_command *cmd)
{
u32 cmd_reg = 0;
u32 resp_type = 0;
u32 cmd_type = 0;
- int byte_cnt = 0, i = 0;
unsigned long flags;
+#ifdef CONFIG_MMC_DEBUG
dev_dbg(&mmc_dev, "\nMMCSD : CMD%d, argument 0x%08x",
cmd->opcode, cmd->arg);
- if (cmd->flags & MMC_RSP_SHORT)
- dev_dbg(&mmc_dev, ", 32-bit response");
- if (cmd->flags & MMC_RSP_LONG)
- dev_dbg(&mmc_dev, ", 128-bit response");
- if (cmd->flags & MMC_RSP_CRC)
- dev_dbg(&mmc_dev, ", CRC");
- if (cmd->flags & MMC_RSP_BUSY)
- dev_dbg(&mmc_dev, ", busy notification");
- else
- dev_dbg(&mmc_dev, ", No busy notification");
+ switch (RSP_TYPE(mmc_resp_type(cmd))) {
+ case RSP_TYPE(MMC_RSP_R1):
+ dev_dbg(&mmc_dev, ", R1/R1b response");
+ break;
+ case RSP_TYPE(MMC_RSP_R2):
+ dev_dbg(&mmc_dev, ", R2 response");
+ break;
+ case RSP_TYPE(MMC_RSP_R3):
+ dev_dbg(&mmc_dev, ", R3 response");
+ break;
+ default:
+ break;
+ }
dev_dbg(&mmc_dev, "\n");
+#endif
host->cmd = cmd;
/* Protocol layer does not provide response type,
* but our hardware needs to know exact type, not just size!
*/
- switch (cmd->flags & MMC_RSP_MASK) {
+ switch (RSP_TYPE(mmc_resp_type(cmd))) {
case MMC_RSP_NONE:
/* resp 0 */
break;
- case MMC_RSP_SHORT:
- /* resp 1, resp 1b */
- /* OR resp 3!! (assume this if bus is set opendrain) */
- if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) {
- resp_type = 3;
- if (cmd->opcode == 3)
- resp_type = 1;
- } else
- resp_type = 1;
+ case RSP_TYPE(MMC_RSP_R1):
+ resp_type = 1;
break;
- case MMC_RSP_LONG:
- /* resp 2 */
+ case RSP_TYPE(MMC_RSP_R2):
resp_type = 2;
break;
+ case RSP_TYPE(MMC_RSP_R3):
+ resp_type = 3;
+ break;
+ default:
+ break;
}
/* Protocol layer does not provide command type, but our hardware
@@ -171,11 +180,11 @@ static void mmc_davinci_start_command(st
* rest are ac, except if opendrain
*/
- if (host->data_dir)
+ if (mmc_cmd_type(cmd) == MMC_CMD_ADTC)
cmd_type = DAVINCI_MMC_CMDTYPE_ADTC;
- else if (resp_type == 0 && cmd->opcode != 15)
+ else if (mmc_cmd_type(cmd) == MMC_CMD_BC)
cmd_type = DAVINCI_MMC_CMDTYPE_BC;
- else if (host->bus_mode == MMC_BUSMODE_OPENDRAIN)
+ else if (mmc_cmd_type(cmd) == MMC_CMD_BCR)
cmd_type = DAVINCI_MMC_CMDTYPE_BCR;
else
cmd_type = DAVINCI_MMC_CMDTYPE_AC;
@@ -198,10 +207,9 @@ static void mmc_davinci_start_command(st
cmd_reg = cmd_reg | (1 << 14);
/* Set for generating DMA Xfer event */
- if ((host->use_dma == 1) && (host->data != NULL)
- && ((cmd->opcode == 18) || (cmd->opcode == 25)
- || (cmd->opcode == 24)
- || (cmd->opcode == 17)))
+ if ((host->do_dma == 1) && (host->data != NULL)
+ && ((cmd->opcode == 18) || (cmd->opcode == 25)
+ || (cmd->opcode == 24) || (cmd->opcode == 17)))
cmd_reg = cmd_reg | (1 << 16);
/* Setting whether command involves data transfer or not */
@@ -227,7 +235,7 @@ static void mmc_davinci_start_command(st
/* Enable interrupt */
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
- if (host->use_dma != 1)
+ if (host->do_dma != 1)
mmcsd_regs->mmc_im = MMCSD_EVENT_EOFCMD
| MMCSD_EVENT_WRITE
| MMCSD_EVENT_ERROR_CMDCRC
@@ -243,7 +251,7 @@ static void mmc_davinci_start_command(st
| MMCSD_EVENT_ERROR_DATATIMEOUT
| MMCSD_EVENT_BLOCK_XFERRED;
} else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
- if (host->use_dma != 1)
+ if (host->do_dma != 1)
mmcsd_regs->mmc_im = MMCSD_EVENT_EOFCMD
| MMCSD_EVENT_READ
| MMCSD_EVENT_ERROR_CMDCRC
@@ -271,25 +279,22 @@ static void mmc_davinci_start_command(st
*/
if ((host->data_dir == DAVINCI_MMC_DATADIR_WRITE)
&& (cmd_type == DAVINCI_MMC_CMDTYPE_ADTC)
- && (host->use_dma != 1)) {
- byte_cnt = mmcsd_cfg.rw_threshold;
- host->bytes_left -= mmcsd_cfg.rw_threshold;
- for (i = 0; i < (byte_cnt / 4); i++) {
- mmcsd_regs->mmc_dxr = *host->buffer;
- host->buffer++;
- }
- }
+ && (host->do_dma != 1))
+ /* Fill the FIFO for Tx */
+ davinci_fifo_data_trans(host);
if (cmd->opcode == 7) {
spin_lock_irqsave(&mmc_lock, flags);
+ is_card_removed = 0;
new_card_state = 1;
is_card_initialized = 1;
host->old_card_state = new_card_state;
is_init_progress = 0;
spin_unlock_irqrestore(&mmc_lock, flags);
}
- if (cmd->opcode == 1) {
+ if (cmd->opcode == 1 || cmd->opcode == 41) {
spin_lock_irqsave(&mmc_lock, flags);
+ is_card_initialized = 0;
is_init_progress = 1;
spin_unlock_irqrestore(&mmc_lock, flags);
}
@@ -302,47 +307,178 @@ static void mmc_davinci_start_command(st
static void mmc_davinci_dma_cb(int lch, u16 ch_status, void *data)
{
- int sync_dev = 0;
- struct mmc_davinci_host *host = (struct mmc_davinci_host *)data;
+ if (DMA_COMPLETE != ch_status) {
+ struct mmc_davinci_host *host = (struct mmc_davinci_host *)data;
+ dev_warn(&mmc_dev, "[DMA FAILED]");
+ davinci_abort_dma(host);
+ }
+}
- if (DMA_COMPLETE == ch_status) {
+static void davinci_fifo_data_trans(struct mmc_davinci_host *host)
+{
+ int n, i;
- if (host->cmd == NULL && host->data == NULL) {
- if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
- sync_dev = DAVINCI_DMA_MMCTXEVT;
- } else {
- sync_dev = DAVINCI_DMA_MMCRXEVT;
- }
- dev_dbg(&mmc_dev,
- "Interrupt from DMA when no request has been made\n");
- davinci_stop_dma(sync_dev);
- return;
- }
+ if (host->buffer_bytes_left == 0) {
+ host->sg_idx++;
+ BUG_ON(host->sg_idx == host->sg_len);
+ mmc_davinci_sg_to_buf(host);
+ }
- if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
- sync_dev = DAVINCI_DMA_MMCTXEVT; /* Write */
- } else {
- sync_dev = DAVINCI_DMA_MMCRXEVT; /* Read */
+ n = mmcsd_cfg.rw_threshold;
+ if (n > host->buffer_bytes_left)
+ n = host->buffer_bytes_left;
+ host->buffer_bytes_left -= n;
+ host->bytes_left -= n;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ for (i = 0; i < (n / 4); i++) {
+ mmcsd_regs->mmc_dxr = *host->buffer;
+ host->buffer++;
}
- davinci_stop_dma(sync_dev);
} else {
- /* Handing of Event missed interreupt from DMA */
- dev_dbg(&mmc_dev,
- "Event miss interrupt has been generated by DMA\n");
- if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
- sync_dev = DAVINCI_DMA_MMCTXEVT; /* Write */
- } else {
- sync_dev = DAVINCI_DMA_MMCRXEVT; /* Read */
+ for (i = 0; i < (n / 4); i++) {
+ *host->buffer = mmcsd_regs->mmc_drr;
+ host->buffer++;
}
- davinci_clean_channel(sync_dev);
}
}
+static void davinci_reinit_chan(void)
+{
+ int sync_dev;
+
+ sync_dev = DAVINCI_DMA_MMCTXEVT;
+ davinci_stop_dma(sync_dev);
+ davinci_clean_channel(sync_dev);
+
+ sync_dev = DAVINCI_DMA_MMCRXEVT;
+ davinci_stop_dma(sync_dev);
+ davinci_clean_channel(sync_dev);
+}
+
+static void davinci_abort_dma(struct mmc_davinci_host *host)
+{
+ int sync_dev = 0;
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
+ sync_dev = DAVINCI_DMA_MMCTXEVT;
+ else
+ sync_dev = DAVINCI_DMA_MMCRXEVT;
+
+ davinci_stop_dma(sync_dev);
+ davinci_clean_channel(sync_dev);
+
+}
+
static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,
struct mmc_request *req)
{
- const char *dev_name;
- int sync_dev, r, edma_ch = 0, tcc = 0;
+ int use_dma = 1, i;
+ struct mmc_data *data = host->data;
+ int block_size = data->blksz;
+
+ host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
+ ((data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE));
+
+ /* Decide if we can use DMA */
+ for (i = 0; i < host->sg_len; i++) {
+ if ((data->sg[i].length % block_size) != 0) {
+ use_dma = 0;
+ break;
+ }
+ }
+
+ if (!use_dma) {
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
+ (data->flags & MMC_DATA_WRITE)
+ ? DMA_TO_DEVICE
+ : DMA_FROM_DEVICE);
+ return -1;
+ }
+
+ host->do_dma = 1;
+
+ host->dma_state = 0;
+
+ mmc_davinci_send_dma_request(host, req);
+
+ return 0;
+
+}
+
+static int davinci_release_dma_channels(struct mmc_davinci_host *host)
+{
+ davinci_free_dma(DAVINCI_DMA_MMCTXEVT);
+ davinci_free_dma(DAVINCI_DMA_MMCRXEVT);
+
+ if (host->edma_ch_details.cnt_chanel) {
+ davinci_free_dma(host->edma_ch_details.chanel_num[0]);
+ host->edma_ch_details.cnt_chanel = 0;
+ }
+
+ return 0;
+}
+
+static int davinci_acquire_dma_channels(struct mmc_davinci_host *host)
+{
+ int edma_chan_num, tcc = 0, r, sync_dev;
+ enum dma_event_q queue_no = EVENTQ_0;
+
+ /* Acquire master DMA write channel */
+ if ((r = davinci_request_dma(DAVINCI_DMA_MMCTXEVT, "MMC_WRITE",
+ mmc_davinci_dma_cb, host, &edma_chan_num, &tcc,
+ queue_no)) != 0) {
+ dev_warn(&mmc_dev,
+ "MMC: davinci_request_dma() failed with %d\n",
+ r);
+ return r;
+ }
+
+ /* Acquire master DMA read channel */
+ if ((r = davinci_request_dma(DAVINCI_DMA_MMCRXEVT, "MMC_READ",
+ mmc_davinci_dma_cb, host, &edma_chan_num, &tcc,
+ queue_no)) != 0) {
+ dev_warn(&mmc_dev,
+ "MMC: davinci_request_dma() failed with %d\n",
+ r);
+ goto free_master_write;
+ }
+
+ host->edma_ch_details.cnt_chanel = 0;
+
+ /* currently data Writes are done using single block mode,
+ * so no DMA slave write channel is required for now */
+
+ /* Create a DMA slave read channel
+ * (assuming max segments handled is 2) */
+ sync_dev = DAVINCI_DMA_MMCRXEVT;
+ if ((r = davinci_request_dma(DAVINCI_EDMA_PARAM_ANY, "LINK",
+ NULL, NULL, &edma_chan_num, &sync_dev,
+ queue_no)) != 0) {
+ dev_warn(&mmc_dev,
+ "MMC: davinci_request_dma() failed with %d\n", r);
+ goto free_master_read;
+ }
+
+ host->edma_ch_details.cnt_chanel++;
+ host->edma_ch_details.chanel_num[0] = edma_chan_num;
+
+ return 0;
+
+free_master_read:
+ davinci_free_dma(DAVINCI_DMA_MMCRXEVT);
+free_master_write:
+ davinci_free_dma(DAVINCI_DMA_MMCTXEVT);
+
+ return r;
+}
+
+static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
+ struct mmc_request *req)
+{
+ int sync_dev;
unsigned char i, j;
unsigned short acnt, bcnt, ccnt;
unsigned int src_port, dst_port, temp_ccnt;
@@ -353,69 +489,50 @@ static int mmc_davinci_start_dma_transfe
unsigned short bcntrld;
enum sync_dimension sync_mode;
edmacc_paramentry_regs temp;
- enum dma_event_q queue_no = EVENTQ_0;
int edma_chan_num;
- unsigned int num_eight_words = (req->data->blocks * 512) / 32;
static unsigned int option_read = 0;
static unsigned int option_write = 0;
- static unsigned char dma_read_req = 1;
- static unsigned char dma_write_req = 1;
+ struct mmc_data *data = host->data;
+ struct scatterlist *sg = &data->sg[0];
+ unsigned int count;
+ int num_frames, frame;
#define MAX_C_CNT 64000
- if ((req->data->flags & MMC_DATA_WRITE)) {
- sync_dev = DAVINCI_DMA_MMCTXEVT; /* Write */
- dev_name = "MMC_WRITE";
+ frame = data->blksz;
+ count = sg_dma_len(sg);
- if (dma_write_req) {
- r = davinci_request_dma(sync_dev, dev_name,
- mmc_davinci_dma_cb, host,
- &edma_ch, &tcc, queue_no);
- if (r != 0) {
- dev_dbg(&mmc_dev,
- "MMC: davinci_request_dma() failed with %d\n",
- r);
- return r;
- }
- dma_write_req = 0;
- }
- } else {
- sync_dev = DAVINCI_DMA_MMCRXEVT; /* Read */
- dev_name = "MMC_READ";
- if (dma_read_req) {
- r = davinci_request_dma(sync_dev, dev_name,
- mmc_davinci_dma_cb, host,
- &edma_ch, &tcc, queue_no);
- if (r != 0) {
- dev_dbg(&mmc_dev,
- "MMC: davinci_request_dma() failed with %d\n",
- r);
- return r;
- }
- dma_read_req = 0;
- }
+ if ((data->blocks == 1) && (count > data->blksz)) {
+ count = frame;
}
- if ((req->data->flags & MMC_DATA_WRITE)) {
- /* AB Sync Transfer */
- /* Acnt =32, Bcnt= , Cnt=1 */
-
- sync_dev = DAVINCI_DMA_MMCTXEVT; /* Write */
+ if (count % 32 == 0) {
acnt = 4;
bcnt = 8;
- if (num_eight_words > MAX_C_CNT) {
- temp_ccnt = MAX_C_CNT;
- ccnt = temp_ccnt;
- } else {
- ccnt = num_eight_words;
- temp_ccnt = ccnt;
- }
+ num_frames = count / 32;
+ } else {
+ acnt = count;
+ bcnt = 1;
+ num_frames = 1;
+ }
+
+ if (num_frames > MAX_C_CNT) {
+ temp_ccnt = MAX_C_CNT;
+ ccnt = temp_ccnt;
+ } else {
+ ccnt = num_frames;
+ temp_ccnt = ccnt;
+ }
+
+ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
+ /*AB Sync Transfer */
+ sync_dev = DAVINCI_DMA_MMCTXEVT;
- src_port = (unsigned int)virt_to_phys(req->data->mrq->buffer);
+ src_port = (unsigned int)sg_dma_address(sg);
mode_src = INCR;
fifo_width_src = W8BIT; /* It's not cared as modeDsr is INCR */
- src_bidx = 4;
- src_cidx = 32;
+ src_bidx = acnt;
+ src_cidx = acnt * bcnt;
dst_port = MMCSD_REGS_BASE_ADDR + 0x2C;
mode_dst = INCR;
fifo_width_dst = W8BIT; /* It's not cared as modeDsr is INCR */
@@ -425,27 +542,18 @@ r);
sync_mode = ABSYNC;
} else {
- sync_dev = DAVINCI_DMA_MMCRXEVT; /* Read */
- acnt = 4;
- bcnt = 8;
- if (num_eight_words > MAX_C_CNT) {
- temp_ccnt = MAX_C_CNT;
- ccnt = temp_ccnt;
- } else {
- ccnt = num_eight_words;
- temp_ccnt = ccnt;
- }
+ sync_dev = DAVINCI_DMA_MMCRXEVT;
src_port = MMCSD_REGS_BASE_ADDR + 0x28;
mode_src = INCR;
fifo_width_src = W8BIT;
src_bidx = 0;
src_cidx = 0;
- dst_port = (unsigned int)virt_to_phys(req->data->mrq->buffer);
+ dst_port = (unsigned int)sg_dma_address(sg);
mode_dst = INCR;
fifo_width_dst = W8BIT; /* It's not cared as modeDsr is INCR */
- dst_bidx = 4;
- dst_cidx = 32;
+ dst_bidx = acnt;
+ dst_cidx = acnt * bcnt;
bcntrld = 8;
sync_mode = ABSYNC;
}
@@ -459,7 +567,6 @@ r);
davinci_set_dma_transfer_params(sync_dev, acnt, bcnt, ccnt, bcntrld,
sync_mode);
- host->edma_ch_details.cnt_chanel = 0;
davinci_get_dma_params(sync_dev, &temp);
if (sync_dev == DAVINCI_DMA_MMCTXEVT) {
if (option_write == 0) {
@@ -478,12 +585,14 @@ r);
}
}
- if (num_eight_words > MAX_C_CNT) { /* Linking will be performed */
+ if (host->sg_len > 1) {
davinci_get_dma_params(sync_dev, &temp);
temp.opt &= ~TCINTEN;
davinci_set_dma_params(sync_dev, &temp);
- for (i = 0; i < EDMA_MAX_LOGICAL_CHA_ALLOWED; i++) {
+ for (i = 0; i < host->sg_len - 1; i++) {
+ sg = &data->sg[i + 1];
+
if (i != 0) {
j = i - 1;
davinci_get_dma_params(
@@ -495,30 +604,24 @@ r);
&temp);
}
- host->edma_ch_details.cnt_chanel++;
- davinci_request_dma(DAVINCI_EDMA_PARAM_ANY, "LINK",
- NULL, NULL, &edma_chan_num,
- &sync_dev, queue_no);
- host->edma_ch_details.chanel_num[i] = edma_chan_num;
- ccnt = temp.ccnt & 0x0000FFFF;
- if (sync_dev == DAVINCI_DMA_MMCTXEVT) {
- temp.src = temp.src + (acnt * bcnt * ccnt);
- } else {
- temp.dst = temp.dst + (acnt * bcnt * ccnt);
- }
+ edma_chan_num = host->edma_ch_details.chanel_num[0];
+
+ frame = data->blksz;
+ count = sg_dma_len(sg);
+
+ if ((data->blocks == 1) && (count > data->blksz))
+ count = frame;
+
+ ccnt = count / 32;
+
+ if (sync_dev == DAVINCI_DMA_MMCTXEVT)
+ temp.src = (unsigned int)sg_dma_address(sg);
+ else
+ temp.dst = (unsigned int)sg_dma_address(sg);
temp.opt |= TCINTEN;
- if ((num_eight_words - temp_ccnt) > MAX_C_CNT) {
- temp.ccnt = (temp.ccnt & 0xFFFF0000)
- | MAX_C_CNT;
- ccnt = temp.ccnt & 0x0000FFFF;
- temp_ccnt = temp_ccnt + ccnt;
- } else {
- temp.ccnt = (temp.ccnt & 0xFFFF0000)
- | (num_eight_words -temp_ccnt);
- ccnt = temp.ccnt & 0x0000FFFF;
- temp_ccnt = temp_ccnt + ccnt;
- }
+ temp.ccnt = (temp.ccnt & 0xFFFF0000) | (ccnt);
+
davinci_set_dma_params(edma_chan_num, &temp);
if (i != 0) {
j = i - 1;
@@ -526,8 +629,6 @@ r);
chanel_num[j],
edma_chan_num);
}
- if (temp_ccnt == num_eight_words)
- break;
}
davinci_dma_link_lch(sync_dev,
host->edma_ch_details.chanel_num[0]);
@@ -540,8 +641,7 @@ r);
static void
mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request
*req)
{
- int timeout;
-
+ int timeout, sg_len;
host->data = req->data;
if (req->data == NULL) {
host->data_dir = DAVINCI_MMC_DATADIR_NONE;
@@ -549,6 +649,9 @@ mmc_davinci_prepare_data(struct mmc_davi
mmcsd_regs->mmc_nblk = 0;
return;
}
+ /* Init idx */
+ host->sg_idx = 0;
+
dev_dbg(&mmc_dev,
"MMCSD : Data xfer (%s %s), "
"DTO %d cycles + %d ns, %d blocks of %d bytes\r\n",
@@ -589,15 +692,33 @@ mmc_davinci_prepare_data(struct mmc_davi
break;
}
- if ((host->use_dma == 1)
- && (mmc_davinci_start_dma_transfer(host, req) == 0)) {
+ sg_len = (req->data->blocks == 1) ? 1 : req->data->sg_len;
+ host->sg_len = sg_len;
+
+ host->bytes_left = req->data->blocks * req->data->blksz;
+
+ if ((host->use_dma == 1) && (host->bytes_left % 32 == 0)
+ && (mmc_davinci_start_dma_transfer(host, req) == 0)) {
host->buffer = NULL;
host->bytes_left = 0;
} else {
/* Revert to CPU Copy */
- host->buffer = (u32 *) (req->data->mrq->buffer);
- host->bytes_left = req->data->blocks * req->data->blksz;
- host->use_dma = 0;
+
+ host->do_dma = 0;
+ mmc_davinci_sg_to_buf(host);
+ }
+}
+
+/* PIO only */
+static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
+{
+ struct scatterlist *sg;
+
+ sg = host->data->sg + host->sg_idx;
+ host->buffer_bytes_left = sg->length;
+ host->buffer = page_address(sg->page) + sg->offset;
+ if (host->buffer_bytes_left > host->bytes_left) {
+ host->buffer_bytes_left = host->bytes_left;
}
}
@@ -606,14 +727,27 @@ static void mmc_davinci_request(struct m
struct mmc_davinci_host *host = mmc_priv(mmc);
unsigned long flags;
+ if (is_card_removed) {
+ if (req->cmd) {
+ req->cmd->error |= MMC_ERR_TIMEOUT;
+ mmc_request_done(mmc, req);
+ }
+ dev_dbg(&mmc_dev,
+ "From code segment executed when card removed\n");
+ return;
+ }
+
+ wait_on_data();
+
if (!is_card_detect_progress) {
spin_lock_irqsave(&mmc_lock, flags);
is_card_busy = 1;
spin_unlock_irqrestore(&mmc_lock, flags);
+ host->do_dma = 0;
mmc_davinci_prepare_data(host, req);
mmc_davinci_start_command(host, req->cmd);
} else {
- /* Queu up the request as card dectection is being excuted */
+ /* Queue up the request as card detection is being executed */
que_mmc_host = mmc;
que_mmc_request = req;
spin_lock_irqsave(&mmc_lock, flags);
@@ -652,6 +786,13 @@ static void mmc_davinci_set_ios(struct m
dev_dbg(&mmc_dev, "clock %dHz busmode %d powermode %d Vdd %d.%02d\r\n",
ios->clock, ios->bus_mode, ios->power_mode,
ios->vdd / 100, ios->vdd % 100);
+ if (ios->bus_width == MMC_BUS_WIDTH_4) {
+ dev_dbg(&mmc_dev, "\nEnabling 4 bit mode\n");
+ mmcsd_regs->mmc_ctl = mmcsd_regs->mmc_ctl | (1 << 2);
+ } else {
+ dev_dbg(&mmc_dev, "Disabling 4 bit mode\n");
+ mmcsd_regs->mmc_ctl = mmcsd_regs->mmc_ctl & ~(1 << 2);
+ }
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
open_drain_freq = ((unsigned int)cpu_arm_clk
@@ -660,8 +801,12 @@ static void mmc_davinci_set_ios(struct m
| open_drain_freq;
} else {
mmc_push_pull_freq = calculate_freq_for_card(ios->clock);
+ mmcsd_regs->mmc_clk &= ~(0x100);
+ udelay(10);
mmcsd_regs->mmc_clk = (mmcsd_regs->mmc_clk & ~(0xFF))
| mmc_push_pull_freq;
+ mmcsd_regs->mmc_clk |= 0x100;
+ udelay(10);
}
host->bus_mode = ios->bus_mode;
if (ios->power_mode == MMC_POWER_UP) {
@@ -685,6 +830,15 @@ mmc_davinci_xfer_done(struct mmc_davinci
if (data->error == MMC_ERR_NONE)
data->bytes_xfered += data->blocks * data->blksz;
+ if (host->do_dma) {
+ davinci_abort_dma(host);
+
+ dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len,
+ (data->
+ flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE :
+ DMA_FROM_DEVICE);
+ }
+
if (data->error == MMC_ERR_TIMEOUT) {
spin_lock_irqsave(&mmc_lock, flags);
is_card_busy = 0;
@@ -708,25 +862,24 @@ static void mmc_davinci_cmd_done(struct
struct mmc_command *cmd)
{
unsigned long flags;
-
host->cmd = NULL;
- switch (cmd->flags & MMC_RSP_MASK) {
- case MMC_RSP_NONE:
- /* resp 0 */
- break;
- case MMC_RSP_SHORT:
- /* response types 1, 1b, 3, 4, 5, 6 */
- cmd->resp[0] = mmcsd_regs->mmc_rsp67;
- break;
+ if (!cmd) {
+ dev_warn(&mmc_dev, "%s(): No cmd ptr\n", __FUNCTION__);
+ return;
+ }
- case MMC_RSP_LONG:
- /* response type 2 */
- cmd->resp[3] = mmcsd_regs->mmc_rsp01;
- cmd->resp[2] = mmcsd_regs->mmc_rsp23;
- cmd->resp[1] = mmcsd_regs->mmc_rsp45;
- cmd->resp[0] = mmcsd_regs->mmc_rsp67;
- break;
+ if (cmd->flags & MMC_RSP_PRESENT) {
+ if (cmd->flags & MMC_RSP_136) {
+ /* response type 2 */
+ cmd->resp[3] = mmcsd_regs->mmc_rsp01;
+ cmd->resp[2] = mmcsd_regs->mmc_rsp23;
+ cmd->resp[1] = mmcsd_regs->mmc_rsp45;
+ cmd->resp[0] = mmcsd_regs->mmc_rsp67;
+ } else {
+ /* response types 1, 1b, 3, 4, 5, 6 */
+ cmd->resp[0] = mmcsd_regs->mmc_rsp67;
+ }
}
if (host->data == NULL || cmd->error != MMC_ERR_NONE) {
@@ -746,7 +899,6 @@ static irqreturn_t mmc_davinci_irq(int i
u16 status;
int end_command;
int end_transfer;
- int byte_cnt = 0, i = 0;
unsigned long flags;
if (host->is_core_command) {
@@ -783,44 +935,35 @@ static irqreturn_t mmc_davinci_irq(int i
if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) {
if (status & MMCSD_EVENT_WRITE) {
/* Buffer almost empty */
- if (host->bytes_left > 0) {
- byte_cnt =
- mmcsd_cfg.rw_threshold;
- host->bytes_left -=
- mmcsd_cfg.rw_threshold;
- for (i = 0; i < (byte_cnt / 4);
- i++) {
- mmcsd_regs->mmc_dxr =
- *host->buffer;
- host->buffer++;
- }
- }
+ if (host->bytes_left > 0)
+ davinci_fifo_data_trans(host);
}
}
if (host->data_dir == DAVINCI_MMC_DATADIR_READ) {
if (status & MMCSD_EVENT_READ) {
/* Buffer almost empty */
- if (host->bytes_left > 0) {
- byte_cnt =
- mmcsd_cfg.rw_threshold;
- host->bytes_left -=
- mmcsd_cfg.rw_threshold;
- for (i = 0; i < (byte_cnt / 4);
- i++) {
- *host->buffer =
- mmcsd_regs->
- mmc_drr;
- host->buffer++;
- }
- }
+ if (host->bytes_left > 0)
+ davinci_fifo_data_trans(host);
}
}
if (status & MMCSD_EVENT_BLOCK_XFERRED) {
/* Block sent/received */
if (host->data != NULL) {
- end_transfer = 1;
+ if (host->do_dma == 1) {
+ end_transfer = 1;
+ } else {
+ /* if datasize<32 no RX ints are generated */
+ if (host->bytes_left > 0) {
+ davinci_fifo_data_trans
+ (host);
+ }
+ end_transfer = 1;
+ }
+ } else {
+ dev_warn(&mmc_dev,
+ "TC:host->data is NULL\n");
}
}
@@ -829,6 +972,7 @@ static irqreturn_t mmc_davinci_irq(int i
if ((host->data) && (new_card_state != 0)) {
host->data->error |= MMC_ERR_TIMEOUT;
spin_lock_irqsave(&mmc_lock, flags);
+ is_card_removed = 1;
new_card_state = 0;
is_card_initialized = 0;
spin_unlock_irqrestore(&mmc_lock,
@@ -836,15 +980,23 @@ static irqreturn_t mmc_davinci_irq(int i
dev_dbg(&mmc_dev,
"MMCSD: Data timeout, CMD%d and status is %x\r\n",
host->cmd->opcode, status);
+
+ if (host->cmd) {
+ host->cmd->error |=
+ MMC_ERR_TIMEOUT;
+ }
end_transfer = 1;
- host->cmd->error |= MMC_ERR_TIMEOUT;
}
- dev_dbg(&mmc_dev,
- "MMCSD: Data timeout, CMD%d and status is %x\r\n",
- host->cmd->opcode, status);
}
if (status & MMCSD_EVENT_ERROR_DATACRC) {
+ /* DAT line portion is disabled and in reset state */
+ mmcsd_regs->mmc_ctl =
+ mmcsd_regs->mmc_ctl | (1 << 1);
+ udelay(10);
+ mmcsd_regs->mmc_ctl =
+ mmcsd_regs->mmc_ctl & ~(1 << 1);
+
/* Data CRC error */
if (host->data) {
host->data->error |= MMC_ERR_BADCRC;
@@ -859,6 +1011,10 @@ static irqreturn_t mmc_davinci_irq(int i
}
if (status & MMCSD_EVENT_ERROR_CMDTIMEOUT) {
+ if (host->do_dma)
+ /* abort DMA transfer */
+ davinci_abort_dma(host);
+
/* Command timeout */
if (host->cmd) {
/* Timeouts are normal in case of
@@ -888,7 +1044,11 @@ static irqreturn_t mmc_davinci_irq(int i
/* Command CRC error */
dev_dbg(&mmc_dev, "Command CRC error\r\n");
if (host->cmd) {
- host->cmd->error |= MMC_ERR_BADCRC;
+ /* Ignore CMD CRC errors during high speed operation */
+ if (host->mmc->ios.clock <= 25000000) {
+ host->cmd->error |=
+ MMC_ERR_BADCRC;
+ }
end_command = 1;
}
}
@@ -926,6 +1086,7 @@ static irqreturn_t mmc_davinci_irq(int i
} else {
spin_lock_irqsave(&mmc_lock, flags);
+ is_card_removed = 1;
new_card_state = 0;
is_card_initialized = 0;
spin_unlock_irqrestore(&mmc_lock, flags);
@@ -945,15 +1106,17 @@ static irqreturn_t mmc_davinci_irq(int i
}
- if (host->cmd_code == 1) {
+ if (host->cmd_code == 1 || host->cmd_code == 55) {
if (status & MMCSD_EVENT_EOFCMD) {
spin_lock_irqsave(&mmc_lock, flags);
+ is_card_removed = 0;
new_card_state = 1;
is_card_initialized = 0;
spin_unlock_irqrestore(&mmc_lock, flags);
} else {
spin_lock_irqsave(&mmc_lock, flags);
+ is_card_removed = 1;
new_card_state = 0;
is_card_initialized = 0;
spin_unlock_irqrestore(&mmc_lock, flags);
@@ -970,19 +1133,31 @@ static irqreturn_t mmc_davinci_irq(int i
is_req_queued_up = 0;
spin_unlock_irqrestore(&mmc_lock, flags);
}
-
}
if (host->cmd_code == 0) {
if (status & MMCSD_EVENT_EOFCMD) {
+ static int flag_sd_mmc;
host->is_core_command = 0;
- host->cmd_code = 1;
- dev_dbg(&mmc_dev,
- "MMC-Probing mmc with cmd1\n");
- /* Issue cmd1 */
- mmcsd_regs->mmc_arghl = 0x80300000;
- mmcsd_regs->mmc_cmd = 0x00000601;
+ if (flag_sd_mmc) {
+ flag_sd_mmc = 0;
+ host->cmd_code = 1;
+ /* Issue cmd1 */
+ mmcsd_regs->mmc_arghl = 0x80300000;
+ mmcsd_regs->mmc_cmd = 0x00000601;
+ } else {
+ flag_sd_mmc = 1;
+ host->cmd_code = 55;
+ /* Issue cmd55 */
+ mmcsd_regs->mmc_arghl = 0x0;
+ mmcsd_regs->mmc_cmd =
+ ((0x0 | (1 << 9) | 55));
+ }
+
+ dev_dbg(&mmc_dev,
+ "MMC-Probing mmc with cmd%d\n",
+ host->cmd_code);
} else {
spin_lock_irqsave(&mmc_lock, flags);
new_card_state = 0;
@@ -999,12 +1174,22 @@ static irqreturn_t mmc_davinci_irq(int i
static struct mmc_host_ops mmc_davinci_ops = {
.request = mmc_davinci_request,
.set_ios = mmc_davinci_set_ios,
+ .get_ro = mmc_davinci_get_ro
};
-void mmc_check_card(unsigned long data)
+static int mmc_davinci_get_ro(struct mmc_host *mmc)
+{
+ return 0;
+}
+
+static void mmc_check_card(unsigned long data)
{
struct mmc_davinci_host *host = (struct mmc_davinci_host *)data;
unsigned long flags;
+ struct mmc_card *card = NULL;
+
+ if (host->mmc && host->mmc->card)
+ card = host->mmc->card;
if ((!is_card_detect_progress) || (!is_init_progress)) {
if (is_card_initialized) {
@@ -1014,7 +1199,8 @@ void mmc_check_card(unsigned long data)
is_card_detect_progress = 1;
spin_unlock_irqrestore(&mmc_lock, flags);
/* Issue cmd13 */
- mmcsd_regs->mmc_arghl = 0x10000;
+ mmcsd_regs->mmc_arghl = (card && mmc_card_sd(card))
+ ? (card->rca << 16) : 0x10000;
mmcsd_regs->mmc_cmd = 0x0000028D;
} else {
host->is_core_command = 0;
@@ -1041,6 +1227,8 @@ static void davinci_mmc_check_status(uns
if (!is_card_busy) {
if (host->old_card_state ^ new_card_state) {
+ davinci_reinit_chan();
+ init_mmcsd_host();
mmc_detect_change(host->mmc, 0);
spin_lock_irqsave(&mmc_lock, flags);
host->old_card_state = new_card_state;
@@ -1057,6 +1245,7 @@ static void init_mmcsd_host(void)
mmcsd_regs->mmc_ctl = mmcsd_regs->mmc_ctl | 0x1;
/* DAT line portion is diabled and in reset state */
mmcsd_regs->mmc_ctl = mmcsd_regs->mmc_ctl | (1 << 1);
+ udelay(10);
mmcsd_regs->mmc_clk = 0x0;
mmcsd_regs->mmc_clk = mmcsd_regs->mmc_clk | (1 << 8);
@@ -1066,13 +1255,14 @@ static void init_mmcsd_host(void)
mmcsd_regs->mmc_ctl = mmcsd_regs->mmc_ctl & ~(0x1);
mmcsd_regs->mmc_ctl = mmcsd_regs->mmc_ctl & ~(1 << 1);
+ udelay(10);
}
static int davinci_mmcsd_probe(struct platform_device *pdev)
{
struct mmc_davinci_host *host;
struct mmc_host *mmc;
- int ret;
+ int ret = 0;
mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev);
if (!mmc) {
@@ -1085,14 +1275,44 @@ static int davinci_mmcsd_probe(struct pl
init_mmcsd_host();
+ if (mmcsd_cfg.use_4bit_mode) {
+ dev_warn(&mmc_dev, "Supporting 4-bit mode\n");
+ mmc->caps |= MMC_CAP_4_BIT_DATA;
+ } else
+ dev_warn(&mmc_dev, "Not Supporting 4-bit mode\n");
+
mmc->ops = &mmc_davinci_ops;
mmc->f_min = 312500;
- mmc->f_max = 20000000;
+#ifdef CONFIG_MMC_HIGHSPEED
+ mmc->f_max = 50000000;
+#else
+ mmc->f_max = 25000000;
+#endif
mmc->ocr_avail = MMC_VDD_32_33;
+ mmc->max_phys_segs = 2;
+ mmc->max_hw_segs = 2;
+ mmc->max_blk_size = 4095; /* BLEN is 11 bits */
+ mmc->max_blk_count = 65535; /* NBLK is 16 bits */
+ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
+ mmc->max_seg_size = mmc->max_req_size;
+
+ dev_dbg(&mmc_dev, "max_phys_segs=%d\n", mmc->max_phys_segs);
+ dev_dbg(&mmc_dev, "max_hw_segs=%d\n", mmc->max_hw_segs);
+ dev_dbg(&mmc_dev, "max_blk_size=%d\n", mmc->max_blk_size);
+ dev_dbg(&mmc_dev, "max_req_size=%d\n", mmc->max_req_size);
+ dev_dbg(&mmc_dev, "max_seg_size=%d\n", mmc->max_seg_size);
+
host = mmc_priv(mmc);
host->mmc = mmc; /* Important */
+ if (mmcsd_cfg.use_dma) {
+ dev_dbg(&mmc_dev, "Using DMA mode\n");
+ if (davinci_acquire_dma_channels(host) != 0)
+ goto out;
+ } else
+ dev_dbg(&mmc_dev, "Not Using DMA mode\n");
+
host->use_dma = mmcsd_cfg.use_dma;
host->irq = MMCINT_INTERRUPT;
host->sd_support = 1;
@@ -1122,13 +1342,20 @@ out:
static int davinci_mmcsd_remove(struct platform_device *pdev)
{
struct mmc_davinci_host *host = platform_get_drvdata(pdev);
+ unsigned long flags;
platform_set_drvdata(pdev, NULL);
- mmc_remove_host(host->mmc);
- free_irq(host->irq, host);
- del_timer(&host->timer);
- davinci_free_dma(DAVINCI_DMA_MMCTXEVT);
- davinci_free_dma(DAVINCI_DMA_MMCRXEVT);
+ if (host) {
+ mmc_remove_host(host->mmc);
+ free_irq(host->irq, host);
+
+ spin_lock_irqsave(&mmc_lock, flags);
+ del_timer(&host->timer);
+ spin_unlock_irqrestore(&mmc_lock, flags);
+
+ davinci_release_dma_channels(host);
+ }
+
return 0;
}
@@ -1222,14 +1449,26 @@ free1:
platform_device_unregister(&mmc_davinci_device);
}
+ if (clkp) {
+ mmc_clkp = NULL;
+ clk_put(clkp);
+ }
+
return -ENODEV;
}
static void __exit davinci_mmcsd_exit(void)
{
+ struct clk *clkp;
+
platform_driver_unregister(&davinci_mmcsd_driver);
platform_device_unregister(&mmc_davinci_device);
- clk_disable(mmc_clkp);
+
+ clkp = mmc_clkp;
+ mmc_clkp = NULL;
+
+ clk_disable(clkp);
+ clk_put(clkp);
}
module_init(davinci_mmcsd_init);
diff --git a/drivers/mmc/host/davinci_mmc.h b/drivers/mmc/host/davinci_mmc.h
index a6c4557..5840ccf 100644
--- a/drivers/mmc/host/davinci_mmc.h
+++ b/drivers/mmc/host/davinci_mmc.h
@@ -132,6 +132,7 @@ #define DAVINCI_MMC_DATADIR_WRITE 2
int power_pin;
int use_dma;
+ int do_dma;
struct completion dma_completion;
struct timer_list timer;
@@ -143,10 +144,16 @@ #define DAVINCI_MMC_DATADIR_WRITE 2
edma_ch_mmcsd edma_ch_details;
+ unsigned int sg_len;
+ int sg_idx;
+ unsigned int buffer_bytes_left;
+ unsigned int dma_len;
+ int dma_state;
};
typedef struct {
unsigned short rw_threshold;
unsigned short use_dma;
+ unsigned short use_4bit_mode;
} mmcsd_config_def;
typedef enum {
@@ -168,4 +175,20 @@ #define MMCSD_EVENT_CRC_ERROR \
#define MMCSD_EVENT_ERROR \
(MMCSD_EVENT_TIMEOUT_ERROR | MMCSD_EVENT_CRC_ERROR)
+static void init_mmcsd_host(void);
+
+static void davinci_fifo_data_trans(struct mmc_davinci_host *host);
+
+static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host);
+
+static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,
+ struct mmc_request *req);
+
+static void mmc_davinci_xfer_done(struct mmc_davinci_host *host,
+ struct mmc_data *data);
+
+static int mmc_davinci_get_ro(struct mmc_host *mmc);
+
+static void davinci_abort_dma(struct mmc_davinci_host *host);
+
#endif /* DAVINCI_MMC_H_ */
_______________________________________________
Davinci-linux-open-source mailing list
[email protected]
http://linux.davincidsp.com/mailman/listinfo/davinci-linux-open-source