mmc: mmci: implement pre_req() and post_req()
pre_req() runs dma_map_sg() and prepares the dma descriptor for the next mmc data transfer. post_req() runs dma_unmap_sg(). If pre_req() is not called before mmci_request(), mmci_request() prepares the cache and dma just as it did before. Use of pre_req() and post_req() is optional for mmci.

Signed-off-by: Per Forlin <per.forlin@linaro.org>
Tested-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Chris Ball <cjb@laptop.org>
parent 9782aff8df
commit 58c7ccbf91
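As background to the commit message, the sketch below illustrates (in plain C, outside the kernel) the double-buffering flow these hooks make possible. It is not the mmc core or mmci code: struct req, struct host_ops, wait_done() and issue_pipeline() are invented stand-ins; only the pre_req()/request()/post_req() ordering mirrors the hooks described above.

/*
 * Illustrative sketch only -- not mmc core or mmci driver code.
 * struct req, struct host_ops, wait_done() and issue_pipeline() are
 * made-up stand-ins. The point is the ordering: pre_req() maps the
 * scatterlist and builds the DMA descriptor for the *next* transfer
 * while the current one is still running, and post_req() unmaps the
 * finished transfer afterwards.
 */
#include <stddef.h>

struct req;                                     /* stand-in for struct mmc_request */

struct host_ops {
        void (*pre_req)(struct req *next, int is_first);  /* dma_map_sg() + descriptor */
        void (*request)(struct req *cur);                 /* start the transfer */
        int  (*wait_done)(struct req *cur);               /* wait for completion, return error */
        void (*post_req)(struct req *done, int err);      /* dma_unmap_sg(), drop cookie */
};

/* Issue n requests, preparing request i while request i-1 is still in flight. */
static void issue_pipeline(const struct host_ops *ops, struct req **reqs, size_t n)
{
        struct req *prev = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                int err = 0;

                ops->pre_req(reqs[i], prev == NULL);  /* overlaps the running transfer */
                if (prev)
                        err = ops->wait_done(prev);   /* the controller is single-issue */
                ops->request(reqs[i]);                /* hardware starts the new transfer */
                if (prev)
                        ops->post_req(prev, err);     /* unmap prev while the new one runs */
                prev = reqs[i];
        }
        if (prev)
                ops->post_req(prev, ops->wait_done(prev));  /* drain the last request */
}

In the mmci changes below, the cookie stored in data->host_cookie is what lets mmci_request() tell whether pre_req() already prepared the job, or whether it must map and prepare synchronously as before.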
drivers/mmc/host/mmci.c

@@ -226,6 +226,9 @@ static void __devinit mmci_dma_setup(struct mmci_host *host)
 		return;
 	}
 
+	/* initialize pre request cookie */
+	host->next_data.cookie = 1;
+
 	/* Try to acquire a generic DMA engine slave channel */
 	dma_cap_zero(mask);
 	dma_cap_set(DMA_SLAVE, mask);
@@ -335,7 +338,8 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
 		dir = DMA_FROM_DEVICE;
 	}
 
-	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+	if (!data->host_cookie)
+		dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 
 	/*
 	 * Use of DMA with scatter-gather is impossible.
@@ -353,7 +357,8 @@ static void mmci_dma_data_error(struct mmci_host *host)
 	dmaengine_terminate_all(host->dma_current);
 }
 
-static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+			      struct mmci_host_next *next)
 {
 	struct variant_data *variant = host->variant;
 	struct dma_slave_config conf = {
@@ -364,13 +369,20 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
 		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
 	};
-	struct mmc_data *data = host->data;
 	struct dma_chan *chan;
 	struct dma_device *device;
 	struct dma_async_tx_descriptor *desc;
 	int nr_sg;
 
-	host->dma_current = NULL;
+	/* Check if next job is already prepared */
+	if (data->host_cookie && !next &&
+	    host->dma_current && host->dma_desc_current)
+		return 0;
+
+	if (!next) {
+		host->dma_current = NULL;
+		host->dma_desc_current = NULL;
+	}
 
 	if (data->flags & MMC_DATA_READ) {
 		conf.direction = DMA_FROM_DEVICE;
@@ -385,7 +397,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 		return -EINVAL;
 
 	/* If less than or equal to the fifo size, don't bother with DMA */
-	if (host->size <= variant->fifosize)
+	if (data->blksz * data->blocks <= variant->fifosize)
 		return -EINVAL;
 
 	device = chan->device;
@@ -399,14 +411,38 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 	if (!desc)
 		goto unmap_exit;
 
-	/* Okay, go for it. */
-	host->dma_current = chan;
+	if (next) {
+		next->dma_chan = chan;
+		next->dma_desc = desc;
+	} else {
+		host->dma_current = chan;
+		host->dma_desc_current = desc;
+	}
+
+	return 0;
+
+unmap_exit:
+	if (!next)
+		dmaengine_terminate_all(chan);
+	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+	return -ENOMEM;
+}
+
+static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
+{
+	int ret;
+	struct mmc_data *data = host->data;
+
+	ret = mmci_dma_prep_data(host, host->data, NULL);
+	if (ret)
+		return ret;
 
+	/* Okay, go for it. */
 	dev_vdbg(mmc_dev(host->mmc),
 		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
 		 data->sg_len, data->blksz, data->blocks, data->flags);
-	dmaengine_submit(desc);
-	dma_async_issue_pending(chan);
+	dmaengine_submit(host->dma_desc_current);
+	dma_async_issue_pending(host->dma_current);
 
 	datactrl |= MCI_DPSM_DMAENABLE;
 
@@ -421,14 +457,90 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
 	       host->base + MMCIMASK0);
 	return 0;
-
-unmap_exit:
-	dmaengine_terminate_all(chan);
-	dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
-	return -ENOMEM;
 }
+
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+	struct mmci_host_next *next = &host->next_data;
+
+	if (data->host_cookie && data->host_cookie != next->cookie) {
+		printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
+		       " host->next_data.cookie %d\n",
+		       __func__, data->host_cookie, host->next_data.cookie);
+		data->host_cookie = 0;
+	}
+
+	if (!data->host_cookie)
+		return;
+
+	host->dma_desc_current = next->dma_desc;
+	host->dma_current = next->dma_chan;
+
+	next->dma_desc = NULL;
+	next->dma_chan = NULL;
+}
+
+static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
+			     bool is_first_req)
+{
+	struct mmci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+	struct mmci_host_next *nd = &host->next_data;
+
+	if (!data)
+		return;
+
+	if (data->host_cookie) {
+		data->host_cookie = 0;
+		return;
+	}
+
+	/* if config for dma */
+	if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
+	    ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
+		if (mmci_dma_prep_data(host, data, nd))
+			data->host_cookie = 0;
+		else
+			data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
+	}
+}
+
+static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
+			      int err)
+{
+	struct mmci_host *host = mmc_priv(mmc);
+	struct mmc_data *data = mrq->data;
+	struct dma_chan *chan;
+	enum dma_data_direction dir;
+
+	if (!data)
+		return;
+
+	if (data->flags & MMC_DATA_READ) {
+		dir = DMA_FROM_DEVICE;
+		chan = host->dma_rx_channel;
+	} else {
+		dir = DMA_TO_DEVICE;
+		chan = host->dma_tx_channel;
+	}
+
+
+	/* if config for dma */
+	if (chan) {
+		if (err)
+			dmaengine_terminate_all(chan);
+		if (err || data->host_cookie)
+			dma_unmap_sg(mmc_dev(host->mmc), data->sg,
+				     data->sg_len, dir);
+		mrq->data->host_cookie = 0;
+	}
+}
+
 #else
 /* Blank functions if the DMA engine is not available */
+static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
+{
+}
 static inline void mmci_dma_setup(struct mmci_host *host)
 {
 }
@@ -449,6 +561,10 @@ static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 {
 	return -ENOSYS;
 }
+
+#define mmci_pre_request NULL
+#define mmci_post_request NULL
+
 #endif
 
 static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
@@ -872,6 +988,9 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 
 	host->mrq = mrq;
 
+	if (mrq->data)
+		mmci_get_next_data(host, mrq->data);
+
 	if (mrq->data && mrq->data->flags & MMC_DATA_READ)
 		mmci_start_data(host, mrq->data);
 
@@ -986,6 +1105,8 @@ static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
 
 static const struct mmc_host_ops mmci_ops = {
 	.request = mmci_request,
+	.pre_req = mmci_pre_request,
+	.post_req = mmci_post_request,
 	.set_ios = mmci_set_ios,
 	.get_ro = mmci_get_ro,
 	.get_cd = mmci_get_cd,

drivers/mmc/host/mmci.h

@@ -166,6 +166,12 @@ struct clk;
 struct variant_data;
 struct dma_chan;
 
+struct mmci_host_next {
+	struct dma_async_tx_descriptor *dma_desc;
+	struct dma_chan *dma_chan;
+	s32 cookie;
+};
+
 struct mmci_host {
 	phys_addr_t phybase;
 	void __iomem *base;
@@ -203,6 +209,8 @@ struct mmci_host {
 	struct dma_chan *dma_current;
 	struct dma_chan *dma_rx_channel;
 	struct dma_chan *dma_tx_channel;
+	struct dma_async_tx_descriptor *dma_desc_current;
+	struct mmci_host_next next_data;
 
 #define dma_inprogress(host) ((host)->dma_current)
 #else