ASoC: rsnd: use dma_sync_single_for_xxx() for IOMMU
Using an IOMMU requires going through the DMA mapping API. One solution would be to hand the DMA-mapped device to snd_pcm_lib_preallocate_pages_for_all() with SNDRV_DMA_TYPE_DEV, but the timing of pcm_new and of the DMA mapping does not match. This patch therefore uses SNDRV_DMA_TYPE_CONTINUOUS for pcm_new, and calls dma_sync_single_for_xxx() around each transfer instead.

Signed-off-by: Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
commit 4821d914fe
parent edce5c496c
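For background, the streaming-DMA usage the patch switches to is the standard one from the DMA mapping API: map the whole audio buffer once with dma_map_single(), hand each period over to the device with dma_sync_single_for_device() before the hardware touches it, take it back with dma_sync_single_for_cpu() before the CPU touches it again, and unmap on stop. The sketch below is a minimal illustration of that ownership handoff, not rsnd code; struct audio_buf_example and the example_*() helpers are hypothetical names.

#include <linux/device.h>
#include <linux/dma-mapping.h>

/* Hypothetical example of per-period CPU/device ownership handoff. */
struct audio_buf_example {
	struct device	*dev;		/* device doing the DMA */
	void		*area;		/* CPU virtual address of the buffer */
	dma_addr_t	addr;		/* DMA address from dma_map_single() */
	size_t		len;		/* whole buffer size in bytes */
	size_t		period;		/* bytes per period */
	enum dma_data_direction dir;	/* DMA_TO_DEVICE (play), DMA_FROM_DEVICE (capture) */
};

/* Map the whole buffer once when the stream starts. */
static int example_start(struct audio_buf_example *b)
{
	b->addr = dma_map_single(b->dev, b->area, b->len, b->dir);
	if (dma_mapping_error(b->dev, b->addr))
		return -EIO;

	return 0;
}

/* Called after period i has been consumed/produced by the hardware. */
static void example_period_elapsed(struct audio_buf_example *b, unsigned int i)
{
	unsigned int max = b->len / b->period;
	dma_addr_t buf = b->addr + b->period * (i % max);

	/* hand the finished period back to the CPU ... */
	dma_sync_single_for_cpu(b->dev, buf, b->period, b->dir);

	/* ... the CPU refills (playback) or drains (capture) it here ... */

	/* ... then give it back to the device for the next lap */
	dma_sync_single_for_device(b->dev, buf, b->period, b->dir);
}

/* Unmap when the stream stops. */
static void example_stop(struct audio_buf_example *b)
{
	dma_unmap_single(b->dev, b->addr, b->len, b->dir);
}

In the driver itself the map happens in rsnd_dmaen_start(), the per-period syncs in __rsnd_dmaen_complete(), and the unmap in rsnd_dmaen_stop(), as the hunks below show.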
@@ -1126,8 +1126,8 @@ static int rsnd_pcm_new(struct snd_soc_pcm_runtime *rtd)
 
 	return snd_pcm_lib_preallocate_pages_for_all(
 		rtd->pcm,
-		SNDRV_DMA_TYPE_DEV,
-		rtd->card->snd_card->dev,
+		SNDRV_DMA_TYPE_CONTINUOUS,
+		snd_dma_continuous_data(GFP_KERNEL),
 		PREALLOC_BUFFER, PREALLOC_BUFFER_MAX);
 }
@@ -25,6 +25,10 @@
 
 struct rsnd_dmaen {
 	struct dma_chan		*chan;
+	dma_addr_t		dma_buf;
+	unsigned int		dma_len;
+	unsigned int		dma_period;
+	unsigned int		dma_cnt;
 };
 
 struct rsnd_dmapp {
@@ -58,10 +62,38 @@ struct rsnd_dma_ctrl {
 /*
  *		Audio DMAC
  */
+#define rsnd_dmaen_sync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 1)
+#define rsnd_dmaen_unsync(dmaen, io, i)	__rsnd_dmaen_sync(dmaen, io, i, 0)
+static void __rsnd_dmaen_sync(struct rsnd_dmaen *dmaen, struct rsnd_dai_stream *io,
+			      int i, int sync)
+{
+	struct device *dev = dmaen->chan->device->dev;
+	enum dma_data_direction dir;
+	int is_play = rsnd_io_is_play(io);
+	dma_addr_t buf;
+	int len, max;
+	size_t period;
+
+	len	= dmaen->dma_len;
+	period	= dmaen->dma_period;
+	max	= len / period;
+	i	= i % max;
+	buf	= dmaen->dma_buf + (period * i);
+
+	dir = is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+
+	if (sync)
+		dma_sync_single_for_device(dev, buf, period, dir);
+	else
+		dma_sync_single_for_cpu(dev, buf, period, dir);
+}
+
 static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
 				  struct rsnd_dai_stream *io)
 {
 	struct rsnd_priv *priv = rsnd_mod_to_priv(mod);
 	struct rsnd_dma *dma = rsnd_mod_to_dma(mod);
 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
 	bool elapsed = false;
 	unsigned long flags;
@@ -78,9 +110,22 @@ static void __rsnd_dmaen_complete(struct rsnd_mod *mod,
 	 */
 	spin_lock_irqsave(&priv->lock, flags);
 
-	if (rsnd_io_is_working(io))
+	if (rsnd_io_is_working(io)) {
+		rsnd_dmaen_unsync(dmaen, io, dmaen->dma_cnt);
+
+		/*
+		 * Next period is already started.
+		 * Let's sync Next Next period
+		 * see
+		 *	rsnd_dmaen_start()
+		 */
+		rsnd_dmaen_sync(dmaen, io, dmaen->dma_cnt + 2);
+
 		elapsed = rsnd_dai_pointer_update(io, io->byte_per_period);
 
+		dmaen->dma_cnt++;
+	}
+
 	spin_unlock_irqrestore(&priv->lock, flags);
 
 	if (elapsed)
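To make the "Next Next period" bookkeeping concrete (the numbers below are illustrative, not from the driver): rsnd_dmaen_start() pre-syncs periods 0 and 1, so when period dma_cnt completes the handler hands that period back to the CPU and pushes period dma_cnt + 2, the one after the period already in flight, to the device; the i % max in __rsnd_dmaen_sync() wraps the index around the ring. A hypothetical helper showing just that address arithmetic:

#include <linux/types.h>

/* Hypothetical: address of the "next next" period once period dma_cnt ends. */
static dma_addr_t example_next_sync_addr(dma_addr_t dma_buf, unsigned int dma_len,
					 unsigned int dma_period, unsigned int dma_cnt)
{
	unsigned int max = dma_len / dma_period;

	/*
	 * e.g. an 8-period ring: finishing period 7 syncs (7 + 2) % 8 = 1,
	 * i.e. the second period of the buffer is pushed to the device again.
	 */
	return dma_buf + dma_period * ((dma_cnt + 2) % max);
}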
@@ -116,7 +161,12 @@ static int rsnd_dmaen_stop(struct rsnd_mod *mod,
 	struct rsnd_dmaen *dmaen = rsnd_dma_to_dmaen(dma);
 
 	if (dmaen->chan) {
+		int is_play = rsnd_io_is_play(io);
+
 		dmaengine_terminate_all(dmaen->chan);
+		dma_unmap_single(dmaen->chan->device->dev,
+				 dmaen->dma_buf, dmaen->dma_len,
+				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 	}
 
 	return 0;
@@ -184,7 +234,11 @@ static int rsnd_dmaen_start(struct rsnd_mod *mod,
 	struct device *dev = rsnd_priv_to_dev(priv);
 	struct dma_async_tx_descriptor *desc;
 	struct dma_slave_config cfg = {};
+	dma_addr_t buf;
+	size_t len;
+	size_t period;
 	int is_play = rsnd_io_is_play(io);
+	int i;
 	int ret;
 
 	cfg.direction = is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
@@ -201,10 +255,19 @@
 	if (ret < 0)
 		return ret;
 
+	len	= snd_pcm_lib_buffer_bytes(substream);
+	period	= snd_pcm_lib_period_bytes(substream);
+	buf	= dma_map_single(dmaen->chan->device->dev,
+				 substream->runtime->dma_area,
+				 len,
+				 is_play ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+	if (dma_mapping_error(dmaen->chan->device->dev, buf)) {
+		dev_err(dev, "dma map failed\n");
+		return -EIO;
+	}
+
 	desc = dmaengine_prep_dma_cyclic(dmaen->chan,
-					 substream->runtime->dma_addr,
-					 snd_pcm_lib_buffer_bytes(substream),
-					 snd_pcm_lib_period_bytes(substream),
+					 buf, len, period,
 					 is_play ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
 					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
@@ -216,6 +279,19 @@
 	desc->callback		= rsnd_dmaen_complete;
 	desc->callback_param	= rsnd_mod_get(dma);
 
+	dmaen->dma_buf		= buf;
+	dmaen->dma_len		= len;
+	dmaen->dma_period	= period;
+	dmaen->dma_cnt		= 0;
+
+	/*
+	 * synchronize this and next period
+	 * see
+	 *	__rsnd_dmaen_complete()
+	 */
+	for (i = 0; i < 2; i++)
+		rsnd_dmaen_sync(dmaen, io, i);
+
 	if (dmaengine_submit(desc) < 0) {
 		dev_err(dev, "dmaengine_submit() fail\n");
 		return -EIO;