/*
 * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/ioport.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/dma-mapping.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#include <asm/io.h>
#include <asm/irq.h>
#include <asm/delay.h>

MODULE_AUTHOR("Stephen Street");
MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:pxa2xx-spi");

#define MAX_BUSES 3

#define TIMOUT_DFLT		1000

#define DMA_INT_MASK		(DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
#define RESET_DMA_CHANNEL	(DCSR_NODESC | DMA_INT_MASK)
#define IS_DMA_ALIGNED(x)	((((u32)(x)) & 0x07) == 0)
#define MAX_DMA_LEN		8191
#define DMA_ALIGNMENT		8

/*
 * For testing SSCR1 changes that require an SSP restart: basically
 * everything except the service and interrupt enables.  The PXA270
 * developer manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need
 * to be in this list, but the PXA255 developer manual says all bits
 * (without really meaning the service and interrupt enables).
 */
#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
				| SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
				| SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
				| SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
				| SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
				| SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)

#define DEFINE_SSP_REG(reg, off) \
static inline u32 read_##reg(void const __iomem *p) \
{ return __raw_readl(p + (off)); } \
\
static inline void write_##reg(u32 v, void __iomem *p) \
{ __raw_writel(v, p + (off)); }

DEFINE_SSP_REG(SSCR0, 0x00)
DEFINE_SSP_REG(SSCR1, 0x04)
DEFINE_SSP_REG(SSSR, 0x08)
DEFINE_SSP_REG(SSITR, 0x0c)
DEFINE_SSP_REG(SSDR, 0x10)
DEFINE_SSP_REG(SSTO, 0x28)
DEFINE_SSP_REG(SSPSP, 0x2c)

#define START_STATE ((void *)0)
#define RUNNING_STATE ((void *)1)
#define DONE_STATE ((void *)2)
#define ERROR_STATE ((void *)-1)

#define QUEUE_RUNNING 0
#define QUEUE_STOPPED 1

struct driver_data {
	/* Driver model hookup */
	struct platform_device *pdev;

	/* SSP Info */
	struct ssp_device *ssp;

	/* SPI framework hookup */
	enum pxa_ssp_type ssp_type;
	struct spi_master *master;

	/* PXA hookup */
	struct pxa2xx_spi_master *master_info;

	/* DMA setup stuff */
	int rx_channel;
	int tx_channel;
	u32 *null_dma_buf;

	/* SSP register addresses */
	void __iomem *ioaddr;
	u32 ssdr_physical;

	/* SSP masks */
	u32 dma_cr1;
	u32 int_cr1;
	u32 clear_sr;
	u32 mask_sr;

	/* Driver message queue */
	struct workqueue_struct *workqueue;
	struct work_struct pump_messages;
	spinlock_t lock;
	struct list_head queue;
	int busy;
	int run;

	/* Message Transfer pump */
	struct tasklet_struct pump_transfers;

	/* Current message transfer state info */
	struct spi_message *cur_msg;
	struct spi_transfer *cur_transfer;
	struct chip_data *cur_chip;
	size_t len;
	void *tx;
	void *tx_end;
	void *rx;
	void *rx_end;
	int dma_mapped;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	size_t rx_map_len;
	size_t tx_map_len;
	u8 n_bytes;
	u32 dma_width;
	int (*write)(struct driver_data *drv_data);
	int (*read)(struct driver_data *drv_data);
	irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);
};

struct chip_data {
	u32 cr0;
	u32 cr1;
	u32 psp;
	u32 timeout;
	u8 n_bytes;
	u32 dma_width;
	u32 dma_burst_size;
	u32 threshold;
	u32 dma_threshold;
	u8 enable_dma;
	u8 bits_per_word;
	u32 speed_hz;
	union {
		int gpio_cs;
		unsigned int frm;
	};
	int gpio_cs_inverted;
	int (*write)(struct driver_data *drv_data);
	int (*read)(struct driver_data *drv_data);
	void (*cs_control)(u32 command);
};

static void pump_messages(struct work_struct *work);
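
/*
 * Chip select helpers.  On the CE4100 SSP the frame select lives in
 * SSSR, so cs_assert() programs the chosen frame there; otherwise a
 * board-supplied cs_control() callback or a plain GPIO is used,
 * honoring the SPI_CS_HIGH polarity recorded in gpio_cs_inverted.
 */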
static void cs_assert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP) {
		write_SSSR(drv_data->cur_chip->frm, drv_data->ioaddr);
		return;
	}

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_ASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs))
		gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
}

static void cs_deassert(struct driver_data *drv_data)
{
	struct chip_data *chip = drv_data->cur_chip;

	if (drv_data->ssp_type == CE4100_SSP)
		return;

	if (chip->cs_control) {
		chip->cs_control(PXA2XX_CS_DEASSERT);
		return;
	}

	if (gpio_is_valid(chip->gpio_cs))
		gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
}
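
/*
 * Write SSSR.  On CE4100 the frame-select bits (SSSR_ALT_FRM_MASK)
 * share this register, so they are read back and preserved whenever
 * sticky status flags are cleared.
 */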
static void write_SSSR_CS(struct driver_data *drv_data, u32 val)
{
	void __iomem *reg = drv_data->ioaddr;

	if (drv_data->ssp_type == CE4100_SSP)
		val |= read_SSSR(reg) & SSSR_ALT_FRM_MASK;

	write_SSSR(val, reg);
}

static int pxa25x_ssp_comp(struct driver_data *drv_data)
{
	if (drv_data->ssp_type == PXA25x_SSP)
		return 1;
	if (drv_data->ssp_type == CE4100_SSP)
		return 1;
	return 0;
}
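
/*
 * Drain the receive FIFO and wait for the port to go idle; returns 0
 * if the busy bit did not clear within the polling budget.
 */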
static int flush(struct driver_data *drv_data)
{
	unsigned long limit = loops_per_jiffy << 1;

	void __iomem *reg = drv_data->ioaddr;

	do {
		while (read_SSSR(reg) & SSSR_RNE) {
			read_SSDR(reg);
		}
	} while ((read_SSSR(reg) & SSSR_BSY) && --limit);
	write_SSSR_CS(drv_data, SSSR_ROR);

	return limit;
}

static int null_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(0, reg);
	drv_data->tx += n_bytes;

	return 1;
}

static int null_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	u8 n_bytes = drv_data->n_bytes;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		read_SSDR(reg);
		drv_data->rx += n_bytes;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u8_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u8 *)(drv_data->tx), reg);
	++drv_data->tx;

	return 1;
}

static int u8_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u8 *)(drv_data->rx) = read_SSDR(reg);
		++drv_data->rx;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u16_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u16 *)(drv_data->tx), reg);
	drv_data->tx += 2;

	return 1;
}

static int u16_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u16 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 2;
	}

	return drv_data->rx == drv_data->rx_end;
}

static int u32_writer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	if (((read_SSSR(reg) & SSSR_TFL_MASK) == SSSR_TFL_MASK)
		|| (drv_data->tx == drv_data->tx_end))
		return 0;

	write_SSDR(*(u32 *)(drv_data->tx), reg);
	drv_data->tx += 4;

	return 1;
}

static int u32_reader(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	while ((read_SSSR(reg) & SSSR_RNE)
		&& (drv_data->rx < drv_data->rx_end)) {
		*(u32 *)(drv_data->rx) = read_SSDR(reg);
		drv_data->rx += 4;
	}

	return drv_data->rx == drv_data->rx_end;
}

static void *next_transfer(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct spi_transfer *trans = drv_data->cur_transfer;

	/* Move to next transfer */
	if (trans->transfer_list.next != &msg->transfers) {
		drv_data->cur_transfer =
			list_entry(trans->transfer_list.next,
					struct spi_transfer,
					transfer_list);
		return RUNNING_STATE;
	} else
		return DONE_STATE;
}
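
/*
 * Prepare DMA mappings for the current transfer.  Returns 1 when DMA
 * can be used.  NULL tx/rx buffers are redirected to the scratch
 * null_dma_buf so a transfer can still be driven by DMA in only one
 * direction.
 */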
static int map_dma_buffers(struct driver_data *drv_data)
{
	struct spi_message *msg = drv_data->cur_msg;
	struct device *dev = &msg->spi->dev;

	if (!drv_data->cur_chip->enable_dma)
		return 0;

	if (msg->is_dma_mapped)
		return drv_data->rx_dma && drv_data->tx_dma;

	if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
		return 0;

	/* Modify setup if rx buffer is null */
	if (drv_data->rx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->rx = drv_data->null_dma_buf;
		drv_data->rx_map_len = 4;
	} else
		drv_data->rx_map_len = drv_data->len;

	/* Modify setup if tx buffer is null */
	if (drv_data->tx == NULL) {
		*drv_data->null_dma_buf = 0;
		drv_data->tx = drv_data->null_dma_buf;
		drv_data->tx_map_len = 4;
	} else
		drv_data->tx_map_len = drv_data->len;

	/* Stream map the tx buffer. Always do DMA_TO_DEVICE first
	 * so we flush the cache *before* invalidating it, in case
	 * the tx and rx buffers overlap.
	 */
	drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
					drv_data->tx_map_len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, drv_data->tx_dma))
		return 0;

	/* Stream map the rx buffer */
	drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
					drv_data->rx_map_len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, drv_data->rx_dma)) {
		dma_unmap_single(dev, drv_data->tx_dma,
				drv_data->tx_map_len, DMA_TO_DEVICE);
		return 0;
	}

	return 1;
}

static void unmap_dma_buffers(struct driver_data *drv_data)
{
	struct device *dev;

	if (!drv_data->dma_mapped)
		return;

	if (!drv_data->cur_msg->is_dma_mapped) {
		dev = &drv_data->cur_msg->spi->dev;
		dma_unmap_single(dev, drv_data->rx_dma,
				drv_data->rx_map_len, DMA_FROM_DEVICE);
		dma_unmap_single(dev, drv_data->tx_dma,
				drv_data->tx_map_len, DMA_TO_DEVICE);
	}

	drv_data->dma_mapped = 0;
}

/* caller already set message->status; dma and pio irqs are blocked */
static void giveback(struct driver_data *drv_data)
{
	struct spi_transfer *last_transfer;
	unsigned long flags;
	struct spi_message *msg;

	spin_lock_irqsave(&drv_data->lock, flags);
	msg = drv_data->cur_msg;
	drv_data->cur_msg = NULL;
	drv_data->cur_transfer = NULL;
	queue_work(drv_data->workqueue, &drv_data->pump_messages);
	spin_unlock_irqrestore(&drv_data->lock, flags);

	last_transfer = list_entry(msg->transfers.prev,
					struct spi_transfer,
					transfer_list);

	/* Delay if requested before any change in chip select */
	if (last_transfer->delay_usecs)
		udelay(last_transfer->delay_usecs);

	/* Drop chip select UNLESS cs_change is true or we are returning
	 * a message with an error, or next message is for another chip
	 */
	if (!last_transfer->cs_change)
		cs_deassert(drv_data);
	else {
		struct spi_message *next_msg;

		/* Holding of cs was hinted, but we need to make sure
		 * the next message is for the same chip.  Don't waste
		 * time with the following tests unless this was hinted.
		 *
		 * We cannot postpone this until pump_messages, because
		 * after calling msg->complete (below) the driver that
		 * sent the current message could be unloaded, which
		 * could invalidate the cs_control() callback...
		 */

		/* get a pointer to the next message, if any */
		spin_lock_irqsave(&drv_data->lock, flags);
		if (list_empty(&drv_data->queue))
			next_msg = NULL;
		else
			next_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
		spin_unlock_irqrestore(&drv_data->lock, flags);

		/* see if the next and current messages point
		 * to the same chip
		 */
		if (next_msg && next_msg->spi != msg->spi)
			next_msg = NULL;
		if (!next_msg || msg->state == ERROR_STATE)
			cs_deassert(drv_data);
	}

	msg->state = NULL;
	if (msg->complete)
		msg->complete(msg->context);

	drv_data->cur_chip = NULL;
}

static int wait_ssp_rx_stall(void const __iomem *ioaddr)
{
	unsigned long limit = loops_per_jiffy << 1;

	while ((read_SSSR(ioaddr) & SSSR_BSY) && --limit)
		cpu_relax();

	return limit;
}

static int wait_dma_channel_stop(int channel)
{
	unsigned long limit = loops_per_jiffy << 1;

	while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
		cpu_relax();

	return limit;
}

static void dma_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset */
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	unmap_dma_buffers(drv_data);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct spi_message *msg = drv_data->cur_msg;

	/* Clear and disable interrupts on SSP and DMA channels */
	write_SSCR1(read_SSCR1(reg) & ~drv_data->dma_cr1, reg);
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
	DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;

	if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_handler: dma rx channel stop failed\n");

	if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
		dev_err(&drv_data->pdev->dev,
			"dma_transfer: ssp rx stall failed\n");

	unmap_dma_buffers(drv_data);

	/* update the buffer pointer for the amount completed in dma */
	drv_data->rx += drv_data->len -
			(DCMD(drv_data->rx_channel) & DCMD_LENGTH);

	/* read trailing data from fifo; it does not matter how many
	 * bytes are in the fifo, just read until the buffer is full
	 * or the fifo is empty, whichever occurs first */
	drv_data->read(drv_data);

	/* return count of what was actually read */
	msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static void dma_handler(int channel, void *data)
{
	struct driver_data *drv_data = data;
	u32 irq_status = DCSR(channel) & DMA_INT_MASK;

	if (irq_status & DCSR_BUSERR) {

		if (channel == drv_data->tx_channel)
			dma_error_stop(drv_data,
				"dma_handler: "
				"bad bus address on tx channel");
		else
			dma_error_stop(drv_data,
				"dma_handler: "
				"bad bus address on rx channel");
		return;
	}

	/* PXA25x SSP has no timeout interrupt; wait for trailing bytes */
	if ((channel == drv_data->tx_channel)
		&& (irq_status & DCSR_ENDINTR)
		&& (drv_data->ssp_type == PXA25x_SSP)) {

		/* Wait for rx to stall */
		if (wait_ssp_rx_stall(drv_data->ioaddr) == 0)
			dev_err(&drv_data->pdev->dev,
				"dma_handler: ssp rx stall failed\n");

		/* finish this transfer, start the next */
		dma_transfer_complete(drv_data);
	}
}

static irqreturn_t dma_transfer(struct driver_data *drv_data)
{
	u32 irq_status;
	void __iomem *reg = drv_data->ioaddr;

	irq_status = read_SSSR(reg) & drv_data->mask_sr;
	if (irq_status & SSSR_ROR) {
		dma_error_stop(drv_data, "dma_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	/* Check for false positive timeout */
	if ((irq_status & SSSR_TINT)
		&& (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
		write_SSSR(SSSR_TINT, reg);
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {

		/* Clear and disable timeout interrupt, do the rest in
		 * dma_transfer_complete */
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);

		/* finish this transfer, start the next */
		dma_transfer_complete(drv_data);

		return IRQ_HANDLED;
	}

	/* Oops, problem detected */
	return IRQ_NONE;
}
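
/*
 * Drop the interrupt enables from SSCR1 and restore the FIFO threshold
 * settings chosen for the current chip.
 */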
static void reset_sccr1(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;
	struct chip_data *chip = drv_data->cur_chip;
	u32 sccr1_reg;

	sccr1_reg = read_SSCR1(reg) & ~drv_data->int_cr1;
	sccr1_reg &= ~SSCR1_RFT;
	sccr1_reg |= chip->threshold;
	write_SSCR1(sccr1_reg, reg);
}

static void int_error_stop(struct driver_data *drv_data, const char *msg)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop and reset SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);
	flush(drv_data);
	write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);

	dev_err(&drv_data->pdev->dev, "%s\n", msg);

	drv_data->cur_msg->state = ERROR_STATE;
	tasklet_schedule(&drv_data->pump_transfers);
}

static void int_transfer_complete(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	/* Stop SSP */
	write_SSSR_CS(drv_data, drv_data->clear_sr);
	reset_sccr1(drv_data);
	if (!pxa25x_ssp_comp(drv_data))
		write_SSTO(0, reg);

	/* Update total bytes transferred; return count of bytes actually read */
	drv_data->cur_msg->actual_length += drv_data->len -
				(drv_data->rx_end - drv_data->rx);

	/* Transfer delays and chip select release are
	 * handled in pump_transfers or giveback
	 */

	/* Move to next transfer */
	drv_data->cur_msg->state = next_transfer(drv_data);

	/* Schedule transfer tasklet */
	tasklet_schedule(&drv_data->pump_transfers);
}

static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
{
	void __iomem *reg = drv_data->ioaddr;

	u32 irq_mask = (read_SSCR1(reg) & SSCR1_TIE) ?
			drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;

	u32 irq_status = read_SSSR(reg) & irq_mask;

	if (irq_status & SSSR_ROR) {
		int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
		return IRQ_HANDLED;
	}

	if (irq_status & SSSR_TINT) {
		write_SSSR(SSSR_TINT, reg);
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	}

	/* Drain rx fifo, fill tx fifo and prevent overruns */
	do {
		if (drv_data->read(drv_data)) {
			int_transfer_complete(drv_data);
			return IRQ_HANDLED;
		}
	} while (drv_data->write(drv_data));

	if (drv_data->read(drv_data)) {
		int_transfer_complete(drv_data);
		return IRQ_HANDLED;
	}

	if (drv_data->tx == drv_data->tx_end) {
		u32 bytes_left;
		u32 sccr1_reg;

		sccr1_reg = read_SSCR1(reg);
		sccr1_reg &= ~SSCR1_TIE;

		/*
		 * PXA25x_SSP has no timeout, set up rx threshold for the
		 * remaining RX bytes.
		 */
		if (pxa25x_ssp_comp(drv_data)) {

			sccr1_reg &= ~SSCR1_RFT;

			bytes_left = drv_data->rx_end - drv_data->rx;
			switch (drv_data->n_bytes) {
			case 4:
				bytes_left >>= 1;
			case 2:
				bytes_left >>= 1;
			}

			if (bytes_left > RX_THRESH_DFLT)
				bytes_left = RX_THRESH_DFLT;

			sccr1_reg |= SSCR1_RxTresh(bytes_left);
		}
		write_SSCR1(sccr1_reg, reg);
	}

	/* We did something */
	return IRQ_HANDLED;
}

static irqreturn_t ssp_int(int irq, void *dev_id)
{
	struct driver_data *drv_data = dev_id;
	void __iomem *reg = drv_data->ioaddr;
	u32 sccr1_reg = read_SSCR1(reg);
	u32 mask = drv_data->mask_sr;
	u32 status;

	status = read_SSSR(reg);

	/* Ignore possible writes if we don't need to write */
	if (!(sccr1_reg & SSCR1_TIE))
		mask &= ~SSSR_TFS;

	if (!(status & mask))
		return IRQ_NONE;

	if (!drv_data->cur_msg) {

		write_SSCR0(read_SSCR0(reg) & ~SSCR0_SSE, reg);
		write_SSCR1(read_SSCR1(reg) & ~drv_data->int_cr1, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(0, reg);
		write_SSSR_CS(drv_data, drv_data->clear_sr);

		dev_err(&drv_data->pdev->dev, "bad message state "
			"in interrupt handler\n");

		/* Never fail */
		return IRQ_HANDLED;
	}

	return drv_data->transfer_handler(drv_data);
}

static int set_dma_burst_and_threshold(struct chip_data *chip,
				struct spi_device *spi,
				u8 bits_per_word, u32 *burst_code,
				u32 *threshold)
{
	struct pxa2xx_spi_chip *chip_info =
			(struct pxa2xx_spi_chip *)spi->controller_data;
	int bytes_per_word;
	int burst_bytes;
	int thresh_words;
	int req_burst_size;
	int retval = 0;

	/* Set the threshold (in registers) to equal the same amount of data
	 * as represented by burst size (in bytes).  The computation below
	 * is (burst_size rounded up to nearest 8 byte, word or long word)
	 * divided by (bytes/register); the tx threshold is the inverse of
	 * the rx, so that there will always be enough data in the rx fifo
	 * to satisfy a burst, and there will always be enough space in the
	 * tx fifo to accept a burst (a tx burst will overwrite the fifo if
	 * there is not enough space), there must always remain enough empty
	 * space in the rx fifo for any data loaded to the tx fifo.
	 * Whenever burst_size (in bytes) equals bits/word, the fifo threshold
	 * will be 8, or half the fifo;
	 * The threshold can only be set to 2, 4 or 8, but not 16, because
	 * to burst 16 to the tx fifo, the fifo would have to be empty;
	 * however, the minimum fifo trigger level is 1, and the tx will
	 * request service when the fifo is at this level, with only 15 spaces.
	 */

	/* find bytes/word */
	if (bits_per_word <= 8)
		bytes_per_word = 1;
	else if (bits_per_word <= 16)
		bytes_per_word = 2;
	else
		bytes_per_word = 4;

	/* use struct pxa2xx_spi_chip->dma_burst_size if available */
	if (chip_info)
		req_burst_size = chip_info->dma_burst_size;
	else {
		switch (chip->dma_burst_size) {
		default:
			/* if the default burst size is not set,
			 * do it now */
			chip->dma_burst_size = DCMD_BURST8;
		case DCMD_BURST8:
			req_burst_size = 8;
			break;
		case DCMD_BURST16:
			req_burst_size = 16;
			break;
		case DCMD_BURST32:
			req_burst_size = 32;
			break;
		}
	}
	if (req_burst_size <= 8) {
		*burst_code = DCMD_BURST8;
		burst_bytes = 8;
	} else if (req_burst_size <= 16) {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
		}
	} else {
		if (bytes_per_word == 1) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST8;
			burst_bytes = 8;
			retval = 1;
		} else if (bytes_per_word == 2) {
			/* don't burst more than 1/2 the fifo */
			*burst_code = DCMD_BURST16;
			burst_bytes = 16;
			retval = 1;
		} else {
			*burst_code = DCMD_BURST32;
			burst_bytes = 32;
		}
	}

	thresh_words = burst_bytes / bytes_per_word;

	/* thresh_words will be between 2 and 8 */
	*threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
			| (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);

	return retval;
}
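
/*
 * Compute the SSCR0 serial-clock divider field for the requested rate.
 * PXA25x and CE4100 ports run the bit clock at ssp_clk / (2 * (SCR + 1))
 * with an 8-bit SCR; later ports use ssp_clk / (SCR + 1) with a 12-bit
 * field.  The value is returned already shifted into its SSCR0 position.
 */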
static unsigned int ssp_get_clk_div(struct ssp_device *ssp, int rate)
{
	unsigned long ssp_clk = clk_get_rate(ssp->clk);

	if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
		return ((ssp_clk / (2 * rate) - 1) & 0xff) << 8;
	else
		return ((ssp_clk / rate - 1) & 0xfff) << 8;
}

static void pump_transfers(unsigned long data)
{
	struct driver_data *drv_data = (struct driver_data *)data;
	struct spi_message *message = NULL;
	struct spi_transfer *transfer = NULL;
	struct spi_transfer *previous = NULL;
	struct chip_data *chip = NULL;
	struct ssp_device *ssp = drv_data->ssp;
	void __iomem *reg = drv_data->ioaddr;
	u32 clk_div = 0;
	u8 bits = 0;
	u32 speed = 0;
	u32 cr0;
	u32 cr1;
	u32 dma_thresh = drv_data->cur_chip->dma_threshold;
	u32 dma_burst = drv_data->cur_chip->dma_burst_size;

	/* Get current state information */
	message = drv_data->cur_msg;
	transfer = drv_data->cur_transfer;
	chip = drv_data->cur_chip;

	/* Handle for abort */
	if (message->state == ERROR_STATE) {
		message->status = -EIO;
		giveback(drv_data);
		return;
	}

	/* Handle end of message */
	if (message->state == DONE_STATE) {
		message->status = 0;
		giveback(drv_data);
		return;
	}

	/* Delay if requested at end of transfer before CS change */
	if (message->state == RUNNING_STATE) {
		previous = list_entry(transfer->transfer_list.prev,
					struct spi_transfer,
					transfer_list);
		if (previous->delay_usecs)
			udelay(previous->delay_usecs);

		/* Drop chip select only if cs_change is requested */
		if (previous->cs_change)
			cs_deassert(drv_data);
	}

	/* Check for transfers that need multiple DMA segments */
	if (transfer->len > MAX_DMA_LEN && chip->enable_dma) {

		/* reject already-mapped transfers; PIO won't always work */
		if (message->is_dma_mapped
				|| transfer->rx_dma || transfer->tx_dma) {
			dev_err(&drv_data->pdev->dev,
				"pump_transfers: mapped transfer length "
				"of %u is greater than %d\n",
				transfer->len, MAX_DMA_LEN);
			message->status = -EINVAL;
			giveback(drv_data);
			return;
		}

		/* warn ... we force this to PIO mode */
		if (printk_ratelimit())
			dev_warn(&message->spi->dev, "pump_transfers: "
				"DMA disabled for transfer length %ld "
				"greater than %d\n",
				(long)drv_data->len, MAX_DMA_LEN);
	}

	/* Setup the transfer state based on the type of transfer */
	if (flush(drv_data) == 0) {
		dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
		message->status = -EIO;
		giveback(drv_data);
		return;
	}
	drv_data->n_bytes = chip->n_bytes;
	drv_data->dma_width = chip->dma_width;
	drv_data->tx = (void *)transfer->tx_buf;
	drv_data->tx_end = drv_data->tx + transfer->len;
	drv_data->rx = transfer->rx_buf;
	drv_data->rx_end = drv_data->rx + transfer->len;
	drv_data->rx_dma = transfer->rx_dma;
	drv_data->tx_dma = transfer->tx_dma;
	drv_data->len = transfer->len & DCMD_LENGTH;
	drv_data->write = drv_data->tx ? chip->write : null_writer;
	drv_data->read = drv_data->rx ? chip->read : null_reader;

	/* Change speed and bits per word on a per-transfer basis */
	cr0 = chip->cr0;
	if (transfer->speed_hz || transfer->bits_per_word) {

		bits = chip->bits_per_word;
		speed = chip->speed_hz;

		if (transfer->speed_hz)
			speed = transfer->speed_hz;

		if (transfer->bits_per_word)
			bits = transfer->bits_per_word;

		clk_div = ssp_get_clk_div(ssp, speed);

		if (bits <= 8) {
			drv_data->n_bytes = 1;
			drv_data->dma_width = DCMD_WIDTH1;
			drv_data->read = drv_data->read != null_reader ?
						u8_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u8_writer : null_writer;
		} else if (bits <= 16) {
			drv_data->n_bytes = 2;
			drv_data->dma_width = DCMD_WIDTH2;
			drv_data->read = drv_data->read != null_reader ?
						u16_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u16_writer : null_writer;
		} else if (bits <= 32) {
			drv_data->n_bytes = 4;
			drv_data->dma_width = DCMD_WIDTH4;
			drv_data->read = drv_data->read != null_reader ?
						u32_reader : null_reader;
			drv_data->write = drv_data->write != null_writer ?
						u32_writer : null_writer;
		}
		/* if bits/word is changed in dma mode, then must check the
		 * thresholds and burst also */
		if (chip->enable_dma) {
			if (set_dma_burst_and_threshold(chip, message->spi,
							bits, &dma_burst,
							&dma_thresh))
				if (printk_ratelimit())
					dev_warn(&message->spi->dev,
						"pump_transfers: "
						"DMA burst size reduced to "
						"match bits_per_word\n");
		}

		cr0 = clk_div
			| SSCR0_Motorola
			| SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
			| SSCR0_SSE
			| (bits > 16 ? SSCR0_EDSS : 0);
	}

	message->state = RUNNING_STATE;

	/* Try to map dma buffer and do a dma transfer if successful, but
	 * only if the length is non-zero and less than MAX_DMA_LEN.
	 *
	 * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
	 * of PIO instead.  Care is needed above because the transfer may
	 * have been passed with buffers that are already dma mapped.
	 * A zero-length transfer in PIO mode will not try to write/read
	 * to/from the buffers
	 *
	 * REVISIT large transfers are exactly where we most want to be
	 * using DMA.  If this happens much, split those transfers into
	 * multiple DMA segments rather than forcing PIO.
	 */
	drv_data->dma_mapped = 0;
	if (drv_data->len > 0 && drv_data->len <= MAX_DMA_LEN)
		drv_data->dma_mapped = map_dma_buffers(drv_data);
	if (drv_data->dma_mapped) {

		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = dma_transfer;

		/* Setup rx DMA Channel */
		DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
		DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
		DTADR(drv_data->rx_channel) = drv_data->rx_dma;
		if (drv_data->rx == drv_data->null_dma_buf)
			/* No target address increment */
			DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;
		else
			DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
							| DCMD_FLOWSRC
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;

		/* Setup tx DMA Channel */
		DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
		DSADR(drv_data->tx_channel) = drv_data->tx_dma;
		DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
		if (drv_data->tx == drv_data->null_dma_buf)
			/* No source address increment */
			DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;
		else
			DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
							| DCMD_FLOWTRG
							| drv_data->dma_width
							| dma_burst
							| drv_data->len;

		/* Enable dma end irqs on SSP to detect end of transfer */
		if (drv_data->ssp_type == PXA25x_SSP)
			DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;

		/* Clear status and start DMA engine */
		cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
		write_SSSR(drv_data->clear_sr, reg);
		DCSR(drv_data->rx_channel) |= DCSR_RUN;
		DCSR(drv_data->tx_channel) |= DCSR_RUN;
	} else {
		/* Ensure we have the correct interrupt handler */
		drv_data->transfer_handler = interrupt_transfer;

		/* Clear status */
		cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
		write_SSSR_CS(drv_data, drv_data->clear_sr);
	}

	/* see if we need to reload the config registers */
	if ((read_SSCR0(reg) != cr0)
		|| (read_SSCR1(reg) & SSCR1_CHANGE_MASK) !=
			(cr1 & SSCR1_CHANGE_MASK)) {

		/* stop the SSP, and update the other bits */
		write_SSCR0(cr0 & ~SSCR0_SSE, reg);
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
		/* first set CR1 without interrupt and service enables */
		write_SSCR1(cr1 & SSCR1_CHANGE_MASK, reg);
		/* restart the SSP */
		write_SSCR0(cr0, reg);

	} else {
		if (!pxa25x_ssp_comp(drv_data))
			write_SSTO(chip->timeout, reg);
	}

	cs_assert(drv_data);

	/* after chip select, release the data by enabling service
	 * requests and interrupts, without changing any mode bits */
	write_SSCR1(cr1, reg);
}

static void pump_messages(struct work_struct *work)
{
	struct driver_data *drv_data =
		container_of(work, struct driver_data, pump_messages);
	unsigned long flags;

	/* Lock queue and check for queue work */
	spin_lock_irqsave(&drv_data->lock, flags);
	if (list_empty(&drv_data->queue) || drv_data->run == QUEUE_STOPPED) {
		drv_data->busy = 0;
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Make sure we are not already running a message */
	if (drv_data->cur_msg) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return;
	}

	/* Extract head of queue */
	drv_data->cur_msg = list_entry(drv_data->queue.next,
					struct spi_message, queue);
	list_del_init(&drv_data->cur_msg->queue);

	/* Initial message state */
	drv_data->cur_msg->state = START_STATE;
	drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
						struct spi_transfer,
						transfer_list);

	/* prepare to setup the SSP, in pump_transfers, using the per
	 * chip configuration */
	drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);

	/* Mark as busy and launch transfers */
	tasklet_schedule(&drv_data->pump_transfers);

	drv_data->busy = 1;
	spin_unlock_irqrestore(&drv_data->lock, flags);
}

static int transfer(struct spi_device *spi, struct spi_message *msg)
{
	struct driver_data *drv_data = spi_master_get_devdata(spi->master);
	unsigned long flags;

	spin_lock_irqsave(&drv_data->lock, flags);

	if (drv_data->run == QUEUE_STOPPED) {
		spin_unlock_irqrestore(&drv_data->lock, flags);
		return -ESHUTDOWN;
	}

	msg->actual_length = 0;
	msg->status = -EINPROGRESS;
	msg->state = START_STATE;

	list_add_tail(&msg->queue, &drv_data->queue);

	if (drv_data->run == QUEUE_RUNNING && !drv_data->busy)
		queue_work(drv_data->workqueue, &drv_data->pump_messages);

	spin_unlock_irqrestore(&drv_data->lock, flags);

	return 0;
}
|
|
|
|
|
2009-04-07 02:00:54 +00:00
|
|
|
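/*
 * setup_cs - configure the chip select for one device.  A board-supplied
 * cs_control() callback takes precedence; otherwise, if chip_info names a
 * valid GPIO, it is requested and driven to its inactive level (which
 * depends on SPI_CS_HIGH).
 */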
static int setup_cs(struct spi_device *spi, struct chip_data *chip,
|
|
|
|
struct pxa2xx_spi_chip *chip_info)
|
|
|
|
{
|
|
|
|
int err = 0;
|
|
|
|
|
|
|
|
if (chip == NULL || chip_info == NULL)
|
|
|
|
return 0;
|
|
|
|
|
|
|
|
/* NOTE: setup() can be called multiple times, possibly with
|
|
|
|
* different chip_info; release any previously requested GPIO
|
|
|
|
*/
|
|
|
|
if (gpio_is_valid(chip->gpio_cs))
|
|
|
|
gpio_free(chip->gpio_cs);
|
|
|
|
|
|
|
|
/* If (*cs_control) is provided, ignore GPIO chip select */
|
|
|
|
if (chip_info->cs_control) {
|
|
|
|
chip->cs_control = chip_info->cs_control;
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (gpio_is_valid(chip_info->gpio_cs)) {
|
|
|
|
err = gpio_request(chip_info->gpio_cs, "SPI_CS");
|
|
|
|
if (err) {
|
|
|
|
dev_err(&spi->dev, "failed to request chip select "
|
|
|
|
"GPIO%d\n", chip_info->gpio_cs);
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
|
|
|
chip->gpio_cs = chip_info->gpio_cs;
|
|
|
|
chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
|
|
|
|
|
|
|
|
err = gpio_direction_output(chip->gpio_cs,
|
|
|
|
!chip->gpio_cs_inverted);
|
|
|
|
}
|
|
|
|
|
|
|
|
return err;
|
|
|
|
}
|
|
|
|
|
2006-03-08 07:53:24 +00:00
|
|
|
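/*
 * setup - spi_master->setup hook.  Validates bits_per_word against the SSP
 * type, allocates per-chip state on the first call, folds any pxa2xx_spi_chip
 * platform data into that state and precomputes the SSCR0/SSCR1 values and
 * read/write helpers used by later transfers.
 */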
static int setup(struct spi_device *spi)
|
|
|
|
{
|
|
|
|
struct pxa2xx_spi_chip *chip_info = NULL;
|
|
|
|
struct chip_data *chip;
|
|
|
|
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
|
2007-11-21 10:50:53 +00:00
|
|
|
struct ssp_device *ssp = drv_data->ssp;
|
2006-03-08 07:53:24 +00:00
|
|
|
unsigned int clk_div;
|
2008-10-16 05:02:43 +00:00
|
|
|
uint tx_thres = TX_THRESH_DFLT;
|
|
|
|
uint rx_thres = RX_THRESH_DFLT;
|
2006-03-08 07:53:24 +00:00
|
|
|
|
2010-11-23 01:12:17 +00:00
|
|
|
if (!pxa25x_ssp_comp(drv_data)
|
2006-12-10 10:18:54 +00:00
|
|
|
&& (spi->bits_per_word < 4 || spi->bits_per_word > 32)) {
|
|
|
|
dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
|
|
|
|
"b/w not 4-32 for type non-PXA25x_SSP\n",
|
|
|
|
drv_data->ssp_type, spi->bits_per_word);
|
2006-03-08 07:53:24 +00:00
|
|
|
return -EINVAL;
|
2010-11-23 01:12:17 +00:00
|
|
|
} else if (pxa25x_ssp_comp(drv_data)
|
2006-12-10 10:18:54 +00:00
|
|
|
&& (spi->bits_per_word < 4
|
|
|
|
|| spi->bits_per_word > 16)) {
|
|
|
|
dev_err(&spi->dev, "failed setup: ssp_type=%d, bits/wrd=%d "
|
|
|
|
"b/w not 4-16 for type PXA25x_SSP\n",
|
|
|
|
drv_data->ssp_type, spi->bits_per_word);
|
2006-03-08 07:53:24 +00:00
|
|
|
return -EINVAL;
|
2006-12-10 10:18:54 +00:00
|
|
|
}
|
2006-03-08 07:53:24 +00:00
|
|
|
|
2006-12-10 10:18:54 +00:00
|
|
|
/* Only alloc on first setup */
|
2006-03-08 07:53:24 +00:00
|
|
|
chip = spi_get_ctldata(spi);
|
2006-12-10 10:18:54 +00:00
|
|
|
if (!chip) {
|
2006-03-08 07:53:24 +00:00
|
|
|
chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
|
2006-12-10 10:18:54 +00:00
|
|
|
if (!chip) {
|
|
|
|
dev_err(&spi->dev,
|
|
|
|
"failed setup: can't allocate chip data\n");
|
2006-03-08 07:53:24 +00:00
|
|
|
return -ENOMEM;
|
2006-12-10 10:18:54 +00:00
|
|
|
}
|
2006-03-08 07:53:24 +00:00
|
|
|
|
2010-11-23 01:12:17 +00:00
|
|
|
if (drv_data->ssp_type == CE4100_SSP) {
|
|
|
|
if (spi->chip_select > 4) {
|
|
|
|
dev_err(&spi->dev, "failed setup: "
|
|
|
|
"cs number must not be > 4.\n");
|
|
|
|
kfree(chip);
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
chip->frm = spi->chip_select;
|
|
|
|
} else
|
|
|
|
chip->gpio_cs = -1;
|
2006-03-08 07:53:24 +00:00
|
|
|
chip->enable_dma = 0;
|
2008-10-16 05:02:43 +00:00
|
|
|
chip->timeout = TIMOUT_DFLT;
|
2006-03-08 07:53:24 +00:00
|
|
|
chip->dma_burst_size = drv_data->master_info->enable_dma ?
|
|
|
|
DCMD_BURST8 : 0;
|
|
|
|
}
|
|
|
|
|
2006-12-10 10:18:54 +00:00
|
|
|
/* protocol drivers may change the chip settings, so...
|
|
|
|
* if chip_info exists, use it */
|
|
|
|
chip_info = spi->controller_data;
|
|
|
|
|
2006-03-08 07:53:24 +00:00
|
|
|
/* chip_info isn't always needed */
|
2006-12-10 10:18:54 +00:00
|
|
|
chip->cr1 = 0;
|
2006-03-08 07:53:24 +00:00
|
|
|
if (chip_info) {
|
2008-10-16 05:02:43 +00:00
|
|
|
if (chip_info->timeout)
|
|
|
|
chip->timeout = chip_info->timeout;
|
|
|
|
if (chip_info->tx_threshold)
|
|
|
|
tx_thres = chip_info->tx_threshold;
|
|
|
|
if (chip_info->rx_threshold)
|
|
|
|
rx_thres = chip_info->rx_threshold;
|
|
|
|
chip->enable_dma = drv_data->master_info->enable_dma;
|
2006-03-08 07:53:24 +00:00
|
|
|
chip->dma_threshold = 0;
|
|
|
|
if (chip_info->enable_loopback)
|
|
|
|
chip->cr1 = SSCR1_LBM;
|
|
|
|
}
|
|
|
|
|
2008-10-16 05:02:43 +00:00
|
|
|
chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
|
|
|
|
(SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
|
|
|
|
|
2006-12-10 10:18:54 +00:00
|
|
|
/* set dma burst and threshold outside of chip_info path so that if
|
|
|
|
* chip_info goes away after setting chip->enable_dma, the
|
|
|
|
* burst and threshold can still respond to changes in bits_per_word */
|
|
|
|
if (chip->enable_dma) {
|
|
|
|
/* set up legal burst and threshold for dma */
|
|
|
|
if (set_dma_burst_and_threshold(chip, spi, spi->bits_per_word,
|
|
|
|
&chip->dma_burst_size,
|
|
|
|
&chip->dma_threshold)) {
|
|
|
|
dev_warn(&spi->dev, "in setup: DMA burst size reduced "
|
|
|
|
"to match bits_per_word\n");
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2007-11-21 10:50:53 +00:00
|
|
|
clk_div = ssp_get_clk_div(ssp, spi->max_speed_hz);
|
2006-03-28 22:05:23 +00:00
|
|
|
chip->speed_hz = spi->max_speed_hz;
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
chip->cr0 = clk_div
|
|
|
|
| SSCR0_Motorola
|
2006-05-20 22:00:19 +00:00
|
|
|
| SSCR0_DataSize(spi->bits_per_word > 16 ?
|
|
|
|
spi->bits_per_word - 16 : spi->bits_per_word)
|
2006-03-08 07:53:24 +00:00
|
|
|
| SSCR0_SSE
|
|
|
|
| (spi->bits_per_word > 16 ? SSCR0_EDSS : 0);
|
2007-01-26 08:56:44 +00:00
|
|
|
chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
|
|
|
|
chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
|
|
|
|
| (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
/* NOTE: PXA25x_SSP _could_ use external clocking ... */
|
2010-11-23 01:12:17 +00:00
|
|
|
if (!pxa25x_ssp_comp(drv_data))
|
2009-06-17 23:26:03 +00:00
|
|
|
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
|
2010-03-16 08:48:01 +00:00
|
|
|
clk_get_rate(ssp->clk)
|
|
|
|
/ (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
|
|
|
|
chip->enable_dma ? "DMA" : "PIO");
|
2006-03-08 07:53:24 +00:00
|
|
|
else
|
2009-06-17 23:26:03 +00:00
|
|
|
dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
|
2010-03-16 08:48:01 +00:00
|
|
|
clk_get_rate(ssp->clk) / 2
|
|
|
|
/ (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
|
|
|
|
chip->enable_dma ? "DMA" : "PIO");
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
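/* Select per-word I/O width, DMA width and PIO read/write helpers */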
if (spi->bits_per_word <= 8) {
|
|
|
|
chip->n_bytes = 1;
|
|
|
|
chip->dma_width = DCMD_WIDTH1;
|
|
|
|
chip->read = u8_reader;
|
|
|
|
chip->write = u8_writer;
|
|
|
|
} else if (spi->bits_per_word <= 16) {
|
|
|
|
chip->n_bytes = 2;
|
|
|
|
chip->dma_width = DCMD_WIDTH2;
|
|
|
|
chip->read = u16_reader;
|
|
|
|
chip->write = u16_writer;
|
|
|
|
} else if (spi->bits_per_word <= 32) {
|
|
|
|
chip->cr0 |= SSCR0_EDSS;
|
|
|
|
chip->n_bytes = 4;
|
|
|
|
chip->dma_width = DCMD_WIDTH4;
|
|
|
|
chip->read = u32_reader;
|
|
|
|
chip->write = u32_writer;
|
|
|
|
} else {
|
|
|
|
dev_err(&spi->dev, "invalid wordsize\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
2006-03-28 22:05:23 +00:00
|
|
|
chip->bits_per_word = spi->bits_per_word;
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
spi_set_ctldata(spi, chip);
|
|
|
|
|
2010-11-23 01:12:17 +00:00
|
|
|
if (drv_data->ssp_type == CE4100_SSP)
|
|
|
|
return 0;
|
|
|
|
|
2009-04-07 02:00:54 +00:00
|
|
|
return setup_cs(spi, chip, chip_info);
|
2006-03-08 07:53:24 +00:00
|
|
|
}
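/*
 * Illustrative sketch only (not part of this driver): a board file would
 * typically describe a slave with a pxa2xx_spi_chip and hand it to setup()
 * via spi_board_info.controller_data.  The device name and values below are
 * hypothetical.
 *
 *	static struct pxa2xx_spi_chip cs8415a_chip_info = {
 *		.tx_threshold	= 8,
 *		.rx_threshold	= 8,
 *		.timeout	= 235,
 *		.gpio_cs	= 2,
 *	};
 *
 *	static struct spi_board_info board_spi_devices[] = {
 *		{
 *			.modalias	= "cs8415a",
 *			.max_speed_hz	= 3686400,
 *			.bus_num	= 2,
 *			.chip_select	= 0,
 *			.controller_data = &cs8415a_chip_info,
 *		},
 *	};
 */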
|
|
|
|
|
2007-02-12 08:52:45 +00:00
|
|
|
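/*
 * cleanup - spi_master->cleanup hook.  Frees the per-chip state allocated by
 * setup() and releases a GPIO chip select where one was requested.
 */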
static void cleanup(struct spi_device *spi)
|
2006-03-08 07:53:24 +00:00
|
|
|
{
|
2007-02-12 08:52:45 +00:00
|
|
|
struct chip_data *chip = spi_get_ctldata(spi);
|
2010-11-23 01:12:17 +00:00
|
|
|
struct driver_data *drv_data = spi_master_get_devdata(spi->master);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
2009-05-12 20:19:36 +00:00
|
|
|
if (!chip)
|
|
|
|
return;
|
|
|
|
|
2010-11-23 01:12:17 +00:00
|
|
|
if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
|
2009-04-07 02:00:54 +00:00
|
|
|
gpio_free(chip->gpio_cs);
|
|
|
|
|
2006-03-08 07:53:24 +00:00
|
|
|
kfree(chip);
|
|
|
|
}
|
|
|
|
|
2012-12-07 16:57:14 +00:00
|
|
|
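/*
 * Queue lifecycle: init_queue() sets up the message list, the pump_transfers
 * tasklet and a singlethreaded workqueue; start_queue()/stop_queue() gate
 * pump_messages; destroy_queue() stops the queue and tears the workqueue
 * down on remove or on probe failure.
 */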
static int init_queue(struct driver_data *drv_data)
|
2006-03-08 07:53:24 +00:00
|
|
|
{
|
|
|
|
INIT_LIST_HEAD(&drv_data->queue);
|
|
|
|
spin_lock_init(&drv_data->lock);
|
|
|
|
|
|
|
|
drv_data->run = QUEUE_STOPPED;
|
|
|
|
drv_data->busy = 0;
|
|
|
|
|
|
|
|
tasklet_init(&drv_data->pump_transfers,
|
|
|
|
pump_transfers, (unsigned long)drv_data);
|
|
|
|
|
2006-12-05 19:36:26 +00:00
|
|
|
INIT_WORK(&drv_data->pump_messages, pump_messages);
|
2006-03-08 07:53:24 +00:00
|
|
|
drv_data->workqueue = create_singlethread_workqueue(
|
2009-03-24 23:38:21 +00:00
|
|
|
dev_name(drv_data->master->dev.parent));
|
2006-03-08 07:53:24 +00:00
|
|
|
if (drv_data->workqueue == NULL)
|
|
|
|
return -EBUSY;
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int start_queue(struct driver_data *drv_data)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&drv_data->lock, flags);
|
|
|
|
|
|
|
|
if (drv_data->run == QUEUE_RUNNING || drv_data->busy) {
|
|
|
|
spin_unlock_irqrestore(&drv_data->lock, flags);
|
|
|
|
return -EBUSY;
|
|
|
|
}
|
|
|
|
|
|
|
|
drv_data->run = QUEUE_RUNNING;
|
|
|
|
drv_data->cur_msg = NULL;
|
|
|
|
drv_data->cur_transfer = NULL;
|
|
|
|
drv_data->cur_chip = NULL;
|
|
|
|
spin_unlock_irqrestore(&drv_data->lock, flags);
|
|
|
|
|
|
|
|
queue_work(drv_data->workqueue, &drv_data->pump_messages);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int stop_queue(struct driver_data *drv_data)
|
|
|
|
{
|
|
|
|
unsigned long flags;
|
|
|
|
unsigned limit = 500;
|
|
|
|
int status = 0;
|
|
|
|
|
|
|
|
spin_lock_irqsave(&drv_data->lock, flags);
|
|
|
|
|
|
|
|
/* This is a bit lame, but is optimized for the common execution path.
|
|
|
|
* A wait_queue on the drv_data->busy could be used, but then the common
|
|
|
|
* execution path (pump_messages) would be required to call wake_up or
|
|
|
|
* friends on every SPI message. Do this instead */
|
|
|
|
drv_data->run = QUEUE_STOPPED;
|
2011-04-06 14:49:15 +00:00
|
|
|
while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
|
2006-03-08 07:53:24 +00:00
|
|
|
spin_unlock_irqrestore(&drv_data->lock, flags);
|
|
|
|
msleep(10);
|
|
|
|
spin_lock_irqsave(&drv_data->lock, flags);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!list_empty(&drv_data->queue) || drv_data->busy)
|
|
|
|
status = -EBUSY;
|
|
|
|
|
|
|
|
spin_unlock_irqrestore(&drv_data->lock, flags);
|
|
|
|
|
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
static int destroy_queue(struct driver_data *drv_data)
|
|
|
|
{
|
|
|
|
int status;
|
|
|
|
|
|
|
|
status = stop_queue(drv_data);
|
2006-12-10 10:18:54 +00:00
|
|
|
/* we are unloading the module or failing to load (only two calls
|
|
|
|
* to this routine), and neither call can handle a return value.
|
|
|
|
* However, destroy_workqueue calls flush_workqueue, and that will
|
|
|
|
* block until all work is done. If the reason that stop_queue
|
|
|
|
* timed out is that the work will never finish, then it does no
|
|
|
|
* good to call destroy_workqueue, so return anyway. */
|
2006-03-08 07:53:24 +00:00
|
|
|
if (status != 0)
|
|
|
|
return status;
|
|
|
|
|
|
|
|
destroy_workqueue(drv_data->workqueue);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2012-12-07 16:57:14 +00:00
|
|
|
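/*
 * pxa2xx_spi_probe - claim the SSP port, allocate and populate the
 * spi_master, map registers, request the IRQ and (optionally) DMA channels,
 * enable the SSP clock, load a default SSP configuration, then start the
 * message queue and register the master.
 */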
static int pxa2xx_spi_probe(struct platform_device *pdev)
|
2006-03-08 07:53:24 +00:00
|
|
|
{
|
|
|
|
struct device *dev = &pdev->dev;
|
|
|
|
struct pxa2xx_spi_master *platform_info;
|
|
|
|
struct spi_master *master;
|
2008-10-16 05:02:42 +00:00
|
|
|
struct driver_data *drv_data;
|
2007-11-21 10:50:53 +00:00
|
|
|
struct ssp_device *ssp;
|
2008-10-16 05:02:42 +00:00
|
|
|
int status;
|
2006-03-08 07:53:24 +00:00
|
|
|
|
2013-01-07 10:44:33 +00:00
|
|
|
platform_info = dev_get_platdata(dev);
|
|
|
|
if (!platform_info) {
|
|
|
|
dev_err(&pdev->dev, "missing platform data\n");
|
|
|
|
return -ENODEV;
|
|
|
|
}
|
2006-03-08 07:53:24 +00:00
|
|
|
|
2010-05-05 14:11:15 +00:00
|
|
|
ssp = pxa_ssp_request(pdev->id, pdev->name);
|
2013-01-07 10:44:33 +00:00
|
|
|
if (!ssp)
|
|
|
|
ssp = &platform_info->ssp;
|
|
|
|
|
|
|
|
if (!ssp->mmio_base) {
|
|
|
|
dev_err(&pdev->dev, "failed to get ssp\n");
|
2006-03-08 07:53:24 +00:00
|
|
|
return -ENODEV;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Allocate master with space for drv_data and null dma buffer */
|
|
|
|
master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
|
|
|
|
if (!master) {
|
2008-10-16 05:02:42 +00:00
|
|
|
dev_err(&pdev->dev, "cannot alloc spi_master\n");
|
2010-05-05 14:11:15 +00:00
|
|
|
pxa_ssp_free(ssp);
|
2006-03-08 07:53:24 +00:00
|
|
|
return -ENOMEM;
|
|
|
|
}
|
|
|
|
drv_data = spi_master_get_devdata(master);
|
|
|
|
drv_data->master = master;
|
|
|
|
drv_data->master_info = platform_info;
|
|
|
|
drv_data->pdev = pdev;
|
2007-11-21 10:50:53 +00:00
|
|
|
drv_data->ssp = ssp;
|
2006-03-08 07:53:24 +00:00
|
|
|
|
2010-10-08 16:11:19 +00:00
|
|
|
master->dev.parent = &pdev->dev;
|
|
|
|
master->dev.of_node = pdev->dev.of_node;
|
2009-06-17 23:26:04 +00:00
|
|
|
/* the spi->mode bits understood by this driver: */
|
2009-06-17 23:26:06 +00:00
|
|
|
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
|
2009-06-17 23:26:04 +00:00
|
|
|
|
2013-01-07 10:44:33 +00:00
|
|
|
master->bus_num = ssp->port_id;
|
2006-03-08 07:53:24 +00:00
|
|
|
master->num_chipselect = platform_info->num_chipselect;
|
2009-04-07 02:00:57 +00:00
|
|
|
master->dma_alignment = DMA_ALIGNMENT;
|
2006-03-08 07:53:24 +00:00
|
|
|
master->cleanup = cleanup;
|
|
|
|
master->setup = setup;
|
|
|
|
master->transfer = transfer;
|
|
|
|
|
2007-11-21 10:50:53 +00:00
|
|
|
drv_data->ssp_type = ssp->type;
|
2006-03-08 07:53:24 +00:00
|
|
|
drv_data->null_dma_buf = (u32 *)ALIGN((u32)(drv_data +
|
|
|
|
sizeof(struct driver_data)), 8);
|
|
|
|
|
2007-11-21 10:50:53 +00:00
|
|
|
drv_data->ioaddr = ssp->mmio_base;
|
|
|
|
drv_data->ssdr_physical = ssp->phys_base + SSDR;
|
2010-11-23 01:12:17 +00:00
|
|
|
if (pxa25x_ssp_comp(drv_data)) {
|
2006-03-08 07:53:24 +00:00
|
|
|
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
|
|
|
|
drv_data->dma_cr1 = 0;
|
|
|
|
drv_data->clear_sr = SSSR_ROR;
|
|
|
|
drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
|
|
|
|
} else {
|
|
|
|
drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
|
|
|
|
drv_data->dma_cr1 = SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE;
|
|
|
|
drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
|
|
|
|
drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
|
|
|
|
}
|
|
|
|
|
2010-11-23 01:12:14 +00:00
|
|
|
status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
|
|
|
|
drv_data);
|
2006-03-08 07:53:24 +00:00
|
|
|
if (status < 0) {
|
2008-10-16 05:02:42 +00:00
|
|
|
dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
|
2006-03-08 07:53:24 +00:00
|
|
|
goto out_error_master_alloc;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Setup DMA if requested */
|
|
|
|
drv_data->tx_channel = -1;
|
|
|
|
drv_data->rx_channel = -1;
|
|
|
|
if (platform_info->enable_dma) {
|
|
|
|
|
|
|
|
/* Get two DMA channels (rx and tx) */
|
|
|
|
drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
|
|
|
|
DMA_PRIO_HIGH,
|
|
|
|
dma_handler,
|
|
|
|
drv_data);
|
|
|
|
if (drv_data->rx_channel < 0) {
|
|
|
|
dev_err(dev, "problem (%d) requesting rx channel\n",
|
|
|
|
drv_data->rx_channel);
|
|
|
|
status = -ENODEV;
|
|
|
|
goto out_error_irq_alloc;
|
|
|
|
}
|
|
|
|
drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
|
|
|
|
DMA_PRIO_MEDIUM,
|
|
|
|
dma_handler,
|
|
|
|
drv_data);
|
|
|
|
if (drv_data->tx_channel < 0) {
|
|
|
|
dev_err(dev, "problem (%d) requesting tx channel\n",
|
|
|
|
drv_data->tx_channel);
|
|
|
|
status = -ENODEV;
|
|
|
|
goto out_error_dma_alloc;
|
|
|
|
}
|
|
|
|
|
2007-11-21 10:50:53 +00:00
|
|
|
DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
|
|
|
|
DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
|
2006-03-08 07:53:24 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Enable SOC clock */
|
2007-11-21 10:50:53 +00:00
|
|
|
clk_enable(ssp->clk);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
/* Load default SSP configuration */
|
|
|
|
write_SSCR0(0, drv_data->ioaddr);
|
2008-10-16 05:02:43 +00:00
|
|
|
write_SSCR1(SSCR1_RxTresh(RX_THRESH_DFLT) |
|
|
|
|
SSCR1_TxTresh(TX_THRESH_DFLT),
|
|
|
|
drv_data->ioaddr);
|
2010-03-16 08:48:01 +00:00
|
|
|
write_SSCR0(SSCR0_SCR(2)
|
2006-03-08 07:53:24 +00:00
|
|
|
| SSCR0_Motorola
|
|
|
|
| SSCR0_DataSize(8),
|
|
|
|
drv_data->ioaddr);
|
2010-11-23 01:12:17 +00:00
|
|
|
if (!pxa25x_ssp_comp(drv_data))
|
2006-03-08 07:53:24 +00:00
|
|
|
write_SSTO(0, drv_data->ioaddr);
|
|
|
|
write_SSPSP(0, drv_data->ioaddr);
|
|
|
|
|
|
|
|
/* Initialize and start the queue */
|
|
|
|
status = init_queue(drv_data);
|
|
|
|
if (status != 0) {
|
|
|
|
dev_err(&pdev->dev, "problem initializing queue\n");
|
|
|
|
goto out_error_clock_enabled;
|
|
|
|
}
|
|
|
|
status = start_queue(drv_data);
|
|
|
|
if (status != 0) {
|
|
|
|
dev_err(&pdev->dev, "problem starting queue\n");
|
|
|
|
goto out_error_clock_enabled;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Register with the SPI framework */
|
|
|
|
platform_set_drvdata(pdev, drv_data);
|
|
|
|
status = spi_register_master(master);
|
|
|
|
if (status != 0) {
|
|
|
|
dev_err(&pdev->dev, "problem registering spi master\n");
|
|
|
|
goto out_error_queue_alloc;
|
|
|
|
}
|
|
|
|
|
|
|
|
return status;
|
|
|
|
|
|
|
|
out_error_queue_alloc:
|
|
|
|
destroy_queue(drv_data);
|
|
|
|
|
|
|
|
out_error_clock_enabled:
|
2007-11-21 10:50:53 +00:00
|
|
|
clk_disable(ssp->clk);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
out_error_dma_alloc:
|
|
|
|
if (drv_data->tx_channel != -1)
|
|
|
|
pxa_free_dma(drv_data->tx_channel);
|
|
|
|
if (drv_data->rx_channel != -1)
|
|
|
|
pxa_free_dma(drv_data->rx_channel);
|
|
|
|
|
|
|
|
out_error_irq_alloc:
|
2007-11-21 10:50:53 +00:00
|
|
|
free_irq(ssp->irq, drv_data);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
out_error_master_alloc:
|
|
|
|
spi_master_put(master);
|
2010-05-05 14:11:15 +00:00
|
|
|
pxa_ssp_free(ssp);
|
2006-03-08 07:53:24 +00:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
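/*
 * pxa2xx_spi_remove - reverse of probe: drain and destroy the queue, quiesce
 * the SSP, release the DMA channels, IRQ and SSP port, then unregister the
 * master.
 */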
static int pxa2xx_spi_remove(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
struct driver_data *drv_data = platform_get_drvdata(pdev);
|
2009-01-06 22:41:45 +00:00
|
|
|
struct ssp_device *ssp;
|
2006-03-08 07:53:24 +00:00
|
|
|
int status = 0;
|
|
|
|
|
|
|
|
if (!drv_data)
|
|
|
|
return 0;
|
2009-01-06 22:41:45 +00:00
|
|
|
ssp = drv_data->ssp;
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
/* Remove the queue */
|
|
|
|
status = destroy_queue(drv_data);
|
|
|
|
if (status != 0)
|
2006-12-10 10:18:54 +00:00
|
|
|
/* the kernel does not check the return status of
|
|
|
|
* this routine (mod->exit, within the kernel). Therefore
|
|
|
|
* nothing is gained by returning from here, the module is
|
|
|
|
* going away regardless, and we should not leave any more
|
|
|
|
* resources allocated than necessary. We cannot free the
|
|
|
|
* message memory in drv_data->queue, but we can release the
|
|
|
|
* resources below. I think the kernel should honor -EBUSY
|
|
|
|
* returns but... */
|
|
|
|
dev_err(&pdev->dev, "pxa2xx_spi_remove: workqueue will not "
|
|
|
|
"complete, message memory not freed\n");
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
/* Disable the SSP at the peripheral and SOC level */
|
|
|
|
write_SSCR0(0, drv_data->ioaddr);
|
2007-11-21 10:50:53 +00:00
|
|
|
clk_disable(ssp->clk);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
/* Release DMA */
|
|
|
|
if (drv_data->master_info->enable_dma) {
|
2007-11-21 10:50:53 +00:00
|
|
|
DRCMR(ssp->drcmr_rx) = 0;
|
|
|
|
DRCMR(ssp->drcmr_tx) = 0;
|
2006-03-08 07:53:24 +00:00
|
|
|
pxa_free_dma(drv_data->tx_channel);
|
|
|
|
pxa_free_dma(drv_data->rx_channel);
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Release IRQ */
|
2007-11-21 10:50:53 +00:00
|
|
|
free_irq(ssp->irq, drv_data);
|
|
|
|
|
|
|
|
/* Release SSP */
|
2010-05-05 14:11:15 +00:00
|
|
|
pxa_ssp_free(ssp);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
/* Disconnect from the SPI framework */
|
|
|
|
spi_unregister_master(drv_data->master);
|
|
|
|
|
|
|
|
/* Prevent double remove */
|
|
|
|
platform_set_drvdata(pdev, NULL);
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void pxa2xx_spi_shutdown(struct platform_device *pdev)
|
|
|
|
{
|
|
|
|
int status = 0;
|
|
|
|
|
|
|
|
if ((status = pxa2xx_spi_remove(pdev)) != 0)
|
|
|
|
dev_err(&pdev->dev, "shutdown failed with %d\n", status);
|
|
|
|
}
|
|
|
|
|
|
|
|
#ifdef CONFIG_PM
|
2009-07-21 14:50:16 +00:00
|
|
|
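/*
 * PM callbacks: suspend stops the message queue and gates the SSP clock;
 * resume restores the DRCMR DMA mappings, re-enables the clock and restarts
 * the queue.
 */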
static int pxa2xx_spi_suspend(struct device *dev)
|
2006-03-08 07:53:24 +00:00
|
|
|
{
|
2009-07-21 14:50:16 +00:00
|
|
|
struct driver_data *drv_data = dev_get_drvdata(dev);
|
2007-11-21 10:50:53 +00:00
|
|
|
struct ssp_device *ssp = drv_data->ssp;
|
2006-03-08 07:53:24 +00:00
|
|
|
int status = 0;
|
|
|
|
|
|
|
|
status = stop_queue(drv_data);
|
|
|
|
if (status != 0)
|
|
|
|
return status;
|
|
|
|
write_SSCR0(0, drv_data->ioaddr);
|
2007-11-21 10:50:53 +00:00
|
|
|
clk_disable(ssp->clk);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2009-07-21 14:50:16 +00:00
|
|
|
static int pxa2xx_spi_resume(struct device *dev)
|
2006-03-08 07:53:24 +00:00
|
|
|
{
|
2009-07-21 14:50:16 +00:00
|
|
|
struct driver_data *drv_data = dev_get_drvdata(dev);
|
2007-11-21 10:50:53 +00:00
|
|
|
struct ssp_device *ssp = drv_data->ssp;
|
2006-03-08 07:53:24 +00:00
|
|
|
int status = 0;
|
|
|
|
|
2009-04-21 19:24:43 +00:00
|
|
|
if (drv_data->rx_channel != -1)
|
|
|
|
DRCMR(drv_data->ssp->drcmr_rx) =
|
|
|
|
DRCMR_MAPVLD | drv_data->rx_channel;
|
|
|
|
if (drv_data->tx_channel != -1)
|
|
|
|
DRCMR(drv_data->ssp->drcmr_tx) =
|
|
|
|
DRCMR_MAPVLD | drv_data->tx_channel;
|
|
|
|
|
2006-03-08 07:53:24 +00:00
|
|
|
/* Enable the SSP clock */
|
2008-05-12 21:02:01 +00:00
|
|
|
clk_enable(ssp->clk);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
/* Start the queue running */
|
|
|
|
status = start_queue(drv_data);
|
|
|
|
if (status != 0) {
|
2009-07-21 14:50:16 +00:00
|
|
|
dev_err(dev, "problem starting queue (%d)\n", status);
|
2006-03-08 07:53:24 +00:00
|
|
|
return status;
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
2009-07-21 14:50:16 +00:00
|
|
|
|
2009-12-15 02:00:08 +00:00
|
|
|
static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
|
2009-07-21 14:50:16 +00:00
|
|
|
.suspend = pxa2xx_spi_suspend,
|
|
|
|
.resume = pxa2xx_spi_resume,
|
|
|
|
};
|
|
|
|
#endif
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
static struct platform_driver driver = {
|
|
|
|
.driver = {
|
2009-07-21 14:50:16 +00:00
|
|
|
.name = "pxa2xx-spi",
|
|
|
|
.owner = THIS_MODULE,
|
|
|
|
#ifdef CONFIG_PM
|
|
|
|
.pm = &pxa2xx_spi_pm_ops,
|
|
|
|
#endif
|
2006-03-08 07:53:24 +00:00
|
|
|
},
|
2010-11-19 17:00:11 +00:00
|
|
|
.probe = pxa2xx_spi_probe,
|
2007-10-16 08:27:46 +00:00
|
|
|
.remove = pxa2xx_spi_remove,
|
2006-03-08 07:53:24 +00:00
|
|
|
.shutdown = pxa2xx_spi_shutdown,
|
|
|
|
};
|
|
|
|
|
|
|
|
static int __init pxa2xx_spi_init(void)
|
|
|
|
{
|
2010-11-19 17:00:11 +00:00
|
|
|
return platform_driver_register(&driver);
|
2006-03-08 07:53:24 +00:00
|
|
|
}
|
2009-09-22 23:46:10 +00:00
|
|
|
subsys_initcall(pxa2xx_spi_init);
|
2006-03-08 07:53:24 +00:00
|
|
|
|
|
|
|
static void __exit pxa2xx_spi_exit(void)
|
|
|
|
{
|
|
|
|
platform_driver_unregister(&driver);
|
|
|
|
}
|
|
|
|
module_exit(pxa2xx_spi_exit);
|