author     Mark Brown <broonie@kernel.org>    2019-07-04 17:35:07 +0100
committer  Mark Brown <broonie@kernel.org>    2019-07-04 17:35:07 +0100
commit     106dbe24d4146c0804cb025e450ac7af42d72356 (patch)
tree       ad196f245894a8546603f8472619f9e5b73b8265 /drivers/spi
parent     2337ff45293f36efa79247b3680223b9c9154392 (diff)
parent     8cc7720470a17558bd6f8d67df63361600e46c55 (diff)
Merge branch 'spi-5.3' into spi-next
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig             |  14
-rw-r--r--  drivers/spi/Makefile            |   1
-rw-r--r--  drivers/spi/atmel-quadspi.c     |  21
-rw-r--r--  drivers/spi/spi-at91-usart.c    | 221
-rw-r--r--  drivers/spi/spi-bcm2835.c       | 328
-rw-r--r--  drivers/spi/spi-bcm2835aux.c    |   4
-rw-r--r--  drivers/spi/spi-meson-spifc.c   |  12
-rw-r--r--  drivers/spi/spi-mt65xx.c        |  15
-rw-r--r--  drivers/spi/spi-pxa2xx.c        |  14
-rw-r--r--  drivers/spi/spi-qup.c           |   4
-rw-r--r--  drivers/spi/spi-rockchip.c      |   4
-rw-r--r--  drivers/spi/spi-sh-msiof.c      |   2
-rw-r--r--  drivers/spi/spi-stm32-qspi.c    |  10
-rw-r--r--  drivers/spi/spi-synquacer.c     | 828
-rw-r--r--  drivers/spi/spi-tegra114.c      | 170
-rw-r--r--  drivers/spi/spi.c               | 194
-rw-r--r--  drivers/spi/spidev.c            |   2
17 files changed, 1603 insertions(+), 241 deletions(-)
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index 30a40280c157..3a1d8f1170de 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig @@ -120,7 +120,7 @@ config SPI_AXI_SPI_ENGINE config SPI_BCM2835 tristate "BCM2835 SPI controller" depends on GPIOLIB - depends on ARCH_BCM2835 || COMPILE_TEST + depends on ARCH_BCM2835 || ARCH_BRCMSTB || COMPILE_TEST help This selects a driver for the Broadcom BCM2835 SPI master. @@ -131,7 +131,7 @@ config SPI_BCM2835 config SPI_BCM2835AUX tristate "BCM2835 SPI auxiliary controller" - depends on (ARCH_BCM2835 && GPIOLIB) || COMPILE_TEST + depends on ((ARCH_BCM2835 || ARCH_BRCMSTB) && GPIOLIB) || COMPILE_TEST help This selects a driver for the Broadcom BCM2835 SPI aux master. @@ -733,6 +733,16 @@ config SPI_SUN6I help This enables using the SPI controller on the Allwinner A31 SoCs. +config SPI_SYNQUACER + tristate "Socionext's SynQuacer HighSpeed SPI controller" + depends on ARCH_SYNQUACER || COMPILE_TEST + help + SPI driver for Socionext's High speed SPI controller which provides + various operating modes for interfacing to serial peripheral devices + that use the de-facto standard SPI protocol. + + It also supports the new dual-bit and quad-bit SPI protocol. + config SPI_MXIC tristate "Macronix MX25F0A SPI controller" depends on SPI_MASTER diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile index f2f78d03dc28..63dcab552bcb 100644 --- a/drivers/spi/Makefile +++ b/drivers/spi/Makefile @@ -106,6 +106,7 @@ obj-$(CONFIG_SPI_STM32_QSPI) += spi-stm32-qspi.o obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o +obj-$(CONFIG_SPI_SYNQUACER) += spi-synquacer.o obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c index 9f24d5f0b431..6a7d7b553d95 100644 --- a/drivers/spi/atmel-quadspi.c +++ b/drivers/spi/atmel-quadspi.c @@ -151,6 +151,7 @@ struct atmel_qspi { const struct atmel_qspi_caps *caps; u32 pending; u32 mr; + u32 scr; struct completion cmd_completion; }; @@ -382,7 +383,7 @@ static int atmel_qspi_setup(struct spi_device *spi) struct spi_controller *ctrl = spi->master; struct atmel_qspi *aq = spi_controller_get_devdata(ctrl); unsigned long src_rate; - u32 scr, scbr; + u32 scbr; if (ctrl->busy) return -EBUSY; @@ -399,13 +400,13 @@ static int atmel_qspi_setup(struct spi_device *spi) if (scbr > 0) scbr--; - scr = QSPI_SCR_SCBR(scbr); - writel_relaxed(scr, aq->regs + QSPI_SCR); + aq->scr = QSPI_SCR_SCBR(scbr); + writel_relaxed(aq->scr, aq->regs + QSPI_SCR); return 0; } -static int atmel_qspi_init(struct atmel_qspi *aq) +static void atmel_qspi_init(struct atmel_qspi *aq) { /* Reset the QSPI controller */ writel_relaxed(QSPI_CR_SWRST, aq->regs + QSPI_CR); @@ -416,8 +417,6 @@ static int atmel_qspi_init(struct atmel_qspi *aq) /* Enable the QSPI controller */ writel_relaxed(QSPI_CR_QSPIEN, aq->regs + QSPI_CR); - - return 0; } static irqreturn_t atmel_qspi_interrupt(int irq, void *dev_id) @@ -536,9 +535,7 @@ static int atmel_qspi_probe(struct platform_device *pdev) if (err) goto disable_qspick; - err = atmel_qspi_init(aq); - if (err) - goto disable_qspick; + atmel_qspi_init(aq); err = spi_register_controller(ctrl); if (err) @@ -587,7 +584,11 @@ static int __maybe_unused atmel_qspi_resume(struct device *dev) clk_prepare_enable(aq->pclk); clk_prepare_enable(aq->qspick); - return atmel_qspi_init(aq); + 
atmel_qspi_init(aq); + + writel_relaxed(aq->scr, aq->regs + QSPI_SCR); + + return 0; } static SIMPLE_DEV_PM_OPS(atmel_qspi_pm_ops, atmel_qspi_suspend, diff --git a/drivers/spi/spi-at91-usart.c b/drivers/spi/spi-at91-usart.c index f763e14bdf12..a40bb2ef89dc 100644 --- a/drivers/spi/spi-at91-usart.c +++ b/drivers/spi/spi-at91-usart.c @@ -8,9 +8,12 @@ #include <linux/clk.h> #include <linux/delay.h> +#include <linux/dmaengine.h> +#include <linux/dma-direction.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> +#include <linux/of_platform.h> #include <linux/of_gpio.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> @@ -59,6 +62,8 @@ #define US_INIT \ (US_MR_SPI_MASTER | US_MR_CHRL | US_MR_CLKO | US_MR_WRDBT) +#define US_DMA_MIN_BYTES 16 +#define US_DMA_TIMEOUT (msecs_to_jiffies(1000)) /* Register access macros */ #define at91_usart_spi_readl(port, reg) \ @@ -72,14 +77,19 @@ writeb_relaxed((value), (port)->regs + US_##reg) struct at91_usart_spi { + struct platform_device *mpdev; struct spi_transfer *current_transfer; void __iomem *regs; struct device *dev; struct clk *clk; + struct completion xfer_completion; + /*used in interrupt to protect data reading*/ spinlock_t lock; + phys_addr_t phybase; + int irq; unsigned int current_tx_remaining_bytes; unsigned int current_rx_remaining_bytes; @@ -88,8 +98,182 @@ struct at91_usart_spi { u32 status; bool xfer_failed; + bool use_dma; }; +static void dma_callback(void *data) +{ + struct spi_controller *ctlr = data; + struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); + + at91_usart_spi_writel(aus, IER, US_IR_RXRDY); + aus->current_rx_remaining_bytes = 0; + complete(&aus->xfer_completion); +} + +static bool at91_usart_spi_can_dma(struct spi_controller *ctrl, + struct spi_device *spi, + struct spi_transfer *xfer) +{ + struct at91_usart_spi *aus = spi_master_get_devdata(ctrl); + + return aus->use_dma && xfer->len >= US_DMA_MIN_BYTES; +} + +static int at91_usart_spi_configure_dma(struct spi_controller *ctlr, + struct at91_usart_spi *aus) +{ + struct dma_slave_config slave_config; + struct device *dev = &aus->mpdev->dev; + phys_addr_t phybase = aus->phybase; + dma_cap_mask_t mask; + int err = 0; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + ctlr->dma_tx = dma_request_slave_channel_reason(dev, "tx"); + if (IS_ERR_OR_NULL(ctlr->dma_tx)) { + if (IS_ERR(ctlr->dma_tx)) { + err = PTR_ERR(ctlr->dma_tx); + goto at91_usart_spi_error_clear; + } + + dev_dbg(dev, + "DMA TX channel not available, SPI unable to use DMA\n"); + err = -EBUSY; + goto at91_usart_spi_error_clear; + } + + ctlr->dma_rx = dma_request_slave_channel_reason(dev, "rx"); + if (IS_ERR_OR_NULL(ctlr->dma_rx)) { + if (IS_ERR(ctlr->dma_rx)) { + err = PTR_ERR(ctlr->dma_rx); + goto at91_usart_spi_error; + } + + dev_dbg(dev, + "DMA RX channel not available, SPI unable to use DMA\n"); + err = -EBUSY; + goto at91_usart_spi_error; + } + + slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; + slave_config.dst_addr = (dma_addr_t)phybase + US_THR; + slave_config.src_addr = (dma_addr_t)phybase + US_RHR; + slave_config.src_maxburst = 1; + slave_config.dst_maxburst = 1; + slave_config.device_fc = false; + + slave_config.direction = DMA_DEV_TO_MEM; + if (dmaengine_slave_config(ctlr->dma_rx, &slave_config)) { + dev_err(&ctlr->dev, + "failed to configure rx dma channel\n"); + err = -EINVAL; + goto at91_usart_spi_error; + } + + slave_config.direction = DMA_MEM_TO_DEV; + if 
(dmaengine_slave_config(ctlr->dma_tx, &slave_config)) { + dev_err(&ctlr->dev, + "failed to configure tx dma channel\n"); + err = -EINVAL; + goto at91_usart_spi_error; + } + + aus->use_dma = true; + return 0; + +at91_usart_spi_error: + if (!IS_ERR_OR_NULL(ctlr->dma_tx)) + dma_release_channel(ctlr->dma_tx); + if (!IS_ERR_OR_NULL(ctlr->dma_rx)) + dma_release_channel(ctlr->dma_rx); + ctlr->dma_tx = NULL; + ctlr->dma_rx = NULL; + +at91_usart_spi_error_clear: + return err; +} + +static void at91_usart_spi_release_dma(struct spi_controller *ctlr) +{ + if (ctlr->dma_rx) + dma_release_channel(ctlr->dma_rx); + if (ctlr->dma_tx) + dma_release_channel(ctlr->dma_tx); +} + +static void at91_usart_spi_stop_dma(struct spi_controller *ctlr) +{ + if (ctlr->dma_rx) + dmaengine_terminate_all(ctlr->dma_rx); + if (ctlr->dma_tx) + dmaengine_terminate_all(ctlr->dma_tx); +} + +static int at91_usart_spi_dma_transfer(struct spi_controller *ctlr, + struct spi_transfer *xfer) +{ + struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); + struct dma_chan *rxchan = ctlr->dma_rx; + struct dma_chan *txchan = ctlr->dma_tx; + struct dma_async_tx_descriptor *rxdesc; + struct dma_async_tx_descriptor *txdesc; + dma_cookie_t cookie; + + /* Disable RX interrupt */ + at91_usart_spi_writel(aus, IDR, US_IR_RXRDY); + + rxdesc = dmaengine_prep_slave_sg(rxchan, + xfer->rx_sg.sgl, + xfer->rx_sg.nents, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + if (!rxdesc) + goto at91_usart_spi_err_dma; + + txdesc = dmaengine_prep_slave_sg(txchan, + xfer->tx_sg.sgl, + xfer->tx_sg.nents, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + if (!txdesc) + goto at91_usart_spi_err_dma; + + rxdesc->callback = dma_callback; + rxdesc->callback_param = ctlr; + + cookie = rxdesc->tx_submit(rxdesc); + if (dma_submit_error(cookie)) + goto at91_usart_spi_err_dma; + + cookie = txdesc->tx_submit(txdesc); + if (dma_submit_error(cookie)) + goto at91_usart_spi_err_dma; + + rxchan->device->device_issue_pending(rxchan); + txchan->device->device_issue_pending(txchan); + + return 0; + +at91_usart_spi_err_dma: + /* Enable RX interrupt if something fails and fallback to PIO */ + at91_usart_spi_writel(aus, IER, US_IR_RXRDY); + at91_usart_spi_stop_dma(ctlr); + + return -ENOMEM; +} + +static unsigned long at91_usart_spi_dma_timeout(struct at91_usart_spi *aus) +{ + return wait_for_completion_timeout(&aus->xfer_completion, + US_DMA_TIMEOUT); +} + static inline u32 at91_usart_spi_tx_ready(struct at91_usart_spi *aus) { return aus->status & US_IR_TXRDY; @@ -216,6 +400,8 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr, struct spi_transfer *xfer) { struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); + unsigned long dma_timeout = 0; + int ret = 0; at91_usart_spi_set_xfer_speed(aus, xfer); aus->xfer_failed = false; @@ -225,8 +411,25 @@ static int at91_usart_spi_transfer_one(struct spi_controller *ctlr, while ((aus->current_tx_remaining_bytes || aus->current_rx_remaining_bytes) && !aus->xfer_failed) { - at91_usart_spi_read_status(aus); - at91_usart_spi_tx(aus); + reinit_completion(&aus->xfer_completion); + if (at91_usart_spi_can_dma(ctlr, spi, xfer) && + !ret) { + ret = at91_usart_spi_dma_transfer(ctlr, xfer); + if (ret) + continue; + + dma_timeout = at91_usart_spi_dma_timeout(aus); + + if (WARN_ON(dma_timeout == 0)) { + dev_err(&spi->dev, "DMA transfer timeout\n"); + return -EIO; + } + aus->current_tx_remaining_bytes = 0; + } else { + at91_usart_spi_read_status(aus); + at91_usart_spi_tx(aus); + } + cpu_relax(); } @@ -345,6 +548,7 
@@ static int at91_usart_spi_probe(struct platform_device *pdev) controller->transfer_one = at91_usart_spi_transfer_one; controller->prepare_message = at91_usart_spi_prepare_message; controller->unprepare_message = at91_usart_spi_unprepare_message; + controller->can_dma = at91_usart_spi_can_dma; controller->cleanup = at91_usart_spi_cleanup; controller->max_speed_hz = DIV_ROUND_UP(clk_get_rate(clk), US_MIN_CLK_DIV); @@ -376,7 +580,17 @@ static int at91_usart_spi_probe(struct platform_device *pdev) aus->spi_clk = clk_get_rate(clk); at91_usart_spi_init(aus); + aus->phybase = regs->start; + + aus->mpdev = to_platform_device(pdev->dev.parent); + + ret = at91_usart_spi_configure_dma(controller, aus); + if (ret) + goto at91_usart_fail_dma; + spin_lock_init(&aus->lock); + init_completion(&aus->xfer_completion); + ret = devm_spi_register_master(&pdev->dev, controller); if (ret) goto at91_usart_fail_register_master; @@ -389,6 +603,8 @@ static int at91_usart_spi_probe(struct platform_device *pdev) return 0; at91_usart_fail_register_master: + at91_usart_spi_release_dma(controller); +at91_usart_fail_dma: clk_disable_unprepare(clk); at91_usart_spi_probe_fail: spi_master_put(controller); @@ -453,6 +669,7 @@ static int at91_usart_spi_remove(struct platform_device *pdev) struct spi_controller *ctlr = platform_get_drvdata(pdev); struct at91_usart_spi *aus = spi_master_get_devdata(ctlr); + at91_usart_spi_release_dma(ctlr); clk_disable_unprepare(aus->clk); return 0; diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index 402c1efcd762..6f243a90c844 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -13,6 +13,7 @@ #include <linux/clk.h> #include <linux/completion.h> +#include <linux/debugfs.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> @@ -64,14 +65,18 @@ #define BCM2835_SPI_FIFO_SIZE 64 #define BCM2835_SPI_FIFO_SIZE_3_4 48 -#define BCM2835_SPI_POLLING_LIMIT_US 30 -#define BCM2835_SPI_POLLING_JIFFIES 2 #define BCM2835_SPI_DMA_MIN_LENGTH 96 #define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \ | SPI_NO_CS | SPI_3WIRE) #define DRV_NAME "spi-bcm2835" +/* define polling limits */ +unsigned int polling_limit_us = 30; +module_param(polling_limit_us, uint, 0664); +MODULE_PARM_DESC(polling_limit_us, + "time in us to run a transfer in polling mode\n"); + /** * struct bcm2835_spi - BCM2835 SPI controller * @regs: base address of register map @@ -88,6 +93,15 @@ * length is not a multiple of 4 (to overcome hardware limitation) * @tx_spillover: whether @tx_prologue spills over to second TX sglist entry * @dma_pending: whether a DMA transfer is in progress + * @debugfs_dir: the debugfs directory - neede to remove debugfs when + * unloading the module + * @count_transfer_polling: count of how often polling mode is used + * @count_transfer_irq: count of how often interrupt mode is used + * @count_transfer_irq_after_polling: count of how often we fall back to + * interrupt mode after starting in polling mode. 
+ * These are counted as well in @count_transfer_polling and + * @count_transfer_irq + * @count_transfer_dma: count how often dma mode is used */ struct bcm2835_spi { void __iomem *regs; @@ -102,8 +116,55 @@ struct bcm2835_spi { int rx_prologue; unsigned int tx_spillover; unsigned int dma_pending; + + struct dentry *debugfs_dir; + u64 count_transfer_polling; + u64 count_transfer_irq; + u64 count_transfer_irq_after_polling; + u64 count_transfer_dma; }; +#if defined(CONFIG_DEBUG_FS) +static void bcm2835_debugfs_create(struct bcm2835_spi *bs, + const char *dname) +{ + char name[64]; + struct dentry *dir; + + /* get full name */ + snprintf(name, sizeof(name), "spi-bcm2835-%s", dname); + + /* the base directory */ + dir = debugfs_create_dir(name, NULL); + bs->debugfs_dir = dir; + + /* the counters */ + debugfs_create_u64("count_transfer_polling", 0444, dir, + &bs->count_transfer_polling); + debugfs_create_u64("count_transfer_irq", 0444, dir, + &bs->count_transfer_irq); + debugfs_create_u64("count_transfer_irq_after_polling", 0444, dir, + &bs->count_transfer_irq_after_polling); + debugfs_create_u64("count_transfer_dma", 0444, dir, + &bs->count_transfer_dma); +} + +static void bcm2835_debugfs_remove(struct bcm2835_spi *bs) +{ + debugfs_remove_recursive(bs->debugfs_dir); + bs->debugfs_dir = NULL; +} +#else +static void bcm2835_debugfs_create(struct bcm2835_spi *bs, + const char *dname) +{ +} + +static void bcm2835_debugfs_remove(struct bcm2835_spi *bs) +{ +} +#endif /* CONFIG_DEBUG_FS */ + static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg) { return readl(bs->regs + reg); @@ -248,9 +309,9 @@ static inline void bcm2835_wr_fifo_blind(struct bcm2835_spi *bs, int count) } } -static void bcm2835_spi_reset_hw(struct spi_master *master) +static void bcm2835_spi_reset_hw(struct spi_controller *ctlr) { - struct bcm2835_spi *bs = spi_master_get_devdata(master); + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); /* Disable SPI interrupts and transfer */ @@ -269,8 +330,8 @@ static void bcm2835_spi_reset_hw(struct spi_master *master) static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) { - struct spi_master *master = dev_id; - struct bcm2835_spi *bs = spi_master_get_devdata(master); + struct spi_controller *ctlr = dev_id; + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); /* @@ -292,20 +353,23 @@ static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id) if (!bs->rx_len) { /* Transfer complete - reset SPI HW */ - bcm2835_spi_reset_hw(master); + bcm2835_spi_reset_hw(ctlr); /* wake up the framework */ - complete(&master->xfer_completion); + complete(&ctlr->xfer_completion); } return IRQ_HANDLED; } -static int bcm2835_spi_transfer_one_irq(struct spi_master *master, +static int bcm2835_spi_transfer_one_irq(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *tfr, u32 cs, bool fifo_empty) { - struct bcm2835_spi *bs = spi_master_get_devdata(master); + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); + + /* update usage statistics */ + bs->count_transfer_irq++; /* * Enable HW block, but with interrupts still disabled. 
@@ -328,7 +392,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master, /** * bcm2835_spi_transfer_prologue() - transfer first few bytes without DMA - * @master: SPI master + * @ctlr: SPI master controller * @tfr: SPI transfer * @bs: BCM2835 SPI controller * @cs: CS register @@ -372,7 +436,7 @@ static int bcm2835_spi_transfer_one_irq(struct spi_master *master, * be transmitted in 32-bit width to ensure that the following DMA transfer can * pick up the residue in the RX FIFO in ungarbled form. */ -static void bcm2835_spi_transfer_prologue(struct spi_master *master, +static void bcm2835_spi_transfer_prologue(struct spi_controller *ctlr, struct spi_transfer *tfr, struct bcm2835_spi *bs, u32 cs) @@ -413,9 +477,9 @@ static void bcm2835_spi_transfer_prologue(struct spi_master *master, bcm2835_wr_fifo_count(bs, bs->rx_prologue); bcm2835_wait_tx_fifo_empty(bs); bcm2835_rd_fifo_count(bs, bs->rx_prologue); - bcm2835_spi_reset_hw(master); + bcm2835_spi_reset_hw(ctlr); - dma_sync_single_for_device(master->dma_rx->device->dev, + dma_sync_single_for_device(ctlr->dma_rx->device->dev, sg_dma_address(&tfr->rx_sg.sgl[0]), bs->rx_prologue, DMA_FROM_DEVICE); @@ -479,11 +543,11 @@ static void bcm2835_spi_undo_prologue(struct bcm2835_spi *bs) static void bcm2835_spi_dma_done(void *data) { - struct spi_master *master = data; - struct bcm2835_spi *bs = spi_master_get_devdata(master); + struct spi_controller *ctlr = data; + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); /* reset fifo and HW */ - bcm2835_spi_reset_hw(master); + bcm2835_spi_reset_hw(ctlr); /* and terminate tx-dma as we do not have an irq for it * because when the rx dma will terminate and this callback @@ -491,15 +555,15 @@ static void bcm2835_spi_dma_done(void *data) * situation otherwise... */ if (cmpxchg(&bs->dma_pending, true, false)) { - dmaengine_terminate_async(master->dma_tx); + dmaengine_terminate_async(ctlr->dma_tx); bcm2835_spi_undo_prologue(bs); } /* and mark as completed */; - complete(&master->xfer_completion); + complete(&ctlr->xfer_completion); } -static int bcm2835_spi_prepare_sg(struct spi_master *master, +static int bcm2835_spi_prepare_sg(struct spi_controller *ctlr, struct spi_transfer *tfr, bool is_tx) { @@ -514,14 +578,14 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master, if (is_tx) { dir = DMA_MEM_TO_DEV; - chan = master->dma_tx; + chan = ctlr->dma_tx; nents = tfr->tx_sg.nents; sgl = tfr->tx_sg.sgl; flags = 0 /* no tx interrupt */; } else { dir = DMA_DEV_TO_MEM; - chan = master->dma_rx; + chan = ctlr->dma_rx; nents = tfr->rx_sg.nents; sgl = tfr->rx_sg.sgl; flags = DMA_PREP_INTERRUPT; @@ -534,7 +598,7 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master, /* set callback for rx */ if (!is_tx) { desc->callback = bcm2835_spi_dma_done; - desc->callback_param = master; + desc->callback_param = ctlr; } /* submit it to DMA-engine */ @@ -543,27 +607,30 @@ static int bcm2835_spi_prepare_sg(struct spi_master *master, return dma_submit_error(cookie); } -static int bcm2835_spi_transfer_one_dma(struct spi_master *master, +static int bcm2835_spi_transfer_one_dma(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *tfr, u32 cs) { - struct bcm2835_spi *bs = spi_master_get_devdata(master); + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); int ret; + /* update usage statistics */ + bs->count_transfer_dma++; + /* * Transfer first few bytes without DMA if length of first TX or RX * sglist entry is not a multiple of 4 bytes (hardware limitation). 
*/ - bcm2835_spi_transfer_prologue(master, tfr, bs, cs); + bcm2835_spi_transfer_prologue(ctlr, tfr, bs, cs); /* setup tx-DMA */ - ret = bcm2835_spi_prepare_sg(master, tfr, true); + ret = bcm2835_spi_prepare_sg(ctlr, tfr, true); if (ret) goto err_reset_hw; /* start TX early */ - dma_async_issue_pending(master->dma_tx); + dma_async_issue_pending(ctlr->dma_tx); /* mark as dma pending */ bs->dma_pending = 1; @@ -579,27 +646,27 @@ static int bcm2835_spi_transfer_one_dma(struct spi_master *master, * mapping of the rx buffers still takes place * this saves 10us or more. */ - ret = bcm2835_spi_prepare_sg(master, tfr, false); + ret = bcm2835_spi_prepare_sg(ctlr, tfr, false); if (ret) { /* need to reset on errors */ - dmaengine_terminate_sync(master->dma_tx); + dmaengine_terminate_sync(ctlr->dma_tx); bs->dma_pending = false; goto err_reset_hw; } /* start rx dma late */ - dma_async_issue_pending(master->dma_rx); + dma_async_issue_pending(ctlr->dma_rx); /* wait for wakeup in framework */ return 1; err_reset_hw: - bcm2835_spi_reset_hw(master); + bcm2835_spi_reset_hw(ctlr); bcm2835_spi_undo_prologue(bs); return ret; } -static bool bcm2835_spi_can_dma(struct spi_master *master, +static bool bcm2835_spi_can_dma(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *tfr) { @@ -611,21 +678,21 @@ static bool bcm2835_spi_can_dma(struct spi_master *master, return true; } -static void bcm2835_dma_release(struct spi_master *master) +static void bcm2835_dma_release(struct spi_controller *ctlr) { - if (master->dma_tx) { - dmaengine_terminate_sync(master->dma_tx); - dma_release_channel(master->dma_tx); - master->dma_tx = NULL; + if (ctlr->dma_tx) { + dmaengine_terminate_sync(ctlr->dma_tx); + dma_release_channel(ctlr->dma_tx); + ctlr->dma_tx = NULL; } - if (master->dma_rx) { - dmaengine_terminate_sync(master->dma_rx); - dma_release_channel(master->dma_rx); - master->dma_rx = NULL; + if (ctlr->dma_rx) { + dmaengine_terminate_sync(ctlr->dma_rx); + dma_release_channel(ctlr->dma_rx); + ctlr->dma_rx = NULL; } } -static void bcm2835_dma_init(struct spi_master *master, struct device *dev) +static void bcm2835_dma_init(struct spi_controller *ctlr, struct device *dev) { struct dma_slave_config slave_config; const __be32 *addr; @@ -633,7 +700,7 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev) int ret; /* base address in dma-space */ - addr = of_get_address(master->dev.of_node, 0, NULL, NULL); + addr = of_get_address(ctlr->dev.of_node, 0, NULL, NULL); if (!addr) { dev_err(dev, "could not get DMA-register address - not using dma mode\n"); goto err; @@ -641,38 +708,36 @@ static void bcm2835_dma_init(struct spi_master *master, struct device *dev) dma_reg_base = be32_to_cpup(addr); /* get tx/rx dma */ - master->dma_tx = dma_request_slave_channel(dev, "tx"); - if (!master->dma_tx) { + ctlr->dma_tx = dma_request_slave_channel(dev, "tx"); + if (!ctlr->dma_tx) { dev_err(dev, "no tx-dma configuration found - not using dma mode\n"); goto err; } - master->dma_rx = dma_request_slave_channel(dev, "rx"); - if (!master->dma_rx) { + ctlr->dma_rx = dma_request_slave_channel(dev, "rx"); + if (!ctlr->dma_rx) { dev_err(dev, "no rx-dma configuration found - not using dma mode\n"); goto err_release; } /* configure DMAs */ - slave_config.direction = DMA_MEM_TO_DEV; slave_config.dst_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO); slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - ret = dmaengine_slave_config(master->dma_tx, &slave_config); + ret = dmaengine_slave_config(ctlr->dma_tx, 
&slave_config); if (ret) goto err_config; - slave_config.direction = DMA_DEV_TO_MEM; slave_config.src_addr = (u32)(dma_reg_base + BCM2835_SPI_FIFO); slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; - ret = dmaengine_slave_config(master->dma_rx, &slave_config); + ret = dmaengine_slave_config(ctlr->dma_rx, &slave_config); if (ret) goto err_config; /* all went well, so set can_dma */ - master->can_dma = bcm2835_spi_can_dma; + ctlr->can_dma = bcm2835_spi_can_dma; /* need to do TX AND RX DMA, so we need dummy buffers */ - master->flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX; + ctlr->flags = SPI_CONTROLLER_MUST_RX | SPI_CONTROLLER_MUST_TX; return; @@ -680,20 +745,22 @@ err_config: dev_err(dev, "issue configuring dma: %d - not using DMA mode\n", ret); err_release: - bcm2835_dma_release(master); + bcm2835_dma_release(ctlr); err: return; } -static int bcm2835_spi_transfer_one_poll(struct spi_master *master, +static int bcm2835_spi_transfer_one_poll(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *tfr, - u32 cs, - unsigned long long xfer_time_us) + u32 cs) { - struct bcm2835_spi *bs = spi_master_get_devdata(master); + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); unsigned long timeout; + /* update usage statistics */ + bs->count_transfer_polling++; + /* enable HW block without interrupts */ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA); @@ -703,8 +770,8 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master, */ bcm2835_wr_fifo_blind(bs, BCM2835_SPI_FIFO_SIZE); - /* set the timeout */ - timeout = jiffies + BCM2835_SPI_POLLING_JIFFIES; + /* set the timeout to at least 2 jiffies */ + timeout = jiffies + 2 + HZ * polling_limit_us / 1000000; /* loop until finished the transfer */ while (bs->rx_len) { @@ -723,25 +790,28 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master, jiffies - timeout, bs->tx_len, bs->rx_len); /* fall back to interrupt mode */ - return bcm2835_spi_transfer_one_irq(master, spi, + + /* update usage statistics */ + bs->count_transfer_irq_after_polling++; + + return bcm2835_spi_transfer_one_irq(ctlr, spi, tfr, cs, false); } } /* Transfer complete - reset SPI HW */ - bcm2835_spi_reset_hw(master); + bcm2835_spi_reset_hw(ctlr); /* and return without waiting for completion */ return 0; } -static int bcm2835_spi_transfer_one(struct spi_master *master, +static int bcm2835_spi_transfer_one(struct spi_controller *ctlr, struct spi_device *spi, struct spi_transfer *tfr) { - struct bcm2835_spi *bs = spi_master_get_devdata(master); - unsigned long spi_hz, clk_hz, cdiv; - unsigned long spi_used_hz; - unsigned long long xfer_time_us; + struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr); + unsigned long spi_hz, clk_hz, cdiv, spi_used_hz; + unsigned long hz_per_byte, byte_limit; u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS); /* set clock */ @@ -782,42 +852,49 @@ static int bcm2835_spi_transfer_one(struct spi_master *master, bs->tx_len = tfr->len; bs->rx_len = tfr->len; - /* calculate the estimated time in us the transfer runs */ - xfer_time_us = (unsigned long long)tfr->len - * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */ - * 1000000; - do_div(xfer_time_us, spi_used_hz); + /* Calculate the estimated time in us the transfer runs. Note that + * there is 1 idle clocks cycles after each byte getting transferred + |