dmaengine updates for v6.16

New support:
   - Renesas RZ/V2H(P) dma support for r9a09g057
   - Arm DMA-350 driver
   - Tegra Tegra264 ADMA support
 
  Updates:
   - AMD ptdma driver code removal and optimizations
   - Freescale edma error interrupt handler support
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEE+vs47OPLdNbVcHzyfBQHDyUjg0cFAmhBO1YACgkQfBQHDyUj
 g0cuMxAAqscuPU17PjJPDy9Fzaq+B3nsZ9JR+Y/M7ifxFJSedJqdaZc7w4OsfGy2
 VZIWiSbpK2WrQHLTh/KlE2AeycO0zX5H1vQmYc4GNQn+18cprxj68YHLb05ZskJq
 sNVpEI0zZCxrFUgz8TrwdNcDzTC71TdtD2VLqZ6dCYcoi8lWiHPdbzxR/cSpbENb
 ysSrAoJy6v92ES2McH3wLAcwuchlC1wFMof9kVVhe3ueZnrtvuBML/fZldKE85qc
 dgcm9r1XOdcU3rOBxKQkQq2b0PzeRcUhUNRErqMQVTNs8Vg3N02x2jM214XKNLGt
 G/aFac9neun6iJ3H8rXzHEFhO8bInNddCjfv1SBdV0UR2LZHnzHQHz+0Og/HdyGD
 kkr3QsU+JzUQe29cHRwDKUR63l5dd+6PgwkWgcxYuauhFNRFpxdlosepmWZWZ+GE
 OVy4D/tWu1acXvorm9ZnIbkg/9anzQJEj78+Y9Tlgh5C59nBINfBtVjTVw9BWDTo
 1P9YS3YGdkT49uZu1sust9ug4H9/yifcXY4uXzBdTIYZTt3kNZfncVr3kMkMgAdU
 bcm5PvnklIRo+JWd8WftiLQDyF4OWUcf5CG3VVFthIR4Fla+1Wpg41NjQVLvRNzk
 Ji/WzLj0Wnzx+QuPyUC3NFKE11IJdB+7hGktfVBHcuQ/W6Vc7bY=
 =cu98
 -----END PGP SIGNATURE-----

Merge tag 'dmaengine-6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "A fairly small update for the dmaengine subsystem. This has a new ARM
  dmaengine driver and couple of new device support and few driver
  changes:

  New support:
   - Renesas RZ/V2H(P) dma support for r9a09g057
   - Arm DMA-350 driver
   - Tegra Tegra264 ADMA support

  Updates:
   - AMD ptdma driver code removal and optimizations
   - Freescale edma error interrupt handler support"

* tag 'dmaengine-6.16-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (27 commits)
  dmaengine: idxd: Remove unused pointer and macro
  arm64: dts: renesas: r9a09g057: Add DMAC nodes
  dmaengine: sh: rz-dmac: Add RZ/V2H(P) support
  dmaengine: sh: rz-dmac: Allow for multiple DMACs
  irqchip/renesas-rzv2h: Add rzv2h_icu_register_dma_req()
  dt-bindings: dma: rz-dmac: Document RZ/V2H(P) family of SoCs
  dt-bindings: dma: rz-dmac: Restrict properties for RZ/A1H
  dmaengine: idxd: Narrow the restriction on BATCH to ver. 1 only
  dmaengine: ti: Add NULL check in udma_probe()
  fsldma: Set correct dma_mask based on hw capability
  dmaengine: idxd: Check availability of workqueue allocated by idxd wq driver before using
  dmaengine: xilinx_dma: Set dma_device directions
  dmaengine: tegra210-adma: Add Tegra264 support
  dt-bindings: Document Tegra264 ADMA support
  dmaengine: dw-edma: Add HDMA NATIVE map check
  dmaegnine: fsl-edma: add edma error interrupt handler
  dt-bindings: dma: fsl-edma: increase maxItems of interrupts and interrupt-names
  dmaengine: ARM_DMA350 should depend on ARM/ARM64
  dt-bindings: dma: qcom,bam: Document dma-coherent property
  dmaengine: Add Arm DMA-350 driver
  ...
This commit is contained in:
Linus Torvalds 2025-06-05 08:49:30 -07:00
commit bfdf35c5dc
29 changed files with 1463 additions and 106 deletions

View File

@ -0,0 +1,44 @@
# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
%YAML 1.2
---
$id: http://devicetree.org/schemas/dma/arm,dma-350.yaml#
$schema: http://devicetree.org/meta-schemas/core.yaml#
title: Arm CoreLink DMA-350 Controller
maintainers:
- Robin Murphy <robin.murphy@arm.com>
allOf:
- $ref: dma-controller.yaml#
properties:
compatible:
const: arm,dma-350
reg:
items:
- description: Base and size of the full register map
interrupts:
minItems: 1
items:
- description: Channel 0 interrupt
- description: Channel 1 interrupt
- description: Channel 2 interrupt
- description: Channel 3 interrupt
- description: Channel 4 interrupt
- description: Channel 5 interrupt
- description: Channel 6 interrupt
- description: Channel 7 interrupt
"#dma-cells":
const: 1
description: The cell is the trigger input number
required:
- compatible
- reg
- interrupts
unevaluatedProperties: false

View File

@ -48,11 +48,11 @@ properties:
interrupts:
minItems: 1
maxItems: 64
maxItems: 65
interrupt-names:
minItems: 1
maxItems: 64
maxItems: 65
"#dma-cells":
description: |

View File

@ -19,6 +19,7 @@ properties:
- enum:
- nvidia,tegra210-adma
- nvidia,tegra186-adma
- nvidia,tegra264-adma
- items:
- enum:
- nvidia,tegra234-adma
@ -92,6 +93,7 @@ allOf:
contains:
enum:
- nvidia,tegra186-adma
- nvidia,tegra264-adma
then:
anyOf:
- properties:

View File

@ -42,6 +42,8 @@ properties:
interrupts:
maxItems: 1
dma-coherent: true
iommus:
minItems: 1
maxItems: 6

View File

@ -11,19 +11,23 @@ maintainers:
properties:
compatible:
items:
- enum:
- renesas,r7s72100-dmac # RZ/A1H
- renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five
- renesas,r9a07g044-dmac # RZ/G2{L,LC}
- renesas,r9a07g054-dmac # RZ/V2L
- renesas,r9a08g045-dmac # RZ/G3S
- const: renesas,rz-dmac
oneOf:
- items:
- enum:
- renesas,r7s72100-dmac # RZ/A1H
- renesas,r9a07g043-dmac # RZ/G2UL and RZ/Five
- renesas,r9a07g044-dmac # RZ/G2{L,LC}
- renesas,r9a07g054-dmac # RZ/V2L
- renesas,r9a08g045-dmac # RZ/G3S
- const: renesas,rz-dmac
- const: renesas,r9a09g057-dmac # RZ/V2H(P)
reg:
items:
- description: Control and channel register block
- description: DMA extended resource selector block
minItems: 1
interrupts:
maxItems: 17
@ -52,6 +56,7 @@ properties:
items:
- description: DMA main clock
- description: DMA register access clock
minItems: 1
clock-names:
items:
@ -61,10 +66,10 @@ properties:
'#dma-cells':
const: 1
description:
The cell specifies the encoded MID/RID values of the DMAC port
connected to the DMA client and the slave channel configuration
parameters.
bits[0:9] - Specifies MID/RID value
The cell specifies the encoded MID/RID or the REQ No values of
the DMAC port connected to the DMA client and the slave channel
configuration parameters.
bits[0:9] - Specifies the MID/RID or the REQ No value
bit[10] - Specifies DMA request high enable (HIEN)
bit[11] - Specifies DMA request detection type (LVL)
bits[12:14] - Specifies DMAACK output mode (AM)
@ -80,12 +85,26 @@ properties:
items:
- description: Reset for DMA ARESETN reset terminal
- description: Reset for DMA RST_ASYNC reset terminal
minItems: 1
reset-names:
items:
- const: arst
- const: rst_async
renesas,icu:
description:
It must contain the phandle to the ICU and the index of the DMAC as seen
from the ICU.
$ref: /schemas/types.yaml#/definitions/phandle-array
items:
- items:
- description: Phandle to the ICU node.
- description:
The number of the DMAC as seen from the ICU, i.e. parameter k from
register ICU_DMkSELy. This may differ from the actual DMAC instance
number.
required:
- compatible
- reg
@ -98,13 +117,25 @@ allOf:
- $ref: dma-controller.yaml#
- if:
not:
properties:
compatible:
contains:
enum:
- renesas,r7s72100-dmac
properties:
compatible:
contains:
enum:
- renesas,r9a07g043-dmac
- renesas,r9a07g044-dmac
- renesas,r9a07g054-dmac
- renesas,r9a08g045-dmac
then:
properties:
reg:
minItems: 2
clocks:
minItems: 2
resets:
minItems: 2
renesas,icu: false
required:
- clocks
- clock-names
@ -112,6 +143,46 @@ allOf:
- resets
- reset-names
- if:
properties:
compatible:
contains:
const: renesas,r7s72100-dmac
then:
properties:
reg:
minItems: 2
clocks: false
clock-names: false
power-domains: false
resets: false
reset-names: false
renesas,icu: false
- if:
properties:
compatible:
contains:
const: renesas,r9a09g057-dmac
then:
properties:
reg:
maxItems: 1
clocks:
maxItems: 1
resets:
maxItems: 1
clock-names: false
reset-names: false
required:
- clocks
- power-domains
- renesas,icu
- resets
additionalProperties: false
examples:

View File

@ -172,8 +172,8 @@ Currently, the types available are:
- It's usually used for copying pixel data between host memory and
memory-mapped GPU device memory, such as found on modern PCI video graphics
cards. The most immediate example is the OpenGL API function
``glReadPielx()``, which might require a verbatim copy of a huge framebuffer
from local device memory onto host memory.
``glReadPixels()``, which might require a verbatim copy of a huge
framebuffer from local device memory onto host memory.
- DMA_XOR

View File

@ -10781,7 +10781,7 @@ F: net/dsa/tag_hellcreek.c
HISILICON DMA DRIVER
M: Zhou Wang <wangzhou1@hisilicon.com>
M: Jie Hai <haijie1@huawei.com>
M: Longfang Liu <liulongfang@huawei.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/hisi_dma.c

View File

@ -280,6 +280,171 @@
resets = <&cpg 0x30>;
};
dmac0: dma-controller@11400000 {
compatible = "renesas,r9a09g057-dmac";
reg = <0 0x11400000 0 0x10000>;
interrupts = <GIC_SPI 499 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 89 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 90 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 91 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 92 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 93 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 94 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 95 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 96 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 97 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 98 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 99 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 100 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 101 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 102 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 103 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 104 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "error",
"ch0", "ch1", "ch2", "ch3",
"ch4", "ch5", "ch6", "ch7",
"ch8", "ch9", "ch10", "ch11",
"ch12", "ch13", "ch14", "ch15";
clocks = <&cpg CPG_MOD 0x0>;
power-domains = <&cpg>;
resets = <&cpg 0x31>;
#dma-cells = <1>;
dma-channels = <16>;
renesas,icu = <&icu 4>;
};
dmac1: dma-controller@14830000 {
compatible = "renesas,r9a09g057-dmac";
reg = <0 0x14830000 0 0x10000>;
interrupts = <GIC_SPI 495 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 25 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 29 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 32 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 33 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 37 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 38 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 39 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 40 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "error",
"ch0", "ch1", "ch2", "ch3",
"ch4", "ch5", "ch6", "ch7",
"ch8", "ch9", "ch10", "ch11",
"ch12", "ch13", "ch14", "ch15";
clocks = <&cpg CPG_MOD 0x1>;
power-domains = <&cpg>;
resets = <&cpg 0x32>;
#dma-cells = <1>;
dma-channels = <16>;
renesas,icu = <&icu 0>;
};
dmac2: dma-controller@14840000 {
compatible = "renesas,r9a09g057-dmac";
reg = <0 0x14840000 0 0x10000>;
interrupts = <GIC_SPI 496 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 41 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 42 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 43 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 44 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 45 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 46 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 47 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 48 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 49 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 50 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 51 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 52 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 53 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 54 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 55 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 56 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "error",
"ch0", "ch1", "ch2", "ch3",
"ch4", "ch5", "ch6", "ch7",
"ch8", "ch9", "ch10", "ch11",
"ch12", "ch13", "ch14", "ch15";
clocks = <&cpg CPG_MOD 0x2>;
power-domains = <&cpg>;
resets = <&cpg 0x33>;
#dma-cells = <1>;
dma-channels = <16>;
renesas,icu = <&icu 1>;
};
dmac3: dma-controller@12000000 {
compatible = "renesas,r9a09g057-dmac";
reg = <0 0x12000000 0 0x10000>;
interrupts = <GIC_SPI 497 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 57 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 58 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 59 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 60 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 61 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 62 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 63 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 64 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 65 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 66 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 67 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 68 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 69 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 70 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 71 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 72 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "error",
"ch0", "ch1", "ch2", "ch3",
"ch4", "ch5", "ch6", "ch7",
"ch8", "ch9", "ch10", "ch11",
"ch12", "ch13", "ch14", "ch15";
clocks = <&cpg CPG_MOD 0x3>;
power-domains = <&cpg>;
resets = <&cpg 0x34>;
#dma-cells = <1>;
dma-channels = <16>;
renesas,icu = <&icu 2>;
};
dmac4: dma-controller@12010000 {
compatible = "renesas,r9a09g057-dmac";
reg = <0 0x12010000 0 0x10000>;
interrupts = <GIC_SPI 498 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 73 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 74 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 75 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 76 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 77 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 78 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 79 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 80 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 81 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 82 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 83 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 84 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 85 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 86 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 87 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 88 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "error",
"ch0", "ch1", "ch2", "ch3",
"ch4", "ch5", "ch6", "ch7",
"ch8", "ch9", "ch10", "ch11",
"ch12", "ch13", "ch14", "ch15";
clocks = <&cpg CPG_MOD 0x4>;
power-domains = <&cpg>;
resets = <&cpg 0x35>;
#dma-cells = <1>;
dma-channels = <16>;
renesas,icu = <&icu 3>;
};
ostm0: timer@11800000 {
compatible = "renesas,r9a09g057-ostm", "renesas,ostm";
reg = <0x0 0x11800000 0x0 0x1000>;

View File

@ -93,6 +93,14 @@ config APPLE_ADMAC
help
Enable support for Audio DMA Controller found on Apple Silicon SoCs.
config ARM_DMA350
tristate "Arm DMA-350 support"
depends on ARM || ARM64 || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Enable support for the Arm DMA-350 controller.
config AT_HDMAC
tristate "Atmel AHB DMA support"
depends on ARCH_AT91

View File

@ -17,6 +17,7 @@ obj-$(CONFIG_ALTERA_MSGDMA) += altera-msgdma.o
obj-$(CONFIG_AMBA_PL08X) += amba-pl08x.o
obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/
obj-$(CONFIG_APPLE_ADMAC) += apple-admac.o
obj-$(CONFIG_ARM_DMA350) += arm-dma350.o
obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
obj-$(CONFIG_AXI_DMAC) += dma-axi-dmac.o

View File

@ -566,7 +566,6 @@ int pt_dmaengine_register(struct pt_device *pt)
struct ae4_device *ae4 = NULL;
struct pt_dma_chan *chan;
char *desc_cache_name;
char *cmd_cache_name;
int ret, i;
if (pt->ver == AE4_DMA_VERSION)
@ -582,27 +581,17 @@ int pt_dmaengine_register(struct pt_device *pt)
if (!pt->pt_dma_chan)
return -ENOMEM;
cmd_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-cmd-cache",
dev_name(pt->dev));
if (!cmd_cache_name)
return -ENOMEM;
desc_cache_name = devm_kasprintf(pt->dev, GFP_KERNEL,
"%s-dmaengine-desc-cache",
dev_name(pt->dev));
if (!desc_cache_name) {
ret = -ENOMEM;
goto err_cache;
}
if (!desc_cache_name)
return -ENOMEM;
pt->dma_desc_cache = kmem_cache_create(desc_cache_name,
sizeof(struct pt_dma_desc), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!pt->dma_desc_cache) {
ret = -ENOMEM;
goto err_cache;
}
if (!pt->dma_desc_cache)
return -ENOMEM;
dma_dev->dev = pt->dev;
dma_dev->src_addr_widths = DMA_SLAVE_BUSWIDTH_64_BYTES;
@ -656,9 +645,6 @@ int pt_dmaengine_register(struct pt_device *pt)
err_reg:
kmem_cache_destroy(pt->dma_desc_cache);
err_cache:
kmem_cache_destroy(pt->dma_cmd_cache);
return ret;
}
EXPORT_SYMBOL_GPL(pt_dmaengine_register);
@ -670,5 +656,4 @@ void pt_dmaengine_unregister(struct pt_device *pt)
dma_async_device_unregister(dma_dev);
kmem_cache_destroy(pt->dma_desc_cache);
kmem_cache_destroy(pt->dma_cmd_cache);
}

View File

@ -254,7 +254,6 @@ struct pt_device {
/* Support for the DMA Engine capabilities */
struct dma_device dma_dev;
struct pt_dma_chan *pt_dma_chan;
struct kmem_cache *dma_cmd_cache;
struct kmem_cache *dma_desc_cache;
wait_queue_head_t lsb_queue;

660
drivers/dma/arm-dma350.c Normal file
View File

@ -0,0 +1,660 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2024-2025 Arm Limited
// Arm DMA-350 driver
#include <linux/bitfield.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include "dmaengine.h"
#include "virt-dma.h"
/*
 * Register map definitions per the Arm CoreLink DMA-350 TRM.
 */

/* Global DMAINFO frame: identification and build configuration */
#define DMAINFO			0x0f00

#define DMA_BUILDCFG0		0xb0
#define DMA_CFG_DATA_WIDTH	GENMASK(18, 16)
#define DMA_CFG_ADDR_WIDTH	GENMASK(15, 10)
#define DMA_CFG_NUM_CHANNELS	GENMASK(9, 4)

#define DMA_BUILDCFG1		0xb4
#define DMA_CFG_NUM_TRIGGER_IN	GENMASK(8, 0)

#define IIDR			0xc8
#define IIDR_PRODUCTID		GENMASK(31, 20)
#define IIDR_VARIANT		GENMASK(19, 16)
#define IIDR_REVISION		GENMASK(15, 12)
#define IIDR_IMPLEMENTER	GENMASK(11, 0)

#define PRODUCTID_DMA350	0x3a0
#define IMPLEMENTER_ARM		0x43b

/* Per-channel register frames, 0x100 apart from 0x1000 */
#define DMACH(n)		(0x1000 + 0x0100 * (n))

#define CH_CMD			0x00
#define CH_CMD_RESUME		BIT(5)
#define CH_CMD_PAUSE		BIT(4)
#define CH_CMD_STOP		BIT(3)
#define CH_CMD_DISABLE		BIT(2)
#define CH_CMD_CLEAR		BIT(1)
#define CH_CMD_ENABLE		BIT(0)

#define CH_STATUS		0x04
#define CH_STAT_RESUMEWAIT	BIT(21)
#define CH_STAT_PAUSED		BIT(20)
#define CH_STAT_STOPPED		BIT(19)
#define CH_STAT_DISABLED	BIT(18)
#define CH_STAT_ERR		BIT(17)
#define CH_STAT_DONE		BIT(16)
#define CH_STAT_INTR_ERR	BIT(1)
#define CH_STAT_INTR_DONE	BIT(0)

#define CH_INTREN		0x08
#define CH_INTREN_ERR		BIT(1)
#define CH_INTREN_DONE		BIT(0)

#define CH_CTRL			0x0c
#define CH_CTRL_USEDESTRIGIN	BIT(26)
/*
 * USESRCTRIGIN is bit 25 per the DMA-350 TRM; it was previously defined
 * as BIT(26), duplicating USEDESTRIGIN.
 */
#define CH_CTRL_USESRCTRIGIN	BIT(25)
#define CH_CTRL_DONETYPE	GENMASK(23, 21)
#define CH_CTRL_REGRELOADTYPE	GENMASK(20, 18)
#define CH_CTRL_XTYPE		GENMASK(11, 9)
#define CH_CTRL_TRANSIZE	GENMASK(2, 0)

#define CH_SRCADDR		0x10
#define CH_SRCADDRHI		0x14
#define CH_DESADDR		0x18
#define CH_DESADDRHI		0x1c
#define CH_XSIZE		0x20
#define CH_XSIZEHI		0x24
#define CH_SRCTRANSCFG		0x28
#define CH_DESTRANSCFG		0x2c
#define CH_CFG_MAXBURSTLEN	GENMASK(19, 16)
#define CH_CFG_PRIVATTR		BIT(11)
#define CH_CFG_SHAREATTR	GENMASK(9, 8)
#define CH_CFG_MEMATTR		GENMASK(7, 0)

/* Canned SRCTRANSCFG/DESTRANSCFG values: device, non-cacheable, write-back */
#define TRANSCFG_DEVICE \
	FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
	FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
	FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_DEVICE)
#define TRANSCFG_NC \
	FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
	FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_OSH) | \
	FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_NC)
#define TRANSCFG_WB \
	FIELD_PREP(CH_CFG_MAXBURSTLEN, 0xf) | \
	FIELD_PREP(CH_CFG_SHAREATTR, SHAREATTR_ISH) | \
	FIELD_PREP(CH_CFG_MEMATTR, MEMATTR_WB)

#define CH_XADDRINC		0x30
/* XSIZE/XADDRINC registers pack source and destination halves together */
#define CH_XY_DES		GENMASK(31, 16)
#define CH_XY_SRC		GENMASK(15, 0)

#define CH_FILLVAL		0x38
#define CH_SRCTRIGINCFG		0x4c
#define CH_DESTRIGINCFG		0x50

#define CH_LINKATTR		0x70
#define CH_LINK_SHAREATTR	GENMASK(9, 8)
#define CH_LINK_MEMATTR		GENMASK(7, 0)

#define CH_AUTOCFG		0x74
#define CH_LINKADDR		0x78
#define CH_LINKADDR_EN		BIT(0)
#define CH_LINKADDRHI		0x7c

#define CH_ERRINFO		0x90
#define CH_ERRINFO_AXIRDPOISERR	BIT(18)
#define CH_ERRINFO_AXIWRRESPERR	BIT(17)
#define CH_ERRINFO_AXIRDRESPERR	BIT(16)

#define CH_BUILDCFG0		0xf8
#define CH_CFG_INC_WIDTH	GENMASK(29, 26)
#define CH_CFG_DATA_WIDTH	GENMASK(24, 22)
#define CH_CFG_DATA_BUF_SIZE	GENMASK(7, 0)

#define CH_BUILDCFG1		0xfc
#define CH_CFG_HAS_CMDLINK	BIT(8)
#define CH_CFG_HAS_TRIGSEL	BIT(7)
#define CH_CFG_HAS_TRIGIN	BIT(5)
#define CH_CFG_HAS_WRAP		BIT(1)

/*
 * Command-link header bits: each set bit means the next word in the
 * command image holds the value for the corresponding channel register,
 * in ascending bit order. d350_start_next() consumes them the same way.
 */
#define LINK_REGCLEAR		BIT(0)
#define LINK_INTREN		BIT(2)
#define LINK_CTRL		BIT(3)
#define LINK_SRCADDR		BIT(4)
#define LINK_SRCADDRHI		BIT(5)
#define LINK_DESADDR		BIT(6)
#define LINK_DESADDRHI		BIT(7)
#define LINK_XSIZE		BIT(8)
#define LINK_XSIZEHI		BIT(9)
#define LINK_SRCTRANSCFG	BIT(10)
#define LINK_DESTRANSCFG	BIT(11)
#define LINK_XADDRINC		BIT(12)
#define LINK_FILLVAL		BIT(14)
#define LINK_SRCTRIGINCFG	BIT(19)
#define LINK_DESTRIGINCFG	BIT(20)
#define LINK_AUTOCFG		BIT(29)
#define LINK_LINKADDR		BIT(30)
#define LINK_LINKADDRHI		BIT(31)
/* CH_CTRL.DONETYPE: when the channel raises its DONE status */
enum ch_ctrl_donetype {
	CH_CTRL_DONETYPE_NONE = 0,
	CH_CTRL_DONETYPE_CMD = 1,	/* DONE at end of command (used here) */
	CH_CTRL_DONETYPE_CYCLE = 3
};

/* CH_CTRL.XTYPE: X-dimension addressing behaviour */
enum ch_ctrl_xtype {
	CH_CTRL_XTYPE_DISABLE = 0,
	CH_CTRL_XTYPE_CONTINUE = 1,	/* plain incrementing copy */
	CH_CTRL_XTYPE_WRAP = 2,
	CH_CTRL_XTYPE_FILL = 3		/* fill destination from FILLVAL */
};

/* SHAREATTR field encodings: non-/outer-/inner-shareable */
enum ch_cfg_shareattr {
	SHAREATTR_NSH = 0,
	SHAREATTR_OSH = 2,
	SHAREATTR_ISH = 3
};

/* MEMATTR field encodings: device, non-cacheable, write-back cacheable */
enum ch_cfg_memattr {
	MEMATTR_DEVICE = 0x00,
	MEMATTR_NC = 0x44,
	MEMATTR_WB = 0xff
};
/* One queued transfer: a DMA-350 command-link image plus bookkeeping */
struct d350_desc {
	struct virt_dma_desc vd;
	u32 command[16];	/* header bitmap + register values (see LINK_*) */
	u16 xsize;		/* element count, low 16 bits */
	u16 xsizehi;		/* element count, high 16 bits */
	u8 tsz;			/* log2 of the element (transfer) size in bytes */
};

/* Per-channel state wrapping a virt-dma channel */
struct d350_chan {
	struct virt_dma_chan vc;
	struct d350_desc *desc;	/* currently-active descriptor, if any */
	void __iomem *base;	/* this channel's register frame */
	int irq;
	enum dma_status status;	/* state of the active descriptor */
	dma_cookie_t cookie;	/* cookie of the active descriptor */
	u32 residue;		/* last-known remaining bytes */
	u8 tsz;			/* log2 of the channel's max transfer size */
	bool has_trig;		/* channel has trigger input + selection */
	bool has_wrap;		/* channel supports wrap (and hence fill) */
	bool coherent;		/* device is DMA-coherent with the CPU */
};

/* Controller instance: dmaengine device plus its channel array */
struct d350 {
	struct dma_device dma;
	int nchan;	/* channel count from DMA_BUILDCFG0 */
	int nreq;	/* trigger input count from DMA_BUILDCFG1 */
	struct d350_chan channels[] __counted_by(nchan);
};
/* Convert a generic dma_chan back to our channel wrapper */
static inline struct d350_chan *to_d350_chan(struct dma_chan *chan)
{
	return container_of(chan, struct d350_chan, vc.chan);
}
/* Convert a virt-dma descriptor back to our descriptor wrapper */
static inline struct d350_desc *to_d350_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct d350_desc, vd);
}
/* virt-dma desc_free callback: descriptors are plain kzalloc'd memory */
static void d350_desc_free(struct virt_dma_desc *vd)
{
	kfree(to_d350_desc(vd));
}
/*
 * Build a command-link descriptor for a memory-to-memory copy.
 * Returns NULL on allocation failure.
 */
static struct dma_async_tx_descriptor *d350_prep_memcpy(struct dma_chan *chan,
		dma_addr_t dest, dma_addr_t src, size_t len, unsigned long flags)
{
	struct d350_chan *dch = to_d350_chan(chan);
	struct d350_desc *desc;
	u32 *cmd;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/*
	 * Largest log2 transfer size that divides the length and both
	 * addresses, capped at the channel's maximum (dch->tsz).
	 */
	desc->tsz = __ffs(len | dest | src | (1 << dch->tsz));
	/* Length in elements, split into the XSIZE/XSIZEHI halves */
	desc->xsize = lower_16_bits(len >> desc->tsz);
	desc->xsizehi = upper_16_bits(len >> desc->tsz);

	/*
	 * command[0] is the link header: a bitmap of which channel registers
	 * the following words carry, in ascending LINK_* bit order. They are
	 * replayed into the hardware by d350_start_next(), so the word order
	 * here must match the header bits exactly.
	 */
	cmd = desc->command;
	cmd[0] = LINK_CTRL | LINK_SRCADDR | LINK_SRCADDRHI | LINK_DESADDR |
		 LINK_DESADDRHI | LINK_XSIZE | LINK_XSIZEHI | LINK_SRCTRANSCFG |
		 LINK_DESTRANSCFG | LINK_XADDRINC | LINK_LINKADDR;

	cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
		 FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_CONTINUE) |
		 FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);

	cmd[2] = lower_32_bits(src);
	cmd[3] = upper_32_bits(src);
	cmd[4] = lower_32_bits(dest);
	cmd[5] = upper_32_bits(dest);
	cmd[6] = FIELD_PREP(CH_XY_SRC, desc->xsize) | FIELD_PREP(CH_XY_DES, desc->xsize);
	cmd[7] = FIELD_PREP(CH_XY_SRC, desc->xsizehi) | FIELD_PREP(CH_XY_DES, desc->xsizehi);
	/* Cacheable attributes only when the device is DMA-coherent */
	cmd[8] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
	cmd[9] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
	/* Advance source and destination by one element per beat */
	cmd[10] = FIELD_PREP(CH_XY_SRC, 1) | FIELD_PREP(CH_XY_DES, 1);
	/* No further link: terminate the command chain */
	cmd[11] = 0;

	return vchan_tx_prep(&dch->vc, &desc->vd, flags);
}
/*
 * Build a command-link descriptor that fills a region with a byte value,
 * using the hardware's FILL mode. Returns NULL on allocation failure.
 */
static struct dma_async_tx_descriptor *d350_prep_memset(struct dma_chan *chan,
		dma_addr_t dest, int value, size_t len, unsigned long flags)
{
	struct d350_chan *dch = to_d350_chan(chan);
	struct d350_desc *desc;
	u32 *cmd;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/* Largest log2 transfer size aligned to length and destination */
	desc->tsz = __ffs(len | dest | (1 << dch->tsz));
	desc->xsize = lower_16_bits(len >> desc->tsz);
	desc->xsizehi = upper_16_bits(len >> desc->tsz);

	/*
	 * Header bitmap + register words, in ascending LINK_* bit order;
	 * consumed by d350_start_next(). No source side: FILL mode reads
	 * from CH_FILLVAL instead.
	 */
	cmd = desc->command;
	cmd[0] = LINK_CTRL | LINK_DESADDR | LINK_DESADDRHI |
		 LINK_XSIZE | LINK_XSIZEHI | LINK_DESTRANSCFG |
		 LINK_XADDRINC | LINK_FILLVAL | LINK_LINKADDR;

	cmd[1] = FIELD_PREP(CH_CTRL_TRANSIZE, desc->tsz) |
		 FIELD_PREP(CH_CTRL_XTYPE, CH_CTRL_XTYPE_FILL) |
		 FIELD_PREP(CH_CTRL_DONETYPE, CH_CTRL_DONETYPE_CMD);

	cmd[2] = lower_32_bits(dest);
	cmd[3] = upper_32_bits(dest);
	cmd[4] = FIELD_PREP(CH_XY_DES, desc->xsize);
	cmd[5] = FIELD_PREP(CH_XY_DES, desc->xsizehi);
	cmd[6] = dch->coherent ? TRANSCFG_WB : TRANSCFG_NC;
	/* Advance destination by one element per beat */
	cmd[7] = FIELD_PREP(CH_XY_DES, 1);
	/* Replicate the fill byte across the 32-bit fill value */
	cmd[8] = (u8)value * 0x01010101;
	/* Terminate the command chain */
	cmd[9] = 0;

	return vchan_tx_prep(&dch->vc, &desc->vd, flags);
}
/* dmaengine device_pause: pause the channel if a transfer is in flight */
static int d350_pause(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (dch->status == DMA_IN_PROGRESS) {
		writel_relaxed(CH_CMD_PAUSE, dch->base + CH_CMD);
		dch->status = DMA_PAUSED;
	}
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	return 0;
}
/* dmaengine device_resume: resume a previously paused channel */
static int d350_resume(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (dch->status == DMA_PAUSED) {
		writel_relaxed(CH_CMD_RESUME, dch->base + CH_CMD);
		dch->status = DMA_IN_PROGRESS;
	}
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	return 0;
}
/*
 * Read the remaining transfer length from the channel's XSIZE/XSIZEHI
 * registers. The two halves cannot be read atomically while the
 * hardware is counting down, so re-read the high half until it is
 * stable around the low-half read.
 */
static u32 d350_get_residue(struct d350_chan *dch)
{
	u32 res, xsize, xsizehi, hi_new;
	int retries = 3; /* 1st time unlucky, 2nd improbable, 3rd just broken */

	hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
	do {
		xsizehi = hi_new;
		xsize = readl_relaxed(dch->base + CH_XSIZE);
		hi_new = readl_relaxed(dch->base + CH_XSIZEHI);
	} while (xsizehi != hi_new && --retries);

	/* Use the destination count: elements not yet written out */
	res = FIELD_GET(CH_XY_DES, xsize);
	res |= FIELD_GET(CH_XY_DES, xsizehi) << 16;

	/* Convert elements back to bytes */
	return res << dch->desc->tsz;
}
/* dmaengine device_terminate_all: stop the channel and reap descriptors */
static int d350_terminate_all(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dch->vc.lock, flags);
	writel_relaxed(CH_CMD_STOP, dch->base + CH_CMD);
	if (dch->desc) {
		/* A DMA_ERROR descriptor was already completed by the IRQ */
		if (dch->status != DMA_ERROR)
			vchan_terminate_vdesc(&dch->desc->vd);
		dch->desc = NULL;
		dch->status = DMA_COMPLETE;
	}
	/* Move everything still queued onto the terminated list */
	vchan_get_all_descriptors(&dch->vc, &list);
	list_splice_tail(&list, &dch->vc.desc_terminated);
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	return 0;
}
/* dmaengine device_synchronize: wait out any in-flight vchan callbacks */
static void d350_synchronize(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);

	vchan_synchronize(&dch->vc);
}
static u32 d350_desc_bytes(struct d350_desc *desc)
{
return ((u32)desc->xsizehi << 16 | desc->xsize) << desc->tsz;
}
/* dmaengine device_tx_status: report completion state and residue */
static enum dma_status d350_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct d350_chan *dch = to_d350_chan(chan);
	struct virt_dma_desc *vd;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(chan, cookie, state);

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (cookie == dch->cookie) {
		/* Currently-active descriptor: ask the hardware */
		status = dch->status;
		if (status == DMA_IN_PROGRESS || status == DMA_PAUSED)
			dch->residue = d350_get_residue(dch);
		residue = dch->residue;
	} else if ((vd = vchan_find_desc(&dch->vc, cookie))) {
		/* Still queued: nothing transferred yet */
		residue = d350_desc_bytes(to_d350_desc(vd));
	} else if (status == DMA_IN_PROGRESS) {
		/* Somebody else terminated it? */
		status = DMA_ERROR;
	}
	spin_unlock_irqrestore(&dch->vc.lock, flags);

	dma_set_residue(state, residue);

	return status;
}
/*
 * Load the next queued descriptor into the channel and enable it.
 * Caller must hold vc.lock. Replays the descriptor's command words
 * into the registers named by the header bitmap, in ascending LINK_*
 * bit order — the same order the prep functions packed them.
 */
static void d350_start_next(struct d350_chan *dch)
{
	u32 hdr, *reg;

	dch->desc = to_d350_desc(vchan_next_desc(&dch->vc));
	if (!dch->desc)
		return;

	list_del(&dch->desc->vd.node);
	dch->status = DMA_IN_PROGRESS;
	dch->cookie = dch->desc->vd.tx.cookie;
	dch->residue = d350_desc_bytes(dch->desc);

	hdr = dch->desc->command[0];
	reg = &dch->desc->command[1];

	if (hdr & LINK_INTREN)
		writel_relaxed(*reg++, dch->base + CH_INTREN);
	if (hdr & LINK_CTRL)
		writel_relaxed(*reg++, dch->base + CH_CTRL);
	if (hdr & LINK_SRCADDR)
		writel_relaxed(*reg++, dch->base + CH_SRCADDR);
	if (hdr & LINK_SRCADDRHI)
		writel_relaxed(*reg++, dch->base + CH_SRCADDRHI);
	if (hdr & LINK_DESADDR)
		writel_relaxed(*reg++, dch->base + CH_DESADDR);
	if (hdr & LINK_DESADDRHI)
		writel_relaxed(*reg++, dch->base + CH_DESADDRHI);
	if (hdr & LINK_XSIZE)
		writel_relaxed(*reg++, dch->base + CH_XSIZE);
	if (hdr & LINK_XSIZEHI)
		writel_relaxed(*reg++, dch->base + CH_XSIZEHI);
	if (hdr & LINK_SRCTRANSCFG)
		writel_relaxed(*reg++, dch->base + CH_SRCTRANSCFG);
	if (hdr & LINK_DESTRANSCFG)
		writel_relaxed(*reg++, dch->base + CH_DESTRANSCFG);
	if (hdr & LINK_XADDRINC)
		writel_relaxed(*reg++, dch->base + CH_XADDRINC);
	if (hdr & LINK_FILLVAL)
		writel_relaxed(*reg++, dch->base + CH_FILLVAL);
	if (hdr & LINK_SRCTRIGINCFG)
		writel_relaxed(*reg++, dch->base + CH_SRCTRIGINCFG);
	if (hdr & LINK_DESTRIGINCFG)
		writel_relaxed(*reg++, dch->base + CH_DESTRIGINCFG);
	if (hdr & LINK_AUTOCFG)
		writel_relaxed(*reg++, dch->base + CH_AUTOCFG);
	if (hdr & LINK_LINKADDR)
		writel_relaxed(*reg++, dch->base + CH_LINKADDR);
	if (hdr & LINK_LINKADDRHI)
		writel_relaxed(*reg++, dch->base + CH_LINKADDRHI);

	/* Non-relaxed write orders all the setup above before enabling */
	writel(CH_CMD_ENABLE, dch->base + CH_CMD);
}
/* dmaengine device_issue_pending: kick the queue if the channel is idle */
static void d350_issue_pending(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dch->vc.lock, flags);
	if (vchan_issue_pending(&dch->vc) && !dch->desc)
		d350_start_next(dch);
	spin_unlock_irqrestore(&dch->vc.lock, flags);
}
/*
 * Per-channel interrupt handler (requested with IRQF_SHARED). Completes
 * the active descriptor on DONE, records error details on ERR, and
 * starts the next queued descriptor.
 *
 * NOTE(review): vd is derived from dch->desc before any check; if this
 * handler could ever run with a nonzero latched status but no active
 * descriptor (e.g. racing a terminate), vd would be based on a NULL
 * pointer — confirm this cannot happen.
 */
static irqreturn_t d350_irq(int irq, void *data)
{
	struct d350_chan *dch = data;
	struct device *dev = dch->vc.chan.device->dev;
	struct virt_dma_desc *vd = &dch->desc->vd;
	u32 ch_status;

	ch_status = readl(dch->base + CH_STATUS);
	/* Shared line: not our interrupt if no status bits are set */
	if (!ch_status)
		return IRQ_NONE;

	if (ch_status & CH_STAT_INTR_ERR) {
		u32 errinfo = readl_relaxed(dch->base + CH_ERRINFO);

		/* Map AXI error flags to the closest dmaengine result code */
		if (errinfo & (CH_ERRINFO_AXIRDPOISERR | CH_ERRINFO_AXIRDRESPERR))
			vd->tx_result.result = DMA_TRANS_READ_FAILED;
		else if (errinfo & CH_ERRINFO_AXIWRRESPERR)
			vd->tx_result.result = DMA_TRANS_WRITE_FAILED;
		else
			vd->tx_result.result = DMA_TRANS_ABORTED;
		vd->tx_result.residue = d350_get_residue(dch);
	} else if (!(ch_status & CH_STAT_INTR_DONE)) {
		dev_warn(dev, "Unexpected IRQ source? 0x%08x\n", ch_status);
	}
	/* Acknowledge whatever status we observed */
	writel_relaxed(ch_status, dch->base + CH_STATUS);

	spin_lock(&dch->vc.lock);
	vchan_cookie_complete(vd);
	if (ch_status & CH_STAT_INTR_DONE) {
		dch->status = DMA_COMPLETE;
		dch->residue = 0;
		d350_start_next(dch);
	} else {
		dch->status = DMA_ERROR;
		dch->residue = vd->tx_result.residue;
	}
	spin_unlock(&dch->vc.lock);

	return IRQ_HANDLED;
}
/*
 * dmaengine alloc_chan_resources: claim the channel's IRQ (shared; the
 * handler tolerates spurious calls via its status check) and enable the
 * done/error interrupts only if the request succeeded.
 */
static int d350_alloc_chan_resources(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);
	int ret = request_irq(dch->irq, d350_irq, IRQF_SHARED,
			      dev_name(&dch->vc.chan.dev->device), dch);

	if (!ret)
		writel_relaxed(CH_INTREN_DONE | CH_INTREN_ERR, dch->base + CH_INTREN);

	return ret;
}
/* dmaengine free_chan_resources: mask interrupts, release IRQ and descs */
static void d350_free_chan_resources(struct dma_chan *chan)
{
	struct d350_chan *dch = to_d350_chan(chan);

	writel_relaxed(0, dch->base + CH_INTREN);
	free_irq(dch->irq, dch);
	vchan_free_chan_resources(&dch->vc);
}
/*
 * Probe: identify the controller from its IIDR, size the driver state
 * from the hardware build configuration, initialise every channel that
 * supports command links, and register with the dmaengine core.
 *
 * Fixes relative to the original:
 *  - dch->coherent was never assigned, although d350_prep_memcpy() and
 *    d350_prep_memset() read it to choose cacheable transfer attributes;
 *  - the local 'memset' shadowed the C library function of the same
 *    name — renamed to 'has_memset';
 *  - the dma_set_mask_and_coherent() result is now checked.
 */
static int d350_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct d350 *dmac;
	void __iomem *base;
	u32 reg;
	int ret, nchan, dw, aw, r, p;
	bool coherent, has_memset;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* Verify we are really looking at a DMA-350 */
	reg = readl_relaxed(base + DMAINFO + IIDR);
	r = FIELD_GET(IIDR_VARIANT, reg);
	p = FIELD_GET(IIDR_REVISION, reg);
	if (FIELD_GET(IIDR_IMPLEMENTER, reg) != IMPLEMENTER_ARM ||
	    FIELD_GET(IIDR_PRODUCTID, reg) != PRODUCTID_DMA350)
		return dev_err_probe(dev, -ENODEV, "Not a DMA-350!");

	/* Discover channel count, data width and address width */
	reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG0);
	nchan = FIELD_GET(DMA_CFG_NUM_CHANNELS, reg) + 1;
	dw = 1 << FIELD_GET(DMA_CFG_DATA_WIDTH, reg);
	aw = FIELD_GET(DMA_CFG_ADDR_WIDTH, reg) + 1;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(aw));
	if (ret)
		return dev_err_probe(dev, ret, "Failed to set DMA mask\n");
	coherent = device_get_dma_attr(dev) == DEV_DMA_COHERENT;

	dmac = devm_kzalloc(dev, struct_size(dmac, channels, nchan), GFP_KERNEL);
	if (!dmac)
		return -ENOMEM;

	dmac->nchan = nchan;

	reg = readl_relaxed(base + DMAINFO + DMA_BUILDCFG1);
	dmac->nreq = FIELD_GET(DMA_CFG_NUM_TRIGGER_IN, reg);

	dev_dbg(dev, "DMA-350 r%dp%d with %d channels, %d requests\n", r, p, dmac->nchan, dmac->nreq);

	dmac->dma.dev = dev;
	/* Advertise every power-of-two bus width up to the hardware's */
	for (int i = min(dw, 16); i > 0; i /= 2) {
		dmac->dma.src_addr_widths |= BIT(i);
		dmac->dma.dst_addr_widths |= BIT(i);
	}
	dmac->dma.directions = BIT(DMA_MEM_TO_MEM);
	dmac->dma.descriptor_reuse = true;
	dmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dmac->dma.device_alloc_chan_resources = d350_alloc_chan_resources;
	dmac->dma.device_free_chan_resources = d350_free_chan_resources;
	dma_cap_set(DMA_MEMCPY, dmac->dma.cap_mask);
	dmac->dma.device_prep_dma_memcpy = d350_prep_memcpy;
	dmac->dma.device_pause = d350_pause;
	dmac->dma.device_resume = d350_resume;
	dmac->dma.device_terminate_all = d350_terminate_all;
	dmac->dma.device_synchronize = d350_synchronize;
	dmac->dma.device_tx_status = d350_tx_status;
	dmac->dma.device_issue_pending = d350_issue_pending;
	INIT_LIST_HEAD(&dmac->dma.channels);

	/* Would be nice to have per-channel caps for this... */
	has_memset = true;
	for (int i = 0; i < nchan; i++) {
		struct d350_chan *dch = &dmac->channels[i];

		dch->base = base + DMACH(i);
		writel_relaxed(CH_CMD_CLEAR, dch->base + CH_CMD);

		reg = readl_relaxed(dch->base + CH_BUILDCFG1);
		if (!(FIELD_GET(CH_CFG_HAS_CMDLINK, reg))) {
			dev_warn(dev, "No command link support on channel %d\n", i);
			continue;
		}
		dch->irq = platform_get_irq(pdev, i);
		if (dch->irq < 0)
			return dev_err_probe(dev, dch->irq,
					     "Failed to get IRQ for channel %d\n", i);

		dch->has_wrap = FIELD_GET(CH_CFG_HAS_WRAP, reg);
		dch->has_trig = FIELD_GET(CH_CFG_HAS_TRIGIN, reg) &
				FIELD_GET(CH_CFG_HAS_TRIGSEL, reg);

		/* Fill is a special case of Wrap */
		has_memset &= dch->has_wrap;

		reg = readl_relaxed(dch->base + CH_BUILDCFG0);
		dch->tsz = FIELD_GET(CH_CFG_DATA_WIDTH, reg);

		/* Command-link fetch attributes follow the coherency of the device */
		reg = FIELD_PREP(CH_LINK_SHAREATTR, coherent ? SHAREATTR_ISH : SHAREATTR_OSH);
		reg |= FIELD_PREP(CH_LINK_MEMATTR, coherent ? MEMATTR_WB : MEMATTR_NC);
		writel_relaxed(reg, dch->base + CH_LINKATTR);

		/* The prep callbacks consult this to pick transfer attributes */
		dch->coherent = coherent;
		dch->vc.desc_free = d350_desc_free;
		vchan_init(&dch->vc, &dmac->dma);
	}

	if (has_memset) {
		dma_cap_set(DMA_MEMSET, dmac->dma.cap_mask);
		dmac->dma.device_prep_dma_memset = d350_prep_memset;
	}

	platform_set_drvdata(pdev, dmac);

	ret = dma_async_device_register(&dmac->dma);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to register DMA device\n");

	return 0;
}
/* Driver removal: deregister from the dmaengine core; devm frees the rest. */
static void d350_remove(struct platform_device *pdev)
{
	struct d350 *d = platform_get_drvdata(pdev);

	dma_async_device_unregister(&d->dma);
}
/*
 * OF match table; __maybe_unused suppresses the "defined but not used"
 * warning that of_match_ptr() in the driver struct causes on !CONFIG_OF
 * builds (MODULE_DEVICE_TABLE still references it for module autoload).
 */
static const struct of_device_id d350_of_match[] __maybe_unused = {
{ .compatible = "arm,dma-350" },
{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, d350_of_match);
/*
 * Platform driver glue for the Arm DMA-350 controller.
 *
 * Reference the match table directly rather than through of_match_ptr():
 * MODULE_DEVICE_TABLE() always references d350_of_match, so of_match_ptr()
 * (which evaluates to NULL on !CONFIG_OF) only breaks OF matching without
 * saving anything.
 */
static struct platform_driver d350_driver = {
	.driver = {
		.name = "arm-dma350",
		.of_match_table = d350_of_match,
	},
	.probe = d350_probe,
	.remove = d350_remove,
};
module_platform_driver(d350_driver);

MODULE_AUTHOR("Robin Murphy <robin.murphy@arm.com>");
MODULE_DESCRIPTION("Arm DMA-350 driver");
MODULE_LICENSE("GPL v2");

View File

@ -2033,10 +2033,8 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
* at_xdmac_start_xfer() for this descriptor. Now it's time
* to release it.
*/
if (desc->active_xfer) {
pm_runtime_put_autosuspend(atxdmac->dev);
pm_runtime_mark_last_busy(atxdmac->dev);
}
if (desc->active_xfer)
pm_runtime_put_noidle(atxdmac->dev);
}
clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);

View File

@ -136,7 +136,8 @@ static void dw_edma_pcie_get_vsec_dma_data(struct pci_dev *pdev,
map = FIELD_GET(DW_PCIE_VSEC_DMA_MAP, val);
if (map != EDMA_MF_EDMA_LEGACY &&
map != EDMA_MF_EDMA_UNROLL &&
map != EDMA_MF_HDMA_COMPAT)
map != EDMA_MF_HDMA_COMPAT &&
map != EDMA_MF_HDMA_NATIVE)
return;
pdata->mf = map;
@ -291,6 +292,8 @@ static int dw_edma_pcie_probe(struct pci_dev *pdev,
pci_dbg(pdev, "Version:\teDMA Unroll (0x%x)\n", chip->mf);
else if (chip->mf == EDMA_MF_HDMA_COMPAT)
pci_dbg(pdev, "Version:\tHDMA Compatible (0x%x)\n", chip->mf);
else if (chip->mf == EDMA_MF_HDMA_NATIVE)
pci_dbg(pdev, "Version:\tHDMA Native (0x%x)\n", chip->mf);
else
pci_dbg(pdev, "Version:\tUnknown (0x%x)\n", chip->mf);

View File

@ -95,7 +95,7 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
}
val = edma_readl_chreg(fsl_chan, ch_csr);
val |= EDMA_V3_CH_CSR_ERQ;
val |= EDMA_V3_CH_CSR_ERQ | EDMA_V3_CH_CSR_EEI;
edma_writel_chreg(fsl_chan, val, ch_csr);
}
@ -821,7 +821,7 @@ void fsl_edma_issue_pending(struct dma_chan *chan)
int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
int ret;
int ret = 0;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_prepare_enable(fsl_chan->clk);
@ -831,17 +831,29 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
32, 0);
if (fsl_chan->txirq) {
if (fsl_chan->txirq)
ret = request_irq(fsl_chan->txirq, fsl_chan->irq_handler, IRQF_SHARED,
fsl_chan->chan_name, fsl_chan);
if (ret) {
dma_pool_destroy(fsl_chan->tcd_pool);
return ret;
}
}
if (ret)
goto err_txirq;
if (fsl_chan->errirq > 0)
ret = request_irq(fsl_chan->errirq, fsl_chan->errirq_handler, IRQF_SHARED,
fsl_chan->errirq_name, fsl_chan);
if (ret)
goto err_errirq;
return 0;
err_errirq:
if (fsl_chan->txirq)
free_irq(fsl_chan->txirq, fsl_chan);
err_txirq:
dma_pool_destroy(fsl_chan->tcd_pool);
return ret;
}
void fsl_edma_free_chan_resources(struct dma_chan *chan)
@ -862,6 +874,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
if (fsl_chan->txirq)
free_irq(fsl_chan->txirq, fsl_chan);
if (fsl_chan->errirq)
free_irq(fsl_chan->errirq, fsl_chan);
vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
dma_pool_destroy(fsl_chan->tcd_pool);

View File

@ -71,6 +71,18 @@
#define EDMA_V3_CH_ES_ERR BIT(31)
#define EDMA_V3_MP_ES_VLD BIT(31)
#define EDMA_V3_CH_ERR_DBE BIT(0)
#define EDMA_V3_CH_ERR_SBE BIT(1)
#define EDMA_V3_CH_ERR_SGE BIT(2)
#define EDMA_V3_CH_ERR_NCE BIT(3)
#define EDMA_V3_CH_ERR_DOE BIT(4)
#define EDMA_V3_CH_ERR_DAE BIT(5)
#define EDMA_V3_CH_ERR_SOE BIT(6)
#define EDMA_V3_CH_ERR_SAE BIT(7)
#define EDMA_V3_CH_ERR_ECX BIT(8)
#define EDMA_V3_CH_ERR_UCE BIT(9)
#define EDMA_V3_CH_ERR BIT(31)
enum fsl_edma_pm_state {
RUNNING = 0,
SUSPENDED,
@ -162,6 +174,7 @@ struct fsl_edma_chan {
u32 dma_dev_size;
enum dma_data_direction dma_dir;
char chan_name[32];
char errirq_name[36];
void __iomem *tcd;
void __iomem *mux_addr;
u32 real_count;
@ -174,7 +187,9 @@ struct fsl_edma_chan {
int priority;
int hw_chanid;
int txirq;
int errirq;
irqreturn_t (*irq_handler)(int irq, void *dev_id);
irqreturn_t (*errirq_handler)(int irq, void *dev_id);
bool is_rxchan;
bool is_remote;
bool is_multi_fifo;
@ -208,6 +223,9 @@ struct fsl_edma_desc {
/* Need clean CHn_CSR DONE before enable TCD's MAJORELINK */
#define FSL_EDMA_DRV_CLEAR_DONE_E_LINK BIT(14)
#define FSL_EDMA_DRV_TCD64 BIT(15)
/* All channel ERR IRQ share one IRQ line */
#define FSL_EDMA_DRV_ERRIRQ_SHARE BIT(16)
#define FSL_EDMA_DRV_EDMA3 (FSL_EDMA_DRV_SPLIT_REG | \
FSL_EDMA_DRV_BUS_8BYTE | \

View File

@ -50,6 +50,83 @@ static irqreturn_t fsl_edma_tx_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
static void fsl_edma3_err_check(struct fsl_edma_chan *fsl_chan)
{
unsigned int ch_err;
u32 val;
scoped_guard(spinlock, &fsl_chan->vchan.lock) {
ch_err = edma_readl_chreg(fsl_chan, ch_es);
if (!(ch_err & EDMA_V3_CH_ERR))
return;
edma_writel_chreg(fsl_chan, EDMA_V3_CH_ERR, ch_es);
val = edma_readl_chreg(fsl_chan, ch_csr);
val &= ~EDMA_V3_CH_CSR_ERQ;
edma_writel_chreg(fsl_chan, val, ch_csr);
}
/* Ignore this interrupt since channel has been disabled already */
if (!fsl_chan->edesc)
return;
if (ch_err & EDMA_V3_CH_ERR_DBE)
dev_err(&fsl_chan->pdev->dev, "Destination Bus Error interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_SBE)
dev_err(&fsl_chan->pdev->dev, "Source Bus Error interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_SGE)
dev_err(&fsl_chan->pdev->dev, "Scatter/Gather Configuration Error interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_NCE)
dev_err(&fsl_chan->pdev->dev, "NBYTES/CITER Configuration Error interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_DOE)
dev_err(&fsl_chan->pdev->dev, "Destination Offset Error interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_DAE)
dev_err(&fsl_chan->pdev->dev, "Destination Address Error interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_SOE)
dev_err(&fsl_chan->pdev->dev, "Source Offset Error interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_SAE)
dev_err(&fsl_chan->pdev->dev, "Source Address Error interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_ECX)
dev_err(&fsl_chan->pdev->dev, "Transfer Canceled interrupt.\n");
if (ch_err & EDMA_V3_CH_ERR_UCE)
dev_err(&fsl_chan->pdev->dev, "Uncorrectable TCD error during channel execution interrupt.\n");
fsl_chan->status = DMA_ERROR;
}
/* Per-channel error IRQ handler: dev_id is the owning fsl_edma_chan. */
static irqreturn_t fsl_edma3_err_handler_per_chan(int irq, void *dev_id)
{
	fsl_edma3_err_check(dev_id);

	return IRQ_HANDLED;
}
/*
 * Shared error IRQ handler: one line serves every channel of the engine,
 * so scan all of them, skipping channels reserved via the channel mask.
 */
static irqreturn_t fsl_edma3_err_handler_shared(int irq, void *dev_id)
{
	struct fsl_edma_engine *fsl_edma = dev_id;
	unsigned int i;

	for (i = 0; i < fsl_edma->n_chans; i++) {
		if (!(fsl_edma->chan_masked & BIT(i)))
			fsl_edma3_err_check(&fsl_edma->chans[i]);
	}

	return IRQ_HANDLED;
}
static irqreturn_t fsl_edma3_tx_handler(int irq, void *dev_id)
{
struct fsl_edma_chan *fsl_chan = dev_id;
@ -309,7 +386,8 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma)
{
int i;
char *errirq_name;
int i, ret;
for (i = 0; i < fsl_edma->n_chans; i++) {
@ -324,6 +402,27 @@ static int fsl_edma3_irq_init(struct platform_device *pdev, struct fsl_edma_engi
return -EINVAL;
fsl_chan->irq_handler = fsl_edma3_tx_handler;
if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE)) {
fsl_chan->errirq = fsl_chan->txirq;
fsl_chan->errirq_handler = fsl_edma3_err_handler_per_chan;
}
}
/* All channel err use one irq number */
if (fsl_edma->drvdata->flags & FSL_EDMA_DRV_ERRIRQ_SHARE) {
/* last one is error irq */
fsl_edma->errirq = platform_get_irq_optional(pdev, fsl_edma->n_chans);
if (fsl_edma->errirq < 0)
return 0; /* dts miss err irq, treat as no err irq case */
errirq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s-err",
dev_name(&pdev->dev));
ret = devm_request_irq(&pdev->dev, fsl_edma->errirq, fsl_edma3_err_handler_shared,
0, errirq_name, fsl_edma);
if (ret)
return dev_err_probe(&pdev->dev, ret, "Can't register eDMA err IRQ.\n");
}
return 0;
@ -464,7 +563,8 @@ static struct fsl_edma_drvdata imx7ulp_data = {
};
static struct fsl_edma_drvdata imx8qm_data = {
.flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE,
.flags = FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_MEM_REMOTE
| FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
@ -481,14 +581,15 @@ static struct fsl_edma_drvdata imx8ulp_data = {
};
static struct fsl_edma_drvdata imx93_data3 = {
.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3,
.flags = FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA3 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x10000,
.chreg_off = 0x10000,
.setup_irq = fsl_edma3_irq_init,
};
static struct fsl_edma_drvdata imx93_data4 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4,
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4
| FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
@ -498,7 +599,7 @@ static struct fsl_edma_drvdata imx93_data4 = {
static struct fsl_edma_drvdata imx95_data5 = {
.flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_DMACLK | FSL_EDMA_DRV_EDMA4 |
FSL_EDMA_DRV_TCD64,
FSL_EDMA_DRV_TCD64 | FSL_EDMA_DRV_ERRIRQ_SHARE,
.chreg_space_sz = 0x8000,
.chreg_off = 0x10000,
.mux_off = 0x200,
@ -700,6 +801,9 @@ static int fsl_edma_probe(struct platform_device *pdev)
snprintf(fsl_chan->chan_name, sizeof(fsl_chan->chan_name), "%s-CH%02d",
dev_name(&pdev->dev), i);
snprintf(fsl_chan->errirq_name, sizeof(fsl_chan->errirq_name),
"%s-CH%02d-err", dev_name(&pdev->dev), i);
fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING;
fsl_chan->srcid = 0;

View File

@ -1226,6 +1226,8 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->dev = &op->dev;
INIT_LIST_HEAD(&fdev->common.channels);
/* The DMA address bits supported for this device. */
fdev->addr_bits = (long)device_get_match_data(fdev->dev);
/* ioremap the registers for use */
fdev->regs = of_iomap(op->dev.of_node, 0);
@ -1254,7 +1256,7 @@ static int fsldma_of_probe(struct platform_device *op)
fdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
fdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
dma_set_mask(&(op->dev), DMA_BIT_MASK(fdev->addr_bits));
platform_set_drvdata(op, fdev);
@ -1387,10 +1389,20 @@ static const struct dev_pm_ops fsldma_pm_ops = {
};
#endif
/* The .data field is used for dma-bit-mask. */
static const struct of_device_id fsldma_of_ids[] = {
{ .compatible = "fsl,elo3-dma", },
{ .compatible = "fsl,eloplus-dma", },
{ .compatible = "fsl,elo-dma", },
{
.compatible = "fsl,elo3-dma",
.data = (void *)40,
},
{
.compatible = "fsl,eloplus-dma",
.data = (void *)36,
},
{
.compatible = "fsl,elo-dma",
.data = (void *)32,
},
{}
};
MODULE_DEVICE_TABLE(of, fsldma_of_ids);

View File

@ -124,6 +124,7 @@ struct fsldma_device {
struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
u32 feature; /* The same as DMA channels */
int irq; /* Channel IRQ */
int addr_bits; /* DMA addressing bits supported */
};
/* Define macros for fsldma_chan->feature property */

View File

@ -349,7 +349,9 @@ static void idxd_cdev_evl_drain_pasid(struct idxd_wq *wq, u32 pasid)
set_bit(h, evl->bmap);
h = (h + 1) % size;
}
drain_workqueue(wq->wq);
if (wq->wq)
drain_workqueue(wq->wq);
mutex_unlock(&evl->lock);
}
@ -442,10 +444,12 @@ static int idxd_submit_user_descriptor(struct idxd_user_context *ctx,
* DSA devices are capable of indirect ("batch") command submission.
* On devices where direct user submissions are not safe, we cannot
* allow this since there is no good way for us to verify these
* indirect commands.
* indirect commands. Narrow the restriction of operations with the
* BATCH opcode to only DSA version 1 devices.
*/
if (is_dsa_dev(idxd_dev) && descriptor.opcode == DSA_OPCODE_BATCH &&
!wq->idxd->user_submission_safe)
wq->idxd->hw.version == DEVICE_VERSION_1 &&
!wq->idxd->user_submission_safe)
return -EINVAL;
/*
* As per the programming specification, the completion address must be

View File

@ -19,7 +19,6 @@
#define IDXD_DRIVER_VERSION "1.00"
extern struct kmem_cache *idxd_desc_pool;
extern bool tc_override;
struct idxd_wq;
@ -171,7 +170,6 @@ struct idxd_cdev {
#define DRIVER_NAME_SIZE 128
#define IDXD_ALLOCATED_BATCH_SIZE 128U
#define WQ_NAME_SIZE 1024
#define WQ_TYPE_SIZE 10

View File

@ -1208,9 +1208,11 @@ static ssize_t op_cap_show_common(struct device *dev, char *buf, unsigned long *
/* On systems where direct user submissions are not safe, we need to clear out
* the BATCH capability from the capability mask in sysfs since we cannot support
* that command on such systems.
* that command on such systems. Narrow the restriction of operations with the
* BATCH opcode to only DSA version 1 devices.
*/
if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe)
if (i == DSA_OPCODE_BATCH/64 && !confdev_to_idxd(dev)->user_submission_safe &&
confdev_to_idxd(dev)->hw.version == DEVICE_VERSION_1)
clear_bit(DSA_OPCODE_BATCH % 64, &val);
pos += sysfs_emit_at(buf, pos, "%*pb", 64, &val);

View File

@ -14,6 +14,7 @@
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
@ -89,8 +90,14 @@ struct rz_dmac_chan {
#define to_rz_dmac_chan(c) container_of(c, struct rz_dmac_chan, vc.chan)
struct rz_dmac_icu {
struct platform_device *pdev;
u8 dmac_index;
};
struct rz_dmac {
struct dma_device engine;
struct rz_dmac_icu icu;
struct device *dev;
struct reset_control *rstc;
void __iomem *base;
@ -99,6 +106,8 @@ struct rz_dmac {
unsigned int n_channels;
struct rz_dmac_chan *channels;
bool has_icu;
DECLARE_BITMAP(modules, 1024);
};
@ -167,6 +176,9 @@ struct rz_dmac {
#define RZ_DMAC_MAX_CHANNELS 16
#define DMAC_NR_LMDESC 64
/* RZ/V2H ICU related */
#define RZV2H_MAX_DMAC_INDEX 4
/*
* -----------------------------------------------------------------------------
* Device access
@ -324,7 +336,13 @@ static void rz_dmac_prepare_desc_for_memcpy(struct rz_dmac_chan *channel)
lmdesc->chext = 0;
lmdesc->header = HEADER_LV;
rz_dmac_set_dmars_register(dmac, channel->index, 0);
if (dmac->has_icu) {
rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
channel->index,
RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
} else {
rz_dmac_set_dmars_register(dmac, channel->index, 0);
}
channel->chcfg = chcfg;
channel->chctrl = CHCTRL_STG | CHCTRL_SETEN;
@ -375,7 +393,13 @@ static void rz_dmac_prepare_descs_for_slave_sg(struct rz_dmac_chan *channel)
channel->lmdesc.tail = lmdesc;
rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
if (dmac->has_icu) {
rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
channel->index, channel->mid_rid);
} else {
rz_dmac_set_dmars_register(dmac, channel->index, channel->mid_rid);
}
channel->chctrl = CHCTRL_SETEN;
}
@ -647,7 +671,13 @@ static void rz_dmac_device_synchronize(struct dma_chan *chan)
if (ret < 0)
dev_warn(dmac->dev, "DMA Timeout");
rz_dmac_set_dmars_register(dmac, channel->index, 0);
if (dmac->has_icu) {
rzv2h_icu_register_dma_req(dmac->icu.pdev, dmac->icu.dmac_index,
channel->index,
RZV2H_ICU_DMAC_REQ_NO_DEFAULT);
} else {
rz_dmac_set_dmars_register(dmac, channel->index, 0);
}
}
/*
@ -748,7 +778,8 @@ static struct dma_chan *rz_dmac_of_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
return dma_request_channel(mask, rz_dmac_chan_filter, dma_spec);
return __dma_request_channel(&mask, rz_dmac_chan_filter, dma_spec,
ofdma->of_node);
}
/*
@ -823,6 +854,38 @@ static int rz_dmac_chan_probe(struct rz_dmac *dmac,
return 0;
}
static int rz_dmac_parse_of_icu(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
struct of_phandle_args args;
uint32_t dmac_index;
int ret;
ret = of_parse_phandle_with_fixed_args(np, "renesas,icu", 1, 0, &args);
if (ret == -ENOENT)
return 0;
if (ret)
return ret;
dmac->has_icu = true;
dmac->icu.pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
if (!dmac->icu.pdev) {
dev_err(dev, "ICU device not found.\n");
return -ENODEV;
}
dmac_index = args.args[0];
if (dmac_index > RZV2H_MAX_DMAC_INDEX) {
dev_err(dev, "DMAC index %u invalid.\n", dmac_index);
return -EINVAL;
}
dmac->icu.dmac_index = dmac_index;
return 0;
}
static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
{
struct device_node *np = dev->of_node;
@ -839,7 +902,7 @@ static int rz_dmac_parse_of(struct device *dev, struct rz_dmac *dmac)
return -EINVAL;
}
return 0;
return rz_dmac_parse_of_icu(dev, dmac);
}
static int rz_dmac_probe(struct platform_device *pdev)
@ -873,9 +936,11 @@ static int rz_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base);
dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dmac->ext_base))
return PTR_ERR(dmac->ext_base);
if (!dmac->has_icu) {
dmac->ext_base = devm_platform_ioremap_resource(pdev, 1);
if (IS_ERR(dmac->ext_base))
return PTR_ERR(dmac->ext_base);
}
/* Register interrupt handler for error */
irq = platform_get_irq_byname(pdev, irqname);
@ -990,9 +1055,12 @@ static void rz_dmac_remove(struct platform_device *pdev)
reset_control_assert(dmac->rstc);
pm_runtime_put(&pdev->dev);
pm_runtime_disable(&pdev->dev);
platform_device_put(dmac->icu.pdev);
}
static const struct of_device_id of_rz_dmac_match[] = {
{ .compatible = "renesas,r9a09g057-dmac", },
{ .compatible = "renesas,rz-dmac", },
{ /* Sentinel */ }
};

View File

@ -27,10 +27,10 @@
#define ADMA_CH_INT_CLEAR 0x1c
#define ADMA_CH_CTRL 0x24
#define ADMA_CH_CTRL_DIR(val) (((val) & 0xf) << 12)
#define ADMA_CH_CTRL_DIR(val, mask, shift) (((val) & (mask)) << (shift))
#define ADMA_CH_CTRL_DIR_AHUB2MEM 2
#define ADMA_CH_CTRL_DIR_MEM2AHUB 4
#define ADMA_CH_CTRL_MODE_CONTINUOUS (2 << 8)
#define ADMA_CH_CTRL_MODE_CONTINUOUS(shift) (2 << (shift))
#define ADMA_CH_CTRL_FLOWCTRL_EN BIT(1)
#define ADMA_CH_CTRL_XFER_PAUSE_SHIFT 0
@ -41,15 +41,27 @@
#define ADMA_CH_CONFIG_MAX_BURST_SIZE 16
#define ADMA_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0xf)
#define ADMA_CH_CONFIG_MAX_BUFS 8
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) (reqs << 4)
#define TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 4)
#define ADMA_GLOBAL_CH_CONFIG 0x400
#define ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(val) ((val) & 0x7)
#define ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(reqs) ((reqs) << 8)
#define TEGRA186_ADMA_GLOBAL_PAGE_CHGRP 0x30
#define TEGRA186_ADMA_GLOBAL_PAGE_RX_REQ 0x70
#define TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ 0x84
#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 0x44
#define TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 0x48
#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 0x100
#define TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 0x104
#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 0x180
#define TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 0x184
#define TEGRA264_ADMA_GLOBAL_PAGE_OFFSET 0x8
#define ADMA_CH_FIFO_CTRL 0x2c
#define ADMA_CH_TX_FIFO_SIZE_SHIFT 8
#define ADMA_CH_RX_FIFO_SIZE_SHIFT 0
#define ADMA_GLOBAL_CH_FIFO_CTRL 0x300
#define ADMA_CH_LOWER_SRC_ADDR 0x34
#define ADMA_CH_LOWER_TRG_ADDR 0x3c
@ -73,36 +85,48 @@ struct tegra_adma;
* @adma_get_burst_config: Function callback used to set DMA burst size.
* @global_reg_offset: Register offset of DMA global register.
* @global_int_clear: Register offset of DMA global interrupt clear.
* @global_ch_fifo_base: Global channel fifo ctrl base offset
* @global_ch_config_base: Global channel config base offset
* @ch_req_tx_shift: Register offset for AHUB transmit channel select.
* @ch_req_rx_shift: Register offset for AHUB receive channel select.
* @ch_dir_shift: Channel direction bit position.
* @ch_mode_shift: Channel mode bit position.
* @ch_base_offset: Register offset of DMA channel registers.
* @ch_tc_offset_diff: From TC register onwards offset differs for Tegra264
* @ch_fifo_ctrl: Default value for channel FIFO CTRL register.
* @ch_config: Outstanding and WRR config values
* @ch_req_mask: Mask for Tx or Rx channel select.
* @ch_dir_mask: Mask for channel direction.
* @ch_req_max: Maximum number of Tx or Rx channels available.
* @ch_reg_size: Size of DMA channel register space.
* @nr_channels: Number of DMA channels available.
* @ch_fifo_size_mask: Mask for FIFO size field.
* @sreq_index_offset: Slave channel index offset.
* @max_page: Maximum ADMA Channel Page.
* @has_outstanding_reqs: If DMA channel can have outstanding requests.
* @set_global_pg_config: Global page programming.
*/
struct tegra_adma_chip_data {
unsigned int (*adma_get_burst_config)(unsigned int burst_size);
unsigned int global_reg_offset;
unsigned int global_int_clear;
unsigned int global_ch_fifo_base;
unsigned int global_ch_config_base;
unsigned int ch_req_tx_shift;
unsigned int ch_req_rx_shift;
unsigned int ch_dir_shift;
unsigned int ch_mode_shift;
unsigned int ch_base_offset;
unsigned int ch_tc_offset_diff;
unsigned int ch_fifo_ctrl;
unsigned int ch_config;
unsigned int ch_req_mask;
unsigned int ch_dir_mask;
unsigned int ch_req_max;
unsigned int ch_reg_size;
unsigned int nr_channels;
unsigned int ch_fifo_size_mask;
unsigned int sreq_index_offset;
unsigned int max_page;
bool has_outstanding_reqs;
void (*set_global_pg_config)(struct tegra_adma *tdma);
};
@ -112,6 +136,7 @@ struct tegra_adma_chip_data {
struct tegra_adma_chan_regs {
unsigned int ctrl;
unsigned int config;
unsigned int global_config;
unsigned int src_addr;
unsigned int trg_addr;
unsigned int fifo_ctrl;
@ -150,6 +175,9 @@ struct tegra_adma_chan {
/* Transfer count and position info */
unsigned int tx_buf_count;
unsigned int tx_buf_pos;
unsigned int global_ch_fifo_offset;
unsigned int global_ch_config_offset;
};
/*
@ -246,6 +274,29 @@ static void tegra186_adma_global_page_config(struct tegra_adma *tdma)
tdma_write(tdma, TEGRA186_ADMA_GLOBAL_PAGE_TX_REQ + (tdma->ch_page_no * 0x4), 0xffffff);
}
/*
 * Program the Tegra264 ADMA global page registers: route all channel
 * groups and the RX/TX request bitmaps (split across two registers each
 * on Tegra264) to the channel page selected in tdma->ch_page_no.
 * Each page's register bank is TEGRA264_ADMA_GLOBAL_PAGE_OFFSET apart.
 */
static void tegra264_adma_global_page_config(struct tegra_adma *tdma)
{
u32 global_page_offset = tdma->ch_page_no * TEGRA264_ADMA_GLOBAL_PAGE_OFFSET;

/* If the default page (page1) is not used, then clear page1 registers */
if (tdma->ch_page_no) {
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0, 0);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1, 0);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0, 0);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1, 0);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0, 0);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1, 0);
}

/* Program global registers for selected page */
/* NOTE(review): _REQ_1 gets 0x1 vs 0xffffffff for _REQ_0 — presumably
 * only one request line lives in the upper word; confirm against TRM. */
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_0 + global_page_offset, 0xffffffff);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_CHGRP_1 + global_page_offset, 0xffffffff);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_0 + global_page_offset, 0xffffffff);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_RX_REQ_1 + global_page_offset, 0x1);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_0 + global_page_offset, 0xffffffff);
tdma_write(tdma, TEGRA264_ADMA_GLOBAL_PAGE_TX_REQ_1 + global_page_offset, 0x1);
}
static int tegra_adma_init(struct tegra_adma *tdma)
{
u32 status;
@ -404,11 +455,21 @@ static void tegra_adma_start(struct tegra_adma_chan *tdc)
tdc->tx_buf_pos = 0;
tdc->tx_buf_count = 0;
tdma_ch_write(tdc, ADMA_CH_TC, ch_regs->tc);
tdma_ch_write(tdc, ADMA_CH_TC - tdc->tdma->cdata->ch_tc_offset_diff, ch_regs->tc);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_regs->ctrl);
tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_regs->src_addr);
tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_regs->trg_addr);
tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
ch_regs->src_addr);
tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdc->tdma->cdata->ch_tc_offset_diff,
ch_regs->trg_addr);
if (!tdc->tdma->cdata->global_ch_fifo_base)
tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_regs->fifo_ctrl);
else if (tdc->global_ch_fifo_offset)
tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_regs->fifo_ctrl);
if (tdc->global_ch_config_offset)
tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_regs->global_config);
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_regs->config);
/* Start ADMA */
@ -421,7 +482,8 @@ static unsigned int tegra_adma_get_residue(struct tegra_adma_chan *tdc)
{
struct tegra_adma_desc *desc = tdc->desc;
unsigned int max = ADMA_CH_XFER_STATUS_COUNT_MASK + 1;
unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS);
unsigned int pos = tdma_ch_read(tdc, ADMA_CH_XFER_STATUS -
tdc->tdma->cdata->ch_tc_offset_diff);
unsigned int periods_remaining;
/*
@ -627,13 +689,16 @@ static int tegra_adma_set_xfer_params(struct tegra_adma_chan *tdc,
return -EINVAL;
}
ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir) |
ADMA_CH_CTRL_MODE_CONTINUOUS |
ch_regs->ctrl |= ADMA_CH_CTRL_DIR(adma_dir, cdata->ch_dir_mask,
cdata->ch_dir_shift) |
ADMA_CH_CTRL_MODE_CONTINUOUS(cdata->ch_mode_shift) |
ADMA_CH_CTRL_FLOWCTRL_EN;
ch_regs->config |= cdata->adma_get_burst_config(burst_size);
ch_regs->config |= ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1);
if (cdata->has_outstanding_reqs)
ch_regs->config |= TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8);
if (cdata->global_ch_config_base)
ch_regs->global_config |= cdata->ch_config;
else
ch_regs->config |= cdata->ch_config;
/*
* 'sreq_index' represents the current ADMAIF channel number and as per
@ -788,12 +853,23 @@ static int __maybe_unused tegra_adma_runtime_suspend(struct device *dev)
/* skip if channel is not active */
if (!ch_reg->cmd)
continue;
ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC);
ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR);
ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR);
ch_reg->tc = tdma_ch_read(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff);
ch_reg->src_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_SRC_ADDR -
tdma->cdata->ch_tc_offset_diff);
ch_reg->trg_addr = tdma_ch_read(tdc, ADMA_CH_LOWER_TRG_ADDR -
tdma->cdata->ch_tc_offset_diff);
ch_reg->ctrl = tdma_ch_read(tdc, ADMA_CH_CTRL);
ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
if (tdc->global_ch_config_offset)
ch_reg->global_config = tdma_read(tdc->tdma, tdc->global_ch_config_offset);
if (!tdc->tdma->cdata->global_ch_fifo_base)
ch_reg->fifo_ctrl = tdma_ch_read(tdc, ADMA_CH_FIFO_CTRL);
else if (tdc->global_ch_fifo_offset)
ch_reg->fifo_ctrl = tdma_read(tdc->tdma, tdc->global_ch_fifo_offset);
ch_reg->config = tdma_ch_read(tdc, ADMA_CH_CONFIG);
}
clk_disable:
@ -832,12 +908,23 @@ static int __maybe_unused tegra_adma_runtime_resume(struct device *dev)
/* skip if channel was not active earlier */
if (!ch_reg->cmd)
continue;
tdma_ch_write(tdc, ADMA_CH_TC, ch_reg->tc);
tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR, ch_reg->src_addr);
tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR, ch_reg->trg_addr);
tdma_ch_write(tdc, ADMA_CH_TC - tdma->cdata->ch_tc_offset_diff, ch_reg->tc);
tdma_ch_write(tdc, ADMA_CH_LOWER_SRC_ADDR - tdma->cdata->ch_tc_offset_diff,
ch_reg->src_addr);
tdma_ch_write(tdc, ADMA_CH_LOWER_TRG_ADDR - tdma->cdata->ch_tc_offset_diff,
ch_reg->trg_addr);
tdma_ch_write(tdc, ADMA_CH_CTRL, ch_reg->ctrl);
tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
if (!tdc->tdma->cdata->global_ch_fifo_base)
tdma_ch_write(tdc, ADMA_CH_FIFO_CTRL, ch_reg->fifo_ctrl);
else if (tdc->global_ch_fifo_offset)
tdma_write(tdc->tdma, tdc->global_ch_fifo_offset, ch_reg->fifo_ctrl);
if (tdc->global_ch_config_offset)
tdma_write(tdc->tdma, tdc->global_ch_config_offset, ch_reg->global_config);
tdma_ch_write(tdc, ADMA_CH_CONFIG, ch_reg->config);
tdma_ch_write(tdc, ADMA_CH_CMD, ch_reg->cmd);
}
@ -848,17 +935,23 @@ static const struct tegra_adma_chip_data tegra210_chip_data = {
.adma_get_burst_config = tegra210_adma_get_burst_config,
.global_reg_offset = 0xc00,
.global_int_clear = 0x20,
.global_ch_fifo_base = 0,
.global_ch_config_base = 0,
.ch_req_tx_shift = 28,
.ch_req_rx_shift = 24,
.ch_dir_shift = 12,
.ch_mode_shift = 8,
.ch_base_offset = 0,
.ch_tc_offset_diff = 0,
.ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1),
.ch_req_mask = 0xf,
.ch_dir_mask = 0xf,
.ch_req_max = 10,
.ch_reg_size = 0x80,
.nr_channels = 22,
.ch_fifo_size_mask = 0xf,
.sreq_index_offset = 2,
.max_page = 0,
.has_outstanding_reqs = false,
.set_global_pg_config = NULL,
};
@ -866,23 +959,56 @@ static const struct tegra_adma_chip_data tegra186_chip_data = {
.adma_get_burst_config = tegra186_adma_get_burst_config,
.global_reg_offset = 0,
.global_int_clear = 0x402c,
.global_ch_fifo_base = 0,
.global_ch_config_base = 0,
.ch_req_tx_shift = 27,
.ch_req_rx_shift = 22,
.ch_dir_shift = 12,
.ch_mode_shift = 8,
.ch_base_offset = 0x10000,
.ch_tc_offset_diff = 0,
.ch_config = ADMA_CH_CONFIG_WEIGHT_FOR_WRR(1) |
TEGRA186_ADMA_CH_CONFIG_OUTSTANDING_REQS(8),
.ch_req_mask = 0x1f,
.ch_dir_mask = 0xf,
.ch_req_max = 20,
.ch_reg_size = 0x100,
.nr_channels = 32,
.ch_fifo_size_mask = 0x1f,
.sreq_index_offset = 4,
.max_page = 4,
.has_outstanding_reqs = true,
.set_global_pg_config = tegra186_adma_global_page_config,
};
static const struct tegra_adma_chip_data tegra264_chip_data = {
.adma_get_burst_config = tegra186_adma_get_burst_config,
.global_reg_offset = 0,
.global_int_clear = 0x800c,
.global_ch_fifo_base = ADMA_GLOBAL_CH_FIFO_CTRL,
.global_ch_config_base = ADMA_GLOBAL_CH_CONFIG,
.ch_req_tx_shift = 26,
.ch_req_rx_shift = 20,
.ch_dir_shift = 10,
.ch_mode_shift = 7,
.ch_base_offset = 0x10000,
.ch_tc_offset_diff = 4,
.ch_config = ADMA_GLOBAL_CH_CONFIG_WEIGHT_FOR_WRR(1) |
ADMA_GLOBAL_CH_CONFIG_OUTSTANDING_REQS(8),
.ch_req_mask = 0x3f,
.ch_dir_mask = 7,
.ch_req_max = 32,
.ch_reg_size = 0x100,
.nr_channels = 64,
.ch_fifo_size_mask = 0x7f,
.sreq_index_offset = 0,
.max_page = 10,
.set_global_pg_config = tegra264_adma_global_page_config,
};
static const struct of_device_id tegra_adma_of_match[] = {
{ .compatible = "nvidia,tegra210-adma", .data = &tegra210_chip_data },
{ .compatible = "nvidia,tegra186-adma", .data = &tegra186_chip_data },
{ .compatible = "nvidia,tegra264-adma", .data = &tegra264_chip_data },
{ },
};
MODULE_DEVICE_TABLE(of, tegra_adma_of_match);
@ -985,6 +1111,15 @@ static int tegra_adma_probe(struct platform_device *pdev)
tdc->chan_addr = tdma->ch_base_addr + (cdata->ch_reg_size * i);
if (tdma->base_addr) {
if (cdata->global_ch_fifo_base)
tdc->global_ch_fifo_offset = cdata->global_ch_fifo_base + (4 * i);
if (cdata->global_ch_config_base)
tdc->global_ch_config_offset =
cdata->global_ch_config_base + (4 * i);
}
tdc->irq = of_irq_get(pdev->dev.of_node, i);
if (tdc->irq <= 0) {
ret = tdc->irq ?: -ENXIO;

View File

@@ -5624,7 +5624,8 @@ static int udma_probe(struct platform_device *pdev)
uc->config.dir = DMA_MEM_TO_MEM;
uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
dev_name(dev), i);
if (!uc->name)
return -ENOMEM;
vchan_init(&uc->vc, &ud->ddev);
/* Use custom vchan completion handling */
tasklet_setup(&uc->vc.task, udma_vchan_complete);

View File

@@ -2909,6 +2909,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
return -EINVAL;
}
xdev->common.directions |= chan->direction;
/* Request the interrupt */
chan->irq = of_irq_get(node, chan->tdest);
if (chan->irq < 0)
@@ -3115,6 +3117,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
}
}
dma_set_max_seg_size(xdev->dev, xdev->max_buffer_len);
if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
xdev->has_axistream_connected =
of_property_read_bool(node, "xlnx,axistream-connected");

View File

@@ -15,6 +15,7 @@
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/irq-renesas-rzv2h.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
@@ -41,6 +42,8 @@
#define ICU_TSCLR 0x24
#define ICU_TITSR(k) (0x28 + (k) * 4)
#define ICU_TSSR(k) (0x30 + (k) * 4)
#define ICU_DMkSELy(k, y) (0x420 + (k) * 0x20 + (y) * 4)
#define ICU_DMACKSELk(k) (0x500 + (k) * 4)
/* NMI */
#define ICU_NMI_EDGE_FALLING 0
@@ -103,6 +106,15 @@ struct rzv2h_hw_info {
u8 field_width;
};
/* DMAC */
#define ICU_DMAC_DkRQ_SEL_MASK GENMASK(9, 0)
#define ICU_DMAC_DMAREQ_SHIFT(up) ((up) * 16)
#define ICU_DMAC_DMAREQ_MASK(up) (ICU_DMAC_DkRQ_SEL_MASK \
<< ICU_DMAC_DMAREQ_SHIFT(up))
#define ICU_DMAC_PREP_DMAREQ(sel, up) (FIELD_PREP(ICU_DMAC_DkRQ_SEL_MASK, (sel)) \
<< ICU_DMAC_DMAREQ_SHIFT(up))
/**
* struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
* @base: Controller's base address
@@ -117,6 +129,27 @@ struct rzv2h_icu_priv {
const struct rzv2h_hw_info *info;
};
/**
 * rzv2h_icu_register_dma_req - route a DMAC request number through the ICU
 * @icu_dev: ICU platform device; its drvdata is the rzv2h_icu_priv instance
 * @dmac_index: DMAC unit index, selects the ICU_DMkSELy register bank (k)
 * @dmac_channel: DMAC channel; two adjacent channels share one 32-bit
 *                DMkSELy register (even channel in bits 9:0, odd channel in
 *                bits 25:16 via the *16 shift in ICU_DMAC_DMAREQ_SHIFT())
 * @req_no: 10-bit request number placed into the channel's DkRQ_SEL field
 *
 * Performs a read-modify-write of ICU_DMkSELy(dmac_index, dmac_channel / 2),
 * replacing only this channel's half of the register.  The update is done
 * under priv->lock with interrupts disabled (scope-based
 * guard(raw_spinlock_irqsave), released automatically at function exit), so
 * concurrent registrations for the sibling channel in the same register
 * cannot lose updates.
 */
void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
u16 req_no)
{
struct rzv2h_icu_priv *priv = platform_get_drvdata(icu_dev);
u32 icu_dmksely, dmareq, dmareq_mask;
u8 y, upper;
y = dmac_channel / 2;      /* register index within the bank */
upper = dmac_channel % 2;  /* 0 = low half, 1 = high half of the register */
dmareq = ICU_DMAC_PREP_DMAREQ(req_no, upper);
dmareq_mask = ICU_DMAC_DMAREQ_MASK(upper);
guard(raw_spinlock_irqsave)(&priv->lock);
icu_dmksely = readl(priv->base + ICU_DMkSELy(dmac_index, y));
icu_dmksely = (icu_dmksely & ~dmareq_mask) | dmareq;
writel(icu_dmksely, priv->base + ICU_DMkSELy(dmac_index, y));
}
EXPORT_SYMBOL_GPL(rzv2h_icu_register_dma_req);
static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
{
return data->domain->host_data;
@@ -491,6 +524,8 @@ static int rzv2h_icu_init_common(struct device_node *node, struct device_node *p
if (!rzv2h_icu_data)
return -ENOMEM;
platform_set_drvdata(pdev, rzv2h_icu_data);
rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
if (IS_ERR(rzv2h_icu_data->base))
return PTR_ERR(rzv2h_icu_data->base);

View File

@@ -0,0 +1,23 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Renesas RZ/V2H(P) Interrupt Control Unit (ICU)
 *
 * Public interface for peripheral drivers (e.g. the RZ/V2H DMAC) to program
 * DMA request routing through the ICU.
 *
 * Copyright (C) 2025 Renesas Electronics Corporation.
 */
#ifndef __LINUX_IRQ_RENESAS_RZV2H
#define __LINUX_IRQ_RENESAS_RZV2H
#include <linux/platform_device.h>
/* 0x3ff is the all-ones value of the ICU's 10-bit DkRQ_SEL request field;
 * presumably the hardware reset/"no request" value — confirm against the
 * RZ/V2H hardware manual. */
#define RZV2H_ICU_DMAC_REQ_NO_DEFAULT 0x3ff
#ifdef CONFIG_RENESAS_RZV2H_ICU
/* Route DMAC request @req_no to @dmac_channel of DMAC unit @dmac_index;
 * implemented in drivers/irqchip/irq-renesas-rzv2h.c. */
void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
u16 req_no);
#else
/* No-op stub so callers build when the ICU driver is not enabled. */
static inline void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index,
u8 dmac_channel, u16 req_no) { }
#endif
#endif /* __LINUX_IRQ_RENESAS_RZV2H */