Tom Rini | 83d290c | 2018-05-06 17:58:06 -0400 | [diff] [blame] | 1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
Mugunthan V N | a0594ce | 2016-02-15 15:31:37 +0530 | [diff] [blame] | 2 | /* |
Álvaro Fernández Rojas | 27ab27f | 2018-11-28 19:17:50 +0100 | [diff] [blame] | 3 | * Copyright (C) 2018 Álvaro Fernández Rojas <noltari@gmail.com> |
| 4 | * Copyright (C) 2015 - 2018 Texas Instruments Incorporated <www.ti.com> |
| 5 | * Written by Mugunthan V N <mugunthanvnm@ti.com> |
| 6 | * |
Mugunthan V N | a0594ce | 2016-02-15 15:31:37 +0530 | [diff] [blame] | 7 | */ |
| 8 | |
| 9 | #ifndef _DMA_H_ |
| 10 | #define _DMA_H_ |
| 11 | |
Simon Glass | cd93d62 | 2020-05-10 11:40:13 -0600 | [diff] [blame] | 12 | #include <linux/bitops.h> |
Álvaro Fernández Rojas | 27ab27f | 2018-11-28 19:17:50 +0100 | [diff] [blame] | 13 | #include <linux/errno.h> |
| 14 | #include <linux/types.h> |
| 15 | |
Simon Glass | 401d1c4 | 2020-10-30 21:38:53 -0600 | [diff] [blame] | 16 | struct udevice; |
| 17 | |
/**
 * enum dma_direction - dma transfer direction indicator
 * @DMA_MEM_TO_MEM: Memcpy mode
 * @DMA_MEM_TO_DEV: From Memory to Device
 * @DMA_DEV_TO_MEM: From Device to Memory
 * @DMA_DEV_TO_DEV: From Device to Device
 */
enum dma_direction {
	DMA_MEM_TO_MEM,
	DMA_MEM_TO_DEV,
	DMA_DEV_TO_MEM,
	DMA_DEV_TO_DEV,
};
| 31 | |
| 32 | #define DMA_SUPPORTS_MEM_TO_MEM BIT(0) |
| 33 | #define DMA_SUPPORTS_MEM_TO_DEV BIT(1) |
| 34 | #define DMA_SUPPORTS_DEV_TO_MEM BIT(2) |
| 35 | #define DMA_SUPPORTS_DEV_TO_DEV BIT(3) |
| 36 | |
/**
 * struct dma_dev_priv - information about a device used by the uclass
 *
 * @supported: mode of transfers that DMA can support, should be
 *	       one/multiple of DMA_SUPPORTS_*
 */
struct dma_dev_priv {
	u32 supported;
};
| 46 | |
Álvaro Fernández Rojas | 27ab27f | 2018-11-28 19:17:50 +0100 | [diff] [blame] | 47 | #ifdef CONFIG_DMA_CHANNELS |
| 48 | /** |
| 49 | * A DMA is a feature of computer systems that allows certain hardware |
| 50 | * subsystems to access main system memory, independent of the CPU. |
| 51 | * DMA channels are typically generated externally to the HW module |
| 52 | * consuming them, by an entity this API calls a DMA provider. This API |
| 53 | * provides a standard means for drivers to enable and disable DMAs, and to |
| 54 | * copy, send and receive data using DMA. |
| 55 | * |
| 56 | * A driver that implements UCLASS_DMA is a DMA provider. A provider will |
| 57 | * often implement multiple separate DMAs, since the hardware it manages |
| 58 | * often has this capability. dma_uclass.h describes the interface which |
| 59 | * DMA providers must implement. |
| 60 | * |
| 61 | * DMA consumers/clients are the HW modules driven by the DMA channels. This |
| 62 | * header file describes the API used by drivers for those HW modules. |
| 63 | * |
 * DMA consumer DMA_MEM_TO_DEV (transmit) usage example (based on networking).
 * Note: dma_send() is always a synchronous operation - it will start the
 * transfer and poll for it to complete:
| 67 | * - get/request dma channel |
| 68 | * struct dma dma_tx; |
| 69 | * ret = dma_get_by_name(common->dev, "tx0", &dma_tx); |
| 70 | * if (ret) ... |
| 71 | * |
| 72 | * - enable dma channel |
| 73 | * ret = dma_enable(&dma_tx); |
| 74 | * if (ret) ... |
| 75 | * |
| 76 | * - dma transmit DMA_MEM_TO_DEV. |
| 77 | * struct ti_drv_packet_data packet_data; |
| 78 | * |
| 79 | * packet_data.opt1 = val1; |
| 80 | * packet_data.opt2 = val2; |
| 81 | * ret = dma_send(&dma_tx, packet, length, &packet_data); |
| 82 | * if (ret) .. |
| 83 | * |
 * DMA consumer DMA_DEV_TO_MEM (receive) usage example (based on networking).
 * Note: dma_receive() is always a synchronous operation - it will start the
 * transfer (if required) and poll for it to complete (or for any previously
 * configured dev2mem transfer to complete):
| 88 | * - get/request dma channel |
| 89 | * struct dma dma_rx; |
| 90 | * ret = dma_get_by_name(common->dev, "rx0", &dma_rx); |
| 91 | * if (ret) ... |
| 92 | * |
| 93 | * - enable dma channel |
| 94 | * ret = dma_enable(&dma_rx); |
| 95 | * if (ret) ... |
| 96 | * |
| 97 | * - dma receive DMA_DEV_TO_MEM. |
| 98 | * struct ti_drv_packet_data packet_data; |
| 99 | * |
 *	len = dma_receive(&dma_rx, (void **)packet, &packet_data);
 *	if (len < 0) ...
| 102 | * |
 * DMA consumer DMA_DEV_TO_MEM (receive) zero-copy usage example (based on
 * networking). The networking subsystem allows configuring and using a few
 * receive buffers (dev2mem), as networking RX DMA channels are usually
 * implemented as a streaming interface
| 107 | * - get/request dma channel |
| 108 | * struct dma dma_rx; |
| 109 | * ret = dma_get_by_name(common->dev, "rx0", &dma_rx); |
| 110 | * if (ret) ... |
| 111 | * |
| 112 | * for (i = 0; i < RX_DESC_NUM; i++) { |
| 113 | * ret = dma_prepare_rcv_buf(&dma_rx, |
| 114 | * net_rx_packets[i], |
| 115 | * RX_BUF_SIZE); |
| 116 | * if (ret) ... |
| 117 | * } |
| 118 | * |
| 119 | * - enable dma channel |
| 120 | * ret = dma_enable(&dma_rx); |
| 121 | * if (ret) ... |
| 122 | * |
| 123 | * - dma receive DMA_DEV_TO_MEM. |
| 124 | * struct ti_drv_packet_data packet_data; |
| 125 | * |
 *	len = dma_receive(&dma_rx, (void **)packet, &packet_data);
 *	if (len < 0) ..
| 128 | * |
| 129 | * -- process packet -- |
| 130 | * |
 * - return buffer back to DMA channel
| 132 | * ret = dma_prepare_rcv_buf(&dma_rx, |
| 133 | * net_rx_packets[rx_next], |
| 134 | * RX_BUF_SIZE); |
| 135 | */ |
| 136 | |
| 137 | struct udevice; |
| 138 | |
/**
 * struct dma - A handle to (allowing control of) a single DMA.
 *
 * Clients provide storage for DMA handles. The content of the structure is
 * managed solely by the DMA API and DMA drivers. A DMA struct is
 * initialized by "get"ing the DMA struct. The DMA struct is passed to all
 * other DMA APIs to identify which DMA channel to operate upon.
 *
 * @dev: The device which implements the DMA channel.
 * @id: The DMA channel ID within the provider.
 *
 * Currently, the DMA API assumes that a single integer ID is enough to
 * identify and configure any DMA channel for any DMA provider. If this
 * assumption becomes invalid in the future, the struct could be expanded to
 * either (a) add more fields to allow DMA providers to store additional
 * information, or (b) replace the id field with an opaque pointer, which the
 * provider would dynamically allocate during its .of_xlate op, and process
 * during its .request op. This may require the addition of an extra op to
 * clean up the allocation.
 */
struct dma {
	struct udevice *dev;
	/*
	 * Written by of_xlate. We assume a single id is enough for now. In the
	 * future, we might add more fields here.
	 */
	unsigned long id;
};
| 167 | |
| 168 | # if CONFIG_IS_ENABLED(OF_CONTROL) && CONFIG_IS_ENABLED(DMA) |
| 169 | /** |
| 170 | * dma_get_by_index - Get/request a DMA by integer index. |
| 171 | * |
| 172 | * This looks up and requests a DMA. The index is relative to the client |
| 173 | * device; each device is assumed to have n DMAs associated with it somehow, |
| 174 | * and this function finds and requests one of them. The mapping of client |
| 175 | * device DMA indices to provider DMAs may be via device-tree properties, |
| 176 | * board-provided mapping tables, or some other mechanism. |
| 177 | * |
| 178 | * @dev: The client device. |
| 179 | * @index: The index of the DMA to request, within the client's list of |
| 180 | * DMA channels. |
| 181 | * @dma: A pointer to a DMA struct to initialize. |
| 182 | * @return 0 if OK, or a negative error code. |
| 183 | */ |
| 184 | int dma_get_by_index(struct udevice *dev, int index, struct dma *dma); |
| 185 | |
| 186 | /** |
| 187 | * dma_get_by_name - Get/request a DMA by name. |
| 188 | * |
| 189 | * This looks up and requests a DMA. The name is relative to the client |
| 190 | * device; each device is assumed to have n DMAs associated with it somehow, |
| 191 | * and this function finds and requests one of them. The mapping of client |
| 192 | * device DMA names to provider DMAs may be via device-tree properties, |
| 193 | * board-provided mapping tables, or some other mechanism. |
| 194 | * |
| 195 | * @dev: The client device. |
| 196 | * @name: The name of the DMA to request, within the client's list of |
| 197 | * DMA channels. |
| 198 | * @dma: A pointer to a DMA struct to initialize. |
| 199 | * @return 0 if OK, or a negative error code. |
| 200 | */ |
| 201 | int dma_get_by_name(struct udevice *dev, const char *name, struct dma *dma); |
| 202 | # else |
/* Stub used when OF_CONTROL/DMA support is not enabled; always fails */
static inline int dma_get_by_index(struct udevice *dev, int index,
				   struct dma *dma)
{
	return -ENOSYS;
}
| 208 | |
/* Stub used when OF_CONTROL/DMA support is not enabled; always fails */
static inline int dma_get_by_name(struct udevice *dev, const char *name,
				  struct dma *dma)
{
	return -ENOSYS;
}
| 214 | # endif |
| 215 | |
| 216 | /** |
| 217 | * dma_request - Request a DMA by provider-specific ID. |
| 218 | * |
| 219 | * This requests a DMA using a provider-specific ID. Generally, this function |
| 220 | * should not be used, since dma_get_by_index/name() provide an interface that |
| 221 | * better separates clients from intimate knowledge of DMA providers. |
| 222 | * However, this function may be useful in core SoC-specific code. |
| 223 | * |
| 224 | * @dev: The DMA provider device. |
| 225 | * @dma: A pointer to a DMA struct to initialize. The caller must |
| 226 | * have already initialized any field in this struct which the |
| 227 | * DMA provider uses to identify the DMA channel. |
| 228 | * @return 0 if OK, or a negative error code. |
| 229 | */ |
| 230 | int dma_request(struct udevice *dev, struct dma *dma); |
| 231 | |
| 232 | /** |
| 233 | * dma_free - Free a previously requested DMA. |
| 234 | * |
| 235 | * @dma: A DMA struct that was previously successfully requested by |
| 236 | * dma_request/get_by_*(). |
| 237 | * @return 0 if OK, or a negative error code. |
| 238 | */ |
| 239 | int dma_free(struct dma *dma); |
| 240 | |
| 241 | /** |
| 242 | * dma_enable() - Enable (turn on) a DMA channel. |
| 243 | * |
| 244 | * @dma: A DMA struct that was previously successfully requested by |
| 245 | * dma_request/get_by_*(). |
| 246 | * @return zero on success, or -ve error code. |
| 247 | */ |
| 248 | int dma_enable(struct dma *dma); |
| 249 | |
| 250 | /** |
| 251 | * dma_disable() - Disable (turn off) a DMA channel. |
| 252 | * |
| 253 | * @dma: A DMA struct that was previously successfully requested by |
| 254 | * dma_request/get_by_*(). |
| 255 | * @return zero on success, or -ve error code. |
| 256 | */ |
| 257 | int dma_disable(struct dma *dma); |
| 258 | |
| 259 | /** |
| 260 | * dma_prepare_rcv_buf() - Prepare/add receive DMA buffer. |
| 261 | * |
| 262 | * It allows to implement zero-copy async DMA_DEV_TO_MEM (receive) transactions |
| 263 | * if supported by DMA providers. |
| 264 | * |
| 265 | * @dma: A DMA struct that was previously successfully requested by |
| 266 | * dma_request/get_by_*(). |
| 267 | * @dst: The receive buffer pointer. |
| 268 | * @size: The receive buffer size |
| 269 | * @return zero on success, or -ve error code. |
| 270 | */ |
| 271 | int dma_prepare_rcv_buf(struct dma *dma, void *dst, size_t size); |
| 272 | |
| 273 | /** |
| 274 | * dma_receive() - Receive a DMA transfer. |
| 275 | * |
| 276 | * @dma: A DMA struct that was previously successfully requested by |
| 277 | * dma_request/get_by_*(). |
| 278 | * @dst: The destination pointer. |
| 279 | * @metadata: DMA driver's channel specific data |
| 280 | * @return length of received data on success, or zero - no data, |
| 281 | * or -ve error code. |
| 282 | */ |
| 283 | int dma_receive(struct dma *dma, void **dst, void *metadata); |
| 284 | |
| 285 | /** |
| 286 | * dma_send() - Send a DMA transfer. |
| 287 | * |
| 288 | * @dma: A DMA struct that was previously successfully requested by |
| 289 | * dma_request/get_by_*(). |
| 290 | * @src: The source pointer. |
| 291 | * @len: Length of the data to be sent (number of bytes). |
| 292 | * @metadata: DMA driver's channel specific data |
| 293 | * @return zero on success, or -ve error code. |
| 294 | */ |
| 295 | int dma_send(struct dma *dma, void *src, size_t len, void *metadata); |
Vignesh Raghavendra | b8a4dd2 | 2019-12-04 22:17:20 +0530 | [diff] [blame] | 296 | |
| 297 | /** |
| 298 | * dma_get_cfg() - Get DMA channel configuration for client's use |
| 299 | * |
| 300 | * @dma: The DMA Channel to manipulate |
| 301 | * @cfg_id: DMA provider specific ID to identify what |
| 302 | * configuration data client needs |
| 303 | * @cfg_data: Pointer to store pointer to DMA driver specific |
| 304 | * configuration data for the given cfg_id (output param) |
| 305 | * @return zero on success, or -ve error code. |
| 306 | */ |
| 307 | int dma_get_cfg(struct dma *dma, u32 cfg_id, void **cfg_data); |
Álvaro Fernández Rojas | 27ab27f | 2018-11-28 19:17:50 +0100 | [diff] [blame] | 308 | #endif /* CONFIG_DMA_CHANNELS */ |
| 309 | |
Vignesh Raghavendra | 1e37330 | 2019-11-15 17:00:42 +0530 | [diff] [blame] | 310 | #if CONFIG_IS_ENABLED(DMA) |
/*
 * dma_get_device - get a DMA device which supports transfer
 * type of transfer_type
 *
 * @transfer_type - transfer type should be one/multiple of
 *		    DMA_SUPPORTS_*
 * @devp - udevice pointer to return the found device
 * @return - 0 on success and devp will hold the
 *	     pointer to the device
 */
| 321 | int dma_get_device(u32 transfer_type, struct udevice **devp); |
| 322 | |
/*
 * dma_memcpy - try to use DMA to do a mem copy which will be
 * much faster than CPU mem copy
 *
 * @dst - destination pointer
 * @src - source pointer
 * @len - data length to be copied
 * @return - on a successful transfer returns the number of bytes
 *	     transferred, and on failure returns an error code.
 */
| 333 | int dma_memcpy(void *dst, void *src, size_t len); |
Vignesh Raghavendra | 1e37330 | 2019-11-15 17:00:42 +0530 | [diff] [blame] | 334 | #else |
/* Stub used when DMA support is not enabled; always fails */
static inline int dma_get_device(u32 transfer_type, struct udevice **devp)
{
	return -ENOSYS;
}
Mugunthan V N | a0594ce | 2016-02-15 15:31:37 +0530 | [diff] [blame] | 339 | |
/* Stub used when DMA support is not enabled; always fails */
static inline int dma_memcpy(void *dst, void *src, size_t len)
{
	return -ENOSYS;
}
| 344 | #endif /* CONFIG_DMA */ |
Mugunthan V N | a0594ce | 2016-02-15 15:31:37 +0530 | [diff] [blame] | 345 | #endif /* _DMA_H_ */ |