blob: 6d98920de6628536babf60b679e1d070a93a8006 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 */
7
8#include <common.h>
Simon Glass1eb69ae2019-11-14 12:57:39 -07009#include <cpu_func.h>
Simon Glassf7ae49f2020-05-10 11:40:05 -060010#include <log.h>
Marek Vasutb660df32012-08-26 15:19:06 +000011#include <malloc.h>
12#include <errno.h>
13#include <bouncebuf.h>
Simon Glass90526e92020-05-10 11:39:56 -060014#include <asm/cache.h>
Marek Vasutb660df32012-08-26 15:19:06 +000015
Stephen Warren84d35b22012-11-06 11:27:29 +000016static int addr_aligned(struct bounce_buffer *state)
Marek Vasutb660df32012-08-26 15:19:06 +000017{
18 const ulong align_mask = ARCH_DMA_MINALIGN - 1;
19
20 /* Check if start is aligned */
Stephen Warren84d35b22012-11-06 11:27:29 +000021 if ((ulong)state->user_buffer & align_mask) {
22 debug("Unaligned buffer address %p\n", state->user_buffer);
Marek Vasutb660df32012-08-26 15:19:06 +000023 return 0;
24 }
25
Stephen Warren84d35b22012-11-06 11:27:29 +000026 /* Check if length is aligned */
27 if (state->len != state->len_aligned) {
Vasili Galka5d69a5d2014-08-26 13:45:48 +030028 debug("Unaligned buffer length %zu\n", state->len);
Marek Vasutb660df32012-08-26 15:19:06 +000029 return 0;
30 }
31
32 /* Aligned */
33 return 1;
34}
35
Marek Vasut8074ffe2020-04-04 12:45:02 +020036int bounce_buffer_start_extalign(struct bounce_buffer *state, void *data,
37 size_t len, unsigned int flags,
38 size_t alignment,
39 int (*addr_is_aligned)(struct bounce_buffer *state))
Marek Vasutb660df32012-08-26 15:19:06 +000040{
Stephen Warren84d35b22012-11-06 11:27:29 +000041 state->user_buffer = data;
42 state->bounce_buffer = data;
43 state->len = len;
Marek Vasut8074ffe2020-04-04 12:45:02 +020044 state->len_aligned = roundup(len, alignment);
Stephen Warren84d35b22012-11-06 11:27:29 +000045 state->flags = flags;
Marek Vasutb660df32012-08-26 15:19:06 +000046
Marek Vasut8074ffe2020-04-04 12:45:02 +020047 if (!addr_is_aligned(state)) {
48 state->bounce_buffer = memalign(alignment,
Stephen Warren84d35b22012-11-06 11:27:29 +000049 state->len_aligned);
50 if (!state->bounce_buffer)
51 return -ENOMEM;
52
53 if (state->flags & GEN_BB_READ)
54 memcpy(state->bounce_buffer, state->user_buffer,
55 state->len);
Marek Vasutb660df32012-08-26 15:19:06 +000056 }
57
Stephen Warren84d35b22012-11-06 11:27:29 +000058 /*
59 * Flush data to RAM so DMA reads can pick it up,
60 * and any CPU writebacks don't race with DMA writes
61 */
62 flush_dcache_range((unsigned long)state->bounce_buffer,
63 (unsigned long)(state->bounce_buffer) +
64 state->len_aligned);
Marek Vasutb660df32012-08-26 15:19:06 +000065
66 return 0;
67}
68
Marek Vasut8074ffe2020-04-04 12:45:02 +020069int bounce_buffer_start(struct bounce_buffer *state, void *data,
70 size_t len, unsigned int flags)
71{
72 return bounce_buffer_start_extalign(state, data, len, flags,
73 ARCH_DMA_MINALIGN,
74 addr_aligned);
75}
76
Stephen Warren84d35b22012-11-06 11:27:29 +000077int bounce_buffer_stop(struct bounce_buffer *state)
Marek Vasutb660df32012-08-26 15:19:06 +000078{
Stephen Warren84d35b22012-11-06 11:27:29 +000079 if (state->flags & GEN_BB_WRITE) {
80 /* Invalidate cache so that CPU can see any newly DMA'd data */
81 invalidate_dcache_range((unsigned long)state->bounce_buffer,
82 (unsigned long)(state->bounce_buffer) +
83 state->len_aligned);
84 }
Marek Vasutb660df32012-08-26 15:19:06 +000085
Stephen Warren84d35b22012-11-06 11:27:29 +000086 if (state->bounce_buffer == state->user_buffer)
Marek Vasutb660df32012-08-26 15:19:06 +000087 return 0;
88
Stephen Warren84d35b22012-11-06 11:27:29 +000089 if (state->flags & GEN_BB_WRITE)
90 memcpy(state->user_buffer, state->bounce_buffer, state->len);
Marek Vasutb660df32012-08-26 15:19:06 +000091
Stephen Warren84d35b22012-11-06 11:27:29 +000092 free(state->bounce_buffer);
Marek Vasutb660df32012-08-26 15:19:06 +000093
94 return 0;
95}