blob: 614eb36c7856ef87969a9d769f5e2bcd250ecb9c [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+
/*
 * Generic bounce buffer implementation
 *
 * Copyright (C) 2012 Marek Vasut <marex@denx.de>
 */

#include <common.h>
#include <cpu_func.h>
#include <malloc.h>
#include <errno.h>
#include <bouncebuf.h>

Stephen Warren84d35b22012-11-06 11:27:29 +000014static int addr_aligned(struct bounce_buffer *state)
Marek Vasutb660df32012-08-26 15:19:06 +000015{
16 const ulong align_mask = ARCH_DMA_MINALIGN - 1;
17
18 /* Check if start is aligned */
Stephen Warren84d35b22012-11-06 11:27:29 +000019 if ((ulong)state->user_buffer & align_mask) {
20 debug("Unaligned buffer address %p\n", state->user_buffer);
Marek Vasutb660df32012-08-26 15:19:06 +000021 return 0;
22 }
23
Stephen Warren84d35b22012-11-06 11:27:29 +000024 /* Check if length is aligned */
25 if (state->len != state->len_aligned) {
Vasili Galka5d69a5d2014-08-26 13:45:48 +030026 debug("Unaligned buffer length %zu\n", state->len);
Marek Vasutb660df32012-08-26 15:19:06 +000027 return 0;
28 }
29
30 /* Aligned */
31 return 1;
32}
33
Stephen Warren84d35b22012-11-06 11:27:29 +000034int bounce_buffer_start(struct bounce_buffer *state, void *data,
35 size_t len, unsigned int flags)
Marek Vasutb660df32012-08-26 15:19:06 +000036{
Stephen Warren84d35b22012-11-06 11:27:29 +000037 state->user_buffer = data;
38 state->bounce_buffer = data;
39 state->len = len;
40 state->len_aligned = roundup(len, ARCH_DMA_MINALIGN);
41 state->flags = flags;
Marek Vasutb660df32012-08-26 15:19:06 +000042
Stephen Warren84d35b22012-11-06 11:27:29 +000043 if (!addr_aligned(state)) {
44 state->bounce_buffer = memalign(ARCH_DMA_MINALIGN,
45 state->len_aligned);
46 if (!state->bounce_buffer)
47 return -ENOMEM;
48
49 if (state->flags & GEN_BB_READ)
50 memcpy(state->bounce_buffer, state->user_buffer,
51 state->len);
Marek Vasutb660df32012-08-26 15:19:06 +000052 }
53
Stephen Warren84d35b22012-11-06 11:27:29 +000054 /*
55 * Flush data to RAM so DMA reads can pick it up,
56 * and any CPU writebacks don't race with DMA writes
57 */
58 flush_dcache_range((unsigned long)state->bounce_buffer,
59 (unsigned long)(state->bounce_buffer) +
60 state->len_aligned);
Marek Vasutb660df32012-08-26 15:19:06 +000061
62 return 0;
63}
64
Stephen Warren84d35b22012-11-06 11:27:29 +000065int bounce_buffer_stop(struct bounce_buffer *state)
Marek Vasutb660df32012-08-26 15:19:06 +000066{
Stephen Warren84d35b22012-11-06 11:27:29 +000067 if (state->flags & GEN_BB_WRITE) {
68 /* Invalidate cache so that CPU can see any newly DMA'd data */
69 invalidate_dcache_range((unsigned long)state->bounce_buffer,
70 (unsigned long)(state->bounce_buffer) +
71 state->len_aligned);
72 }
Marek Vasutb660df32012-08-26 15:19:06 +000073
Stephen Warren84d35b22012-11-06 11:27:29 +000074 if (state->bounce_buffer == state->user_buffer)
Marek Vasutb660df32012-08-26 15:19:06 +000075 return 0;
76
Stephen Warren84d35b22012-11-06 11:27:29 +000077 if (state->flags & GEN_BB_WRITE)
78 memcpy(state->user_buffer, state->bounce_buffer, state->len);
Marek Vasutb660df32012-08-26 15:19:06 +000079
Stephen Warren84d35b22012-11-06 11:27:29 +000080 free(state->bounce_buffer);
Marek Vasutb660df32012-08-26 15:19:06 +000081
82 return 0;
83}