// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson <eric@nelint.com>
 */
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>

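/*
 * Each cache entry holds one contiguous range of blocks from a single
 * block device, identified by (iftype, devnum). Entries live on the
 * block_cache list in most-recently-used-first order.
 */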
struct block_cache_node {
	struct list_head lh;	/* MRU list linkage */
	int iftype;		/* interface type of the device */
	int devnum;		/* device number within that interface */
	lbaint_t start;		/* first block in the cached range */
	lbaint_t blkcnt;	/* number of blocks cached */
	unsigned long blksz;	/* block size in bytes */
	char *cache;		/* data buffer of blkcnt * blksz bytes */
};

#ifndef CONFIG_M68K
static LIST_HEAD(block_cache);
#else
/* on m68k the list head is set up at run time by blkcache_init() */
static struct list_head block_cache;
#endif

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};

#ifdef CONFIG_M68K
int blkcache_init(void)
{
	INIT_LIST_HEAD(&block_cache);

	return 0;
}
#endif

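/*
 * Look for an entry that fully covers the requested range. On a hit,
 * move the entry to the front of the list to maintain MRU order.
 * Returns NULL when no entry matches.
 */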
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}

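/*
 * Try to satisfy a read from the cache. On a hit, copy the cached data
 * into buffer and return 1; on a miss, return 0 so the caller reads
 * from the device instead.
 */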
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;

		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}

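/*
 * Store the result of a completed read in the cache. Ranges larger
 * than max_blocks_per_entry are not cached at all; when the cache is
 * full, the least-recently-used entry is evicted, and its buffer is
 * reused if it is large enough for the new range.
 */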
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU */
		node = list_entry(block_cache.prev,
				  struct block_cache_node, lh);
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		/* reuse the evicted buffer only if it is big enough */
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}

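/*
 * Discard every cache entry for one device, typically because a write
 * or a device rescan may have made the cached contents stale.
 */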
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = list_entry(entry, struct block_cache_node, lh);
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}

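/*
 * Resize the cache. Any change to the geometry empties the cache, and
 * the hit/miss counters are reset in either case.
 */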
void blkcache_configure(unsigned blocks, unsigned entries)
{
	struct block_cache_node *node;

	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries)) {
		/* invalidate cache */
		while (!list_empty(&block_cache)) {
			node = list_entry(block_cache.next,
					  struct block_cache_node, lh);
			list_del(&node->lh);
			free(node->cache);
			free(node);
		}
		_stats.entries = 0;
	}

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}

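/*
 * Report the current statistics to the caller and reset the hit/miss
 * counters.
 */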
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}