blob: f603aa129d4699351c243c2aa38d9aae80bbda58 [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson<eric@nelint.com>
 *
 */
7#include <config.h>
8#include <common.h>
9#include <malloc.h>
10#include <part.h>
11#include <linux/ctype.h>
12#include <linux/list.h>
13
/**
 * struct block_cache_node - one cached run of consecutive device blocks
 * @lh:     list linkage into @block_cache (MRU entry at the list head)
 * @iftype: interface type of the device this entry was read from
 * @devnum: device number within that interface
 * @start:  first block number covered by this entry
 * @blkcnt: number of consecutive blocks held in @cache
 * @blksz:  block size in bytes the entry was filled with
 * @cache:  heap buffer of @blkcnt * @blksz bytes holding the block data
 */
struct block_cache_node {
	struct list_head lh;
	int iftype;
	int devnum;
	lbaint_t start;
	lbaint_t blkcnt;
	unsigned long blksz;
	char *cache;
};
23
/* List of cache entries kept in MRU order: most recently used at the head */
static struct list_head block_cache;

/*
 * Counters and tunables for the cache; the defaults below bound entry
 * size and count until changed via blkcache_configure().
 */
static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};
30
/**
 * blkcache_init() - initialize the (initially empty) block cache list
 *
 * Must run before any other blkcache_* call touches @block_cache.
 *
 * Return: always 0
 */
int blkcache_init(void)
{
	INIT_LIST_HEAD(&block_cache);

	return 0;
}
37
Eric Nelsone40cf342016-03-28 10:05:44 -070038static struct block_cache_node *cache_find(int iftype, int devnum,
39 lbaint_t start, lbaint_t blkcnt,
40 unsigned long blksz)
41{
42 struct block_cache_node *node;
43
44 list_for_each_entry(node, &block_cache, lh)
45 if ((node->iftype == iftype) &&
46 (node->devnum == devnum) &&
47 (node->blksz == blksz) &&
48 (node->start <= start) &&
49 (node->start + node->blkcnt >= start + blkcnt)) {
50 if (block_cache.next != &node->lh) {
51 /* maintain MRU ordering */
52 list_del(&node->lh);
53 list_add(&node->lh, &block_cache);
54 }
55 return node;
56 }
57 return 0;
58}
59
60int blkcache_read(int iftype, int devnum,
61 lbaint_t start, lbaint_t blkcnt,
62 unsigned long blksz, void *buffer)
63{
64 struct block_cache_node *node = cache_find(iftype, devnum, start,
65 blkcnt, blksz);
66 if (node) {
67 const char *src = node->cache + (start - node->start) * blksz;
68 memcpy(buffer, src, blksz * blkcnt);
69 debug("hit: start " LBAF ", count " LBAFU "\n",
70 start, blkcnt);
71 ++_stats.hits;
72 return 1;
73 }
74
75 debug("miss: start " LBAF ", count " LBAFU "\n",
76 start, blkcnt);
77 ++_stats.misses;
78 return 0;
79}
80
81void blkcache_fill(int iftype, int devnum,
82 lbaint_t start, lbaint_t blkcnt,
83 unsigned long blksz, void const *buffer)
84{
85 lbaint_t bytes;
86 struct block_cache_node *node;
87
88 /* don't cache big stuff */
89 if (blkcnt > _stats.max_blocks_per_entry)
90 return;
91
92 if (_stats.max_entries == 0)
93 return;
94
95 bytes = blksz * blkcnt;
96 if (_stats.max_entries <= _stats.entries) {
97 /* pop LRU */
98 node = (struct block_cache_node *)block_cache.prev;
99 list_del(&node->lh);
100 _stats.entries--;
101 debug("drop: start " LBAF ", count " LBAFU "\n",
102 node->start, node->blkcnt);
103 if (node->blkcnt * node->blksz < bytes) {
104 free(node->cache);
105 node->cache = 0;
106 }
107 } else {
108 node = malloc(sizeof(*node));
109 if (!node)
110 return;
111 node->cache = 0;
112 }
113
114 if (!node->cache) {
115 node->cache = malloc(bytes);
116 if (!node->cache) {
117 free(node);
118 return;
119 }
120 }
121
122 debug("fill: start " LBAF ", count " LBAFU "\n",
123 start, blkcnt);
124
125 node->iftype = iftype;
126 node->devnum = devnum;
127 node->start = start;
128 node->blkcnt = blkcnt;
129 node->blksz = blksz;
130 memcpy(node->cache, buffer, bytes);
131 list_add(&node->lh, &block_cache);
132 _stats.entries++;
133}
134
135void blkcache_invalidate(int iftype, int devnum)
136{
137 struct list_head *entry, *n;
138 struct block_cache_node *node;
139
140 list_for_each_safe(entry, n, &block_cache) {
141 node = (struct block_cache_node *)entry;
142 if ((node->iftype == iftype) &&
143 (node->devnum == devnum)) {
144 list_del(entry);
145 free(node->cache);
146 free(node);
147 --_stats.entries;
148 }
149 }
150}
151
152void blkcache_configure(unsigned blocks, unsigned entries)
153{
154 struct block_cache_node *node;
155 if ((blocks != _stats.max_blocks_per_entry) ||
156 (entries != _stats.max_entries)) {
157 /* invalidate cache */
158 while (!list_empty(&block_cache)) {
159 node = (struct block_cache_node *)block_cache.next;
160 list_del(&node->lh);
161 free(node->cache);
162 free(node);
163 }
164 _stats.entries = 0;
165 }
166
167 _stats.max_blocks_per_entry = blocks;
168 _stats.max_entries = entries;
169
170 _stats.hits = 0;
171 _stats.misses = 0;
172}
173
174void blkcache_stats(struct block_cache_stats *stats)
175{
176 memcpy(stats, &_stats, sizeof(*stats));
177 _stats.hits = 0;
178 _stats.misses = 0;
179}