// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson <eric@nelint.com>
 *
 */
#include <common.h>
#include <blk.h>
#include <log.h>
#include <malloc.h>
#include <part.h>
#include <asm/global_data.h>
#include <linux/ctype.h>
#include <linux/list.h>

struct block_cache_node {
	struct list_head lh;	/* MRU list: most recently used first */
	int iftype;		/* interface type of the device */
	int devnum;		/* device number on that interface */
	lbaint_t start;		/* first cached block */
	lbaint_t blkcnt;	/* number of cached blocks */
	unsigned long blksz;	/* block size in bytes */
	char *cache;		/* blkcnt * blksz bytes of data */
};

static LIST_HEAD(block_cache);

static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 8,
	.max_entries = 32
};
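/*
 * Find a cache node whose cached range fully covers the requested one.
 * Only a node with matching interface type, device number and block
 * size can satisfy a request; a hit is moved to the head of the list
 * so the list stays ordered most-recently-used first.
 */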
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			if (block_cache.next != &node->lh) {
				/* maintain MRU ordering */
				list_del(&node->lh);
				list_add(&node->lh, &block_cache);
			}
			return node;
		}
	return NULL;
}
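/*
 * Copy cached data for the requested range into @buffer if it is
 * available. Returns 1 on a cache hit, 0 on a miss.
 */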
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node = cache_find(iftype, devnum, start,
						   blkcnt, blksz);
	if (node) {
		const char *src = node->cache + (start - node->start) * blksz;
		memcpy(buffer, src, blksz * blkcnt);
		debug("hit: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.hits;
		return 1;
	}

	debug("miss: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.misses;
	return 0;
}
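/*
 * Add a block range to the cache. Ranges larger than the per-entry
 * limit are not cached; when the cache is full, the least-recently-used
 * node is evicted to make room.
 */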
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU: lh is the node's first member, so the cast is safe */
		node = (struct block_cache_node *)block_cache.prev;
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		/* reuse the evicted node's buffer only if it is big enough */
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}
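/*
 * Drop all cache entries for the given device; passing an iftype of -1
 * invalidates the entire cache.
 */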
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = (struct block_cache_node *)entry;
		if (iftype == -1 ||
		    (node->iftype == iftype && node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}
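/*
 * Set the cache limits. A change to either limit invalidates the whole
 * cache; the hit/miss counters are always reset.
 */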
void blkcache_configure(unsigned blocks, unsigned entries)
{
	/* invalidate cache if there is a change */
	if ((blocks != _stats.max_blocks_per_entry) ||
	    (entries != _stats.max_entries))
		blkcache_invalidate(-1, 0);

	_stats.max_blocks_per_entry = blocks;
	_stats.max_entries = entries;

	_stats.hits = 0;
	_stats.misses = 0;
}
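/*
 * Copy the current statistics to @stats. Reading the statistics resets
 * the hit and miss counters.
 */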
void blkcache_stats(struct block_cache_stats *stats)
{
	memcpy(stats, &_stats, sizeof(*stats));
	_stats.hits = 0;
	_stats.misses = 0;
}
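/* Release every cache entry and its data buffer. */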
void blkcache_free(void)
{
	blkcache_invalidate(-1, 0);
}
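/*
 * Typical usage (an illustrative sketch, not code from this file): a
 * block device's read path checks the cache before touching the
 * hardware and fills it after a successful transfer, e.g.:
 *
 *	if (blkcache_read(iftype, devnum, start, blkcnt, blksz, buffer))
 *		return blkcnt;
 *	cnt = ops->read(dev, start, blkcnt, buffer);
 *	if (cnt == blkcnt)
 *		blkcache_fill(iftype, devnum, start, blkcnt, blksz, buffer);
 */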