// SPDX-License-Identifier: MIT License
/*
 * hypervisor.c
 *
 * Communication to/from hypervisor.
 *
 * Copyright (c) 2002-2003, K A Fraser
 * Copyright (c) 2005, Grzegorz Milos, gm281@cam.ac.uk, Intel Research Cambridge
 * Copyright (c) 2020, EPAM Systems Inc.
 */
#include <common.h>
#include <cpu_func.h>
#include <log.h>
#include <memalign.h>

#include <asm/io.h>
#include <asm/armv8/mmu.h>
#include <asm/xen/system.h>

#include <linux/bug.h>

#include <xen/hvm.h>
#include <xen/interface/memory.h>
24
/*
 * An event channel is "active" when its bit is set in evtchn_pending[]
 * and clear in evtchn_mask[] for the given bitmap word @idx.
 * @cpu is currently unused by the expansion.
 */
#define active_evtchns(cpu, sh, idx) \
	((sh)->evtchn_pending[idx] & \
	 ~(sh)->evtchn_mask[idx])

/* Set to 1 for the duration of do_hypervisor_callback(), 0 otherwise. */
int in_callback;

/*
 * Shared page for communicating with the hypervisor.
 * Events flags go here, for example.
 */
struct shared_info *HYPERVISOR_shared_info;
36
37static const char *param_name(int op)
38{
39#define PARAM(x)[HVM_PARAM_##x] = #x
40 static const char *const names[] = {
41 PARAM(CALLBACK_IRQ),
42 PARAM(STORE_PFN),
43 PARAM(STORE_EVTCHN),
44 PARAM(PAE_ENABLED),
45 PARAM(IOREQ_PFN),
46 PARAM(VPT_ALIGN),
47 PARAM(CONSOLE_PFN),
48 PARAM(CONSOLE_EVTCHN),
49 };
50#undef PARAM
51
52 if (op >= ARRAY_SIZE(names))
53 return "unknown";
54
55 if (!names[op])
56 return "reserved";
57
58 return names[op];
59}
60
/**
 * hvm_get_parameter_maintain_dcache - function to obtain a HVM
 * parameter value.
 * @idx: HVM parameter index
 * @value: Value to fill in
 *
 * According to Xen on ARM ABI (xen/include/public/arch-arm.h):
 * all memory which is shared with other entities in the system
 * (including the hypervisor and other guests) must reside in memory
 * which is mapped as Normal Inner Write-Back Outer Write-Back
 * Inner-Shareable.
 *
 * Thus, page attributes must be equally set for all the entities
 * working with that page.
 *
 * Before MMU setup the data cache is turned off, so it means that
 * manual data cache maintenance is required, because of the
 * difference of page attributes.
 *
 * Return: the (non-negative) hypercall result; does not return on
 * failure (BUG()).
 */
int hvm_get_parameter_maintain_dcache(int idx, uint64_t *value)
{
	struct xen_hvm_param xhv;
	int ret;

	/* Discard any stale cache lines covering xhv before filling it in. */
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));
	xhv.domid = DOMID_SELF;
	xhv.index = idx;
	/*
	 * Invalidate again after the writes so no cached copy of xhv can
	 * shadow what the hypervisor observes in memory (D-cache may be
	 * off at this point, so the stores above went straight to RAM).
	 */
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));

	ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
	if (ret < 0) {
		pr_err("Cannot get hvm parameter %s (%d): %d!\n",
		       param_name(idx), idx, ret);
		BUG();
	}
	/* Invalidate once more so we read the value the hypervisor wrote. */
	invalidate_dcache_range((unsigned long)&xhv,
				(unsigned long)&xhv + sizeof(xhv));

	*value = xhv.value;

	return ret;
}
105
106int hvm_get_parameter(int idx, uint64_t *value)
107{
108 struct xen_hvm_param xhv;
109 int ret;
110
111 xhv.domid = DOMID_SELF;
112 xhv.index = idx;
113 ret = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
114 if (ret < 0) {
115 pr_err("Cannot get hvm parameter %s (%d): %d!\n",
116 param_name(idx), idx, ret);
117 BUG();
118 }
119
120 *value = xhv.value;
121
122 return ret;
123}
124
125struct shared_info *map_shared_info(void *p)
126{
127 struct xen_add_to_physmap xatp;
128
129 HYPERVISOR_shared_info = (struct shared_info *)memalign(PAGE_SIZE,
130 PAGE_SIZE);
131 if (!HYPERVISOR_shared_info)
132 BUG();
133
134 xatp.domid = DOMID_SELF;
135 xatp.idx = 0;
136 xatp.space = XENMAPSPACE_shared_info;
137 xatp.gpfn = virt_to_pfn(HYPERVISOR_shared_info);
138 if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp) != 0)
139 BUG();
140
141 return HYPERVISOR_shared_info;
142}
143
144void do_hypervisor_callback(struct pt_regs *regs)
145{
146 unsigned long l1, l2, l1i, l2i;
147 unsigned int port;
148 int cpu = 0;
149 struct shared_info *s = HYPERVISOR_shared_info;
150 struct vcpu_info *vcpu_info = &s->vcpu_info[cpu];
151
152 in_callback = 1;
153
154 vcpu_info->evtchn_upcall_pending = 0;
155 l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
156
157 while (l1 != 0) {
158 l1i = __ffs(l1);
159 l1 &= ~(1UL << l1i);
160
161 while ((l2 = active_evtchns(cpu, s, l1i)) != 0) {
162 l2i = __ffs(l2);
163 l2 &= ~(1UL << l2i);
164
165 port = (l1i * (sizeof(unsigned long) * 8)) + l2i;
166 /* TODO: handle new event: do_event(port, regs); */
167 /* Suppress -Wunused-but-set-variable */
168 (void)(port);
169 }
170 }
171
172 in_callback = 0;
173}
174
175void force_evtchn_callback(void)
176{
177#ifdef XEN_HAVE_PV_UPCALL_MASK
178 int save;
179#endif
180 struct vcpu_info *vcpu;
181
182 vcpu = &HYPERVISOR_shared_info->vcpu_info[smp_processor_id()];
183#ifdef XEN_HAVE_PV_UPCALL_MASK
184 save = vcpu->evtchn_upcall_mask;
185#endif
186
187 while (vcpu->evtchn_upcall_pending) {
188#ifdef XEN_HAVE_PV_UPCALL_MASK
189 vcpu->evtchn_upcall_mask = 1;
190#endif
191 do_hypervisor_callback(NULL);
192#ifdef XEN_HAVE_PV_UPCALL_MASK
193 vcpu->evtchn_upcall_mask = save;
194#endif
195 };
196}
197
198void mask_evtchn(uint32_t port)
199{
200 struct shared_info *s = HYPERVISOR_shared_info;
201
202 synch_set_bit(port, &s->evtchn_mask[0]);
203}
204
/*
 * unmask_evtchn() - clear @port's mask bit and, if the port became
 * pending while masked, re-raise the upcall for this vcpu.
 */
void unmask_evtchn(uint32_t port)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct vcpu_info *vcpu_info = &s->vcpu_info[smp_processor_id()];

	synch_clear_bit(port, &s->evtchn_mask[0]);

	/*
	 * Just like a real IO-APIC we 'lose the interrupt edge' if the
	 * channel is masked.
	 */
	/*
	 * If the port is pending and its selector-word bit was not already
	 * set, set it ourselves and deliver the callback synchronously
	 * (unless, where the ABI has one, the PV upcall mask is asserted).
	 */
	if (synch_test_bit(port, &s->evtchn_pending[0]) &&
	    !synch_test_and_set_bit(port / (sizeof(unsigned long) * 8),
				    &vcpu_info->evtchn_pending_sel)) {
		vcpu_info->evtchn_upcall_pending = 1;
#ifdef XEN_HAVE_PV_UPCALL_MASK
		if (!vcpu_info->evtchn_upcall_mask)
#endif
			force_evtchn_callback();
	}
}
226
227void clear_evtchn(uint32_t port)
228{
229 struct shared_info *s = HYPERVISOR_shared_info;
230
231 synch_clear_bit(port, &s->evtchn_pending[0]);
232}
233
234void xen_init(void)
235{
236 debug("%s\n", __func__);
237
238 map_shared_info(NULL);
239}
240