/* SPDX-License-Identifier: MIT
 *
 * xen.h
 *
 * Guest OS interface to Xen.
 *
 * Copyright (c) 2004, K A Fraser
 */
| 9 | |
| 10 | #ifndef __XEN_PUBLIC_XEN_H__ |
| 11 | #define __XEN_PUBLIC_XEN_H__ |
| 12 | |
| 13 | #include <xen/arm/interface.h> |
| 14 | |
| 15 | /* |
| 16 | * XEN "SYSTEM CALLS" (a.k.a. HYPERCALLS). |
| 17 | */ |
| 18 | |
| 19 | /* |
| 20 | * x86_32: EAX = vector; EBX, ECX, EDX, ESI, EDI = args 1, 2, 3, 4, 5. |
| 21 | * EAX = return value |
| 22 | * (argument registers may be clobbered on return) |
| 23 | * x86_64: RAX = vector; RDI, RSI, RDX, R10, R8, R9 = args 1, 2, 3, 4, 5, 6. |
| 24 | * RAX = return value |
| 25 | * (argument registers not clobbered on return; RCX, R11 are) |
| 26 | */ |
| 27 | #define __HYPERVISOR_set_trap_table 0 |
| 28 | #define __HYPERVISOR_mmu_update 1 |
| 29 | #define __HYPERVISOR_set_gdt 2 |
| 30 | #define __HYPERVISOR_stack_switch 3 |
| 31 | #define __HYPERVISOR_set_callbacks 4 |
| 32 | #define __HYPERVISOR_fpu_taskswitch 5 |
| 33 | #define __HYPERVISOR_sched_op_compat 6 |
| 34 | #define __HYPERVISOR_platform_op 7 |
| 35 | #define __HYPERVISOR_set_debugreg 8 |
| 36 | #define __HYPERVISOR_get_debugreg 9 |
| 37 | #define __HYPERVISOR_update_descriptor 10 |
| 38 | #define __HYPERVISOR_memory_op 12 |
| 39 | #define __HYPERVISOR_multicall 13 |
| 40 | #define __HYPERVISOR_update_va_mapping 14 |
| 41 | #define __HYPERVISOR_set_timer_op 15 |
| 42 | #define __HYPERVISOR_event_channel_op_compat 16 |
| 43 | #define __HYPERVISOR_xen_version 17 |
| 44 | #define __HYPERVISOR_console_io 18 |
| 45 | #define __HYPERVISOR_physdev_op_compat 19 |
| 46 | #define __HYPERVISOR_grant_table_op 20 |
| 47 | #define __HYPERVISOR_vm_assist 21 |
| 48 | #define __HYPERVISOR_update_va_mapping_otherdomain 22 |
| 49 | #define __HYPERVISOR_iret 23 /* x86 only */ |
| 50 | #define __HYPERVISOR_vcpu_op 24 |
| 51 | #define __HYPERVISOR_set_segment_base 25 /* x86/64 only */ |
| 52 | #define __HYPERVISOR_mmuext_op 26 |
| 53 | #define __HYPERVISOR_xsm_op 27 |
| 54 | #define __HYPERVISOR_nmi_op 28 |
| 55 | #define __HYPERVISOR_sched_op 29 |
| 56 | #define __HYPERVISOR_callback_op 30 |
| 57 | #define __HYPERVISOR_xenoprof_op 31 |
| 58 | #define __HYPERVISOR_event_channel_op 32 |
| 59 | #define __HYPERVISOR_physdev_op 33 |
| 60 | #define __HYPERVISOR_hvm_op 34 |
| 61 | #define __HYPERVISOR_sysctl 35 |
| 62 | #define __HYPERVISOR_domctl 36 |
| 63 | #define __HYPERVISOR_kexec_op 37 |
| 64 | #define __HYPERVISOR_tmem_op 38 |
| 65 | #define __HYPERVISOR_xc_reserved_op 39 /* reserved for XenClient */ |
| 66 | #define __HYPERVISOR_xenpmu_op 40 |
| 67 | #define __HYPERVISOR_dm_op 41 |
| 68 | |
| 69 | /* Architecture-specific hypercall definitions. */ |
| 70 | #define __HYPERVISOR_arch_0 48 |
| 71 | #define __HYPERVISOR_arch_1 49 |
| 72 | #define __HYPERVISOR_arch_2 50 |
| 73 | #define __HYPERVISOR_arch_3 51 |
| 74 | #define __HYPERVISOR_arch_4 52 |
| 75 | #define __HYPERVISOR_arch_5 53 |
| 76 | #define __HYPERVISOR_arch_6 54 |
| 77 | #define __HYPERVISOR_arch_7 55 |
| 78 | |
#ifndef __ASSEMBLY__

/*
 * Domain identifier. Ordinary (guest-visible) domains use IDs below
 * DOMID_FIRST_RESERVED; the values at and above that are pseudo-domain
 * IDs with special meaning to the hypervisor.
 */
typedef u16 domid_t;

/* Domain ids >= DOMID_FIRST_RESERVED cannot be used for ordinary domains. */
#define DOMID_FIRST_RESERVED (0x7FF0U)

/*
 * DOMID_SELF is used in certain contexts to refer to oneself.
 * NOTE: intentionally the same value as DOMID_FIRST_RESERVED — this is
 * not a copy-paste error; it matches the upstream Xen public interface.
 */
#define DOMID_SELF (0x7FF0U)

/*
 * DOMID_IO is used to restrict page-table updates to mapping I/O memory.
 * Although no Foreign Domain need be specified to map I/O pages, DOMID_IO
 * is useful to ensure that no mappings to the OS's own heap are accidentally
 * installed. (e.g., in Linux this could cause havoc as reference counts
 * aren't adjusted on the I/O-mapping code path).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, but in that context can
 * be specified by any calling domain.
 */
#define DOMID_IO (0x7FF1U)

/*
 * DOMID_XEN is used to allow privileged domains to map restricted parts of
 * Xen's heap space (e.g., the machine_to_phys table).
 * This only makes sense in MMUEXT_SET_FOREIGNDOM, and is only permitted if
 * the caller is privileged.
 */
#define DOMID_XEN (0x7FF2U)

/* DOMID_COW is used as the owner of sharable pages */
#define DOMID_COW (0x7FF3U)

/* DOMID_INVALID is used to identify pages with unknown owner. */
#define DOMID_INVALID (0x7FF4U)

/* Idle domain. */
#define DOMID_IDLE (0x7FFFU)
| 116 | |
/*
 * Per-VCPU state shared between the guest and Xen. This layout is part of
 * the guest/hypervisor ABI: member order, types, and sizes must not change.
 */
struct vcpu_info {
	/*
	 * 'evtchn_upcall_pending' is written non-zero by Xen to indicate
	 * a pending notification for a particular VCPU. It is then cleared
	 * by the guest OS /before/ checking for pending work, thus avoiding
	 * a set-and-check race. Note that the mask is only accessed by Xen
	 * on the CPU that is currently hosting the VCPU. This means that the
	 * pending and mask flags can be updated by the guest without special
	 * synchronisation (i.e., no need for the x86 LOCK prefix).
	 * This may seem suboptimal because if the pending flag is set by
	 * a different CPU then an IPI may be scheduled even when the mask
	 * is set. However, note:
	 *  1. The task of 'interrupt holdoff' is covered by the per-event-
	 *     channel mask bits. A 'noisy' event that is continually being
	 *     triggered can be masked at source at this very precise
	 *     granularity.
	 *  2. The main purpose of the per-VCPU mask is therefore to restrict
	 *     reentrant execution: whether for concurrency control, or to
	 *     prevent unbounded stack usage. Whatever the purpose, we expect
	 *     that the mask will be asserted only for short periods at a time,
	 *     and so the likelihood of a 'spurious' IPI is suitably small.
	 * The mask is read before making an event upcall to the guest: a
	 * non-zero mask therefore guarantees that the VCPU will not receive
	 * an upcall activation. The mask is cleared when the VCPU requests
	 * to block: this avoids wakeup-waiting races.
	 */
	u8 evtchn_upcall_pending;	/* set by Xen, cleared by the guest */
	u8 evtchn_upcall_mask;		/* owned by the guest; read-only to Xen */
	xen_ulong_t evtchn_pending_sel;	/* selector over shared_info.evtchn_pending[] words */
	struct arch_vcpu_info arch;	/* architecture-specific per-VCPU state */
	struct pvclock_vcpu_time_info time;	/* paravirtualized clock info */
}; /* 64 bytes (x86) */
| 149 | |
| 150 | /* |
| 151 | * Xen/kernel shared data -- pointer provided in start_info. |
| 152 | * NB. We expect that this struct is smaller than a page. |
| 153 | */ |
| 154 | struct shared_info { |
| 155 | struct vcpu_info vcpu_info[MAX_VIRT_CPUS]; |
| 156 | |
| 157 | /* |
| 158 | * A domain can create "event channels" on which it can send and receive |
| 159 | * asynchronous event notifications. There are three classes of event that |
| 160 | * are delivered by this mechanism: |
| 161 | * 1. Bi-directional inter- and intra-domain connections. Domains must |
| 162 | * arrange out-of-band to set up a connection (usually by allocating |
| 163 | * an unbound 'listener' port and avertising that via a storage service |
| 164 | * such as xenstore). |
| 165 | * 2. Physical interrupts. A domain with suitable hardware-access |
| 166 | * privileges can bind an event-channel port to a physical interrupt |
| 167 | * source. |
| 168 | * 3. Virtual interrupts ('events'). A domain can bind an event-channel |
| 169 | * port to a virtual interrupt source, such as the virtual-timer |
| 170 | * device or the emergency console. |
| 171 | * |
| 172 | * Event channels are addressed by a "port index". Each channel is |
| 173 | * associated with two bits of information: |
| 174 | * 1. PENDING -- notifies the domain that there is a pending notification |
| 175 | * to be processed. This bit is cleared by the guest. |
| 176 | * 2. MASK -- if this bit is clear then a 0->1 transition of PENDING |
| 177 | * will cause an asynchronous upcall to be scheduled. This bit is only |
| 178 | * updated by the guest. It is read-only within Xen. If a channel |
| 179 | * becomes pending while the channel is masked then the 'edge' is lost |
| 180 | * (i.e., when the channel is unmasked, the guest must manually handle |
| 181 | * pending notifications as no upcall will be scheduled by Xen). |
| 182 | * |
| 183 | * To expedite scanning of pending notifications, any 0->1 pending |
| 184 | * transition on an unmasked channel causes a corresponding bit in a |
| 185 | * per-vcpu selector word to be set. Each bit in the selector covers a |
| 186 | * 'C long' in the PENDING bitfield array. |
| 187 | */ |
| 188 | xen_ulong_t evtchn_pending[sizeof(xen_ulong_t) * 8]; |
| 189 | xen_ulong_t evtchn_mask[sizeof(xen_ulong_t) * 8]; |
| 190 | |
| 191 | /* |
| 192 | * Wallclock time: updated only by control software. Guests should base |
| 193 | * their gettimeofday() syscall on this wallclock-base value. |
| 194 | */ |
| 195 | struct pvclock_wall_clock wc; |
| 196 | |
| 197 | struct arch_shared_info arch; |
| 198 | |
| 199 | }; |
| 200 | |
| 201 | #else /* __ASSEMBLY__ */ |
| 202 | |
| 203 | /* In assembly code we cannot use C numeric constant suffixes. */ |
| 204 | #define mk_unsigned_long(x) x |
| 205 | |
| 206 | #endif /* !__ASSEMBLY__ */ |
| 207 | |
| 208 | #endif /* __XEN_PUBLIC_XEN_H__ */ |