/usr/include/xen/hvm/dm_op.h is in libxen-dev 4.9.2-0ubuntu1.
This file is owned by root:root, with mode 0o644.
The actual contents of the file can be viewed below.

/*
* Copyright (c) 2016, Citrix Systems Inc
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
*/
#ifndef __XEN_PUBLIC_HVM_DM_OP_H__
#define __XEN_PUBLIC_HVM_DM_OP_H__
#include "../xen.h"
#if defined(__XEN__) || defined(__XEN_TOOLS__)
#include "../event_channel.h"
#ifndef uint64_aligned_t
#define uint64_aligned_t uint64_t
#endif
/*
 * IOREQ Servers
 *
 * The interface between an I/O emulator and Xen is called an IOREQ Server.
 * A domain supports a single 'legacy' IOREQ Server which is instantiated if
 * parameter...
 *
 * HVM_PARAM_IOREQ_PFN is read (to get the gmfn containing the synchronous
 * ioreq structures), or...
 * HVM_PARAM_BUFIOREQ_PFN is read (to get the gmfn containing the buffered
 * ioreq ring), or...
 * HVM_PARAM_BUFIOREQ_EVTCHN is read (to get the event channel that Xen uses
 * to request buffered I/O emulation).
 *
 * The following hypercalls facilitate the creation of IOREQ Servers for
 * 'secondary' emulators which are invoked to implement port I/O, memory, or
 * PCI config space ranges which they explicitly register.
 */

/* Identifier of an IOREQ Server within its target domain. */
typedef uint16_t ioservid_t;
/*
 * XEN_DMOP_create_ioreq_server: Instantiate a new IOREQ Server for a
 *                               secondary emulator.
 *
 * The <id> handed back is unique for the target domain. The value of
 * <handle_bufioreq> should be one of HVM_IOREQSRV_BUFIOREQ_* defined in
 * hvm_op.h. If the value is HVM_IOREQSRV_BUFIOREQ_OFF then the buffered
 * ioreq ring will not be allocated and hence all emulation requests to
 * this server will be synchronous.
 */
#define XEN_DMOP_create_ioreq_server 1

struct xen_dm_op_create_ioreq_server {
    /* IN - should server handle buffered ioreqs */
    uint8_t handle_bufioreq;
    /* explicit padding so that id is naturally aligned */
    uint8_t pad[3];
    /* OUT - server id */
    ioservid_t id;
};
/*
 * XEN_DMOP_get_ioreq_server_info: Get all the information necessary to
 *                                 access IOREQ Server <id>.
 *
 * The emulator needs to map the synchronous ioreq structures and buffered
 * ioreq ring (if it exists) that Xen uses to request emulation. These are
 * hosted in the target domain's gmfns <ioreq_pfn> and <bufioreq_pfn>
 * respectively. In addition, if the IOREQ Server is handling buffered
 * emulation requests, the emulator needs to bind to event channel
 * <bufioreq_port> to listen for them. (The event channels used for
 * synchronous emulation requests are specified in the per-CPU ioreq
 * structures in <ioreq_pfn>).
 * If the IOREQ Server is not handling buffered emulation requests then the
 * values handed back in <bufioreq_pfn> and <bufioreq_port> will both be 0.
 */
#define XEN_DMOP_get_ioreq_server_info 2

struct xen_dm_op_get_ioreq_server_info {
    /* IN - server id */
    ioservid_t id;
    /* explicit padding so that bufioreq_port is naturally aligned */
    uint16_t pad;
    /* OUT - buffered ioreq port */
    evtchn_port_t bufioreq_port;
    /* OUT - sync ioreq pfn */
    uint64_aligned_t ioreq_pfn;
    /* OUT - buffered ioreq pfn */
    uint64_aligned_t bufioreq_pfn;
};
/*
 * XEN_DMOP_map_io_range_to_ioreq_server: Register an I/O range for
 *                                        emulation by the client of
 *                                        IOREQ Server <id>.
 * XEN_DMOP_unmap_io_range_from_ioreq_server: Deregister an I/O range
 *                                            previously registered for
 *                                            emulation by the client of
 *                                            IOREQ Server <id>.
 *
 * There are three types of I/O that can be emulated: port I/O, memory
 * accesses and PCI config space accesses. The <type> field denotes which
 * type of range the <start> and <end> (inclusive) fields are specifying.
 * PCI config space ranges are specified by segment/bus/device/function
 * values which should be encoded using the DMOP_PCI_SBDF helper macro
 * below.
 *
 * NOTE: unless an emulation request falls entirely within a range mapped
 * by a secondary emulator, it will not be passed to that emulator.
 */
#define XEN_DMOP_map_io_range_to_ioreq_server 3
#define XEN_DMOP_unmap_io_range_from_ioreq_server 4

/* Shared argument structure for both the map and unmap operations above. */
struct xen_dm_op_ioreq_server_range {
    /* IN - server id */
    ioservid_t id;
    /* explicit padding so that type is naturally aligned */
    uint16_t pad;
    /* IN - type of range (XEN_DMOP_IO_RANGE_*) */
    uint32_t type;
# define XEN_DMOP_IO_RANGE_PORT   0 /* I/O port range */
# define XEN_DMOP_IO_RANGE_MEMORY 1 /* MMIO range */
# define XEN_DMOP_IO_RANGE_PCI    2 /* PCI segment/bus/dev/func range */
    /* IN - inclusive start and end of range */
    uint64_aligned_t start, end;
};
/*
 * Encode a PCI segment/bus/device/function into the 32-bit form used by
 * XEN_DMOP_IO_RANGE_PCI ranges:
 *   bits 31-16: segment, bits 15-8: bus, bits 7-3: device, bits 2-0: function.
 *
 * Unsigned mask constants are used so the arithmetic is performed in
 * unsigned int: with a signed mask, a segment value >= 0x8000 would be
 * left-shifted into the sign bit of an int, which is undefined behaviour
 * (C11 6.5.7p4). The encoded value is unchanged for all valid inputs.
 */
#define XEN_DMOP_PCI_SBDF(s,b,d,f) \
	((((s) & 0xffffu) << 16) |  \
	 (((b) & 0xffu)   <<  8) |  \
	 (((d) & 0x1fu)   <<  3) |  \
	  ((f) & 0x07u))
/*
 * XEN_DMOP_set_ioreq_server_state: Enable or disable the IOREQ Server <id>
 *
 * The IOREQ Server will not be passed any emulation requests until it is
 * in the enabled state.
 * Note that the contents of the ioreq_pfn and bufioreq_pfn (see
 * XEN_DMOP_get_ioreq_server_info) are not meaningful until the IOREQ Server
 * is in the enabled state.
 */
#define XEN_DMOP_set_ioreq_server_state 5

struct xen_dm_op_set_ioreq_server_state {
    /* IN - server id */
    ioservid_t id;
    /* IN - enabled? (0 -> disable, non-zero -> enable) */
    uint8_t enabled;
    /* explicit padding */
    uint8_t pad;
};
/*
 * XEN_DMOP_destroy_ioreq_server: Destroy the IOREQ Server <id>.
 *
 * Any registered I/O ranges will be automatically deregistered.
 */
#define XEN_DMOP_destroy_ioreq_server 6

struct xen_dm_op_destroy_ioreq_server {
    /* IN - server id */
    ioservid_t id;
    /* explicit padding */
    uint16_t pad;
};
/*
 * XEN_DMOP_track_dirty_vram: Track modifications to the specified pfn
 *                            range.
 *
 * NOTE: The bitmap passed back to the caller is passed in a
 *       secondary buffer.
 */
#define XEN_DMOP_track_dirty_vram 7

struct xen_dm_op_track_dirty_vram {
    /* IN - number of pages to be tracked */
    uint32_t nr;
    /* explicit padding so that first_pfn is 64-bit aligned */
    uint32_t pad;
    /* IN - first pfn to track */
    uint64_aligned_t first_pfn;
};
/*
 * XEN_DMOP_set_pci_intx_level: Set the logical level of one of a domain's
 *                              PCI INTx pins.
 */
#define XEN_DMOP_set_pci_intx_level 8

struct xen_dm_op_set_pci_intx_level {
    /* IN - PCI INTx identification (domain:bus:device:intx) */
    uint16_t domain;
    uint8_t bus, device, intx;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t level;
};
/*
 * XEN_DMOP_set_isa_irq_level: Set the logical level of one of a domain's
 *                             ISA IRQ lines.
 */
#define XEN_DMOP_set_isa_irq_level 9

struct xen_dm_op_set_isa_irq_level {
    /* IN - ISA IRQ (0-15) */
    uint8_t isa_irq;
    /* IN - Level: 0 -> deasserted, 1 -> asserted */
    uint8_t level;
};
/*
 * XEN_DMOP_set_pci_link_route: Map a PCI INTx line to an IRQ line.
 */
#define XEN_DMOP_set_pci_link_route 10

struct xen_dm_op_set_pci_link_route {
    /* IN - PCI INTx line (0-3) */
    uint8_t link;
    /* IN - ISA IRQ (1-15) or 0 -> disable link */
    uint8_t isa_irq;
};
/*
 * XEN_DMOP_modified_memory: Notify that a set of pages were modified by
 *                           an emulator.
 *
 * DMOP buf 1 contains an array of xen_dm_op_modified_memory_extent with
 * @nr_extents entries.
 *
 * On error, @nr_extents will contain the index+1 of the extent that
 * had the error. It is not defined if or which pages may have been
 * marked as dirty, in this event.
 */
#define XEN_DMOP_modified_memory 11

struct xen_dm_op_modified_memory {
    /*
     * IN - Number of extents to be processed
     * OUT - returns n+1 for failing extent
     */
    uint32_t nr_extents;
    /* IN/OUT - Must be set to 0 */
    uint32_t opaque;
};

/* One entry of the extent array carried in DMOP buf 1 (see above). */
struct xen_dm_op_modified_memory_extent {
    /* IN - number of contiguous pages modified */
    uint32_t nr;
    /* explicit padding so that first_pfn is 64-bit aligned */
    uint32_t pad;
    /* IN - first pfn modified */
    uint64_aligned_t first_pfn;
};
/*
 * XEN_DMOP_set_mem_type: Notify that a region of memory is to be treated
 *                        in a specific way. (See definition of
 *                        hvmmem_type_t).
 *
 * NOTE: In the event of a continuation (return code -ERESTART), the
 *       @first_pfn is set to the value of the pfn of the remaining
 *       region and @nr reduced to the size of the remaining region.
 */
#define XEN_DMOP_set_mem_type 12

struct xen_dm_op_set_mem_type {
    /* IN - number of contiguous pages */
    uint32_t nr;
    /* IN - new hvmmem_type_t of region */
    uint16_t mem_type;
    /* explicit padding so that first_pfn is 64-bit aligned */
    uint16_t pad;
    /* IN - first pfn in region */
    uint64_aligned_t first_pfn;
};
/*
 * XEN_DMOP_inject_event: Inject an event into a VCPU, which will
 *                        get taken up when it is next scheduled.
 *
 * Note that the caller should know enough of the state of the CPU before
 * injecting, to know what the effect of injecting the event will be.
 */
#define XEN_DMOP_inject_event 13

struct xen_dm_op_inject_event {
    /* IN - index of vCPU */
    uint32_t vcpuid;
    /* IN - interrupt vector */
    uint8_t vector;
    /* IN - event type (DMOP_EVENT_* ) */
    uint8_t type;
/* NB. This enumeration precisely matches hvm.h:X86_EVENTTYPE_* */
# define XEN_DMOP_EVENT_ext_int    0 /* external interrupt */
# define XEN_DMOP_EVENT_nmi        2 /* nmi */
# define XEN_DMOP_EVENT_hw_exc     3 /* hardware exception */
# define XEN_DMOP_EVENT_sw_int     4 /* software interrupt (CD nn) */
# define XEN_DMOP_EVENT_pri_sw_exc 5 /* ICEBP (F1) */
# define XEN_DMOP_EVENT_sw_exc     6 /* INT3 (CC), INTO (CE) */
    /* IN - instruction length */
    uint8_t insn_len;
    /* explicit padding */
    uint8_t pad0;
    /* IN - error code (or ~0 to skip) */
    uint32_t error_code;
    /* explicit padding so that cr2 is 64-bit aligned */
    uint32_t pad1;
    /* IN - CR2 for page faults */
    uint64_aligned_t cr2;
};
/*
 * XEN_DMOP_inject_msi: Inject an MSI for an emulated device.
 */
#define XEN_DMOP_inject_msi 14

struct xen_dm_op_inject_msi {
    /* IN - MSI data (lower 32 bits) */
    uint32_t data;
    /* explicit padding so that addr is 64-bit aligned */
    uint32_t pad;
    /* IN - MSI address (0xfeexxxxx) */
    uint64_aligned_t addr;
};
/*
 * XEN_DMOP_map_mem_type_to_ioreq_server : map or unmap the IOREQ Server <id>
 *                                         to specific memory type <type>
 *                                         for specific accesses <flags>
 *
 * For now, flags only accept the value of XEN_DMOP_IOREQ_MEM_ACCESS_WRITE,
 * which means only write operations are to be forwarded to an ioreq server.
 * Support for the emulation of read operations can be added when an ioreq
 * server has such requirement in future.
 */
#define XEN_DMOP_map_mem_type_to_ioreq_server 15

struct xen_dm_op_map_mem_type_to_ioreq_server {
    ioservid_t id;      /* IN - ioreq server id */
    uint16_t type;      /* IN - memory type */
    uint32_t flags;     /* IN - types of accesses to be forwarded to the
                           ioreq server. flags with 0 means to unmap the
                           ioreq server */
#define XEN_DMOP_IOREQ_MEM_ACCESS_READ (1u << 0)
#define XEN_DMOP_IOREQ_MEM_ACCESS_WRITE (1u << 1)
    uint64_t opaque;    /* IN/OUT - only used for hypercall continuation,
                           has to be set to zero by the caller */
};
/*
 * Top-level device model operation, carried in DMOP buf 0 of the
 * HYPERVISOR_dm_op hypercall (see the description at the end of this file).
 */
struct xen_dm_op {
    /* One of the XEN_DMOP_* operation codes defined above */
    uint32_t op;
    /* explicit padding so that the union is 64-bit aligned */
    uint32_t pad;
    /* Operation-specific arguments; the active member is selected by @op */
    union {
        struct xen_dm_op_create_ioreq_server create_ioreq_server;
        struct xen_dm_op_get_ioreq_server_info get_ioreq_server_info;
        struct xen_dm_op_ioreq_server_range map_io_range_to_ioreq_server;
        struct xen_dm_op_ioreq_server_range unmap_io_range_from_ioreq_server;
        struct xen_dm_op_set_ioreq_server_state set_ioreq_server_state;
        struct xen_dm_op_destroy_ioreq_server destroy_ioreq_server;
        struct xen_dm_op_track_dirty_vram track_dirty_vram;
        struct xen_dm_op_set_pci_intx_level set_pci_intx_level;
        struct xen_dm_op_set_isa_irq_level set_isa_irq_level;
        struct xen_dm_op_set_pci_link_route set_pci_link_route;
        struct xen_dm_op_modified_memory modified_memory;
        struct xen_dm_op_set_mem_type set_mem_type;
        struct xen_dm_op_inject_event inject_event;
        struct xen_dm_op_inject_msi inject_msi;
        struct xen_dm_op_map_mem_type_to_ioreq_server
                map_mem_type_to_ioreq_server;
    } u;
};
#endif /* __XEN__ || __XEN_TOOLS__ */
/*
 * Descriptor for one buffer passed to HYPERVISOR_dm_op: a guest handle
 * to the buffer contents and its size in bytes. This part of the
 * interface is visible outside __XEN__/__XEN_TOOLS__ builds.
 */
struct xen_dm_op_buf {
    XEN_GUEST_HANDLE(void) h;
    xen_ulong_t size;
};
typedef struct xen_dm_op_buf xen_dm_op_buf_t;
DEFINE_XEN_GUEST_HANDLE(xen_dm_op_buf_t);
/* ` enum neg_errnoval
* ` HYPERVISOR_dm_op(domid_t domid,
* ` unsigned int nr_bufs,
* ` xen_dm_op_buf_t bufs[])
* `
*
* @domid is the domain the hypercall operates on.
* @nr_bufs is the number of buffers in the @bufs array.
* @bufs points to an array of buffers where @bufs[0] contains a struct
* xen_dm_op, describing the specific device model operation and its
* parameters.
* @bufs[1..] may be referenced in the parameters for the purposes of
* passing extra information to or from the domain.
*/
#endif /* __XEN_PUBLIC_HVM_DM_OP_H__ */
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* tab-width: 4
* indent-tabs-mode: nil
* End:
*/
|