author    David S. Miller <davem@davemloft.net>    2006-02-04 03:01:45 -0800
committer David S. Miller <davem@sunset.davemloft.net>    2006-03-20 01:11:37 -0800
commit    766f861fbbd968a1850295ed6dec4504b4500dcc (patch)
tree      76729285f448b58c812469b1bddf64f92e9f8d6e /include/asm-sparc64
parent    314ef6859750b6539eac48d78059bb7986f29cb1 (diff)
[SPARC64]: SUN4V hypervisor interface defines.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/hypervisor.h  2072
1 files changed, 2072 insertions, 0 deletions
diff --git a/include/asm-sparc64/hypervisor.h b/include/asm-sparc64/hypervisor.h
new file mode 100644
index 00000000000..9c8e453abe9
--- /dev/null
+++ b/include/asm-sparc64/hypervisor.h
@@ -0,0 +1,2072 @@
+#ifndef _SPARC64_HYPERVISOR_H
+#define _SPARC64_HYPERVISOR_H
+
+/* Sun4v hypervisor interfaces and defines.
+ *
+ * Hypervisor calls are made via traps to software traps number 0x80
+ * and above. Registers %o0 to %o5 serve as argument, status, and
+ * return value registers.
+ *
+ * There are two kinds of these traps. First there are the normal
+ * "fast traps" which use software trap 0x80 and encode the function
+ * to invoke by number in register %o5. Argument and return value
+ * handling is as follows:
+ *
+ * -----------------------------------------------
+ * | %o5 | function number | undefined |
+ * | %o0 | argument 0 | return status |
+ * | %o1 | argument 1 | return value 1 |
+ * | %o2 | argument 2 | return value 2 |
+ * | %o3 | argument 3 | return value 3 |
+ * | %o4 | argument 4 | return value 4 |
+ * -----------------------------------------------
+ *
+ * The second type are "hyper-fast traps" which encode the function
+ * number in the software trap number itself. So these use trap
+ * numbers > 0x80. The register usage for hyper-fast traps is as
+ * follows:
+ *
+ * -----------------------------------------------
+ * | %o0 | argument 0 | return status |
+ * | %o1 | argument 1 | return value 1 |
+ * | %o2 | argument 2 | return value 2 |
+ * | %o3 | argument 3 | return value 3 |
+ * | %o4 | argument 4 | return value 4 |
+ * -----------------------------------------------
+ *
+ * Registers providing explicit arguments to the hypervisor calls
+ * are volatile across the call. Upon return their values are
+ * undefined unless explicitly specified as containing a particular
+ * return value by the specific call. The return status is always
+ * returned in register %o0, zero indicates a successful execution of
+ * the hypervisor call and other values indicate an error status as
+ * defined below. So, for example, if a hyper-fast trap takes
+ * arguments 0, 1, and 2, then %o0, %o1, and %o2 are volatile across
+ * the call and %o3, %o4, and %o5 would be preserved.
+ *
+ * If the hypervisor trap is invalid, or the fast trap function number
+ * is invalid, HV_EBADTRAP will be returned in %o0. Also, all 64-bits
+ * of the argument and return values are significant.
+ */
+
+/* Trap numbers. */
+#define HV_FAST_TRAP 0x80
+#define HV_MMU_MAP_ADDR_TRAP 0x83
+#define HV_MMU_UNMAP_ADDR_TRAP 0x84
+#define HV_TTRACE_ADDENTRY_TRAP 0x85
+#define HV_CORE_TRAP 0xff
+
+/* Error codes. */
+#define HV_EOK 0 /* Successful return */
+#define HV_ENOCPU 1 /* Invalid CPU id */
+#define HV_ENORADDR 2 /* Invalid real address */
+#define HV_ENOINTR 3 /* Invalid interrupt id */
+#define HV_EBADPGSZ 4 /* Invalid pagesize encoding */
+#define HV_EBADTSB 5 /* Invalid TSB description */
+#define HV_EINVAL 6 /* Invalid argument */
+#define HV_EBADTRAP 7 /* Invalid function number */
+#define HV_EBADALIGN 8 /* Invalid address alignment */
+#define HV_EWOULDBLOCK 9 /* Cannot complete w/o blocking */
+#define HV_ENOACCESS 10 /* No access to resource */
+#define HV_EIO 11 /* I/O error */
+#define HV_ECPUERROR 12 /* CPU in error state */
+#define HV_ENOTSUPPORTED 13 /* Function not supported */
+#define HV_ENOMAP 14 /* No mapping found */
+#define HV_ETOOMANY 15 /* Too many items specified */
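+
+/* A minimal illustrative sketch, not part of the interface definition
+ * above: one way the fast trap calling convention could be expressed
+ * from C.  The helper name hv_fast_trap() and the use of GCC inline
+ * assembly are assumptions made purely for illustration; real call
+ * wrappers would normally be written in assembly.  The function
+ * number goes in %o5, up to four arguments in %o0-%o3, the status
+ * comes back in %o0 and the first return value in %o1.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_fast_trap(unsigned long func,
+					 unsigned long arg0,
+					 unsigned long arg1,
+					 unsigned long arg2,
+					 unsigned long arg3,
+					 unsigned long *ret1)
+{
+	register unsigned long o5 asm("o5") = func;	/* function number   */
+	register unsigned long o0 asm("o0") = arg0;	/* arg 0 / status     */
+	register unsigned long o1 asm("o1") = arg1;	/* arg 1 / ret val 1  */
+	register unsigned long o2 asm("o2") = arg2;	/* arg 2 / ret val 2  */
+	register unsigned long o3 asm("o3") = arg3;	/* arg 3 / ret val 3  */
+
+	__asm__ __volatile__("ta 0x80	! HV_FAST_TRAP"
+			     : "+r" (o0), "+r" (o1), "+r" (o2),
+			       "+r" (o3), "+r" (o5)
+			     : /* no additional inputs */
+			     : "o4", "memory");
+	if (ret1)
+		*ret1 = o1;
+	return o0;	/* return status, HV_EOK on success */
+}
+#endif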
+
+/* mach_exit()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MACH_EXIT
+ * ARG0: exit code
+ * ERRORS: This service does not return.
+ *
+ * Stop all CPUs in the virtual domain and place them into the stopped
+ * state. The 64-bit exit code may be passed to a service entity as
+ * the domain's exit status. On systems without a service entity, the
+ * domain will undergo a reset, and the boot firmware will be
+ * reloaded.
+ *
+ * This function will never return to the guest that invokes it.
+ *
+ * Note: By convention an exit code of zero denotes a successful exit by
+ * the guest code. A non-zero exit code denotes a guest specific
+ * error indication.
+ *
+ */
+#define HV_FAST_MACH_EXIT 0x00
+
+/* Domain services. */
+
+/* mach_desc()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MACH_DESC
+ * ARG0: buffer
+ * ARG1: length
+ * RET0: status
+ * RET1: length
+ * ERRORS: HV_EBADALIGN Buffer is badly aligned
+ * HV_ENORADDR Buffer is at an illegal real address.
+ * HV_EINVAL Buffer length is too small for complete
+ * machine description.
+ *
+ * Copy the most current machine description into the buffer indicated
+ * by the real address in ARG0. The buffer provided must be 16 byte
+ * aligned. Upon success or HV_EINVAL, this service returns the
+ * actual size of the machine description in the RET1 return value.
+ *
+ * Note: A method of determining the appropriate buffer size for the
+ * machine description is to first call this service with a buffer
+ * length of 0 bytes.
+ */
+#define HV_FAST_MACH_DESC 0x01
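+
+/* A minimal illustrative sketch (assumed helper names, not part of
+ * the interface definition): using the zero-length probe described in
+ * the note above to size a machine description buffer, then reading
+ * it.  Both helpers rely on the hypothetical hv_fast_trap() helper
+ * sketched earlier; the caller is assumed to supply the 16-byte
+ * aligned buffer's real address.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_mach_desc_size_example(unsigned long *len)
+{
+	/* A zero-length request fails with HV_EINVAL but still reports
+	 * the full machine description size in RET1.
+	 */
+	return hv_fast_trap(HV_FAST_MACH_DESC, 0, 0, 0, 0, len);
+}
+
+static inline unsigned long hv_mach_desc_read_example(unsigned long buf_ra,
+						      unsigned long buf_len,
+						      unsigned long *actual_len)
+{
+	return hv_fast_trap(HV_FAST_MACH_DESC, buf_ra, buf_len, 0, 0,
+			    actual_len);
+}
+#endif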
+
+/* mach_sir()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MACH_SIR
+ * ERRORS: This service does not return.
+ *
+ * Perform a software initiated reset of the virtual machine domain.
+ * All CPUs are captured as soon as possible, all hardware devices are
+ * returned to the entry default state, and the domain is restarted at
+ * the SIR (trap type 0x04) real trap table (RTBA) entry point on one
+ * of the CPUs. The single CPU restarted is selected as determined by
+ * platform specific policy. Memory is preserved across this
+ * operation.
+ */
+#define HV_FAST_MACH_SIR 0x02
+
+/* mach_set_soft_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MACH_SET_SOFT_STATE
+ * ARG0: software state
+ * ARG1: software state description pointer
+ * RET0: status
+ * ERRORS: EINVAL software state not valid or software state
+ * description is not NULL terminated
+ * ENORADDR software state description pointer is not a
+ * valid real address
+ * EBADALIGN software state description is not correctly
+ * aligned
+ *
+ * This allows the guest to report its soft state to the hypervisor. There
+ * are two primary components to this state. The first part states whether
+ * the guest software is running or not. The second contains optional
+ * details specific to the software.
+ *
+ * The software state argument is defined below in HV_SOFT_STATE_*, and
+ * indicates whether the guest is operating normally or in a transitional
+ * state.
+ *
+ * The software state description argument is a real address of a data buffer
+ * of size 32-bytes aligned on a 32-byte boundary. It is treated as a NULL
+ * terminated 7-bit ASCII string of up to 31 characters not including the
+ * NULL termination.
+ */
+#define HV_FAST_MACH_SET_SOFT_STATE 0x03
+#define HV_SOFT_STATE_NORMAL 0x01
+#define HV_SOFT_STATE_TRANSITION 0x02
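+
+/* A minimal illustrative sketch (assumed helper name): reporting a
+ * normal soft state via the hypothetical hv_fast_trap() helper
+ * sketched earlier.  desc_ra is assumed to be the real address of a
+ * 32-byte, 32-byte aligned buffer holding a NULL terminated 7-bit
+ * ASCII string as described above.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_report_normal_state_example(unsigned long desc_ra)
+{
+	return hv_fast_trap(HV_FAST_MACH_SET_SOFT_STATE,
+			    HV_SOFT_STATE_NORMAL, desc_ra, 0, 0,
+			    (unsigned long *) 0);
+}
+#endif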
+
+/* mach_get_soft_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MACH_GET_SOFT_STATE
+ * ARG0: software state description pointer
+ * RET0: status
+ * RET1: software state
+ * ERRORS: ENORADDR software state description pointer is not a
+ * valid real address
+ * EBADALIGN software state description is not correctly
+ * aligned
+ *
+ * Retrieve the current value of the guest's software state. The rules
+ * for the software state pointer are the same as for mach_set_soft_state()
+ * above.
+ */
+#define HV_FAST_MACH_GET_SOFT_STATE 0x04
+
+/* CPU services.
+ *
+ * CPUs represent devices that can execute software threads. A single
+ * chip that contains multiple cores or strands is represented as
+ * multiple CPUs with unique CPU identifiers. CPUs are exported to
+ * OBP via the machine description (and to the OS via the OBP device
+ * tree). CPUs are always in one of three states: stopped, running,
+ * or error.
+ *
+ * A CPU ID is a pre-assigned 16-bit value that uniquely identifies a
+ * CPU within a logical domain. Operations that are to be performed
+ * on multiple CPUs specify them via a CPU list. A CPU list is an
+ * array in real memory, of which each 16-bit word is a CPU ID. CPU
+ * lists are passed through the API as two arguments. The first is
+ * the number of entries (16-bit words) in the CPU list, and the
+ * second is the (real address) pointer to the CPU ID list.
+ */
+
+/* cpu_start()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_START
+ * ARG0: CPU ID
+ * ARG1: PC
+ * ARG2: RTBA
+ * ARG3: target ARG0
+ * RET0: status
+ * ERRORS: ENOCPU Invalid CPU ID
+ * EINVAL Target CPU ID is not in the stopped state
+ * ENORADDR Invalid PC or RTBA real address
+ * EBADALIGN Unaligned PC or unaligned RTBA
+ * EWOULDBLOCK Starting resources are not available
+ *
+ * Start CPU with given CPU ID with PC in %pc and with a real trap
+ * base address value of RTBA. The indicated CPU must be in the
+ * stopped state. The supplied RTBA must be aligned on a 256 byte
+ * boundary. On successful completion, the specified CPU will be in
+ * the running state and will be supplied with "target ARG0" in %o0
+ * and RTBA in %tba.
+ */
+#define HV_FAST_CPU_START 0x10
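+
+/* A minimal illustrative sketch (assumed helper name): starting a
+ * stopped CPU using the hypothetical hv_fast_trap() helper sketched
+ * earlier.  pc and rtba are real addresses supplied by the caller;
+ * rtba must be 256 byte aligned as described above.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_cpu_start_example(unsigned long cpuid,
+						 unsigned long pc,
+						 unsigned long rtba,
+						 unsigned long target_arg0)
+{
+	return hv_fast_trap(HV_FAST_CPU_START, cpuid, pc, rtba, target_arg0,
+			    (unsigned long *) 0);
+}
+#endif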
+
+/* cpu_stop()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_STOP
+ * ARG0: CPU ID
+ * RET0: status
+ * ERRORS: ENOCPU Invalid CPU ID
+ * EINVAL Target CPU ID is the current cpu
+ * EINVAL Target CPU ID is not in the running state
+ * EWOULDBLOCK Stopping resources are not available
+ * ENOTSUPPORTED Not supported on this platform
+ *
+ * The specified CPU is stopped. The indicated CPU must be in the
+ * running state. On completion, it will be in the stopped state. It
+ * is not legal to stop the current CPU.
+ *
+ * Note: As this service cannot be used to stop the current cpu, this service
+ * may not be used to stop the last running CPU in a domain. To stop
+ * and exit a running domain, a guest must use the mach_exit() service.
+ */
+#define HV_FAST_CPU_STOP 0x11
+
+/* cpu_yield()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_YIELD
+ * RET0: status
+ * ERRORS: No possible error.
+ *
+ * Suspend execution on the current CPU. Execution will resume when
+ * an interrupt (device, %stick_compare, or cross-call) is targeted to
+ * the CPU. On some CPUs, this API may be used by the hypervisor to
+ * save power by disabling hardware strands.
+ */
+#define HV_FAST_CPU_YIELD 0x12
+
+
+/* cpu_qconf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_QCONF
+ * ARG0: queue
+ * ARG1: base real address
+ * ARG2: number of entries
+ * RET0: status
+ * ERRORS: ENORADDR Invalid base real address
+ * EINVAL Invalid queue or number of entries is less
+ * than 2 or too large.
+ * EBADALIGN Base real address is not correctly aligned
+ * for size.
+ *
+ * Configure the given queue to be placed at the given base real
+ * address, with the given number of entries. The number of entries
+ * must be a power of 2. The base real address must be aligned
+ * exactly to match the queue size. Each queue entry is 64 bytes
+ * long, so for example a 32 entry queue must be aligned on a 2048
+ * byte real address boundary.
+ *
+ * The specified queue is unconfigured if the number of entries is given as zero.
+ *
+ * For the current version of this API service, the argument queue is defined
+ * as follows:
+ * queue description
+ * ----- -------------------------
+ * 0x3c cpu mondo queue
+ * 0x3d device mondo queue
+ * 0x3e resumable error queue
+ * 0x3f non-resumable error queue
+ *
+ * Note: The maximum number of entries for each queue for a specific cpu may
+ * be determined from the machine description.
+ */
+#define HV_FAST_CPU_QCONF 0x14
+#define HV_CPU_QUEUE_CPU_MONDO 0x3c
+#define HV_CPU_QUEUE_DEVICE_MONDO 0x3d
+#define HV_CPU_QUEUE_RES_ERROR 0x3e
+#define HV_CPU_QUEUE_NONRES_ERROR 0x3f
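+
+/* A minimal illustrative sketch (assumed helper name): configuring a
+ * 128-entry cpu mondo queue via the hypothetical hv_fast_trap()
+ * helper sketched earlier.  With 64-byte entries, a 128-entry queue
+ * occupies 8192 bytes, so base_ra must be aligned on an 8192-byte
+ * real address boundary as described above.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_cpu_mondo_qconf_example(unsigned long base_ra)
+{
+	return hv_fast_trap(HV_FAST_CPU_QCONF, HV_CPU_QUEUE_CPU_MONDO,
+			    base_ra, 128, 0, (unsigned long *) 0);
+}
+#endif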
+
+/* cpu_qinfo()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_QINFO
+ * ARG0: queue
+ * RET0: status
+ * RET1: base real address
+ * RET2: number of entries
+ * ERRORS: EINVAL Invalid queue
+ *
+ * Return the configuration info for the given queue. The base real
+ * address and number of entries of the defined queue are returned.
+ * The queue argument values are the same as for cpu_qconf() above.
+ *
+ * If the specified queue is a valid queue number, but no queue has
+ * been defined, the number of entries will be set to zero and the
+ * base real address returned is undefined.
+ */
+#define HV_FAST_CPU_QINFO 0x15
+
+/* cpu_mondo_send()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_MONDO_SEND
+ * ARG0-1: CPU list
+ * ARG2: data real address
+ * RET0: status
+ * ERRORS: EBADALIGN Mondo data is not 64-byte aligned or CPU list
+ * is not 2-byte aligned.
+ * ENORADDR Invalid data mondo address, or invalid cpu list
+ * address.
+ * ENOCPU Invalid cpu in CPU list
+ * EWOULDBLOCK Some or all of the listed CPUs did not receive
+ * the mondo
+ * EINVAL CPU list includes caller's CPU ID
+ *
+ * Send a mondo interrupt to the CPUs in the given CPU list with the
+ * 64-bytes at the given data real address. The data must be 64-byte
+ * aligned. The mondo data will be delivered to the cpu_mondo queues
+ * of the recipient CPUs.
+ *
+ * In all cases, error or not, the CPUs in the CPU list to which the
+ * mondo has been successfully delivered will be indicated by having
+ * their entry in the CPU list updated with the value 0xffff.
+ */
+#define HV_FAST_CPU_MONDO_SEND 0x42
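+
+/* A minimal illustrative sketch (assumed helper name): sending a
+ * mondo to a list of CPUs via the hypothetical hv_fast_trap() helper
+ * sketched earlier.  Per the CPU list convention described above,
+ * cpu_list_ra is the real address of an array of ncpus 16-bit CPU IDs
+ * (2-byte aligned), and data_ra is the real address of the 64-byte
+ * aligned mondo data.  Entries successfully delivered are rewritten
+ * to 0xffff by the hypervisor.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_cpu_mondo_send_example(unsigned long ncpus,
+						      unsigned long cpu_list_ra,
+						      unsigned long data_ra)
+{
+	return hv_fast_trap(HV_FAST_CPU_MONDO_SEND, ncpus, cpu_list_ra,
+			    data_ra, 0, (unsigned long *) 0);
+}
+#endif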
+
+/* cpu_myid()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_MYID
+ * RET0: status
+ * RET1: CPU ID
+ * ERRORS: No errors defined.
+ *
+ * Return the hypervisor ID handle for the current CPU. Used by a
+ * virtual CPU to discover its own identity.
+ */
+#define HV_FAST_CPU_MYID 0x16
+
+/* cpu_state()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_STATE
+ * ARG0: CPU ID
+ * RET0: status
+ * RET1: state
+ * ERRORS: ENOCPU Invalid CPU ID
+ *
+ * Retrieve the current state of the CPU with the given CPU ID.
+ */
+#define HV_FAST_CPU_STATE 0x17
+#define HV_CPU_STATE_STOPPED 0x01
+#define HV_CPU_STATE_RUNNING 0x02
+#define HV_CPU_STATE_ERROR 0x03
+
+/* cpu_set_rtba()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_SET_RTBA
+ * ARG0: RTBA
+ * RET0: status
+ * RET1: previous RTBA
+ * ERRORS: ENORADDR Invalid RTBA real address
+ * EBADALIGN RTBA is incorrectly aligned for a trap table
+ *
+ * Set the real trap base address of the local cpu to the given RTBA.
+ * The supplied RTBA must be aligned on a 256 byte boundary. Upon
+ * success the previous value of the RTBA is returned in RET1.
+ *
+ * Note: This service does not affect %tba
+ */
+#define HV_FAST_CPU_SET_RTBA 0x18
+
+/* cpu_get_rtba()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CPU_GET_RTBA
+ * RET0: status
+ * RET1: current RTBA
+ * ERRORS: No possible error.
+ *
+ * Returns the current value of RTBA in RET1.
+ */
+#define HV_FAST_CPU_GET_RTBA 0x19
+
+/* MMU services.
+ *
+ * Layout of a TSB description for mmu_tsb_ctx{,non}0() calls.
+ */
+#ifndef __ASSEMBLY__
+struct hv_tsb_descr {
+ unsigned short pgsz_idx;
+ unsigned short assoc;
+ unsigned int num_ttes; /* in TTEs */
+ unsigned int ctx_idx;
+ unsigned int pgsz_mask;
+ unsigned long tsb_base;
+ unsigned long resv;
+};
+#endif
+#define HV_TSB_DESCR_PGSZ_IDX_OFFSET 0x00
+#define HV_TSB_DESCR_ASSOC_OFFSET 0x02
+#define HV_TSB_DESCR_NUM_TTES_OFFSET 0x04
+#define HV_TSB_DESCR_CTX_IDX_OFFSET 0x08
+#define HV_TSB_DESCR_PGSZ_MASK_OFFSET 0x0c
+#define HV_TSB_DESCR_TSB_BASE_OFFSET 0x10
+#define HV_TSB_DESCR_RESV_OFFSET 0x18
+
+/* Page size bitmask. */
+#define HV_PGSZ_MASK_8K (1 << 0)
+#define HV_PGSZ_MASK_64K (1 << 1)
+#define HV_PGSZ_MASK_512K (1 << 2)
+#define HV_PGSZ_MASK_4MB (1 << 3)
+#define HV_PGSZ_MASK_32MB (1 << 4)
+#define HV_PGSZ_MASK_256MB (1 << 5)
+#define HV_PGSZ_MASK_2GB (1 << 6)
+#define HV_PGSZ_MASK_16GB (1 << 7)
+
+/* Page size index. The value given in the TSB descriptor must correspond
+ * to the smallest page size specified in the pgsz_mask page size bitmask.
+ */
+#define HV_PGSZ_IDX_8K 0
+#define HV_PGSZ_IDX_64K 1
+#define HV_PGSZ_IDX_512K 2
+#define HV_PGSZ_IDX_4MB 3
+#define HV_PGSZ_IDX_32MB 4
+#define HV_PGSZ_IDX_256MB 5
+#define HV_PGSZ_IDX_2GB 6
+#define HV_PGSZ_IDX_16GB 7
+
+/* MMU fault status area.
+ *
+ * MMU related faults have their status and fault address information
+ * placed into a memory region made available by privileged code. Each
+ * virtual processor must make a mmu_fault_area_conf() call to tell the
+ * hypervisor where that processor's fault status should be stored.
+ *
+ * The fault status block is a multiple of 64-bytes and must be aligned
+ * on a 64-byte boundary.
+ */
+#ifndef __ASSEMBLY__
+struct hv_fault_status {
+ unsigned long i_fault_type;
+ unsigned long i_fault_addr;
+ unsigned long i_fault_ctx;
+ unsigned long i_reserved[5];
+ unsigned long d_fault_type;
+ unsigned long d_fault_addr;
+ unsigned long d_fault_ctx;
+ unsigned long d_reserved[5];
+};
+#endif
+#define HV_FAULT_I_TYPE_OFFSET 0x00
+#define HV_FAULT_I_ADDR_OFFSET 0x08
+#define HV_FAULT_I_CTX_OFFSET 0x10
+#define HV_FAULT_D_TYPE_OFFSET 0x40
+#define HV_FAULT_D_ADDR_OFFSET 0x48
+#define HV_FAULT_D_CTX_OFFSET 0x50
+
+#define HV_FAULT_TYPE_FAST_MISS 1
+#define HV_FAULT_TYPE_FAST_PROT 2
+#define HV_FAULT_TYPE_MMU_MISS 3
+#define HV_FAULT_TYPE_INV_RA 4
+#define HV_FAULT_TYPE_PRIV_VIOL 5
+#define HV_FAULT_TYPE_PROT_VIOL 6
+#define HV_FAULT_TYPE_NFO 7
+#define HV_FAULT_TYPE_NFO_SEFF 8
+#define HV_FAULT_TYPE_INV_VA 9
+#define HV_FAULT_TYPE_INV_ASI 10
+#define HV_FAULT_TYPE_NC_ATOMIC 11
+#define HV_FAULT_TYPE_PRIV_ACT 12
+#define HV_FAULT_TYPE_RESV1 13
+#define HV_FAULT_TYPE_UNALIGNED 14
+#define HV_FAULT_TYPE_INV_PGSZ 15
+/* Values 16 --> -2 are reserved. */
+#define HV_FAULT_TYPE_MULTIPLE -1
+
+/* Flags argument for mmu_{map,unmap}_addr(), mmu_demap_{page,context,all}(),
+ * and mmu_{map,unmap}_perm_addr().
+ */
+#define HV_MMU_DMMU 0x01
+#define HV_MMU_IMMU 0x02
+#define HV_MMU_ALL (HV_MMU_DMMU | HV_MMU_IMMU)
+
+/* mmu_map_addr()
+ * TRAP: HV_MMU_MAP_ADDR_TRAP
+ * ARG0: virtual address
+ * ARG1: mmu context
+ * ARG2: TTE
+ * ARG3: flags (HV_MMU_{IMMU,DMMU})
+ * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
+ * EBADPGSZ Invalid page size value
+ * ENORADDR Invalid real address in TTE
+ *
+ * Create a non-permanent mapping using the given TTE, virtual
+ * address, and mmu context. The flags argument determines which
+ * (data, or instruction, or both) TLB the mapping gets loaded into.
+ *
+ * The behavior is undefined if the valid bit is clear in the TTE.
+ *
+ * Note: This API call is for privileged code to specify temporary translation
+ * mappings without the need to create and manage a TSB.
+ */
+
+/* mmu_unmap_addr()
+ * TRAP: HV_MMU_UNMAP_ADDR_TRAP
+ * ARG0: virtual address
+ * ARG1: mmu context
+ * ARG2: flags (HV_MMU_{IMMU,DMMU})
+ * ERRORS: EINVAL Invalid virtual address, mmu context, or flags
+ *
+ * Demaps the given virtual address in the given mmu context on this
+ * CPU. This function is intended to be used to demap pages mapped
+ * with mmu_map_addr. This service is equivalent to invoking
+ * mmu_demap_page() with only the current CPU in the CPU list. The
+ * flags argument determines which (data, or instruction, or both) TLB
+ * the mapping gets unmapped from.
+ *
+ * Attempting to perform an unmap operation for a previously defined
+ * permanent mapping will have undefined results.
+ */
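+
+/* A minimal illustrative sketch: mmu_map_addr() and mmu_unmap_addr()
+ * are hyper-fast traps, so the function is encoded in the trap number
+ * itself and the fast trap helper sketched earlier does not apply.
+ * The wrapper name and the inline assembly below are assumptions for
+ * illustration only; %o0-%o3 carry the arguments and %o0 returns the
+ * status.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_mmu_map_addr_example(unsigned long vaddr,
+						    unsigned long ctx,
+						    unsigned long tte,
+						    unsigned long flags)
+{
+	register unsigned long o0 asm("o0") = vaddr;
+	register unsigned long o1 asm("o1") = ctx;
+	register unsigned long o2 asm("o2") = tte;
+	register unsigned long o3 asm("o3") = flags;	/* HV_MMU_{IMMU,DMMU} */
+
+	__asm__ __volatile__("ta 0x83	! HV_MMU_MAP_ADDR_TRAP"
+			     : "+r" (o0), "+r" (o1), "+r" (o2), "+r" (o3)
+			     : /* no additional inputs */
+			     : "o4", "o5", "memory");
+	return o0;	/* return status, HV_EOK on success */
+}
+#endif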
+
+/* mmu_tsb_ctx0()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_TSB_CTX0
+ * ARG0: number of TSB descriptions
+ * ARG1: TSB descriptions pointer
+ * RET0: status
+ * ERRORS: ENORADDR Invalid TSB descriptions pointer or
+ * TSB base within a descriptor
+ * EBADALIGN TSB descriptions pointer is not aligned
+ * to an 8-byte boundary, or TSB base
+ * within a descriptor is not aligned for
+ * the given TSB size
+ * EBADPGSZ Invalid page size in a TSB descriptor
+ * EBADTSB Invalid associativity or size in a TSB
+ * descriptor
+ * EINVAL Invalid number of TSB descriptions, or
+ * invalid context index in a TSB
+ * descriptor, or index page size not
+ * equal to smallest page size in page
+ * size bitmask field.
+ *
+ * Configures the TSBs for the current CPU for virtual addresses with
+ * context zero. The TSB descriptions pointer is a pointer to an
+ * array of the given number of TSB descriptions.
+ *
+ * Note: The maximum number of TSBs available to a virtual CPU is given by the
+ * mmu-max-#tsbs property of the cpu's corresponding "cpu" node in the
+ * machine description.
+ */
+#define HV_FAST_MMU_TSB_CTX0 0x20
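+
+/* A minimal illustrative sketch (assumed helper name): filling in a
+ * single struct hv_tsb_descr for a direct mapped, 8K-page TSB and
+ * handing it to mmu_tsb_ctx0() via the hypothetical hv_fast_trap()
+ * helper sketched earlier.  tsb_ra and descr_ra are real addresses
+ * supplied by the caller; descr_ra must be the real address of the
+ * descriptor itself.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_tsb_ctx0_example(struct hv_tsb_descr *desc,
+						unsigned long tsb_ra,
+						unsigned long num_ttes,
+						unsigned long descr_ra)
+{
+	desc->pgsz_idx = HV_PGSZ_IDX_8K;	/* smallest size in the mask */
+	desc->assoc = 1;			/* direct mapped */
+	desc->num_ttes = num_ttes;
+	desc->ctx_idx = 0;
+	desc->pgsz_mask = HV_PGSZ_MASK_8K;
+	desc->tsb_base = tsb_ra;
+	desc->resv = 0;
+
+	/* One descriptor, located at real address descr_ra.  */
+	return hv_fast_trap(HV_FAST_MMU_TSB_CTX0, 1, descr_ra, 0, 0,
+			    (unsigned long *) 0);
+}
+#endif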
+
+/* mmu_tsb_ctxnon0()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_TSB_CTXNON0
+ * ARG0: number of TSB descriptions
+ * ARG1: TSB descriptions pointer
+ * RET0: status
+ * ERRORS: Same as for mmu_tsb_ctx0() above.
+ *
+ * Configures the TSBs for the current CPU for virtual addresses with
+ * non-zero contexts. The TSB descriptions pointer is a pointer to an
+ * array of the given number of TSB descriptions.
+ *
+ * Note: A maximum of 16 TSBs may be specified in the TSB description list.
+ */
+#define HV_FAST_MMU_TSB_CTXNON0 0x21
+
+/* mmu_demap_page()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_DEMAP_PAGE
+ * ARG0: reserved, must be zero
+ * ARG1: reserved, must be zero
+ * ARG2: virtual address
+ * ARG3: mmu context
+ * ARG4: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid virtual address, context, or
+ * flags value
+ * ENOTSUPPORTED ARG0 or ARG1 is non-zero
+ *
+ * Demaps any page mapping of the given virtual address in the given
+ * mmu context for the current virtual CPU. Any virtually tagged
+ * caches are guaranteed to be kept consistent. The flags argument
+ * determines which TLB (instruction, or data, or both) participate in
+ * the operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_PAGE 0x22
+
+/* mmu_demap_ctx()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_DEMAP_CTX
+ * ARG0: reserved, must be zero
+ * ARG1: reserved, must be zero
+ * ARG2: mmu context
+ * ARG3: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid context or flags value
+ * ENOTSUPPORTED ARG0 or ARG1 is non-zero
+ *
+ * Demaps all non-permanent virtual page mappings previously specified
+ * for the given context for the current virtual CPU. Any virtual
+ * tagged caches are guaranteed to be kept consistent. The flags
+ * argument determines which TLB (instruction, or data, or both)
+ * participate in the operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_CTX 0x23
+
+/* mmu_demap_all()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_DEMAP_ALL
+ * ARG0: reserved, must be zero
+ * ARG1: reserved, must be zero
+ * ARG2: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid flags value
+ * ENOTSUPPORTED ARG0 or ARG1 is non-zero
+ *
+ * Demaps all non-permanent virtual page mappings previously specified
+ * for the current virtual CPU. Any virtual tagged caches are
+ * guaranteed to be kept consistent. The flags argument determines
+ * which TLB (instruction, or data, or both) participate in the
+ * operation.
+ *
+ * ARG0 and ARG1 are both reserved and must be set to zero.
+ */
+#define HV_FAST_MMU_DEMAP_ALL 0x24
+
+/* mmu_map_perm_addr()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_MAP_PERM_ADDR
+ * ARG0: virtual address
+ * ARG1: reserved, must be zero
+ * ARG2: TTE
+ * ARG3: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid virtual address or flags value
+ * EBADPGSZ Invalid page size value
+ * ENORADDR Invalid real address in TTE
+ * ETOOMANY Too many mappings (max of 8 reached)
+ *
+ * Create a permanent mapping using the given TTE and virtual address
+ * for context 0 on the calling virtual CPU. A maximum of 8 such
+ * permanent mappings may be specified by privileged code. Mappings
+ * may be removed with mmu_unmap_perm_addr().
+ *
+ * The behavior is undefined if a TTE with the valid bit clear is given.
+ *
+ * Note: This call is used to specify address space mappings for which
+ * privileged code does not expect to receive misses. For example,
+ * this mechanism can be used to map kernel nucleus code and data.
+ */
+#define HV_FAST_MMU_MAP_PERM_ADDR 0x25
+
+/* mmu_fault_area_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_FAULT_AREA_CONF
+ * ARG0: real address
+ * RET0: status
+ * RET1: previous mmu fault area real address
+ * ERRORS: ENORADDR Invalid real address
+ * EBADALIGN Invalid alignment for fault area
+ *
+ * Configure the MMU fault status area for the calling CPU. A 64-byte
+ * aligned real address specifies where MMU fault status information
+ * is placed. The return value is the previously specified area, or 0
+ * for the first invocation. Specifying a fault area at real address
+ * 0 is not allowed.
+ */
+#define HV_FAST_MMU_FAULT_AREA_CONF 0x26
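+
+/* A minimal illustrative sketch (assumed helper name): registering a
+ * 64-byte aligned struct hv_fault_status area for the calling CPU via
+ * the hypothetical hv_fast_trap() helper sketched earlier.  The
+ * caller is assumed to pass the area's real address; the previously
+ * configured area (or 0) comes back through prev_ra.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_fault_area_conf_example(unsigned long area_ra,
+							unsigned long *prev_ra)
+{
+	return hv_fast_trap(HV_FAST_MMU_FAULT_AREA_CONF, area_ra, 0, 0, 0,
+			    prev_ra);
+}
+#endif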
+
+/* mmu_enable()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_ENABLE
+ * ARG0: enable flag
+ * ARG1: return target address
+ * RET0: status
+ * ERRORS: ENORADDR Invalid real address when disabling
+ * translation.
+ * EBADALIGN The return target address is not
+ * aligned to an instruction.
+ * EINVAL The enable flag requests the current
+ * operating mode (e.g. disable if already
+ * disabled)
+ *
+ * Enable or disable virtual address translation for the calling CPU
+ * within the virtual machine domain. If the enable flag is zero,
+ * translation is disabled, any non-zero value will enable
+ * translation.
+ *
+ * When this function returns, the newly selected translation mode
+ * will be active. If the mmu is being enabled, then the return
+ * target address is a virtual address; otherwise it is a real address.
+ *
+ * Upon successful completion, control will be returned to the given
+ * return target address (i.e. the cpu will jump to that address). On
+ * failure, the previous mmu mode remains and the trap simply returns
+ * as normal with the appropriate error code in RET0.
+ */
+#define HV_FAST_MMU_ENABLE 0x27
+
+/* mmu_unmap_perm_addr()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_UNMAP_PERM_ADDR
+ * ARG0: virtual address
+ * ARG1: reserved, must be zero
+ * ARG2: flags (HV_MMU_{IMMU,DMMU})
+ * RET0: status
+ * ERRORS: EINVAL Invalid virtual address or flags value
+ * ENOMAP Specified mapping was not found
+ *
+ * Demaps any permanent page mapping (established via
+ * mmu_map_perm_addr()) at the given virtual address for context 0 on
+ * the current virtual CPU. Any virtual tagged caches are guaranteed
+ * to be kept consistent.
+ */
+#define HV_FAST_MMU_UNMAP_PERM_ADDR 0x28
+
+/* mmu_tsb_ctx0_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_TSB_CTX0_INFO
+ * ARG0: max TSBs
+ * ARG1: buffer pointer
+ * RET0: status
+ * RET1: number of TSBs
+ * ERRORS: EINVAL Supplied buffer is too small
+ * EBADALIGN The buffer pointer is badly aligned
+ * ENORADDR Invalid real address for buffer pointer
+ *
+ * Return the TSB configuration as previously defined by mmu_tsb_ctx0()
+ * into the provided buffer. The size of the buffer is given in ARG1
+ * in terms of the number of TSB description entries.
+ *
+ * Upon return, RET1 always contains the number of TSB descriptions
+ * previously configured. If zero TSBs were configured, EOK is
+ * returned with RET1 containing 0.
+ */
+#define HV_FAST_MMU_TSB_CTX0_INFO 0x29
+
+/* mmu_tsb_ctxnon0_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_TSB_CTXNON0_INFO
+ * ARG0: max TSBs
+ * ARG1: buffer pointer
+ * RET0: status
+ * RET1: number of TSBs
+ * ERRORS: EINVAL Supplied buffer is too small
+ * EBADALIGN The buffer pointer is badly aligned
+ * ENORADDR Invalid real address for buffer pointer
+ *
+ * Return the TSB configuration as previously defined by
+ * mmu_tsb_ctxnon0() into the provided buffer. The size of the buffer
+ * is given in ARG1 in terms of the number of TSB description entries.
+ *
+ * Upon return, RET1 always contains the number of TSB descriptions
+ * previously configured. If zero TSBs were configured, EOK is
+ * returned with RET1 containing 0.
+ */
+#define HV_FAST_MMU_TSB_CTXNON0_INFO 0x2a
+
+/* mmu_fault_area_info()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MMU_FAULT_AREA_INFO
+ * RET0: status
+ * RET1: fault area real address
+ * ERRORS: No errors defined.
+ *
+ * Return the currently defined MMU fault status area for the current
+ * CPU. The real address of the fault status area is returned in
+ * RET1, or 0 is returned in RET1 if no fault status area is defined.
+ *
+ * Note: mmu_fault_area_conf() may be called with the return value (RET1)
+ * from this service if there is a need to save and restore the fault
+ * area for a cpu.
+ */
+#define HV_FAST_MMU_FAULT_AREA_INFO 0x2b
+
+/* Cache and Memory services. */
+
+/* mem_scrub()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MEM_SCRUB
+ * ARG0: real address
+ * ARG1: length
+ * RET0: status
+ * RET1: length scrubbed
+ * ERRORS: ENORADDR Invalid real address
+ * EBADALIGN Start address or length are not correctly
+ * aligned
+ * EINVAL Length is zero
+ *
+ * Zero the memory contents in the range real address to real address
+ * plus length minus 1. Also, valid ECC will be generated for that
+ * memory address range. Scrubbing is started at the given real
+ * address, but may not scrub the entire given length. The actual
+ * length scrubbed will be returned in RET1.
+ *
+ * The real address and length must be aligned on an 8K boundary, or
+ * contain the start address and length from a sun4v error report.
+ *
+ * Note: There are two uses for this function. The first use is to block clear
+ * and initialize memory and the second is to scrub an uncorrectable
+ * error reported via a resumable or non-resumable trap. The second
+ * use requires the arguments to be equal to the real address and length
+ * provided in a sun4v memory error report.
+ */
+#define HV_FAST_MEM_SCRUB 0x31
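+
+/* A minimal illustrative sketch (assumed helper name): because
+ * mem_scrub() may scrub less than the requested length, a caller is
+ * expected to loop until the whole range has been covered.  Uses the
+ * hypothetical hv_fast_trap() helper sketched earlier; ra and len
+ * must meet the alignment rules described above.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_mem_scrub_all_example(unsigned long ra,
+						     unsigned long len)
+{
+	unsigned long status, scrubbed;
+
+	while (len) {
+		status = hv_fast_trap(HV_FAST_MEM_SCRUB, ra, len, 0, 0,
+				      &scrubbed);
+		/* Stop on error, or defensively if no progress is reported. */
+		if (status != HV_EOK || !scrubbed)
+			return status;
+		ra += scrubbed;
+		len -= scrubbed;
+	}
+	return HV_EOK;
+}
+#endif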
+
+/* mem_sync()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_MEM_SYNC
+ * ARG0: real address
+ * ARG1: length
+ * RET0: status
+ * RET1: length synced
+ * ERRORS: ENORADDR Invalid real address
+ * EBADALIGN Start address or length are not correctly
+ * aligned
+ * EINVAL Length is zero
+ *
+ * Force the next access within the real address to real address plus
+ * length minus 1 to be fetched from main system memory. Less than
+ * the given length may be synced; the actual amount synced is
+ * returned in RET1. The real address and length must be aligned on
+ * an 8K boundary.
+ */
+#define HV_FAST_MEM_SYNC 0x32
+
+/* Time of day services.
+ *
+ * The hypervisor maintains the time of day on a per-domain basis.
+ * Changing the time of day in one domain does not affect the time of
+ * day on any other domain.
+ *
+ * Time is described by a single unsigned 64-bit word which is the
+ * number of seconds since the UNIX Epoch (00:00:00 UTC, January 1,
+ * 1970).
+ */
+
+/* tod_get()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TOD_GET
+ * RET0: status
+ * RET1: TOD
+ * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
+ * ENOTSUPPORTED If TOD not supported on this platform
+ *
+ * Return the current time of day. May block if TOD access is
+ * temporarily not possible.
+ */
+#define HV_FAST_TOD_GET 0x50
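+
+/* A minimal illustrative sketch (assumed helper name): reading the
+ * domain's time of day via the hypothetical hv_fast_trap() helper
+ * sketched earlier, retrying while the TOD resource is temporarily
+ * unavailable.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_tod_get_example(unsigned long *seconds)
+{
+	unsigned long status;
+
+	do {
+		status = hv_fast_trap(HV_FAST_TOD_GET, 0, 0, 0, 0, seconds);
+	} while (status == HV_EWOULDBLOCK);
+
+	return status;
+}
+#endif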
+
+/* tod_set()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TOD_SET
+ * ARG0: TOD
+ * RET0: status
+ * ERRORS: EWOULDBLOCK TOD resource is temporarily unavailable
+ * ENOTSUPPORTED If TOD not supported on this platform
+ *
+ * The current time of day is set to the value specified in ARG0. May
+ * block if TOD access is temporarily not possible.
+ */
+#define HV_FAST_TOD_SET 0x51
+
+/* Console services */
+
+/* con_getchar()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CONS_GETCHAR
+ * RET0: status
+ * RET1: character
+ * ERRORS: EWOULDBLOCK No character available.
+ *
+ * Returns a character from the console device. If no character is
+ * available then an EWOULDBLOCK error is returned. If a character is
+ * available, then the returned status is EOK and the character value
+ * is in RET1.
+ *
+ * A virtual BREAK is represented by the 64-bit value -1.
+ *
+ * A virtual HUP signal is represented by the 64-bit value -2.
+ */
+#define HV_FAST_CONS_GETCHAR 0x60
+
+/* con_putchar()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_CONS_PUTCHAR
+ * ARG0: character
+ * RET0: status
+ * ERRORS: EINVAL Illegal character
+ * EWOULDBLOCK Output buffer currently full, would block
+ *
+ * Send a character to the console device. Only character values
+ * between 0 and 255 may be used. Values outside this range are
+ * invalid except for the 64-bit value -1 which is used to send a
+ * virtual BREAK.
+ */
+#define HV_FAST_CONS_PUTCHAR 0x61
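+
+/* A minimal illustrative sketch (assumed helper names): polled
+ * console I/O built on the hypothetical hv_fast_trap() helper
+ * sketched earlier.  hv_cons_getchar_example() returns HV_EWOULDBLOCK
+ * when no character is pending (a virtual BREAK appears as the 64-bit
+ * value -1); hv_cons_putchar_example() spins while the output buffer
+ * is full.
+ */
+#ifndef __ASSEMBLY__
+static inline unsigned long hv_cons_getchar_example(unsigned long *ch)
+{
+	return hv_fast_trap(HV_FAST_CONS_GETCHAR, 0, 0, 0, 0, ch);
+}
+
+static inline void hv_cons_putchar_example(unsigned long ch)
+{
+	while (hv_fast_trap(HV_FAST_CONS_PUTCHAR, ch, 0, 0, 0,
+			    (unsigned long *) 0) == HV_EWOULDBLOCK)
+		/* output buffer full, retry */ ;
+}
+#endif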
+
+/* Trap trace services.
+ *
+ * The hypervisor provides a trap tracing capability for privileged
+ * code running on each virtual CPU. Privileged code provides a
+ * round-robin trap trace queue within which the hypervisor writes
+ * 64-byte entries detailing hyperprivileged traps taken on behalf of
+ * privileged code. This is provided as a debugging capability for
+ * privileged code.
+ *
+ * The trap trace control structure is 64-bytes long and placed at the
+ * start (offset 0) of the trap trace buffer, and is described as
+ * follows:
+ */
+#ifndef __ASSEMBLY__
+struct hv_trap_trace_control {
+ unsigned long head_offset;
+ unsigned long tail_offset;
+ unsigned long __reserved[0x30 / sizeof(unsigned long)];
+};
+#endif
+#define HV_TRAP_TRACE_CTRL_HEAD_OFFSET 0x00
+#define HV_TRAP_TRACE_CTRL_TAIL_OFFSET 0x08
+
+/* The head offset is the offset of the most recently completed entry
+ * in the trap-trace buffer. The tail offset is the offset of the
+ * next entry to be written. The control structure is owned and
+ * modified by the hypervisor. A guest may not modify the control
+ * structure contents. Attempts to do so will result in undefined
+ * behavior for the guest.
+ *
+ * Each trap trace buffer entry is laid out as follows:
+ */
+#ifndef __ASSEMBLY__
+struct hv_trap_trace_entry {
+ unsigned char type; /* Hypervisor or guest entry? */
+ unsigned char hpstate; /* Hyper-privileged state */
+ unsigned char tl; /* Trap level */
+ unsigned char gl; /* Global register level */
+ unsigned short tt; /* Trap type */
+ unsigned short tag; /* Extended trap identifier */
+ unsigned long tstate; /* Trap state */
+ unsigned long tick; /* Tick */
+ unsigned long tpc; /* Trap PC */
+ unsigned long f1; /* Entry specific */
+ unsigned long f2; /* Entry specific */
+ unsigned long f3; /* Entry specific */
+ unsigned long f4; /* Entry specific */
+};
+#endif
+#define HV_TRAP_TRACE_ENTRY_TYPE 0x00
+#define HV_TRAP_TRACE_ENTRY_HPSTATE 0x01
+#define HV_TRAP_TRACE_ENTRY_TL 0x02
+#define HV_TRAP_TRACE_ENTRY_GL 0x03
+#define HV_TRAP_TRACE_ENTRY_TT 0x04
+#define HV_TRAP_TRACE_ENTRY_TAG 0x06
+#define HV_TRAP_TRACE_ENTRY_TSTATE 0x08
+#define HV_TRAP_TRACE_ENTRY_TICK 0x10
+#define HV_TRAP_TRACE_ENTRY_TPC 0x18
+#define HV_TRAP_TRACE_ENTRY_F1 0x20
+#define HV_TRAP_TRACE_ENTRY_F2 0x28
+#define HV_TRAP_TRACE_ENTRY_F3 0x30
+#define HV_TRAP_TRACE_ENTRY_F4 0x38
+
+/* The type field is encoded as follows. */
+#define HV_TRAP_TYPE_UNDEF 0x00 /* Entry content undefined */
+#define HV_TRAP_TYPE_HV 0x01 /* Hypervisor trap entry */
+#define HV_TRAP_TYPE_GUEST 0xff /* Added via ttrace_addentry() */
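+
+/* A minimal illustrative sketch (assumed helper name): locating the
+ * most recently completed trace entry from the control structure at
+ * the start of the trap trace buffer, as described above.  The guest
+ * must not modify the control structure itself.
+ */
+#ifndef __ASSEMBLY__
+static inline struct hv_trap_trace_entry *
+hv_ttrace_last_entry_example(void *ttrace_buf)
+{
+	struct hv_trap_trace_control *ctrl = ttrace_buf;
+
+	/* head_offset is a byte offset from the buffer base to the
+	 * most recently completed 64-byte entry.
+	 */
+	return (struct hv_trap_trace_entry *)
+		((char *) ttrace_buf + ctrl->head_offset);
+}
+#endif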
+
+/* ttrace_buf_conf()
+ * TRAP: HV_FAST_TRAP
+ * FUNCTION: HV_FAST_TTRACE_BUF_CONF
+ * ARG0: real address
+ * ARG1: number of entries
+ * RET0: status
+ * RET1: number of entries
+ * ERRORS: ENORADDR Invalid real address
+ * EINVAL Size is too small
+ * EBADALIGN Real address not aligned on 64-byte boundary
+ *
+ * Requests hypervisor trap tracing and declares a virtual CPU's trap
+ * trace buffer to the hypervisor. The real address supplies the real
+ * base address of the trap trace queue and must be 64-byte aligned.
+ * Specifying a value of 0 for the number of entries disables trap
+ * tracing for the calling virtual CPU. The buffer allocated must be
+ * sized for a power of two number of 64-byte trap trace entries plus
+ * an initial 64-byte control structure.
+