import Workstation 17.5.0 module sources

Michal Kubecek 2023-10-20 13:56:49 +02:00
parent fee62c948e
commit 60bfdb5ed5
62 changed files with 1507 additions and 607 deletions

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2015-2021 VMware, Inc. All rights reserved.
* Copyright (c) 2015-2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -791,7 +791,7 @@ MonLoader_Process(MonLoaderHeader *header, // IN/OUT
}
*line = LINE_INVALID;
if (header->entries == 0 || header->count == 0) {
if (header->count == 0) {
return ML_ERROR_TABLE_MISSING;
}

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2016-2020 VMware, Inc. All rights reserved.
* Copyright (c) 2016-2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -28,7 +28,7 @@
#include "vm_assert.h"
#include "hostif.h"
#include "vmmblob.h"
#include "vmm_constants.h"
#include "mon_constants.h"
#include "monLoader.h"
/*

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2011, 2016, 2018,2020 VMware, Inc. All rights reserved.
* Copyright (c) 2011, 2016, 2018,2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998, 2016-2022 VMware, Inc. All rights reserved.
* Copyright (c) 1998, 2016-2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998, 2016-2019 VMware, Inc. All rights reserved.
* Copyright (c) 1998, 2016-2019 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2021 VMware, Inc. All rights reserved.
* Copyright (c) 1998-2021 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998, 2008, 2018 VMware, Inc. All rights reserved.
* Copyright (c) 1998, 2008, 2018 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2007-2020 VMware, Inc. All rights reserved.
* Copyright (c) 2007-2020,2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -29,7 +29,6 @@
#include "vm_basic_defs.h"
#include "vm_assert.h"
#include "address_defs.h"
#include "vmm_constants.h"
#define DIRECT_EXEC_USER_RPL 3
#define BINARY_TRANSLATION_RPL 1

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -135,6 +135,12 @@
# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 5)
# define RHEL85_BACKPORTS 1
# endif
# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9, 1)
# define RHEL91_BACKPORTS 1
# endif
# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9, 2)
# define RHEL92_BACKPORTS 1
# endif
#endif
#endif /* __COMPAT_VERSION_H__ */
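
The two new RHEL_RELEASE_CODE checks extend the existing backport-detection pattern to RHEL 9.1 and 9.2 kernels, whose vendor trees carry newer APIs than their base kernel version suggests. A minimal sketch of how such a guard is typically consumed downstream (compat_do_thing, new_api_do_thing, and old_api_do_thing are illustrative names, not from this tree):

#ifdef RHEL92_BACKPORTS
/* RHEL 9.2 kernels already ship the newer API despite the old base version. */
# define compat_do_thing(x) new_api_do_thing(x)
#else
# define compat_do_thing(x) old_api_do_thing(x)
#endif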

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2020 VMware, Inc. All rights reserved.
* Copyright (c) 2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -391,9 +391,10 @@ IntelVT_FindCommon3rd(const MSRCache *data, // IN
*----------------------------------------------------------------------
*/
static INLINE Bool
IntelVT_Enabled(const MSRCache *cache, uint32 pcpu)
IntelVT_Enabled(const MSRCache *cache, uint32 pcpu, Bool smxEnabled)
{
return VT_EnabledFromFeatures(MSRCache_Get(cache, MSR_FEATCTL, pcpu));
return VT_EnabledFromFeatures(MSRCache_Get(cache, MSR_FEATCTL, pcpu),
smxEnabled);
}
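
IntelVT_Enabled now threads an smxEnabled flag through to VT_EnabledFromFeatures. Per the Intel SDM, IA32_FEATURE_CONTROL carries a lock bit (bit 0) plus separate VMXON enables for inside SMX (bit 1) and outside SMX (bit 2), so the check has to know which mode applies. A hedged sketch of that logic, assuming the SDM bit positions (the real VT_EnabledFromFeatures lives in another header):

static INLINE Bool
VT_EnabledFromFeaturesSketch(uint64 featCtl, Bool smxEnabled)
{
   /* Lock bit must be set, plus the VMXON enable matching the SMX mode. */
   uint64 required = 0x1 | (smxEnabled ? 0x2 : 0x4);
   return (featCtl & required) == required;
}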

View File

@ -168,6 +168,7 @@ PtrToVA64(void const *ptr) // IN
#endif
#if !defined __APPLE__
#if !__linux__
/*
* On platforms other than Linux, IOCTLCMD_foo values are just numbers, and
@ -198,13 +199,7 @@ enum IOCTLCmd {
* handling 32 bit ioctl syscalls. Hence FIRST and LAST. FIRST must be
* 2001 so that VERSION is 2001 for backwards compatibility.
*/
#if defined __linux__ || defined _WIN32
/* Start at 2001 because legacy code did. */
IOCTLCMD(FIRST) = 2001,
#else
/* Start at 0. */
IOCTLCMD(FIRST),
#endif
IOCTLCMD(VERSION) = IOCTLCMD(FIRST),
IOCTLCMD(CREATE_VM),
IOCTLCMD(PROCESS_BOOTSTRAP),
@ -268,18 +263,13 @@ enum IOCTLCmd {
IOCTLCMD(UNMAP_SCATTER_LIST),
#endif
#if defined __APPLE__
IOCTLCMD(GET_NUM_RESPONDING_CPUS),
IOCTLCMD(INIT_DRIVER),
IOCTLCMD(BLUEPILL),
#endif
IOCTLCMD(GET_UNAVAIL_PERF_CTRS),
IOCTLCMD(GET_MONITOR_CONTEXT),
IOCTLCMD(KERNEL_CET_ENABLED),
// Must be last.
IOCTLCMD(LAST)
};
#endif // !defined __APPLE__
#if defined _WIN32
@ -293,7 +283,7 @@ enum IOCTLCmd {
#define FILE_DEVICE_VMX86 0x8101
#define VMX86_IOCTL_BASE_INDEX 0x801
#define VMIOCTL_BUFFERED(name) \
CTL_CODE(FILE_DEVICE_VMX86, \
CTL_CODE(FILE_DEVICE_VMX86, \
VMX86_IOCTL_BASE_INDEX + IOCTLCMD_ ## name, \
METHOD_BUFFERED, \
FILE_ANY_ACCESS)
@ -352,7 +342,7 @@ enum IOCTLCmd {
#define IOCTL_VMX86_REMAP_SCATTER_LIST_RO VMIOCTL_BUFFERED(REMAP_SCATTER_LIST_RO)
#define IOCTL_VMX86_UNMAP_SCATTER_LIST VMIOCTL_BUFFERED(UNMAP_SCATTER_LIST)
#define IOCTL_VMX86_KERNEL_CET_ENABLED VMIOCTL_BUFFERED(KERNEL_CET_ENABLED)
#endif
#endif // defined _WIN32
#define INIT_BLOCK_MAGIC (0x1789 + 14)
@ -589,10 +579,6 @@ typedef union {
Context64 context; // OUT
} VMMonContext;
#if defined __APPLE__
# include "iocontrolsMacos.h"
#endif
/* Clean up helper macros */
#undef IOCTLCMD
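
With the Apple-only numbering branch gone, IOCTLCMD(FIRST) is unconditionally 2001, and on Windows each command is encoded with the standard CTL_CODE formula, (device << 16) | (access << 14) | (function << 2) | method. A worked example for VERSION, assuming the DDK values METHOD_BUFFERED == 0 and FILE_ANY_ACCESS == 0:

function            = VMX86_IOCTL_BASE_INDEX + IOCTLCMD_VERSION
                    = 0x801 + 2001 = 0xFD2
IOCTL_VMX86_VERSION = (0x8101 << 16) | (0xFD2 << 2) = 0x81013F48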

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2021 VMware, Inc. All rights reserved.
* Copyright (c) 1998-2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -34,7 +34,6 @@
#include "ptsc.h"
#include "vcpuid.h"
#include "vcpuset.h"
#include "vmm_constants.h"
#include "contextinfo.h"
#include "rateconv.h"
#include "mon_assert.h"
@ -363,7 +362,7 @@ typedef struct VMCrossPageData {
} VMCrossPageData;
#pragma pack(pop)
#define CROSSPAGE_VERSION_BASE 0xc14 /* increment by 1 */
#define CROSSPAGE_VERSION_BASE 0xc15 /* increment by 1 */
#define CROSSPAGE_VERSION ((CROSSPAGE_VERSION_BASE << 1) + WS_INTR_STRESS)
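
Worked out, the bump yields a version of 0xc15 << 1 = 0x182A for a WS_INTR_STRESS=0 build (0x182B with stress enabled); encoding the stress flag in the low bit keeps stress and non-stress builds distinguishable at version-check time.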
#if !defined(VMX86_SERVER) && defined(VMM)

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2015-2020 VMware, Inc. All rights reserved.
* Copyright (c) 2015-2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2016-2019 VMware, Inc. All rights reserved.
* Copyright (c) 2016-2019 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -20,8 +20,8 @@
* nonsense here. This file is for _constants_.
*/
#ifndef _VMM_CONSTANTS_H_
#define _VMM_CONSTANTS_H_
#ifndef _MON_CONSTANTS_H_
#define _MON_CONSTANTS_H_
#define INCLUDE_ALLOW_USERLEVEL
@ -31,19 +31,19 @@
#include "includeCheck.h"
#define VMM_PANIC_NONE 0
#define VMM_PANIC_MSG_COPYING 1
#define VMM_PANIC_MSG_COPIED 2
#define VMM_PANIC_COREDUMPING 3
#define VMM_PANIC_VCPU 4
#define MON_PANIC_NONE 0
#define MON_PANIC_MSG_COPYING 1
#define MON_PANIC_MSG_COPIED 2
#define MON_PANIC_COREDUMPING 3
#define MON_PANIC_VCPU 4
#define VMM_PANIC_MSG_SIZE 256
#define MON_PANIC_MSG_SIZE 256
/* Ensure enough space for obj build with GCOV_VMM=1. */
#if defined(VMX86_SERVER)
#define VMMBLOB_SIZE_MAX (24 * 1024 * 1024)
#define VMMBLOB_SIZE_MAX (25 * 1024 * 1024)
#else
#define VMMBLOB_SIZE_MAX (24 * 1024 * 1024)
#define VMMBLOB_SIZE_MAX (25 * 1024 * 1024)
#endif
#endif

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2001-2013 VMware, Inc. All rights reserved.
* Copyright (C) 2001-2013, 2022-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -19,7 +19,7 @@
/*
* overheadmem_types.h
*
* Types for tracking memory overheads.
* Types for tracking memory overheads.
*/
#ifndef _OVERHEADMEM_TYPES_H
@ -30,6 +30,7 @@
#define INCLUDE_ALLOW_VMMON
#define INCLUDE_ALLOW_VMCORE
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_VMKERNEL
#include "includeCheck.h"
#include "vm_basic_types.h"
@ -128,4 +129,12 @@ typedef struct OvhdMemNode {
OvhdMemType type; // how/where memory for source is managed
} OvhdMemNode;
typedef struct OvhdMemStats {
OvhdMemNode *anonNodes;
unsigned *usedByCategory;
unsigned *rsvdByCategory;
unsigned *maxUsedByCategory;
unsigned *maxRsvdByCategory;
} OvhdMemStats;
#endif

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2012,2014-2019 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2012,2014-2019,2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -37,8 +37,8 @@
#define PERF_EVENT_NAME_LEN 64
/*
* nmiNo -- vmm peer is not attempting to do nmi profiling this run
* nmiYes -- vmm peer is doing nmi profiling and nmis are currently enabled
* nmiNo -- vmm peer is not attempting to do nmi profiling this run.
* nmiYes -- vmm peer is doing nmi profiling and nmis are currently enabled.
* nmiStopped -- vmm peer is doing nmi profiling, but nmis are temporarily
* disabled for safety reasons.
*/
@ -47,6 +47,7 @@ typedef struct NMIShared { /* shared with vmx and vmkernel */
NMIStatus vmmStatus;
int32 nmiErrorCode;
int64 nmiErrorData;
NMI_SHARED_ARCH_FIELDS
} NMIShared;
/*

View File

@ -40,6 +40,9 @@
#include "vm_asm.h"
#include "x86cpuid_asm.h"
#define NMI_SHARED_ARCH_FIELDS \
uint64 nmiMaskedTSC;
#define PERFCTR_AMD_NUM_COUNTERS 4
#define PERFCTR_AMD_EXT_NUM_COUNTERS 6
#define PERFCTR_P6_NUM_COUNTERS 2

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2002,2014-2017 VMware, Inc. All rights reserved.
* Copyright (C) 2002,2014-2017,2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -50,11 +50,10 @@ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
VA addr) // IN: Address in the virtual address
// space of that process
{
pgd_t *pgd;
compat_p4d_t *p4d;
MPN mpn;
pgd_t *pgd = pgd_offset(mm, addr);
pgd = pgd_offset(mm, addr);
if (pgd_present(*pgd) == 0) {
return INVALID_MPN;
}
@ -71,27 +70,28 @@ PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
if (compat_p4d_large(*p4d)) {
mpn = compat_p4d_pfn(*p4d) + ((addr & ~COMPAT_P4D_MASK) >> PAGE_SHIFT);
} else {
pud_t *pud;
pud_t *pud = pud_offset(p4d, addr);
pud = pud_offset(p4d, addr);
if (pud_present(*pud) == 0) {
return INVALID_MPN;
}
if (pud_large(*pud)) {
mpn = pud_pfn(*pud) + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
} else {
pmd_t *pmd;
pmd_t *pmd = pmd_offset(pud, addr);
pmd = pmd_offset(pud, addr);
if (pmd_present(*pmd) == 0) {
return INVALID_MPN;
}
if (pmd_large(*pmd)) {
mpn = pmd_pfn(*pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
} else {
pte_t *pte;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6,5,0)
pte_t *pte = pte_offset_kernel(pmd, addr);
#else
pte_t *pte = pte_offset_map(pmd, addr);
#endif
pte = pte_offset_map(pmd, addr);
if (pte_present(*pte) == 0) {
pte_unmap(pte);
return INVALID_MPN;
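
The new LINUX_VERSION_CODE guard selects pte_offset_kernel() on 6.5+ kernels, where the pte_offset_map() interface was reworked. A hedged compat-wrapper sketch of the same pattern (the wrapper name is illustrative, not from this tree):

#include <linux/version.h>

static inline pte_t *
compat_pte_offset(pmd_t *pmd, unsigned long addr)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 5, 0)
   return pte_offset_kernel(pmd, addr);  /* 6.5+: no highmem mapping step */
#else
   return pte_offset_map(pmd, addr);     /* older: caller pairs with pte_unmap() */
#endif
}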

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2014,2017,2019-2021 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2014,2017,2019-2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -125,8 +125,6 @@ PTSC_MHz(void)
return ptscInfo.mHz;
}
#if defined(VM_X86_64) || defined(VM_ARM_64)
/*
* Conversions to/from cycles. Note that the conversions operate on
* signed values, so be careful when taking the difference of two
@ -158,16 +156,6 @@ PTSC_CyclesToUS(VmRelativeTS ts)
return Muls64x32s64(ts, ptscInfo.cyclesToUs.mult, ptscInfo.cyclesToUs.shift);
}
#else
/* 32-bit Muls64x32s64 too big to justify inlining. */
VmRelativeTS PTSC_USToCycles(int64 us);
VmRelativeTS PTSC_MSToCycles(int64 ms);
int64 PTSC_CyclesToNS(VmRelativeTS ts);
int64 PTSC_CyclesToUS(VmRelativeTS ts);
#endif
#if defined(VMX86_SERVER) && (defined(VMX86_VMX) || defined (ULM_ESX))
/*
@ -184,7 +172,7 @@ PTSC_Get(void)
if (vmkUserTdata.magic != USER_THREADDATA_MAGIC) {
return 0;
}
ptsc = vmkUserTdata.u.pseudoTSCGet(&vmkUserTdata);
ptsc = vmkUserTdata.pseudoTSCGet(&vmkUserTdata);
ASSERT((int64)ptsc >= 0);
return ptsc;
}

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2003-2016, 2018 VMware, Inc. All rights reserved.
* Copyright (C) 2003-2016, 2018-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -45,6 +45,10 @@
#include "vm_atomic.h"
#include "versioned_atomic.h"
#if defined(VM_ARM_ANY) && defined(_MSC_VER)
#include "mul64.h"
#endif
#if defined __cplusplus
extern "C" {
#endif

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2018-2022 VMware, Inc. All rights reserved.
* Copyright (C) 2018-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -49,12 +49,27 @@
#define GDT_SIZE (sizeof(Descriptor) * NUM_VALID_SEGMENTS)
#define GDT_LIMIT (GDT_SIZE - 1)
/*
* This is one more than max GDT limit value.
*/
#define VMK_GDT_SIZE 0x10000
/*
* In the vmkernel the GDTR limit is set to the maximum because this will help
* the ULM/KLM to minimize the cost of a vmexit. After a vmexit the KLM doesn't
* need to restore the GDTR if the host GDTR limit is set to the maximum
* as Intel's VT restores the GDTR but sets the limit to the maximum value.
* Please see details 27.5.2 Loading Host Segment and Descriptor-Table
* Registers from the Intel manual.
*/
#define VMK_GDT_LIMIT (VMK_GDT_SIZE - 1)
#define IRB_SIZE 32 /* Interrupt redirection bitmap. */
#define TSS_SIZE (sizeof(Task64) + IRB_SIZE)
#define PCPU_DATA_VA (VPN_2_VA(GDT_AND_TASK_START))
#define GDT_START_VA (PCPU_DATA_VA + PCPU_DATA_SIZE)
#define TASK_START_VA (GDT_START_VA + GDT_SIZE)
#define TASK_START_VA (PCPU_DATA_VA + PCPU_DATA_SIZE)
#define GDT_START_VA (TASK_START_VA + TSS_SIZE)
/*
* vmkBoot uses some of the lower-numbered segments, as do host kernels on
@ -67,6 +82,13 @@
#define NUM_SYSTEM_SEGMENTS 2
#define NUM_TASK_SEGMENTS 2
#define NUM_TOTAL_SEGMENTS ((VMK_GDT_SIZE) / sizeof(Descriptor))
#define NUM_MAP_SEGMENTS (NUM_BOOT_SEGMENTS + NUM_USER_SEGMENTS + \
NUM_SYSTEM_SEGMENTS + \
(2 * (sizeof(Descriptor64) / \
sizeof(Descriptor))))
#define NUM_PAD_SEGMENTS (NUM_TOTAL_SEGMENTS - NUM_MAP_SEGMENTS)
#define FIRST_USER_SEGMENT NUM_BOOT_SEGMENTS
#define FIRST_SYSTEM_SEGMENT (PAGE_SIZE / sizeof(Descriptor) - \
NUM_SYSTEM_SEGMENTS - \
@ -74,19 +96,6 @@
TSS_SIZE / sizeof(Descriptor) - \
PCPU_DATA_SIZE / sizeof(Descriptor))
#define GDT_USER_TLS_MIN USER_TLS_1_SEGMENT
#define GDT_USER_TLS_MAX USER_TLS_3_SEGMENT
#define USER_TLS_COUNT ((USER_TLS_3_SEGMENT - USER_TLS_1_SEGMENT) + 1)
#define FOREACH_USER_TLS_INDEX(_i) \
{ \
unsigned _i; \
for (_i = 0; _i < USER_TLS_COUNT; _i++) { \
#define FOREACH_USER_TLS_INDEX_DONE \
} \
}
#define NULL_LDTR 0
/*
@ -94,10 +103,6 @@
* well as higher-numbered segments, though the vmkernel should not
* use monitor-private segments.
*
* The descriptor after SYSTEM_CODE_SEGMENT (loaded into %cs) must be
* appropriate for %ss because of the syscall instruction for 64-bit
* user worlds. Thus SYSTEM_DATA_SEGMENT is directly after it.
*
* The monitor segments are placed at the end of the GDT. The high
* segment placement for the monitor ensures that there is no
* selector-overlap with hosted kernel segments; the hosted world
@ -108,14 +113,8 @@ typedef enum VmwSegs {
NULL_SEGMENT = 0,
/* (... reserved for host operating system or vmkBoot segments). */
USER32_CODE_SEGMENT = FIRST_USER_SEGMENT,
USER_DATA_SEGMENT,
USER64_SYSRET_SEGMENT,
USER64_STACK_SEGMENT,
USER64_CODE_SEGMENT,
USER_TLS_1_SEGMENT,
USER_TLS_2_SEGMENT,
USER_TLS_3_SEGMENT,
USER_DATA_STACK_SEGMENT = FIRST_USER_SEGMENT,
USER_CODE_SEGMENT,
AFTER_LAST_USER_SEGMENT,
@ -137,18 +136,15 @@ typedef enum VmwSegs {
MAKE_SELECTOR_UNCHECKED(x##_SEGMENT, SELECTOR_GDT, 3)
/* Selectors used statically in code or in assembly must be unchecked. */
#define SYSTEM_NULL_SELECTOR GDT_SYSTEM_SEL(NULL)
#define SYSTEM_NULL_SELECTOR GDT_SYSTEM_SEL(NULL)
#ifdef VMKERNEL
/* USER32_CODE_SELECTOR is also defined in mach/i386/thread_status.h */
#define USER32_CODE_SELECTOR GDT_USER_SEL_UNCHECKED(USER32_CODE)
#define USER_DATA_SELECTOR GDT_USER_SEL_UNCHECKED(USER_DATA)
#define USER64_CODE_SELECTOR GDT_USER_SEL_UNCHECKED(USER64_CODE)
#define USER64_SYSRET_SELECTOR GDT_USER_SEL(USER64_SYSRET)
#define USER_CODE_SELECTOR GDT_USER_SEL_UNCHECKED(USER_CODE)
#define USER_DATA_STACK_SELECTOR GDT_USER_SEL_UNCHECKED(USER_DATA_STACK)
#endif
#define SYSTEM_CODE_SELECTOR GDT_SYSTEM_SEL_UNCHECKED(SYSTEM_CODE)
#define SYSTEM_DATA_SELECTOR GDT_SYSTEM_SEL_UNCHECKED(SYSTEM_DATA)
#define MONITOR_TASK_SELECTOR GDT_SYSTEM_SEL(MONITOR_TASK)
#define VMKERNEL_TASK_SELECTOR GDT_SYSTEM_SEL(VMKERNEL_TASK)
#define SYSTEM_CODE_SELECTOR GDT_SYSTEM_SEL_UNCHECKED(SYSTEM_CODE)
#define SYSTEM_DATA_SELECTOR GDT_SYSTEM_SEL_UNCHECKED(SYSTEM_DATA)
#define MONITOR_TASK_SELECTOR GDT_SYSTEM_SEL(MONITOR_TASK)
#define VMKERNEL_TASK_SELECTOR GDT_SYSTEM_SEL(VMKERNEL_TASK)
/*
* This struct is shared between the vmkernel and the monitor. Since
@ -172,20 +168,29 @@ typedef struct PcpuData {
/*
* The VMM GDT is comprised of many segment descriptors with one initial
* Task State Segment system descriptor. The VMM Task State Segment is
* on the same page sequentially after its GDT.
* on the same page just before GDT start VA. The base address of the
* GDT in GDTR register is set to address of empty descriptor.
*/
#pragma pack(push, 1)
typedef struct StaticGDTPage {
PcpuData pcpuData; /* Non-architectural. */
Task64 monTSS;
uint8 TSSIRBitmap[IRB_SIZE];
Descriptor empty[NUM_BOOT_SEGMENTS + NUM_USER_SEGMENTS];
Descriptor systemSegs[NUM_SYSTEM_SEGMENTS];
Descriptor64 vmkTask;
Descriptor64 monTask;
Task64 monTSS;
uint8 TSSIRBitmap[IRB_SIZE];
} StaticGDTPage;
#pragma pack(pop)
/*
* The base address of the GDT in GDTR register is set to VmkernelGDT.
* The size of VmkernelGDT is 64K with 239 mapped entries and 7953
* pad entries (8 bytes per entry). We only reserve VA space for 7953
* pad entries and not map them in PTE. This is done so that we could
* set GDTR limit to maximum value (that is 64K - 1). Please see
* bora/main/doc/gdtLim.txt for more details.
*/
#pragma pack(push, 1)
typedef struct VmkernelGDT {
Descriptor bootSegs[NUM_BOOT_SEGMENTS];
@ -193,16 +198,17 @@ typedef struct VmkernelGDT {
Descriptor systemSegs[NUM_SYSTEM_SEGMENTS]; /* VMM/VMK-shared. */
Descriptor64 vmkTask;
Descriptor64 monTask;
Descriptor padSegs[NUM_PAD_SEGMENTS];
} VmkernelGDT;
#pragma pack(pop)
#pragma pack(push, 1)
typedef struct VmkernelGDTPage {
typedef struct VmkernelGDTInfo {
PcpuData pcpuData; /* Non-architectural */
VmkernelGDT vmkGDT;
Task64 vmkTSS;
uint8 TSSIRBitmap[IRB_SIZE];
} VmkernelGDTPage;
VmkernelGDT vmkGDT;
} VmkernelGDTInfo;
#pragma pack(pop)
MY_ASSERTS(segs,
@ -212,11 +218,42 @@ MY_ASSERTS(segs,
ASSERT_ON_COMPILE(AFTER_LAST_USER_SEGMENT <= FIRST_SYSTEM_SEGMENT);
)
/*
* Invariants:
* 1) PcpuData, task and GDT mapped segments resides in the same page.
* VMM maps this 4KB page and refers it to locate task and GDT segments.
* HTSched maps this 4KB page of each host PCPU's GDT.
* 2) GDT_START_VA should point to the mapped segments in VmkernelGDTInfo
* struct and also it should point to mapped segments in StaticGDTPage.
* The offset of mapped segments in VmkernelGDTInfo and StaticGDTPage
* should remain same.
* 3) The key data structures like systemSegs, vmkTask and monTask in
* the StaticGDTPage and VmkernelGDTInfo structs align.
*/
MY_ASSERTS(pcpuData,
ASSERT_ON_COMPILE(sizeof(PcpuData) == PCPU_DATA_SIZE);
ASSERT_ON_COMPILE(offsetof(VmkernelGDTPage, vmkGDT) ==
PCPU_DATA_SIZE);
ASSERT_ON_COMPILE(sizeof(VmkernelGDTPage) == PAGE_SIZE);
ASSERT_ON_COMPILE(offsetof(VmkernelGDTInfo, vmkGDT) ==
PCPU_DATA_SIZE + TSS_SIZE);
ASSERT_ON_COMPILE(sizeof(VmkernelGDTInfo) == PCPU_DATA_SIZE +
TSS_SIZE + VMK_GDT_SIZE);
ASSERT_ON_COMPILE((PCPU_DATA_SIZE + TSS_SIZE +
sizeof(Descriptor) * NUM_MAP_SEGMENTS) ==
PAGE_SIZE);
ASSERT_ON_COMPILE(sizeof(VmkernelGDTInfo) -
sizeof(Descriptor) * NUM_PAD_SEGMENTS ==
PAGE_SIZE);
ASSERT_ON_COMPILE(sizeof(StaticGDTPage) == PAGE_SIZE);
ASSERT_ON_COMPILE((PCPU_DATA_SIZE + TSS_SIZE +
offsetof(VmkernelGDT, systemSegs)) ==
offsetof(StaticGDTPage, systemSegs));
ASSERT_ON_COMPILE((PCPU_DATA_SIZE + TSS_SIZE +
offsetof(VmkernelGDT, vmkTask)) ==
offsetof(StaticGDTPage, vmkTask));
ASSERT_ON_COMPILE((PCPU_DATA_SIZE + TSS_SIZE +
offsetof(VmkernelGDT, monTask)) ==
offsetof(StaticGDTPage, monTask));
ASSERT_ON_COMPILE(offsetof(VmkernelGDTInfo, vmkTSS) ==
offsetof(StaticGDTPage, monTSS));
)
#endif /* _SEGS_H_ */
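
The 239/7953 split quoted in the comment checks out against the new macros, assuming sizeof(Descriptor) == 8 and taking NUM_MAP_SEGMENTS = 239 from the comment:

NUM_TOTAL_SEGMENTS = VMK_GDT_SIZE / sizeof(Descriptor) = 0x10000 / 8 = 8192
NUM_PAD_SEGMENTS   = NUM_TOTAL_SEGMENTS - NUM_MAP_SEGMENTS = 8192 - 239 = 7953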

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2002-2021 VMware, Inc. All rights reserved.
* Copyright (c) 2002-2021 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -47,25 +47,16 @@
#define VCS_VCPUID_SUBSET_IDX(v) ((v) >> VCS_SUBSET_SHIFT)
#define VCS_VCPUID_SUBSET_BIT(v) (CONST64U(1) << ((v) & VCS_SUBSET_MASK))
/*
* If you update this type, you also need to update the SEND_IPI line in
* bora/public/iocontrolsMacosTable.h.
*/
typedef struct VCPUSet {
uint64 subset[VCS_SUBSET_COUNT];
} VCPUSet;
MY_ASSERTS(VCPUSET_ASSERTS,
ASSERT_ON_COMPILE(VCS_SUBSET_WIDTH * VCS_SUBSET_COUNT >= MAX_VCPUS);
/*
* Catch changes in VCPUSet which need to be reflected in
* bora/public/iocontrolsMacosTable.h.
*/
ASSERT_ON_COMPILE(VCS_SUBSET_COUNT == 32);
/*
* There is code that depends on sizeof(VCPUSet) being a power of
* 2 in at least vcpuHotPlug.c and possible other places.
*/
ASSERT_ON_COMPILE((sizeof(VCPUSet) & (sizeof(VCPUSet) - 1)) == 0);
ASSERT_ON_COMPILE((sizeof(VCPUSet) & (sizeof(VCPUSet) - 1)) == 0);
)
#endif

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998,2007-2021 VMware, Inc. All rights reserved.
* Copyright (c) 1998,2007-2021 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2008-2022 VMware, Inc. All rights reserved.
* Copyright (C) 2008-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -74,9 +74,7 @@
VT_VMCS_2ND_VMEXEC_CTL_APIC | \
VT_VMCS_2ND_VMEXEC_CTL_PML | \
VT_VMCS_2ND_VMEXEC_CTL_ENCLS | \
VT_VMCS_2ND_VMEXEC_CTL_ENCLV | \
VT_VMCS_2ND_VMEXEC_CTL_UMWAIT | \
VT_VMCS_2ND_VMEXEC_CTL_EPC_VIRT_EXT)
VT_VMCS_2ND_VMEXEC_CTL_UMWAIT)
#define VVT_2ND_CTLS QWORD(VVT_2ND_CTLS1, \
VVT_2ND_CTLS0)

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2014,2016-2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2014,2016-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -705,4 +705,62 @@ RDTSCP(void)
#endif
}
static INLINE void
MONITOR(void *addr, uint32 extensions, uint32 hints)
{
/*
* Flush the monitored cache line to work around hardware bug
* on Dunnington CPUs which can cause false wakeups.
* (cf. PowerSetCState in the vmkernel.)
*/
#ifdef __GNUC__
__asm__ __volatile__(
"clflush (%0);"
"monitor" : : "a" (addr), "c" (extensions), "d" (hints)
);
#elif defined _MSC_VER
_mm_clflush(addr);
_mm_monitor(addr, extensions, hints);
#endif
}
static INLINE void
MWAIT(uint32 extensions, uint32 hints)
{
#ifdef __GNUC__
__asm__ __volatile__(
"mwait" : : "a" (hints), "c" (extensions)
);
#elif defined _MSC_VER
_mm_mwait(extensions, hints);
#endif
}
static INLINE void
MONITORX(void *addr, uint32 extensions, uint32 hints)
{
#ifdef __GNUC__
__asm__ __volatile__(
"monitorx" : : "a" (addr), "c" (extensions), "d" (hints)
);
#elif defined _MSC_VER
_mm_monitorx(addr, extensions, hints);
#endif
}
static INLINE void
MWAITX(uint32 extensions, uint32 hints, uint32 timeout)
{
#ifdef __GNUC__
__asm__ __volatile__(
"mwaitx" : : "a" (hints), "b" (timeout), "c" (extensions)
);
#elif defined _MSC_VER
_mm_mwaitx(extensions, hints, timeout);
#endif
}
#endif
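
Typical use pairs MONITOR with a re-check before MWAIT so that a write landing between arming and waiting is not lost. A hedged idle-wait sketch using the helpers above (wakeFlag and the surrounding loop are illustrative; callers must first confirm CPUID.1:ECX.MONITOR support):

static volatile uint32 wakeFlag;

static void
IdleUntilWoken(void)
{
   while (wakeFlag == 0) {
      MONITOR((void *)&wakeFlag, 0, 0);  /* arm monitor on the flag's line  */
      if (wakeFlag == 0) {               /* re-check: avoid a lost wakeup   */
         MWAIT(0, 0);                    /* sleep until the line is written */
      }
   }
}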

File diff suppressed because it is too large.


View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2018-2021 VMware, Inc. All rights reserved.
* Copyright (C) 2018-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -165,7 +165,10 @@ Atomic_Read8Acquire(Atomic_uint8 const *var) // IN:
{
uint8 val;
#if defined __GNUC__
#if defined VM_ATOMIC_USE_C11
val = atomic_load_explicit((_Atomic uint8 *)&var->value,
memory_order_acquire);
#elif defined __GNUC__
# if defined __i386__ || defined __x86_64__
__asm__ __volatile__(
"movb %1, %0"
@ -224,7 +227,10 @@ Atomic_Read16Acquire(Atomic_uint16 const *var) // IN:
ASSERT((uintptr_t)var % 2 == 0);
#if defined __GNUC__
#if defined VM_ATOMIC_USE_C11
val = atomic_load_explicit((_Atomic uint16 *)&var->value,
memory_order_acquire);
#elif defined __GNUC__
# if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"movw %1, %0"
@ -283,7 +289,10 @@ Atomic_Read32Acquire(Atomic_uint32 const *var) // IN:
ASSERT((uintptr_t)var % 4 == 0);
#if defined __GNUC__
#if defined VM_ATOMIC_USE_C11
val = atomic_load_explicit((_Atomic uint32 *)&var->value,
memory_order_acquire);
#elif defined __GNUC__
/*
* Use inline assembler to force using a single load instruction to
* ensure that the compiler doesn't split a transfer operation into multiple
@ -351,7 +360,10 @@ Atomic_Read64Acquire(Atomic_uint64 const *var) // IN:
ASSERT((uintptr_t)var % 8 == 0);
#if defined __GNUC__
#if defined VM_ATOMIC_USE_C11
val = atomic_load_explicit((_Atomic uint64 *)&var->value,
memory_order_acquire);
#elif defined __GNUC__
# if defined __x86_64__
__asm__ __volatile__(
"movq %1, %0"
@ -445,7 +457,10 @@ static INLINE void
Atomic_Write8Release(Atomic_uint8 *var, // OUT:
uint8 val) // IN:
{
#if defined __GNUC__
#if defined VM_ATOMIC_USE_C11
atomic_store_explicit((_Atomic uint8 *)&var->value, val,
memory_order_release);
#elif defined __GNUC__
# if defined __i386__ || defined __x86_64__
__asm__ __volatile__(
"movb %1, %0"
@ -501,7 +516,10 @@ Atomic_Write16Release(Atomic_uint16 *var, // OUT:
{
ASSERT((uintptr_t)var % 2 == 0);
#if defined __GNUC__
#if defined VM_ATOMIC_USE_C11
atomic_store_explicit((_Atomic uint16 *)&var->value, val,
memory_order_release);
#elif defined __GNUC__
# if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"movw %1, %0"
@ -557,7 +575,10 @@ Atomic_Write32Release(Atomic_uint32 *var, // OUT:
{
ASSERT((uintptr_t)var % 4 == 0);
#if defined __GNUC__
#if defined VM_ATOMIC_USE_C11
atomic_store_explicit((_Atomic uint32 *)&var->value, val,
memory_order_release);
#elif defined __GNUC__
# if defined __x86_64__ || defined __i386__
__asm__ __volatile__(
"movl %1, %0"
@ -618,7 +639,10 @@ Atomic_Write64Release(Atomic_uint64 *var, // OUT:
ASSERT((uintptr_t)var % 8 == 0);
#if defined __GNUC__
#if defined VM_ATOMIC_USE_C11
atomic_store_explicit((_Atomic uint64 *)&var->value, val,
memory_order_release);
#elif defined __GNUC__
# if defined __x86_64__
__asm__ __volatile__(
"movq %1, %0"
@ -741,31 +765,43 @@ MAKE_ATOMIC_ACQREL_FUNCS(Bool, 8, Bool, Bool, Bool)
static INLINE void
Atomic_FenceAcquire(void)
{
// C11 atomic_thread_fence(memory_order_acquire);
#ifdef VM_ATOMIC_USE_C11
atomic_thread_fence(memory_order_acquire);
#else
SMP_R_BARRIER_RW();
#endif
}
static INLINE void
Atomic_FenceRelease(void)
{
// C11 atomic_thread_fence(memory_order_release);
#ifdef VM_ATOMIC_USE_C11
atomic_thread_fence(memory_order_release);
#else
SMP_RW_BARRIER_W();
#endif
}
static INLINE void
Atomic_FenceAcqRel(void)
{
// C11 atomic_thread_fence(memory_order_acq_rel);
#ifdef VM_ATOMIC_USE_C11
atomic_thread_fence(memory_order_acq_rel);
#else
/* R_RW + RW_W is generally cheaper than RW_RW (W_R is expensive) */
SMP_R_BARRIER_RW();
SMP_RW_BARRIER_W();
#endif
}
static INLINE void
Atomic_FenceSeqCst(void)
{
// C11 atomic_thread_fence(memory_order_seq_cst);
#ifdef VM_ATOMIC_USE_C11
atomic_thread_fence(memory_order_seq_cst);
#else
SMP_RW_BARRIER_RW();
#endif
}
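
The VM_ATOMIC_USE_C11 paths map directly onto standard <stdatomic.h> acquire/release semantics. A self-contained C11 sketch of the pairing these helpers provide (plain C11 types here, not the header's Atomic_uint32):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint32_t ready;
static uint32_t payload;

void
Producer(void)
{
   payload = 42;                                            /* plain store */
   atomic_store_explicit(&ready, 1, memory_order_release);  /* publish     */
}

uint32_t
Consumer(void)
{
   while (atomic_load_explicit(&ready, memory_order_acquire) == 0) {
      /* spin; the acquire load pairs with the release store above */
   }
   return payload;                                          /* observes 42 */
}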
#ifdef VM_ARM_64

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2003-2022 VMware, Inc. All rights reserved.
* Copyright (c) 2003-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -61,6 +61,12 @@
#include "mul64.h"
#endif
#if defined _M_ARM64EC
#include "vm_assert.h"
#define MUL64_NO_ASM 1
#include "mul64.h"
#endif
#if defined __cplusplus
extern "C" {
#endif
@ -95,7 +101,7 @@ extern "C" {
* mssb64 MSB set (uint64) 1..64 0
*/
#ifdef _MSC_VER
#if defined(_MSC_VER) && !defined(__clang__) // Clang defines _MSC_VER on Windows
static INLINE int
lssb32_0(const uint32 value)
{
@ -773,7 +779,7 @@ RDTSC(void)
* bora/lib/vprobe/arm64/vp_emit_tc.c::VpEmit_BuiltinRDTSCWork()
* bora/modules/vmkernel/tests/core/xmapTest/xmapTest_arm64.c::XMapTest_SetupLoopCode()
*/
#if (defined(VMKERNEL) || defined(VMM)) && !defined(VMK_ARM_EL1_OR_VHE)
#if defined(VMKERNEL) && !defined(VMK_ARM_EL1_OR_VHE)
return MRS(CNTPCT_EL0);
#else
return MRS(CNTVCT_EL0);
@ -1117,7 +1123,7 @@ RoundUpPow2Asm32(uint32 value)
// if out == 2^32 then out = 1 as it is right rotate
: [in]"+r"(value),[out]"+r"(out));
return out;
#elif defined(VM_ARM_64)
#elif defined(VM_ARM_64) || defined(__wasm__)
return RoundUpPow2C32(value);
#else
uint32 out = 2;

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2021 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -57,7 +57,7 @@ extern "C" {
* constraints.
*
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
#if (defined(VMM) || defined(VMKERNEL) || defined(FROBOS) || defined(ULM))
static INLINE Bool
xtest(void)
{
@ -66,14 +66,18 @@ xtest(void)
__asm__ __volatile__("xtest\n"
"setnz %%al"
: "=a" (result) : : "cc");
#else
#elif defined (__GNUC__)
__asm__ __volatile__("xtest"
: "=@ccnz" (result) : : "cc");
#elif defined (_WIN32)
result = _xtest();
#else
#error No xtest implementation for this compiler.
#endif
return result;
}
#endif /* __GNUC__ */
#endif /* VMM || VMKERNEL || FROBOS || ULM */
/*

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2021 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -331,7 +331,7 @@ XRSTORS(const void *load, uint64 mask)
* constraints.
*
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
#if (defined(VMM) || defined(VMKERNEL) || defined(FROBOS) || defined(ULM))
static INLINE Bool
xtest(void)
{
@ -340,14 +340,18 @@ xtest(void)
__asm__ __volatile__("xtest\n"
"setnz %%al"
: "=a" (result) : : "cc");
#else
#elif defined(__GNUC__)
__asm__ __volatile__("xtest"
: "=@ccnz" (result) : : "cc");
#elif defined (_WIN64)
result = _xtest();
#else
#error No xtest implementation for this compiler.
#endif
return result;
}
#endif /* __GNUC__ */
#endif /* VMM || VMKERNEL || FROBOS || ULM */
/*
*-----------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2003-2022 VMware, Inc. All rights reserved.
* Copyright (C) 2003-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -19,7 +19,7 @@
/*
* vm_basic_defs.h --
*
* Standard macros for VMware source code.
* Standard macros for VMware source code.
*/
#ifndef _VM_BASIC_DEFS_H_
@ -36,14 +36,6 @@
#include "includeCheck.h"
#include "vm_basic_types.h" // For INLINE.
/* Checks for FreeBSD, filtering out VMKERNEL. */
#if !defined(VMKERNEL) && defined(__FreeBSD__)
#define __IS_FREEBSD__ 1
#else
#define __IS_FREEBSD__ 0
#endif
#define __IS_FREEBSD_VER__(ver) (__IS_FREEBSD__ && __FreeBSD_version >= (ver))
/*
* <stddef.h> provides definitions for:
* NULL, offsetof
@ -128,11 +120,11 @@ Max(int a, int b)
#define VMW_CLAMP(x, min, max) \
((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
#define ROUNDUP(x,y) (((x) + (y) - 1) / (y) * (y))
#define ROUNDDOWN(x,y) ((x) / (y) * (y))
#define ROUNDUPBITS(x, bits) (((uintptr_t) (x) + MASK(bits)) & ~MASK(bits))
#define ROUNDDOWNBITS(x, bits) ((uintptr_t) (x) & ~MASK(bits))
#define CEILING(x, y) (((x) + (y) - 1) / (y))
#define ROUNDUP(x,y) (((x) + (y) - 1) / (y) * (y))
#define ROUNDDOWN(x,y) ((x) / (y) * (y))
#define ROUNDUPBITS(x, bits) (((uintptr_t)(x) + MASK(bits)) & ~MASK(bits))
#define ROUNDDOWNBITS(x, bits) ((uintptr_t)(x) & ~MASK(bits))
#define CEILING(x, y) (((x) + (y) - 1) / (y))
#if defined VMKERNEL || defined VMKBOOT
# define CEIL(_a, _b) CEILING(_a, _b)
@ -157,8 +149,9 @@ Max(int a, int b)
* argument. The range 0..31 is safe.
*/
#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
#define MASK64(n) ((CONST64U(1) << (n)) - 1) /* make an n-bit mask */
#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
#define MASK64(n) ((CONST64U(1) << (n)) - 1) /* make an n-bit mask */
#define MASK128(n) (((uint128)1 << (n)) - 1) /* make an n-bit mask */
/*
* MASKRANGE64 makes a bit vector starting at bit lo and ending at bit hi. No
* checking for lo < hi is done.
@ -187,7 +180,9 @@ Max(int a, int b)
#define XCONC(x, y) CONC(x, y)
#define XXCONC(x, y) XCONC(x, y)
#define MAKESTR(x) #x
#ifndef XSTR
#define XSTR(x) MAKESTR(x)
#endif
/*
@ -227,6 +222,8 @@ Max(int a, int b)
#define PAGE_SHIFT PAGE_SHIFT_4KB
#elif defined __arm__
#define PAGE_SHIFT PAGE_SHIFT_4KB
#elif defined __wasm__
#define PAGE_SHIFT PAGE_SHIFT_4KB
#else
#error
#endif
@ -260,15 +257,6 @@ Max(int a, int b)
#define PAGE_NUMBER(_addr) ((uintptr_t)(_addr) / PAGE_SIZE)
#endif
#ifndef VM_PAGE_BASE
#define VM_PAGE_BASE(_addr) ((_addr) & ~(PAGE_SIZE - 1))
#endif
#ifndef VM_PAGES_SPANNED
#define VM_PAGES_SPANNED(_addr, _size) \
((((_addr) & (PAGE_SIZE - 1)) + (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
#endif
#ifndef BYTES_2_PAGES
#define BYTES_2_PAGES(_nbytes) ((_nbytes) >> PAGE_SHIFT)
#endif
@ -281,6 +269,16 @@ Max(int a, int b)
#define PAGES_2_BYTES(_npages) (((uint64)(_npages)) << PAGE_SHIFT)
#endif
#ifndef VM_PAGE_BASE
#define VM_PAGE_BASE(_addr) ((_addr) & ~(PAGE_SIZE - 1))
#endif
#ifndef VM_PAGES_SPANNED
#define VM_PAGES_SPANNED(_addr, _size) \
(BYTES_2_PAGES(PAGE_OFFSET(_addr) + PAGE_OFFSET(_size) + (PAGE_SIZE - 1)) + \
BYTES_2_PAGES(_size))
#endif
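
For reference, the conventional pages-spanned computation (the form of the removed definition above): with 4 KB pages, a 2-byte object starting at page offset 0xFFF spans (0xFFF + 2 + 0xFFF) >> 12 = 0x2000 >> 12 = 2 pages.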
#ifndef KBYTES_SHIFT
#define KBYTES_SHIFT 10
#endif
@ -428,9 +426,9 @@ Max(int a, int b)
*/
#define DEPOSIT_BITS(_src,_pos,_len,_target) { \
unsigned mask = ((1 << _len) - 1); \
unsigned shiftedmask = ((1 << _len) - 1) << _pos; \
_target = (_target & ~shiftedmask) | ((_src & mask) << _pos); \
unsigned mask = ((1 << _len) - 1); \
unsigned shiftedmask = ((1 << _len) - 1) << _pos; \
_target = (_target & ~shiftedmask) | ((_src & mask) << _pos); \
}
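
Worked example of the realigned macro: depositing _src = 0x5 into bits 4..6 (_pos = 4, _len = 3) of _target = 0xFF gives mask = 0x7, shiftedmask = 0x70, and _target = (0xFF & ~0x70) | (0x5 << 4) = 0x8F | 0x50 = 0xDF.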
@ -647,6 +645,12 @@ typedef int pid_t;
#define VMKERNEL_ONLY(x)
#endif
#ifdef COMP_TEST
#define vmx86_test 1
#else
#define vmx86_test 0
#endif
/*
* In MSVC, _WIN32 is defined as 1 when the compilation target is
* 32-bit ARM, 64-bit ARM, x86, or x64 (which implies _WIN64). This
@ -700,6 +704,18 @@ typedef int pid_t;
#define VMM_ONLY(x)
#endif
#ifdef VMX86_VMX
#define vmx86_vmx 1
#else
#define vmx86_vmx 0
#endif
#ifdef VMM_BOOTSTRAP
#define vmm_bootstrap 1
#else
#define vmm_bootstrap 0
#endif
#ifdef ULM
#define vmx86_ulm 1
#define ULM_ONLY(x) x
@ -776,6 +792,7 @@ typedef int pid_t;
lfMessageFont)
/* This is not intended to be thread-safe. */
#ifndef KBUILD_MODNAME
#define DO_ONCE(code) \
do { \
static MONITOR_ONLY(PERVCPU) Bool _doOnceDone = FALSE; \
@ -784,6 +801,7 @@ typedef int pid_t;
code; \
} \
} while (0)
#endif
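
The new KBUILD_MODNAME guard presumably avoids a clash with the Linux kernel's own DO_ONCE() when building as a module. A minimal usage sketch (Log() stands in for whatever logging call the caller has):

static void
SubsystemPoke(void)
{
   DO_ONCE(Log("subsystem initialized\n"));  /* body runs on first call only */
}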
/*
* Bug 827422 and 838523.

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
* Copyright (c) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -39,11 +39,11 @@
/*
* Standardize MSVC arch macros to GCC arch macros.
*/
#if defined _MSC_VER && defined _M_X64
#if defined _MSC_VER && defined _M_X64 && !defined _M_ARM64EC
# define __x86_64__ 1
#elif defined _MSC_VER && defined _M_IX86
# define __i386__ 1
#elif defined _MSC_VER && defined _M_ARM64
#elif defined _MSC_VER && (defined _M_ARM64 || defined _M_ARM64EC)
# define __aarch64__ 1
#elif defined _MSC_VER && defined _M_ARM
# define __arm__ 1
@ -163,7 +163,7 @@
* - Linux userlevel uses 'long' uint64_t
* - Windows uses 'long long' uint64_t
*/
#if !defined(VMKERNEL) && !defined(DECODERLIB) && \
#if !defined(VMKERNEL) && \
defined(__linux__) && defined(__KERNEL__)
# include <linux/types.h>
# include <linux/version.h>
@ -205,7 +205,7 @@
* - VMM does not have POSIX headers
* - Windows <sys/types.h> does not define ssize_t
*/
#if defined(VMKERNEL) || defined(VMM) || defined(DECODERLIB)
#if defined(VMKERNEL) || defined(VMM)
/* Guard against FreeBSD <sys/types.h> collison. */
# if !defined(_SIZE_T_DEFINED) && !defined(_SIZE_T)
# define _SIZE_T_DEFINED
@ -290,11 +290,11 @@ typedef char Bool;
#if !defined(USING_AUTOCONF)
# if defined(__FreeBSD__) || defined(sun)
# ifndef KLD_MODULE
# if __FreeBSD_version >= 500043
# if defined(__FreeBSD__)
# if !defined(VMKERNEL)
# include <inttypes.h>
# endif
# else
# else /* sun */
# include <sys/inttypes.h>
# endif
# endif
@ -353,7 +353,7 @@ typedef int64 VmTimeVirtualClock; /* Virtual Clock kept in CPU cycles */
#define FMTPD "I"
#define FMTH "I"
#endif
#elif defined __APPLE__ || (!defined VMKERNEL && !defined DECODERLIB && \
#elif defined __APPLE__ || (!defined VMKERNEL && \
defined __linux__ && defined __KERNEL__)
/* semi-LLP64 targets; 'long' is 64-bit, but uint64_t is 'long long' */
#define FMT64 "ll"
@ -508,7 +508,7 @@ typedef uint16 UReg16;
typedef uint32 UReg32;
typedef uint64 UReg64;
#if defined(__GNUC__) && defined(__SIZEOF_INT128__)
#ifdef VM_HAS_INT128
typedef int128 Reg128;
typedef uint128 UReg128;
#endif

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2006-2021 VMware, Inc. All rights reserved.
* Copyright (C) 2006-2021,2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -275,4 +275,8 @@ typedef enum x86_FLAGS {
#define BNDCFG_RSVD 0x00000ffc
#define BNDCFG_BDBASE CONST64U(0xfffffffffffff000)
/* Reset state of RIP. */
#define RESET_RIP 0xfff0
#define RESET_RIP_TDX 0xfffffff0 /* Reset RIP for TDX protected mode boot. */
#endif // ifndef _VM_BASIC_DEFS_H_

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -148,7 +148,8 @@ typedef struct CPUIDQuery {
CPUIDLEVEL(TRUE, 1E, 0x1e, 1, 19) \
CPUIDLEVEL(FALSE, 1F, 0x1f, 6, 17) \
CPUIDLEVEL(TRUE, 20, 0x20, 1, 20) \
CPUIDLEVEL(TRUE , 21, 0x21, 1, 20) \
CPUIDLEVEL(TRUE, 21, 0x21, 1, 20) \
CPUIDLEVEL(TRUE, 23, 0x23, 4, 21) \
CPUIDLEVEL(FALSE, 400, 0x40000000, 0, 0) \
CPUIDLEVEL(FALSE, 401, 0x40000001, 0, 0) \
CPUIDLEVEL(FALSE, 402, 0x40000002, 0, 0) \
@ -174,7 +175,7 @@ typedef struct CPUIDQuery {
CPUIDLEVEL(FALSE, 81D, 0x8000001d, 5, 0) \
CPUIDLEVEL(FALSE, 81E, 0x8000001e, 0, 0) \
CPUIDLEVEL(TRUE, 81F, 0x8000001f, 0, 14) \
CPUIDLEVEL(TRUE, 820, 0x80000020, 2, 17) \
CPUIDLEVEL(TRUE, 820, 0x80000020, 4, 17) \
CPUIDLEVEL(TRUE, 821, 0x80000021, 0, 17) \
CPUIDLEVEL(TRUE, 822, 0x80000022, 0, 20) \
CPUIDLEVEL(TRUE, 823, 0x80000023, 0, 20) \
@ -483,6 +484,7 @@ FLAG( 6, 0, EAX, 18, 1, HWP_FAST_ACCESS, NO, 0 ) \
FLAG( 6, 0, EAX, 19, 1, HW_FEEDBACK, NO, 0 ) \
FLAG( 6, 0, EAX, 20, 1, HWP_IGNORE_IDLE_REQUEST, NO, 0 ) \
FLAG( 6, 0, EAX, 23, 1, HW_FEEDBACK_ENHANCED, NO, 0 ) \
FLAG( 6, 0, EAX, 24, 1, HWP_THERM_INTERRUPT_MSR, NO, 0 ) \
FIELD( 6, 0, EBX, 0, 4, NUM_INTR_THRESHOLDS, NO, 0 ) \
FLAG( 6, 0, ECX, 0, 1, HW_COORD_FEEDBACK, NO, 0 ) \
FLAG( 6, 0, ECX, 1, 1, ACNT2, ANY, 13 ) \
@ -532,7 +534,7 @@ FLAG( 7, 0, ECX, 1, 1, AVX512VBMI, YES, 17 ) \
FLAG( 7, 0, ECX, 2, 1, UMIP, YES, 17 ) \
FLAG( 7, 0, ECX, 3, 1, PKU, YES, 13 ) \
FLAG( 7, 0, ECX, 4, 1, OSPKE, ANY, 13 ) \
FLAG( 7, 0, ECX, 5, 1, WAITPKG, YES, FUT ) \
FLAG( 7, 0, ECX, 5, 1, WAITPKG, YES, 21 ) \
FLAG( 7, 0, ECX, 6, 1, AVX512VBMI2, YES, 17 ) \
FLAG( 7, 0, ECX, 7, 1, CET_SS, YES, 20 ) \
FLAG( 7, 0, ECX, 8, 1, GFNI, YES, 17 ) \
@ -550,19 +552,22 @@ FLAG( 7, 0, ECX, 24, 1, BUS_LOCK_DB, NO, 0 ) \
FLAG( 7, 0, ECX, 25, 1, CLDEMOTE, YES, 18 ) \
FLAG( 7, 0, ECX, 27, 1, MOVDIRI, YES, 18 ) \
FLAG( 7, 0, ECX, 28, 1, MOVDIR64B, YES, 18 ) \
FLAG( 7, 0, ECX, 29, 1, ENQCMD, NO, 0 ) \
FLAG( 7, 0, ECX, 29, 1, ENQCMD, YES, FUT ) \
FLAG( 7, 0, ECX, 30, 1, SGX_LC, ANY, 17 ) \
FLAG( 7, 0, ECX, 31, 1, PKS, YES, 20 ) \
FLAG( 7, 0, EDX, 1, 1, SGK_KEYS, NO, 0 ) \
FLAG( 7, 0, EDX, 2, 1, AVX512QVNNIW, YES, 16 ) \
FLAG( 7, 0, EDX, 3, 1, AVX512QFMAPS, YES, 16 ) \
FLAG( 7, 0, EDX, 4, 1, FAST_SHORT_REPMOV, YES, 18 ) \
FLAG( 7, 0, EDX, 5, 1, UINTR, NO, 0 ) \
FLAG( 7, 0, EDX, 8, 1, AVX512VP2INTERSECT, YES, 18 ) \
FLAG( 7, 0, EDX, 9, 1, SRBDS_CTRL, NO, 0 ) \
FLAG( 7, 0, EDX, 10, 1, MDCLEAR, YES, 9 ) \
FLAG( 7, 0, EDX, 13, 1, TSX_FORCE_ABORT, NO, 0 ) \
FLAG( 7, 0, EDX, 11, 1, RTM_ALWAYS_ABORT, NO, 0 ) \
FLAG( 7, 0, EDX, 13, 1, RTM_FORCE_ABORT, NO, 0 ) \
FLAG( 7, 0, EDX, 14, 1, SERIALIZE, YES, 20 ) \
FLAG( 7, 0, EDX, 15, 1, HYBRID, NO, 0 ) \
FLAG( 7, 0, EDX, 16, 1, TSXLDTRK, NO, 0 ) \
FLAG( 7, 0, EDX, 16, 1, TSXLDTRK, YES, 21 ) \
FLAG( 7, 0, EDX, 18, 1, PCONFIG, NO, 0 ) \
FLAG( 7, 0, EDX, 19, 1, ARCH_LBR, YES, 20 ) \
FLAG( 7, 0, EDX, 20, 1, CET_IBT, YES, 20 ) \
@ -576,14 +581,34 @@ FLAG( 7, 0, EDX, 28, 1, FCMD, YES, 9 ) \
FLAG( 7, 0, EDX, 29, 1, ARCH_CAPABILITIES, ANY, 9 ) \
FLAG( 7, 0, EDX, 30, 1, CORE_CAPABILITIES, NO, 0 ) \
FLAG( 7, 0, EDX, 31, 1, SSBD, YES, 9 ) \
FLAG( 7, 1, EAX, 3, 1, RAO_INT, NO, 0 ) \
FLAG( 7, 1, EAX, 4, 1, AVX_VNNI, YES, 20 ) \
FLAG( 7, 1, EAX, 5, 1, AVX512BF16, YES, 18 ) \
FLAG( 7, 1, EAX, 6, 1, LASS, NO, 0 ) \
FLAG( 7, 1, EAX, 7, 1, CMPCCXADD, NO, 0 ) \
FLAG( 7, 1, EAX, 8, 1, ARCH_PERFMON_EXT, NO, 0 ) \
FLAG( 7, 1, EAX, 10, 1, FAST_ZERO_MOVSB, YES, 20 ) \
FLAG( 7, 1, EAX, 11, 1, FAST_SHORT_STOSB, YES, 20 ) \
FLAG( 7, 1, EAX, 12, 1, FAST_SHORT_CMPSB_SCASB, YES, 20 ) \
FLAG( 7, 1, EAX, 19, 1, WRMSRNS, NO, 0 ) \
FLAG( 7, 1, EAX, 21, 1, AMX_FP16, YES, 21 ) \
FLAG( 7, 1, EAX, 22, 1, HRESET, NO, 0 ) \
FLAG( 7, 1, EAX, 23, 1, AVX_IFMA, NO, 0 ) \
FLAG( 7, 1, EAX, 26, 1, LAM, NO, 0 ) \
FLAG( 7, 2, EDX, 0, 1, PSFD, YES, 20 )
FLAG( 7, 1, EAX, 27, 1, MSRLIST, NO, 0 ) \
FLAG( 7, 1, EBX, 0, 1, LEAF7_PPIN, NO, 0 ) \
FLAG( 7, 1, EDX, 4, 1, AVX_VNNI_INT8, NO, 0 ) \
FLAG( 7, 1, EDX, 5, 1, AVX_NE_CONVERT, NO, 0 ) \
FLAG( 7, 1, EDX, 8, 1, AMX_COMPLEX, NO, 0 ) \
FLAG( 7, 1, EDX, 14, 1, PREFETCHI, YES, 21 ) \
FLAG( 7, 1, EDX, 18, 1, CET_SSS, NO, 0 ) \
FLAG( 7, 2, EDX, 0, 1, PSFD, YES, 20 ) \
FLAG( 7, 2, EDX, 1, 1, IPRED_CTRL, YES, 21 ) \
FLAG( 7, 2, EDX, 2, 1, RRSBA_CTRL, YES, 21 ) \
FLAG( 7, 2, EDX, 3, 1, DDPD_U, YES, 21 ) \
FLAG( 7, 2, EDX, 4, 1, BHI_CTRL, YES, 21 ) \
FLAG( 7, 2, EDX, 5, 1, MCDT_NO, NO, 0 ) \
FLAG( 7, 2, EDX, 6, 1, UC_LOCK_DISABLE, NO, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_9 \
@ -644,7 +669,7 @@ FLAG( D, 1, EAX, 3, 1, XSAVES, YES, 13 ) \
FLAG( D, 1, EAX, 4, 1, XFD, YES, 20 ) \
FIELD( D, 1, EBX, 0, 32, XSAVES_ENABLED_SIZE, ANY, 13 ) \
FLAG( D, 1, ECX, 8, 1, XSS_MASTER_PT, NO, 0 ) \
FLAG( D, 1, ECX, 10, 1, XSS_MASTER_PASID, NO, 0 ) \
FLAG( D, 1, ECX, 10, 1, XSS_MASTER_PASID, YES, FUT ) \
FLAG( D, 1, ECX, 11, 1, XSS_MASTER_CET_U, YES, 20 ) \
FLAG( D, 1, ECX, 12, 1, XSS_MASTER_CET_S, YES, 20 ) \
FLAG( D, 1, ECX, 13, 1, XSS_MASTER_HDC, NO, 0 ) \
@ -691,10 +716,10 @@ FIELD( D, 9, EBX, 0, 32, XSAVE_PKRU_OFFSET, YES, 13 ) \
FLAG( D, 9, ECX, 0, 1, XSAVE_PKRU_SUP_BY_XSS, NO, 0 ) \
FLAG( D, 9, ECX, 1, 1, XSAVE_PKRU_ALIGN, YES, 13 ) \
FLAG( D, 9, ECX, 2, 1, XSAVE_PKRU_XFD, NO, 0 ) \
FIELD( D, 10, EAX, 0, 32, XSAVES_PASID_STATE_SIZE, NO, 0 ) \
FLAG( D, 10, ECX, 0, 1, XSAVES_PASID_STATE_SUP_BY_XSS, NO, 0 ) \
FLAG( D, 10, ECX, 1, 1, XSAVES_PASID_STATE_ALIGN, NO, 0 ) \
FLAG( D, 10, ECX, 2, 1, XSAVES_PASID_STATE_XFD, NO, 0 ) \
FIELD( D, 10, EAX, 0, 32, XSAVES_PASID_STATE_SIZE, YES, FUT ) \
FLAG( D, 10, ECX, 0, 1, XSAVES_PASID_STATE_SUP_BY_XSS, YES, FUT ) \
FLAG( D, 10, ECX, 1, 1, XSAVES_PASID_STATE_ALIGN, YES, FUT ) \
FLAG( D, 10, ECX, 2, 1, XSAVES_PASID_STATE_XFD, YES, FUT ) \
FIELD( D, 11, EAX, 0, 32, XSAVES_CET_U_SIZE, YES, 20 ) \
FLAG( D, 11, ECX, 0, 1, XSAVES_CET_U_SUP_BY_XSS, YES, 20 ) \
FLAG( D, 11, ECX, 1, 1, XSAVES_CET_U_ALIGN, YES, 20 ) \
@ -759,9 +784,11 @@ FIELD( 10, 2, EDX, 0, 16, PQE_L2_MAX_COS_NUMBER, NO, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_12 \
FLAG( 12, 0, EAX, 0, 1, SGX1, ANY, 17 ) \
FLAG( 12, 0, EAX, 1, 1, SGX2, ANY, FUT ) \
FLAG( 12, 0, EAX, 5, 1, SGX_OVERSUB_ENCLV, ANY, FUT ) \
FLAG( 12, 0, EAX, 6, 1, SGX_OVERSUB_ENCLS, ANY, FUT ) \
FLAG( 12, 0, EAX, 1, 1, SGX2, ANY, 21 ) \
FLAG( 12, 0, EAX, 5, 1, SGX_OVERSUB_ENCLV, NO, 0 ) \
FLAG( 12, 0, EAX, 6, 1, SGX_OVERSUB_ENCLS, NO, 0 ) \
FLAG( 12, 0, EAX, 7, 1, SGX_EVERIFYREPORT2, NO, 0 ) \
FLAG( 12, 0, EAX, 10, 1, SGX_EUPDATESVN, NO, 0 ) \
FLAG( 12, 0, EBX, 0, 1, SGX_MISCSELECT_EXINFO, ANY, FUT ) \
FIELD( 12, 0, EBX, 1, 31, SGX_MISCSELECT_RSVD, NO, 0 ) \
FIELD( 12, 0, EDX, 0, 8, MAX_ENCLAVE_SIZE_NOT64, ANY, 17 ) \
@ -792,6 +819,9 @@ FLAG( 14, 0, EBX, 2, 1, PT_IP_FILTER_PERSIST_MSR, NO, 0 ) \
FLAG( 14, 0, EBX, 3, 1, PT_MTC, NO, 0 ) \
FLAG( 14, 0, EBX, 4, 1, PT_PTWRITE, NO, 0 ) \
FLAG( 14, 0, EBX, 5, 1, PT_POWER_EVENT, NO, 0 ) \
FLAG( 14, 0, EBX, 6, 1, PT_PSB_PMI, NO, 0 ) \
FLAG( 14, 0, EBX, 7, 1, PT_EVENT_TRACE_ENABLE, NO, 0 ) \
FLAG( 14, 0, EBX, 8, 1, PT_TNT_DISABLE, NO, 0 ) \
FLAG( 14, 0, ECX, 0, 1, PT_TOPA, NO, 0 ) \
FLAG( 14, 0, ECX, 1, 1, PT_TOPA_MULTI, NO, 0 ) \
FLAG( 14, 0, ECX, 2, 1, PT_SRO, NO, 0 ) \
@ -883,20 +913,24 @@ FLAG( 1C, 0, EBX, 2, 1, LBR_CALL_STACK_MODE, YES, 20 ) \
FLAG( 1C, 0, ECX, 0, 1, LBR_MISPREDICT, YES, 20 ) \
FLAG( 1C, 0, ECX, 1, 1, LBR_TIMED_LBRS, YES, 20 ) \
FLAG( 1C, 0, ECX, 2, 1, LBR_BRANCH_TYPE, YES, 20 ) \
FLAG( 1C, 0, ECX, 16, 1, LBR_EVENT_LOGGING_PMC0, NO, 0 ) \
FLAG( 1C, 0, ECX, 17, 1, LBR_EVENT_LOGGING_PMC1, NO, 0 ) \
FLAG( 1C, 0, ECX, 18, 1, LBR_EVENT_LOGGING_PMC2, NO, 0 ) \
FLAG( 1C, 0, ECX, 19, 1, LBR_EVENT_LOGGING_PMC3, NO, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_1D \
FIELD( 1D, 0, EAX, 0, 32, TILE_PALETTE_MAX, YES, 20 ) \
FIELD( 1D, 1, EAX, 0, 16, TILE_PALETTE1_TOTAL_BYTES, YES, 20 ) \
FIELD( 1D, 1, EAX, 16, 16, TILE_PALETTE1_BYTES_PER_TILE, YES, 20 ) \
FIELD( 1D, 1, EBX, 0, 16, TILE_PALETTE1_BYTES_PER_ROW, YES, 20 ) \
FIELD( 1D, 1, EBX, 16, 16, TILE_PALETTE1_NUM_REGS, YES, 20 ) \
FIELD( 1D, 1, ECX, 0, 16, TILE_PALETTE1_MAX_ROWS, YES, 20 )
FIELD( 1D, 0, EAX, 0, 32, TILE_PALETTE_MAX, YES, 20 ) \
FIELD( 1D, 1, EAX, 0, 16, TILE_PALETTE1_TOTAL_BYTES, YES, 20 ) \
FIELD( 1D, 1, EAX, 16, 16, TILE_PALETTE1_BYTES_PER_TILE, YES, 20 ) \
FIELD( 1D, 1, EBX, 0, 16, TILE_PALETTE1_BYTES_PER_ROW, YES, 20 ) \
FIELD( 1D, 1, EBX, 16, 16, TILE_PALETTE1_NUM_REGS, YES, 20 ) \
FIELD( 1D, 1, ECX, 0, 16, TILE_PALETTE1_MAX_ROWS, YES, 20 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_1E \
FIELD( 1E, 0, EBX, 0, 8, TMUL_MAX_K, YES, 20 ) \
FIELD( 1E, 0, EBX, 8, 16, TMUL_MAX_N, YES, 20 )
FIELD( 1E, 0, EBX, 0, 8, TMUL_MAX_K, YES, 20 ) \
FIELD( 1E, 0, EBX, 8, 16, TMUL_MAX_N, YES, 20 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_1F \
@ -918,6 +952,26 @@ FIELD( 21, 0, EBX, 0, 32, TDX_VENDOR1, NO, 0 ) \
FIELD( 21, 0, ECX, 0, 32, TDX_VENDOR3, NO, 0 ) \
FIELD( 21, 0, EDX, 0, 32, TDX_VENDOR2, NO, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_23 \
FIELD( 23, 0, EAX, 0, 32, ARCH_PMC_MAX_SUBLEAF, NO, 0 ) \
FLAG( 23, 0, EBX, 0, 1, ARCH_PMC_UNITMASK2, NO, 0 ) \
FLAG( 23, 0, EBX, 1, 1, ARCH_PMC_ZBIT, NO, 0 ) \
FIELD( 23, 1, EAX, 0, 32, ARCH_PMC_GEN_BITMAP, NO, 0 ) \
FIELD( 23, 1, EBX, 0, 32, ARCH_PMC_FIXED_BITMAP, NO, 0 ) \
FLAG( 23, 3, EAX, 0, 1, ARCH_PMC_CORE_CYCLES, NO, 0 ) \
FLAG( 23, 3, EAX, 1, 1, ARCH_PMC_INSTR_RETIRED, NO, 0 ) \
FLAG( 23, 3, EAX, 2, 1, ARCH_PMC_REF_CYCLES, NO, 0 ) \
FLAG( 23, 3, EAX, 3, 1, ARCH_PMC_LAST_LVL_CREF, NO, 0 ) \
FLAG( 23, 3, EAX, 4, 1, ARCH_PMC_LAST_LVL_CMISS, NO, 0 ) \
FLAG( 23, 3, EAX, 5, 1, ARCH_PMC_BR_INST_RETIRED, NO, 0 ) \
FLAG( 23, 3, EAX, 6, 1, ARCH_PMC_BR_MISS_RETIRED, NO, 0 ) \
FLAG( 23, 3, EAX, 7, 1, ARCH_PMC_TOPDOWN_SLOTS, NO, 0 ) \
FLAG( 23, 3, EAX, 8, 1, ARCH_PMC_TOPDOWN_BACKEND, NO, 0 ) \
FLAG( 23, 3, EAX, 9, 1, ARCH_PMC_TOPDOWN_BAD_SPEC, NO, 0 ) \
FLAG( 23, 3, EAX, 10, 1, ARCH_PMC_TOPDOWN_FRONTEND, NO, 0 ) \
FLAG( 23, 3, EAX, 11, 1, ARCH_PMC_TOPDOWN_RETIRE, NO, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_400 \
FIELD(400, 0, EAX, 0, 32, MAX_HYP_LEVEL, NA, 0 ) \
@ -1042,7 +1096,7 @@ FIELD( 81, 0, EAX, 12, 2, LEAF81_TYPE, ANY, 4 ) \
FIELD( 81, 0, EAX, 16, 4, LEAF81_EXTENDED_MODEL, ANY, 4 ) \
FIELD( 81, 0, EAX, 20, 8, LEAF81_EXTENDED_FAMILY, ANY, 4 ) \
FIELD( 81, 0, EBX, 0, 16, LEAF81_BRAND_ID, ANY, 4 ) \
FIELD( 81, 0, EBX, 16, 16, UNDEF, ANY, 4 ) \
FIELD( 81, 0, EBX, 28, 4, LEAF81_PKG_TYPE, ANY, 4 ) \
FLAG( 81, 0, ECX, 0, 1, LAHF64, YES, 4 ) \
FLAG( 81, 0, ECX, 1, 1, CMPLEGACY, ANY, 9 ) \
FLAG( 81, 0, ECX, 2, 1, SVM, YES, 8 ) \
@ -1068,7 +1122,7 @@ FLAG( 81, 0, ECX, 24, 1, PERFNB, NO, 0 ) \
FLAG( 81, 0, ECX, 26, 1, DATABK, NO, 0 ) \
FLAG( 81, 0, ECX, 27, 1, PERFTSC, NO, 0 ) \
FLAG( 81, 0, ECX, 28, 1, PERFL3, NO, 0 ) \
FLAG( 81, 0, ECX, 29, 1, MONITORX, NO, 0 ) \
FLAG( 81, 0, ECX, 29, 1, MONITORX, YES, 21 ) \
FLAG( 81, 0, ECX, 30, 1, ADDR_MASK_EXT, NO, 0 ) \
FLAG( 81, 0, EDX, 0, 1, LEAF81_FPU, YES, 4 ) \
FLAG( 81, 0, EDX, 1, 1, LEAF81_VME, YES, 4 ) \
@ -1179,7 +1233,8 @@ FLAG( 87, 0, EDX, 10, 1, EFFECTIVE_FREQUENCY, NA, 0 ) \
FLAG( 87, 0, EDX, 11, 1, PROC_FEEDBACK_INTERFACE, NA, 0 ) \
FLAG( 87, 0, EDX, 12, 1, PROC_POWER_REPORTING, NA, 0 ) \
FLAG( 87, 0, EDX, 13, 1, CONNECTED_STANDBY, NA, 0 ) \
FLAG( 87, 0, EDX, 14, 1, RAPL, NA, 0 )
FLAG( 87, 0, EDX, 14, 1, RAPL, NA, 0 ) \
FLAG( 87, 0, EDX, 15, 1, FAST_CPPC, NA, 0 )
/* LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_88 \
@ -1208,7 +1263,9 @@ FLAG( 88, 0, EBX, 23, 1, PPIN, NO, 0 ) \
FLAG( 88, 0, EBX, 24, 1, LEAF88_SSBD_SPEC_CTRL, YES, 20 ) \
FLAG( 88, 0, EBX, 25, 1, LEAF88_SSBD_VIRT_SPEC_CTRL, NO, 0 ) \
FLAG( 88, 0, EBX, 26, 1, LEAF88_SSBD_NOT_NEEDED, NO, 0 ) \
FLAG( 88, 0, EBX, 27, 1, CPPC, NO, 0 ) \
FLAG( 88, 0, EBX, 28, 1, LEAF88_PSFD, YES, 20 ) \
FLAG( 88, 0, EBX, 29, 1, BTC_NO, NO, 0 ) \
FIELD( 88, 0, ECX, 0, 8, LEAF88_CORE_COUNT, YES, 4 ) \
FIELD( 88, 0, ECX, 12, 4, APICID_COREID_SIZE, YES, 7 ) \
FIELD( 88, 0, ECX, 16, 2, PERFTSC_SIZE, NO, 0 ) \
@ -1247,9 +1304,13 @@ FLAG( 8A, 0, EDX, 17, 1, SVM_GMET, YES, 17 ) \
FLAG( 8A, 0, EDX, 18, 1, SVMEDX_RSVD3, NO, 0 ) \
FLAG( 8A, 0, EDX, 19, 1, SVM_SSS, YES, 20 ) \
FLAG( 8A, 0, EDX, 20, 1, SVM_GUEST_SPEC_CTRL, NO, 0 ) \
FIELD( 8A, 0, EDX, 21, 3, SVMEDX_RSVD4, NO, 0 ) \
FLAG( 8A, 0, EDX, 21, 1, SVM_NON_WRITEABLE_PT, NO, 0 ) \
FLAG( 8A, 0, EDX, 23, 1, SVM_HOST_MCE_OVERRIDE, NO, 0 ) \
FLAG( 8A, 0, EDX, 24, 1, SVM_TLB_CTL, NO, 0 ) \
FIELD( 8A, 0, EDX, 25, 7, SVMEDX_RSVD5, NO, 0 )
FLAG( 8A, 0, EDX, 25, 1, SVM_NMI_VIRT, NO, 0 ) \
FLAG( 8A, 0, EDX, 26, 1, SVM_IBS_VIRT, NO, 0 ) \
FLAG( 8A, 0, EDX, 27, 1, SVM_EXTLVT_OFFSET_FAULT, NO, 0 ) \
FLAG( 8A, 0, EDX, 28, 1, SVM_VMCB_ADDR_CHK, NO, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_819 \
@ -1280,7 +1341,8 @@ FLAG( 81B, 0, EAX, 6, 1, OPCOUNT_EXT, NA, 0 ) \
FLAG( 81B, 0, EAX, 7, 1, RIP_INVALID_CHECK, NA, 0 ) \
FLAG( 81B, 0, EAX, 8, 1, OP_BRN_FUSE, NA, 0 ) \
FLAG( 81B, 0, EAX, 9, 1, IBS_FETCH_CTL_EXTD, NA, 0 ) \
FLAG( 81B, 0, EAX, 10, 1, IBS_OP_DATA4, NA, 0 )
FLAG( 81B, 0, EAX, 10, 1, IBS_OP_DATA4, NA, 0 ) \
FLAG( 81B, 0, EAX, 11, 1, IBS_FETCH_OP, NA, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_81C \
@ -1348,6 +1410,9 @@ FLAG( 81F, 0, EAX, 2, 1, PAGE_FLUSH_MSR, NO, 0 ) \
FLAG( 81F, 0, EAX, 3, 1, SEV_ES, YES, 17 ) \
FLAG( 81F, 0, EAX, 4, 1, SEV_SNP, NO, 0 ) \
FLAG( 81F, 0, EAX, 5, 1, VMPL, NO, 0 ) \
FLAG( 81F, 0, EAX, 6, 1, RMPQUERY, NO, 0 ) \
FLAG( 81F, 0, EAX, 7, 1, VMPL_SSS, NO, 0 ) \
FLAG( 81F, 0, EAX, 8, 1, SECURE_TSC, NO, 0 ) \
FLAG( 81F, 0, EAX, 9, 1, TSC_AUX_VIRT, YES, 20 ) \
FLAG( 81F, 0, EAX, 10, 1, SEV_HEC, NO, 0 ) \
FLAG( 81F, 0, EAX, 11, 1, SEV_64BIT_REQ, NO, 0 ) \
@ -1355,6 +1420,11 @@ FLAG( 81F, 0, EAX, 12, 1, SEV_RESTR_INJECTION, NO, 0 ) \
FLAG( 81F, 0, EAX, 13, 1, SEV_ALT_INJECTION, NO, 0 ) \
FLAG( 81F, 0, EAX, 14, 1, SEV_DEBUG_SWAP, NO, 0 ) \
FLAG( 81F, 0, EAX, 15, 1, SEV_NO_HOST_IBS, NO, 0 ) \
FLAG( 81F, 0, EAX, 16, 1, SEV_VTE, NO, 0 ) \
FLAG( 81F, 0, EAX, 17, 1, VMGEXIT_PARAMETER, NO, 0 ) \
FLAG( 81F, 0, EAX, 18, 1, VIRTUAL_MSR_TOM, NO, 0 ) \
FLAG( 81F, 0, EAX, 19, 1, SEV_IBS_VIRT, NO, 0 ) \
FLAG( 81F, 0, EAX, 24, 1, VMSA_REG_PROT, NO, 0 ) \
FIELD(81F, 0, EBX, 0, 6, SME_PAGE_TABLE_BIT_NUM, YES, 17 ) \
FIELD(81F, 0, EBX, 6, 6, SME_PHYS_ADDR_SPACE_REDUCTION, NO, 0 ) \
FIELD(81F, 0, EBX, 12, 4, NUM_VMPL, NO, 0 ) \
@ -1364,17 +1434,68 @@ FIELD(81F, 0, EDX, 0, 32, SEV_MIN_ASID, NO, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_820 \
FLAG( 820, 0, EBX, 1, 1, LEAF820_MBE, NO, 0 ) \
FLAG( 820, 0, EBX, 2, 1, LEAF820_SMBE, NO, 0 ) \
FLAG( 820, 0, EBX, 3, 1, LEAF820_EVT_CFG, NO, 0 ) \
FLAG( 820, 0, EBX, 4, 1, LEAF820_L3RR, NO, 0 ) \
FIELD(820, 1, EAX, 0, 32, CAPACITY_MASK_LEN, NO, 0 ) \
FIELD(820, 1, EDX, 0, 32, NUM_SERVICE_CLASSES, NO, 0 )
FIELD(820, 1, EDX, 0, 32, NUM_SERVICE_CLASSES, NO, 0 ) \
FIELD(820, 2, EAX, 0, 32, SMBE_LENGTH, NO, 0 ) \
FIELD(820, 2, EDX, 0, 32, COS_MAX, NO, 0 ) \
FIELD(820, 3, EBX, 0, 8, NUM_BANDWIDTH_EVENTS, NO, 0 ) \
FLAG( 820, 3, ECX, 0, 1, L3_CACHE_LCL_BW_FILL, NO, 0 ) \
FLAG( 820, 3, ECX, 1, 1, L3_CACHE_RMT_BW_FILL, NO, 0 ) \
FLAG( 820, 3, ECX, 2, 1, L3_CACHE_LCL_BW_NT_WRITE, NO, 0 ) \
FLAG( 820, 3, ECX, 3, 1, L3_CACHE_RMT_BW_NT_WRITE, NO, 0 ) \
FLAG( 820, 3, ECX, 4, 1, L3_CACHE_LCL_SLOW_BW_FILL, NO, 0 ) \
FLAG( 820, 3, ECX, 5, 1, L3_CACHE_RMT_SLOW_BW_FILL, NO, 0 ) \
FLAG( 820, 3, ECX, 6, 1, L3_CACHE_BW_VIC, NO, 0 )
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_821 \
FLAG( 821, 0, EAX, 7, 1, UPPER_ADDRESS_IGNORE, YES, 20 )
FLAG( 821, 0, EAX, 0, 1, NO_NESTED_DATA_BP, NO, 0 ) \
FLAG( 821, 0, EAX, 1, 1, NON_SERIALIZING_FSGSBASE, NO, 0 ) \
FLAG( 821, 0, EAX, 2, 1, ALWAYS_SERIALIZING_LFENCE, YES, 19 ) \
FLAG( 821, 0, EAX, 3, 1, SMM_PGCFG_LOCK, NO, 0 ) \
FLAG( 821, 0, EAX, 6, 1, NULL_SELECTOR_CLEARS_BASE, NO, 0 ) \
FLAG( 821, 0, EAX, 7, 1, UPPER_ADDRESS_IGNORE, YES, 20 ) \
FLAG( 821, 0, EAX, 8, 1, AUTOMATIC_IBRS, YES, 20 ) \
FLAG( 821, 0, EAX, 9, 1, NO_SMMCTL_MSR, NO, 0 ) \
FLAG( 821, 0, EAX, 10, 1, AMD_FAST_SHORT_STOSB, YES, 20 ) \
FLAG( 821, 0, EAX, 11, 1, AMD_FAST_SHORT_CMPSB, YES, 20 ) \
FLAG( 821, 0, EAX, 13, 1, PREFETCHCTL_MSR, NO, 0 ) \
FLAG( 821, 0, EAX, 17, 1, CPL3_CPUID_GP, NO, 0 ) \
FLAG( 821, 0, EAX, 18, 1, EPSF, NO, 0 ) \
FIELD(821, 0, EBX, 0, 12, MICROCODE_PATCH_SIZE, NO, 0 )
#define CPUID_FIELD_DATA_LEVEL_822
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_822 \
FLAG( 822, 0, EAX, 0, 1, AMD_PERFMON_V2, NO, 0 ) \
FLAG( 822, 0, EAX, 1, 1, AMD_LBREXT_V2, NO, 0 ) \
FLAG( 822, 0, EAX, 2, 1, AMD_LBR_PMC_FREEZE, NO, 0 ) \
FIELD(822, 0, EBX, 0, 4, AMD_NUM_CORE_PMC, NO, 0 ) \
FIELD(822, 0, EBX, 4, 6, AMD_LBR_STACK_SIZE, NO, 0 ) \
FIELD(822, 0, EBX, 10, 6, AMD_NUM_DF_PMC, NO, 0 ) \
FIELD(822, 0, EBX, 16, 6, AMD_NUM_UMC_PMC, NO, 0 ) \
FIELD(822, 0, ECX, 0, 32, AMD_ACTIVE_UMC_PMC_MASK, NO, 0 )
#define CPUID_FIELD_DATA_LEVEL_823
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_823 \
FLAG( 823, 0, EAX, 0, 1, MEM_HMK, NO, 0 ) \
FIELD(823, 0, EBX, 0, 16, MEM_HMK_MAX_ENCR_KEYID, NO, 0 )
#define CPUID_FIELD_DATA_LEVEL_826
/* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV */
#define CPUID_FIELD_DATA_LEVEL_826 \
FIELD(826, 0, EAX, 0, 5, AMD_TOPOLOGY_MASK_WIDTH, NA, 0 ) \
FLAG( 826, 0, EAX, 29, 1, AMD_TOPOLOGY_EFFICIENCY_RANK, NA, 0 ) \
FLAG( 826, 0, EAX, 30, 1, AMD_TOPOLOGY_HETEROGENEOUS_CORES, NA, 0 ) \
FLAG( 826, 0, EAX, 31, 1, AMD_TOPOLOGY_ASYMMETRIC_CORES, NA, 0 ) \
FIELD(826, 0, EBX, 0, 16, AMD_TOPOLOGY_CPUS_SHARING_LEVEL, NA, 0 ) \
FIELD(826, 0, EBX, 16, 8, AMD_TOPOLOGY_POWER_RANKING, NA, 0 ) \
FIELD(826, 0, EBX, 24, 4, AMD_TOPOLOGY_NATIVE_MODEL_ID, NA, 0 ) \
FIELD(826, 0, EBX, 28, 4, AMD_TOPOLOGY_CORE_TYPE, NA, 0 ) \
FIELD(826, 0, ECX, 0, 8, AMD_TOPOLOGY_LEVEL_NUMBER, NA, 0 ) \
FIELD(826, 0, ECX, 8, 8, AMD_TOPOLOGY_LEVEL_TYPE, NA, 0 ) \
FIELD(826, 0, EDX, 0, 32, AMD_TOPOLOGY_EXT_APIC_ID, NA, 0 )
#define CPUID_FIELD_DATA \
CPUID_FIELD_DATA_LEVEL_0 \
@ -1405,6 +1526,7 @@ FLAG( 821, 0, EAX, 7, 1, UPPER_ADDRESS_IGNORE, YES, 20 )
CPUID_FIELD_DATA_LEVEL_1F \
CPUID_FIELD_DATA_LEVEL_20 \
CPUID_FIELD_DATA_LEVEL_21 \
CPUID_FIELD_DATA_LEVEL_23 \
CPUID_FIELD_DATA_LEVEL_400 \
CPUID_FIELD_DATA_LEVEL_401 \
CPUID_FIELD_DATA_LEVEL_402 \
@ -1673,8 +1795,10 @@ CPUIDCheck(int32 eaxIn, int32 eaxInCheck,
#define CPUID_MODEL_ICELAKE_7E 0x7e // Ice Lake U/Y
#define CPUID_MODEL_ICELAKE_6A 0x6a // Ice Lake SP (ICX)
#define CPUID_MODEL_ICELAKE_6C 0x6c // Ice Lake D
#define CPUID_MODEL_LAKEFIELD_8A 0x8a // Lakefield
#define CPUID_MODEL_TIGERLAKE_8C 0x8c // Tiger Lake UP3/UP4/H35
#define CPUID_MODEL_TIGERLAKE_8D 0x8d // Tiger Lake H81
#define CPUID_MODEL_SAPPHIRERAPIDS_8F 0x8f // Sapphire Rapids
#define CPUID_MODEL_KNM_85 0x85 // Knights Mill
#define CPUID_MODEL_KABYLAKE_8E 0x8e // Kaby Lake U/Y QS
#define CPUID_MODEL_ALDERLAKE_97 0x97 // Alder Lake-S
@ -1683,6 +1807,9 @@ CPUIDCheck(int32 eaxIn, int32 eaxInCheck,
#define CPUID_MODEL_COMETLAKE_A5 0xa5 // Comet Lake S
#define CPUID_MODEL_COMETLAKE_A6 0xa6 // Comet Lake U
#define CPUID_MODEL_ROCKETLAKE_A7 0xa7 // Rocket Lake S
#define CPUID_MODEL_RAPTORLAKE_B7 0xb7 // Raptor Lake S/HX B-0
#define CPUID_MODEL_RAPTORLAKE_BA 0xba // Raptor Lake H/P/PX J-0, U Q-0
#define CPUID_MODEL_RAPTORLAKE_BF 0xbf // Raptor Lake S/HX C-0
/* Intel stepping information */
#define CPUID_STEPPING_KABYLAKE_ES 0x8 // Kaby Lake S/H/U/Y ES
@ -2083,6 +2210,34 @@ CPUID_MODEL_IS_ALDERLAKE(uint32 v) // IN: %eax from CPUID with %eax=1.
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_ALDERLAKE_9A);
}
static INLINE Bool
CPUID_MODEL_IS_RAPTORLAKE(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
(CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_RAPTORLAKE_B7 ||
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_RAPTORLAKE_BF ||
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_RAPTORLAKE_BA);
}
static INLINE Bool
CPUID_MODEL_IS_SAPPHIRERAPIDS(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_FAMILY_IS_P6(v) &&
CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_SAPPHIRERAPIDS_8F;
}
static INLINE Bool
CPUID_UARCH_IS_SAPPHIRERAPIDS(uint32 v) // IN: %eax from CPUID with %eax=1.
{
/* Assumes the CPU manufacturer is Intel. */
return CPUID_MODEL_IS_SAPPHIRERAPIDS(v) ||
CPUID_MODEL_IS_ALDERLAKE(v) ||
CPUID_MODEL_IS_RAPTORLAKE(v);
}
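/*
 * Example (editor's note, not from the original source): a Raptor Lake S
 * part reports family 6 and effective model 0xb7 in CPUID(1).EAX, so
 * CPUID_MODEL_IS_RAPTORLAKE() is TRUE and, per the definition above,
 * CPUID_UARCH_IS_SAPPHIRERAPIDS() is TRUE as well, even though
 * CPUID_MODEL_IS_SAPPHIRERAPIDS() itself is FALSE.
 */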
static INLINE Bool
CPUID_UARCH_IS_HASWELL(uint32 v) // IN: %eax from CPUID with %eax=1.

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2019 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2019,2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -72,6 +72,19 @@
#define OFFSET_LO(_dw) ((uint32)((_dw) & 0xffff)) /* CallGate */
#define OFFSET_HI(_dw) ((uint32)(((_dw) >> 16) & 0xffff))
/* Reset values for descriptors. */
#define RESET_DESC_BASE 0x00000000
#define RESET_DESC_BASE_CS 0xffff0000
#define RESET_DESC_LIMIT 0x0000ffff
#define RESET_DESC_LIMIT_AP 0xffffffff
#define RESET_DESC_LIMIT_TDX 0xffffffff
#define RESET_SELECTOR_VALUE 0x00000000
#define RESET_SELECTOR_VALUE_CS 0x0000f000
#define RESET_GDT_LIMIT 0x0000ffff
#define RESET_IDT_LIMIT 0x0000ffff
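/*
 * Editor's note: these encode the architectural reset state; e.g. the CS
 * base RESET_DESC_BASE_CS (0xffff0000) combined with the reset %eip of
 * 0xfff0 places the first instruction fetch at the usual reset vector
 * 0xfffffff0.
 */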
/*
* Accessor functions for descriptors.
*

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -101,6 +101,7 @@ typedef struct MSRQuery {
#define MSR_DEBUGCTL 0x000001d9
#define MSR_TSC_DEADLINE 0x000006e0
#define MSR_PKRS 0x000006e1
#define MSR_DEBUG_INTERFACE 0x00000c80
#define MSR_EFER 0xc0000080
#define MSR_FSBASE 0xc0000100
#define MSR_GSBASE 0xc0000101
@ -108,6 +109,15 @@ typedef struct MSRQuery {
#define MSR_TSC_AUX 0xc0000103
#define MSR_BD_TSC_RATIO 0xc0000104
#define MSR_TEMPERATURE_TARGET 0x000001a2
#define MSR_PACKAGE_THERM_STATUS 0x000001b1
#define MSR_RAPL_POWER_UNIT 0x00000606
#define MSR_PKG_ENERGY_STATUS 0x00000611
#define MSR_PKG_PERF_STATUS 0x00000613
#define MSR_PKG_POWER_INFO 0x00000614
#define MSR_DRAM_ENERGY_STATUS 0x00000619
#define MSR_DRAM_PERF_STATUS 0x0000061b
/* CET MSRs */
#define MSR_U_CET 0x6a0
#define MSR_S_CET 0x6a2
@ -142,6 +152,19 @@ typedef struct MSRQuery {
#define MSR_ARCH_CAPABILITIES_IF_PSCHANGE_MC_NO (1ULL << 6)
#define MSR_ARCH_CAPABILITIES_TSX_CTRL (1ULL << 7)
#define MSR_ARCH_CAPABILITIES_TAA_NO (1ULL << 8)
#define MSR_ARCH_CAPABILITIES_MISC_PKG_CTRLS (1ULL << 10)
#define MSR_ARCH_CAPABILITIES_ENERGY_FILT_CTL (1ULL << 11)
#define MSR_ARCH_CAPABILITIES_DOITM (1ULL << 12)
#define MSR_ARCH_CAPABILITIES_SBDR_SSDP_NO (1ULL << 13)
#define MSR_ARCH_CAPABILITIES_FBSDP_NO (1ULL << 14)
#define MSR_ARCH_CAPABILITIES_PSDP_NO (1ULL << 15)
#define MSR_ARCH_CAPABILITIES_FB_CLEAR (1ULL << 17)
#define MSR_ARCH_CAPABILITIES_FB_CLEAR_CTRL (1ULL << 18)
#define MSR_ARCH_CAPABILITIES_RRSBA (1ULL << 19)
#define MSR_ARCH_CAPABILITIES_BHI_NO (1ULL << 20)
#define MSR_ARCH_CAPABILITIES_XAPIC_DIS_STATUS (1ULL << 21)
#define MSR_ARCH_CAPABILITIES_OVERCLOCKING_STATUS (1ULL << 23)
#define MSR_ARCH_CAPABILITIES_PBRSB_NO (1ULL << 24)
#define MSR_FLUSH_CMD 0x10b
#define MSR_FLUSH_CMD_FLUSH_L1D (1ULL << 0)
@ -149,7 +172,13 @@ typedef struct MSRQuery {
#define MSR_SPEC_CTRL_IBRS (1UL << 0)
#define MSR_SPEC_CTRL_STIBP (1UL << 1)
#define MSR_SPEC_CTRL_SSBD (1UL << 2)
#define MSR_SPEC_CTRL_IPRED_DIS_U (1UL << 3)
#define MSR_SPEC_CTRL_IPRED_DIS_S (1UL << 4)
#define MSR_SPEC_CTRL_RRSBA_DIS_U (1UL << 5)
#define MSR_SPEC_CTRL_RRSBA_DIS_S (1UL << 6)
#define MSR_SPEC_CTRL_PSFD (1UL << 7)
#define MSR_SPEC_CTRL_DDPD_U (1UL << 8)
#define MSR_SPEC_CTRL_BHI_DIS_S (1UL << 10)
#define MSR_PRED_CMD_IBPB (1UL << 0)
@ -157,6 +186,12 @@ typedef struct MSRQuery {
#define MSR_TSX_CTRL_RTM_DISABLE (1ULL << 0)
#define MSR_TSX_CTRL_CPUID_CLEAR (1ULL << 1)
#define MSR_MCU_OPT_CTRL 0x123
#define MSR_MCU_OPT_CTRL_RNGDS_MITG_DIS (1ULL << 0)
#define MSR_MCU_OPT_CTRL_RTM_ALLOW (1ULL << 1)
#define MSR_MCU_OPT_CTRL_RTM_LOCKED (1ULL << 2)
#define MSR_MCU_OPT_CTRL_FB_CLEAR_DIS (1ULL << 3)
#ifndef MSR_MISC_FEATURES_ENABLES
#define MSR_MISC_FEATURES_ENABLES 0x140
#endif
@ -295,6 +330,13 @@ typedef struct MSRQuery {
#define MSR_ARCH_LBR_CTL_BRANCH_MASK 0x7f0000
#define MSR_ARCH_LBR_CTL_ALL 0x7f000f
/* AMD LBR stack MSRs. */
#define MSR_DBG_EXTN_CTL 0xc000010f
#define MSR_DBG_EXTN_CTL_LBRS (1 << 6)
#define MSR_AMD_LBR_FROM_IP 0xc0010300
#define MSR_AMD_LBR_TO_IP 0xc0010301
/* Power Management MSRs */
#define MSR_PERF_STATUS 0x00000198 // Current Performance State (ro)
#define MSR_PERF_CTL 0x00000199 // Target Performance State (rw)
@ -620,6 +662,10 @@ typedef struct MSRQuery {
#define MSR_EFER_AMD_MBZ 0xffffffffffcf0200ULL /* Must be zero (resrvd) */
#define MSR_EFER_AMD_RAZ 0x00000000000000feULL /* Read as zero */
/* MSR_BD_TSC_RATIO bits */
#define MSR_BD_TSC_RATIO_RSVD CONST64U(0xffffff0000000000)
#define MSR_BD_TSC_RATIO_DEFAULT 0x0100000000ULL
#define MSR_AMD_PATCH_LOADER 0xc0010020
/* This ifndef is necessary because this is defined by some kernel headers. */
@ -634,12 +680,15 @@ typedef struct MSRQuery {
#ifndef MSR_K8_SYSCFG
#define MSR_K8_SYSCFG 0xc0010010
#endif
#define MSR_K8_SYSCFG_MFDM (1ULL<<19)
#define MSR_K8_SYSCFG_MTRRTOM2EN (1ULL<<21)
#define MSR_K8_SYSCFG_TOM2FORCEMEMTYPEWB (1ULL<<22)
#define MSR_K8_SYSCFG_SMEE (1ULL<<23)
#define MSR_K8_SYSCFG_SNPE (1ULL<<24)
#define MSR_K8_SYSCFG_VMPLE (1ULL<<25)
#define MSR_K8_IORRBASE0 0xc0010016
#define MSR_K8_TOPMEM 0xc001001a
#define MSR_K8_TOPMEM2 0xc001001d
/* AMD "Greyhound" MSRs */
@ -717,6 +766,19 @@ typedef struct MSRQuery {
/* Field definitions for MSR_GHCB_PA_AP_RESET_HOLD_REQ */
#define MSR_GHCB_PA_AP_RESET_HOLD_SHIFT 12
/* Field definitions for MSR_GHCB_PA_REGISTER_GHCB_GPA_RESP */
#define MSR_GHCB_PA_REGISTER_GHCB_GPA_ERR 0xfffffffffffff
/* Field definitions for MSR_GHCB_PA_SNP_PSC_REQ */
#define MSR_GHCB_PA_SNP_PSC_OP_PRIVATE (1ULL << 52)
#define MSR_GHCB_PA_SNP_PSC_OP_SHARED (2ULL << 52)
/* These definitions are used in assembly code to set edx for a wrmsr. */
#define MSR_GHCB_PA_SNP_PSC_OP_PRIVATE_HI32 (1ULL << 20)
#define MSR_GHCB_PA_SNP_PSC_OP_SHARED_HI32 (2ULL << 20)
/* Field definitions for MSR_GHCB_PA_SNP_PSC_RESP */
#define MSR_GHCB_PA_SNP_PSC_ERRCODE_SHIFT 32
/* Field definitions for MSR_GHCB_PA_FEATURES_REQ */
#define MSR_GHCB_PA_FEATURES_SHIFT 12
@ -733,6 +795,16 @@ typedef struct MSRQuery {
#define SEV_TERM_FROBOS_REG_FAILED 1 /* GHCB PA registration failed. */
#define SEV_TERM_FROBOS_PVALIDATE_FAILED 2 /* PVALIDATE failed unexpectedly. */
#define SEV_TERM_FROBOS_MISSING_CC_BLOB 3 /* Required CC blob is missing. */
#define SEV_TERM_FROBOS_BAD_CC_BLOB 4 /* Contents of CC blob are bad. */
#define SEV_TERM_FROBOS_INVALID_MEM 5 /* Unexpected PVALIDATE #VC. */
#define SEV_TERM_FROBOS_INVALID_FIELD 6 /* Required GHCB field not valid. */
#define SEV_TERM_FROBOS_BAD_NAE_STATE 7 /* #VC called with invalid state. */
#define SEV_TERM_FROBOS_UNHANDLED_NAE 8 /* NAE not handled by #VC. */
#define SEV_TERM_FROBOS_BAD_VMGEXIT_RESP 9 /* Unrecognized VMGEXIT response. */
#define SEV_TERM_FROBOS_DECODE_ERROR 10 /* Instruction decode error. */
#define SEV_TERM_FROBOS_PSC_FAILED 11 /* Page state change req failed. */
#define SEV_TERM_FROBOS_NESTED_VC_EXC 12 /* A nested #VC occurred. */
/* SEV feature-enabled bits in MSR_SEV_STATUS. */
#define MSR_SEV_STATUS_SEV_EN_BIT 0
@ -945,6 +1017,9 @@ typedef unsigned char MTRRType;
#define MTRR_TYPE_WT 4
#define MTRR_TYPE_WP 5
#define MTRR_TYPE_WB 6
/* AMD-only extended type bits: to be OR'ed with the above standard types */
#define MTRR_TYPE_EXT_WRMEM (1 << 3)
#define MTRR_TYPE_EXT_RDMEM (1 << 4)
/* PAT Memory Type Only */
/* UC- is equivalent to UC, except that the MTRR values take precedence */
#define MTRR_TYPE_UCM 7
@ -954,7 +1029,7 @@ typedef unsigned char MTRRType;
* specify that type is unknown as it is very unlikely that Intel will
* use this value. Note that Linux takes the same liberty.
*/
#define MTRR_TYPE_UNKNOW 0xff
#define MTRR_TYPE_UNKNOWN 0xff
/*
* PERF_STATUS bits
@ -995,6 +1070,12 @@ typedef unsigned char MTRRType;
#define MSR_INTEL_PQE_CLOS_L2_MASK_BASE 0xd10
#define MSR_INTEL_PQE_CLOS_L2_MASK_MAX 0xd4f
/* PASID MSR */
#define MSR_PASID 0xd93
#define MSR_PASID_RSVD_MASK 0xffffffff7ff00000ULL
#define MSR_PASID_VALID_BIT (1ULL << 31)
#define MSR_PASID_PASID_MASK 0xfffff
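/*
 * Editor's note: the three masks tile the register exactly:
 * MSR_PASID_PASID_MASK (bits 0-19) | MSR_PASID_VALID_BIT (bit 31) |
 * MSR_PASID_RSVD_MASK (all remaining bits) == ~0ULL.
 */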
static INLINE uint32
X86MSR_SysCallEIP(uint64 star)
{

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2014,2016,2018-2020,2022 VMware, Inc. All rights reserved.
* Copyright (c) 1998-2014,2016,2018-2020,2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -122,6 +122,61 @@ LMPTEIsSafe(VM_PAE_PTE pte, PT_Level level, uint64 physMask)
}
#endif /* VMX86_DEBUG */
/*
*----------------------------------------------------------------------
*
* NPTValidLargePage --
*
* Returns TRUE iff the provided large page NPT entry is valid
* (i.e. no reserved bits set).
*
*----------------------------------------------------------------------
*/
static INLINE Bool
NPTValidLargePage(VM_PAE_PTE npte, PT_Level level, unsigned depth)
{
const PPN lpRsvd = MASK((level - 1) * PT_LEVEL_SHIFT) &
~(PTE_LARGE_PAT >> PAGE_SHIFT);
return (level == PT_LEVEL_2 ||
(level == PT_LEVEL_3 && depth == PT_LEVEL_4)) &&
(LM_PTE_2_PFN(npte) & lpRsvd) == 0;
}
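/*
 * Worked example (editor's note, assuming PT_LEVEL_SHIFT == 9 and
 * PTE_LARGE_PAT at bit 12): for a 2 MB entry (level == PT_LEVEL_2),
 * lpRsvd == MASK(9) & ~1 == 0x1fe, so PFN bits 1..8 must be clear while
 * PFN bit 0 is ignored because it doubles as the large-page PAT bit.
 */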
/*
*----------------------------------------------------------------------
*
* NPTEIsValid --
*
* Check an NPT entry for validity at the indicated page table level
* and depth. Use the provided physMask as the mask of reserved PA bits.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
NPTEIsValid(VM_PAE_PTE npte, PT_Level level, Bool nxOn, unsigned depth,
uint64 physMask)
{
VM_PAE_PTE rsvd;
if (depth == PT_LEVEL_3) {
rsvd = physMask & ~PTE_NX;
if (level == PT_LEVEL_3) {
rsvd |= PTE_NX | PDPTR_MBZ_MASK;
}
} else {
rsvd = physMask & MASK64(52);
if (level == PT_LEVEL_4) {
rsvd |= PTE_PS | PTE_G;
}
}
if (UNLIKELY(!nxOn)) {
/* When NX is disabled, PTE_NX is treated as reserved. */
rsvd |= PTE_NX;
}
return !PTE_PRESENT(npte) ||
((npte & rsvd) == 0 &&
(level == PT_LEVEL_1 ||
(!PTE_LARGEPAGE(npte) || NPTValidLargePage(npte, level, depth))));
}
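/*
 * Example (editor's note): with a 4-level NPT (depth == PT_LEVEL_4), a
 * present level-4 entry with PTE_PS or PTE_G set is rejected because both
 * bits are folded into rsvd at that level; a present 2 MB leaf must
 * additionally pass NPTValidLargePage() above.
 */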
/*
* x86-64 architecture requires implementations supporting less than

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2020 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -62,11 +62,11 @@
#define PTE_LARGE_PAT_2_PS(_pte) (((_pte) & PTE_LARGE_PAT) >> 5)
#define PTE_PROT_FLAGS (PTE_P|PTE_RW|PTE_US)
#define PTE_FLAGS (PTE_PROT_FLAGS|PTE_G)
#define PTE_PROT_FLAGS (PTE_P|PTE_RW|PTE_US)
#define PTE_FLAGS (PTE_PROT_FLAGS|PTE_G)
#define PTE_KERNEL (PTE_P|PTE_RW)
#define PTE_KERNEL_NX (PTE_P|PTE_RW|PTE_NX)
#define PTE_KERNEL (PTE_P|PTE_RW)
#define PTE_KERNEL_NX (PTE_P|PTE_RW|PTE_NX)
#define PTE_PRESENT(_pte) (((_pte) & PTE_P) != 0)
#define PTE_WRITEABLE(_pte) (((_pte) & PTE_RW) != 0)
@ -79,6 +79,8 @@
#define PTE_NOEXECUTE(_pte) (((_pte) & PTE_NX) != 0)
#define PTE_PK(_pte) (((_pte) & PTE_PK_MASK) >> PTE_PK_SHIFT)
#define PDPTR_MBZ_MASK CONST64(0x1e6)
#define PK_AD 1 /* Access disable bit */
#define PK_WD 2 /* Write disable bit */
#define PKR_WIDTH 2
@ -86,15 +88,15 @@
#define PK_RIGHTS(_pkru, _key) (((_pkru) >> ((_key) * PKR_WIDTH)) & PKR_MASK);
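#if 0   /* Editor's sketch (pkru is illustrative): rights for key 3. */
uint32 rights = PK_RIGHTS(pkru, 3);   /* the rights pair in PKRU bits 6..7 */
Bool writeDenied = (rights & PK_WD) != 0;
Bool accessDenied = (rights & PK_AD) != 0;
#endif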
/* Error code flags */
#define PF_P (1 << 0)
#define PF_RW (1 << 1)
#define PF_US (1 << 2)
#define PF_RSVD (1 << 3)
#define PF_ID (1 << 4)
#define PF_PK (1 << 5)
#define PF_SS (1 << 6)
#define PF_SGX (1 << 15)
#define PF_RMP (1 << 31)
#define PF_P (1u << 0)
#define PF_RW (1u << 1)
#define PF_US (1u << 2)
#define PF_RSVD (1u << 3)
#define PF_ID (1u << 4)
#define PF_PK (1u << 5)
#define PF_SS (1u << 6)
#define PF_SGX (1u << 15)
#define PF_RMP (1u << 31)
/*
* Operand definitions for the INVPCID instruction. See SDM Vol. 2A.

View File

@ -139,10 +139,12 @@
#define SVM_VMCB_APIC_VTPR_MASK 0x00000000000000ffULL
#define SVM_VMCB_APIC_VTPR_SHIFT 0
#define SVM_VMCB_APIC_VIRQ 0x0000000000000100ULL
#define SVM_VMCB_APIC_VGIF 0x0000000000000200ULL
#define SVM_VMCB_APIC_VINTR_PRIO_MASK 0x00000000000f0000ULL
#define SVM_VMCB_APIC_VINTR_PRIO_SHIFT 16
#define SVM_VMCB_APIC_VIGN_TPR 0x0000000000100000ULL
#define SVM_VMCB_APIC_VINTR_MASKING 0x0000000001000000ULL
#define SVM_VMCB_APIC_VGIF_ENABLE 0x0000000002000000ULL
#define SVM_VMCB_APIC_AVIC_ENABLE 0x0000000080000000ULL
#define SVM_VMCB_APIC_VINTR_VECTOR_MASK 0x000000ff00000000ULL
#define SVM_VMCB_APIC_VINTR_VECTOR_SHIFT 32
@ -301,6 +303,7 @@
#define SVM_EXITCODE_AP_JUMP_TABLE 0x80000005 // SW only
#define SVM_EXITCODE_SNP_PSC_REQ 0x80000010 // SW only
#define SVM_EXITCODE_SNP_GUEST_REQ 0x80000011 // SW only
#define SVM_EXITCODE_SNP_AP_CREATION 0x80000013 // SW only
#define SVM_EXITCODE_HV_FEATURES 0x8000FFFD // SW only
#define SVM_EXITCODE_UNSUPPORTED 0x8000FFFF // SW only
#define SVM_EXITCODE_INVALID (-1ULL)
@ -375,6 +378,11 @@
#define SVM_APEXIT_SET 0x0
#define SVM_APEXIT_GET 0x1
/* ExitInfo1 for SNP AP creation exits */
#define SVM_SNPAPCREATE_WAIT_INIT 0x0
#define SVM_SNPAPCREATE_VMRUN 0x1
#define SVM_SNPAPCREATE_DESTROY 0x2
/* Event Injection */
#define SVM_INTINFO_VECTOR_MASK 0x000000ff
#define SVM_INTINFO_TYPE_SHIFT 8
@ -388,6 +396,27 @@
#define SVM_INTINFO_RSVD 0x7ffff000
#define SVM_INTINFO_VALID 0x80000000
/* AVIC related definitions. */
#define SVM_AVIC_PHYS_TBL_MAX_VCPUS 512
#define SVM_AVIC_PHYS_ID_TBL_VALID (1ULL << 63)
#define SVM_AVIC_TRAP_BITMASK \
((1ULL << APICR_ID) | \
(1ULL << APICR_EOI) | \
(1ULL << APICR_RMTREAD) | \
(1ULL << APICR_LDR) | \
(1ULL << APICR_DFR) | \
(1ULL << APICR_SVR) | \
(1ULL << APICR_ESR) | \
(1ULL << APICR_TIMERLVT) | \
(1ULL << APICR_THERMLVT) | \
(1ULL << APICR_PCLVT) | \
(1ULL << APICR_LVT0) | \
(1ULL << APICR_LVT1) | \
(1ULL << APICR_ERRLVT) | \
(1ULL << APICR_INITCNT) | \
(1ULL << APICR_DIVIDER))
#define SVM_EXEC_CTL_BIT(exitCode) (1ULL << (exitCode - SVM_EXITCODE_INTR))

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2016-2018,2021 VMware, Inc. All rights reserved.
* Copyright (C) 2016-2018,2021-2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -25,7 +25,16 @@
#include "community_source.h"
#define VT_EXITREASON_VMENTRYFAIL (1U << 31)
/*
* Definitions for fields in the exit reason. Bits 28 and 29 are only applicable
* for exits that end in SMM. Bit 16 is MBZ while other bits are reserved for
* future use.
*/
#define VT_EXITREASON_BASIC_REASON_MASK 0xFFFF
#define VT_EXITREASON_INSIDE_ENCLAVE (1U << 27)
#define VT_EXITREASON_PENDING_MTF (1U << 28)
#define VT_EXITREASON_EXIT_ROOT_OPERATION (1U << 29)
#define VT_EXITREASON_VMENTRYFAIL (1U << 31)
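/*
 * Example (editor's note): a handler typically splits the raw value as
 * basic = exitReason & VT_EXITREASON_BASIC_REASON_MASK, and tests
 * (exitReason & VT_EXITREASON_VMENTRYFAIL) != 0 for failed VM-entries.
 */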
VT_EXIT(EXC_OR_NMI, 0)
VT_EXIT(EXTINT, 1)
@ -93,11 +102,17 @@ VT_EXIT(PML_LOGFULL, 62)
VT_EXIT(XSAVES, 63)
VT_EXIT(XRSTORS, 64)
VT_EXIT(VMEXIT65, 65)
VT_EXIT(VMEXIT66, 66)
VT_EXIT(SPP_EVENT, 66)
VT_EXIT(UMWAIT, 67)
VT_EXIT(TPAUSE, 68)
VT_EXIT(VMEXIT69, 69)
VT_EXIT(LOADIWKEY, 69)
VT_EXIT(ENCLV, 70)
VT_EXIT(SGX_CONFLICT, 71)
VT_EXIT(ENQCMD_PASID_FAIL, 72)
VT_EXIT(ENQCMDS_PASID_FAIL, 73)
VT_EXIT(BUS_LOCK, 74)
VT_EXIT(NOTIFY_WINDOW, 75)
VT_EXIT(VMEXIT76, 76)
VT_EXIT(TDCALL, 77)
/* Bump this up if you add an exit reason. */
#define VT_NUM_EXIT_REASONS 72
#define VT_NUM_EXIT_REASONS 78

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2015-2021 VMware, Inc. All rights reserved.
* Copyright (C) 2015-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -284,10 +284,10 @@ VMCS_FIELD(ENCLS_EXITING_BITMAP, 0x202E, _S64, _TC, 23, _NC, _V, _NA, _S)
VMCS_UNUSED( 0x2030, _S64, _TC, 24)
VMCS_FIELD(TSC_MULTIPLIER, 0x2032, _S64, _TC, 25, _NC, _NV, _NA, _S)
VMCS_UNUSED( 0x2034, _S64, _TC, 26)
VMCS_FIELD(ENCLV_EXITING_BITMAP, 0x2036, _S64, _TC, 27, _NC, _V, _NA, _S)
VMCS_UNUSED( 0x2038, _S64, _TC, 28)
VMCS_UNUSED( 0x203A, _S64, _TC, 29)
VMCS_UNUSED( 0x203C, _S64, _TC, 30)
VMCS_FIELD(ENCLV_EXITING_BITMAP, 0x2036, _S64, _TC, 27, _NC, _NV, _NA, _S)
VMCS_FIELD(LOW_PASID_DIR, 0x2038, _S64, _TC, 28, _NC, _NV, _NA, _S)
VMCS_FIELD(HIGH_PASID_DIR, 0x203A, _S64, _TC, 29, _NC, _NV, _NA, _S)
VMCS_FIELD(SHARED_EPTP, 0x203C, _S64, _TC, 30, _NC, _NV, _NA, _NS)
VMCS_UNUSED( 0x203E, _S64, _TC, 31)
VMCS_GROUP_END(64, CTL)

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2004-2022 VMware, Inc. All rights reserved.
* Copyright (C) 2004-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -170,10 +170,10 @@
VT_ENCODING_NUM_TYPES * \
VT_ENCODING_NUM_SIZES)
/*
* The highest index of any currently defined field is 27, for
* ENCLV_EXITING_BITMAP.
* The highest index of any currently defined field is 30, for
* SHARED_EPTP.
*/
#define VT_ENCODING_MAX_INDEX 27
#define VT_ENCODING_MAX_INDEX 30
/* VMCS ID's for various CPU models. */
#define VT_VMCS_ID_VMWARE 1
@ -322,7 +322,7 @@ enum {
VMX_CPU2(EPT_VIOL_VE, 18) \
VMX_CPU2(PT_SUPPRESS_NR_BIT, 19) \
VMX_CPU2(XSAVES, 20) \
VMX_CPU2(PASID, 21) \
VMX_CPU2(PASID_TRANS, 21) \
VMX_CPU2(EPT_MBX, 22) \
VMX_CPU2(EPT_SUB_PAGE, 23) \
VMX_CPU2(PT_GUEST_PA, 24) \
@ -348,6 +348,7 @@ enum {
VMX_CPU3(PAGING_WRITE, 2) \
VMX_CPU3(GUEST_PAGING_VERIF, 3) \
VMX_CPU3(IPI_VIRTUALIZATION, 4) \
VMX_CPU3(VIRT_SPEC_CTRL, 7) \
#define VMX_PROCBASED_CTLS3_CAP \
VMX_PROCBASED_CTLS3_CAP_NDA \
@ -587,18 +588,16 @@ enum {
* exit reasons, because we shouldn't encounter any new exit reasons
* unless we opt in to the features that produce them.
*/
#define VT_EXITREASON_SYNTH_BASE 77
#define VT_EXITREASON_SYNTH_IRET 77
#define VT_EXITREASON_SYNTH_NMI 78
#define VT_EXITREASON_SYNTH_ICEBP 79
#define VT_EXITREASON_SYNTH_EXC_BASE 80
#define VT_EXITREASON_SYNTH_MAX 111
#define VT_EXITREASON_SYNTH_BASE 78
#define VT_EXITREASON_SYNTH_IRET 78
#define VT_EXITREASON_SYNTH_NMI 79
#define VT_EXITREASON_SYNTH_ICEBP 80
#define VT_EXITREASON_SYNTH_EXC_BASE 81
#define VT_EXITREASON_SYNTH_MAX 112
#define VT_EXITREASON_SYNTH_EXC(gatenum) \
(VT_EXITREASON_SYNTH_EXC_BASE + gatenum) /* 0-31 */
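/*
 * Worked example (editor's note): with the rebased values above,
 * VT_EXITREASON_SYNTH_EXC(14) == 81 + 14 == 95 for a synthetic #PF, and
 * VT_EXITREASON_SYNTH_EXC(31) == VT_EXITREASON_SYNTH_MAX == 112.
 */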
#define VT_EXITREASON_INSIDE_ENCLAVE (1U << 27)
/* Instruction error codes. */
#define VT_ERROR_VMCALL_VMX_ROOT 1
#define VT_ERROR_VMCLEAR_INVALID_PA 2
@ -703,12 +702,6 @@ enum {
#define VT_GUESTFAIL_QUAL_NMI 3
#define VT_GUESTFAIL_QUAL_LINK 4
/* SGX conflict VM-exit Qualification Codes */
#define VT_SGX_TRACKING_RESOURCE_CONFLICT 0
#define VT_SGX_TRACKING_REFERENCE_CONFLICT 1
#define VT_SGX_EPC_PAGE_CONFLICT_EXCEPTION 2
#define VT_SGX_EPC_PAGE_CONFLICT_ERROR 3
/* VMX abort indicators. */
#define VT_VMX_ABORT_GUEST_MSRS 1
@ -1022,14 +1015,17 @@ VTComputeMandatoryBits(uint64 msrVal, uint32 bits)
* VT_EnabledFromFeatures --
*
* Returns TRUE if VT is enabled in the given feature control bits.
* If SMX is enabled, then only SMXE must be set, otherwise, only VMXE
* must be set. The LOCK bit must always be set.
*
*----------------------------------------------------------------------
*/
static INLINE Bool
VT_EnabledFromFeatures(uint64 featCtl)
VT_EnabledFromFeatures(uint64 featCtl, Bool smxEnabled)
{
return ((featCtl & (MSR_FEATCTL_VMXE | MSR_FEATCTL_LOCK)) ==
(MSR_FEATCTL_VMXE | MSR_FEATCTL_LOCK));
uint64 req = MSR_FEATCTL_LOCK |
(smxEnabled ? MSR_FEATCTL_SMXE : MSR_FEATCTL_VMXE);
return (featCtl & req) == req;
}
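#if 0   /* Editor's sketch of the two acceptance cases. */
/* SMX off: LOCK and VMXE must both be set. */
ASSERT(VT_EnabledFromFeatures(MSR_FEATCTL_LOCK | MSR_FEATCTL_VMXE, FALSE));
/* SMX on: the same bits are not enough; SMXE is required instead. */
ASSERT(!VT_EnabledFromFeatures(MSR_FEATCTL_LOCK | MSR_FEATCTL_VMXE, TRUE));
#endif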
/*
@ -1150,6 +1146,25 @@ VT_ConvEPTViolSupportedFromFeatures(uint64 secondary)
return (HIDWORD(secondary) & VT_VMCS_2ND_VMEXEC_CTL_EPT_VIOL_VE) != 0;
}
/*
*----------------------------------------------------------------------
*
* VT_PasidTransSupportedFromFeatures --
*
* Returns TRUE if the given VMX features provide support for
* PASID translation
*
* Assumes that VT is supported.
*
*----------------------------------------------------------------------
*/
static inline Bool
VT_PasidTransSupportedFromFeatures(uint64 secondary)
{
return (HIDWORD(secondary) & VT_VMCS_2ND_VMEXEC_CTL_PASID_TRANS) != 0;
}
#if !defined(VM_ARM_64) /* PR 2822467 */ && \
(defined(DECODER) || defined(FROBOS) || defined(ULM) || \
defined(VMKBOOT) || defined(VMKERNEL) || defined(VMM) || \
@ -1165,9 +1180,9 @@ VT_ConvEPTViolSupportedFromFeatures(uint64 secondary)
*----------------------------------------------------------------------
*/
static INLINE Bool
VT_EnabledCPU(void)
VT_EnabledCPU(Bool smxEnabled)
{
return VT_EnabledFromFeatures(X86MSR_GetMSR(MSR_FEATCTL));
return VT_EnabledFromFeatures(X86MSR_GetMSR(MSR_FEATCTL), smxEnabled);
}

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2004-2015 VMware, Inc. All rights reserved.
* Copyright (C) 2004-2015, 2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -229,6 +229,7 @@ VMWRITE(size_t encoding, size_t val)
VMXStatus status;
status = VMWRITE_2_STATUS(encoding, val);
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMWRITE_UNCHECKED(encoding, val);
}
@ -262,6 +263,7 @@ VMLAUNCH(void)
VMXStatus status;
status = VMLAUNCH_2_STATUS();
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMLAUNCH_UNCHECKED();
}
@ -295,6 +297,7 @@ VMRESUME(void)
VMXStatus status;
status = VMRESUME_2_STATUS();
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMRESUME_UNCHECKED();
}
@ -328,6 +331,7 @@ VMCALL(void)
VMXStatus status;
status = VMCALL_2_STATUS();
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMCALL_UNCHECKED();
}
@ -374,6 +378,7 @@ INVVPID(InvvpidArg *v, size_t extent)
VMXStatus status;
status = INVVPID_2_STATUS(v, extent);
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
INVVPID_UNCHECKED(v, extent);
}
@ -446,6 +451,7 @@ INVEPT(InveptArg *e, size_t extent)
VMXStatus status;
status = INVEPT_2_STATUS(e, extent);
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
INVEPT_UNCHECKED(e, extent);
}
@ -541,14 +547,14 @@ VMCLEAR_2_STATUS(MA *vmcs)
ASSERT(mscStatus < ARRAYSIZE(MscToStatus));
return MscToStatus[mscStatus];
}
static INLINE VMXStatus
VMREAD_2_STATUS(size_t encoding, size_t *retval)
{
unsigned char mscStatus;
static const VMXStatus MscToStatus[] =
{VMX_Success, VMX_FailValid, VMX_FailInvalid};
mscStatus = __vmx_vmread(encoding, retval);
ASSERT(mscStatus < ARRAYSIZE(MscToStatus));
return MscToStatus[mscStatus];
@ -597,8 +603,9 @@ VMXON(MA *vmxonRegion)
VMXStatus status;
status = VMXON_2_STATUS(vmxonRegion);
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMXON_UNCHECKED(vmxonRegion);
VMXON_UNCHECKED(vmxonRegion);
}
}
@ -609,6 +616,7 @@ VMXOFF(void)
VMXStatus status;
status = VMXOFF_2_STATUS();
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMXOFF_UNCHECKED();
}
@ -621,6 +629,7 @@ VMPTRLD(MA *vmcs)
VMXStatus status;
status = VMPTRLD_2_STATUS(vmcs);
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMPTRLD_UNCHECKED(vmcs);
}
@ -633,6 +642,7 @@ VMPTRST(MA *vmcs)
VMXStatus status;
status = VMPTRST_2_STATUS(vmcs);
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMPTRST_UNCHECKED(vmcs);
}
@ -646,6 +656,7 @@ VMCLEAR(MA* vmcs)
VMXStatus status;
status = VMCLEAR_2_STATUS(vmcs);
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
VMCLEAR_UNCHECKED(vmcs);
}
@ -659,6 +670,7 @@ VMREAD(size_t encoding)
VMXStatus status;
status = VMREAD_2_STATUS(encoding, &retval);
ASSERT(status == VMX_Success);
UNUSED_VARIABLE(status);
} else {
retval = VMREAD_UNCHECKED(encoding);
}

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -2332,7 +2332,9 @@ isVAReadable(VA r) // IN:
int ret;
r = APICR_TO_ADDR(r, APICR_VERSION);
#if defined(HAVE_GET_KERNEL_NOFAULT) || LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)
#if defined(HAVE_GET_KERNEL_NOFAULT) || \
defined(RHEL92_BACKPORTS) || \
LINUX_VERSION_CODE >= KERNEL_VERSION(5, 17, 0)
/*
* Exists from 5.10, first indicated by HAVE_GET_KERNEL_NOFAULT,
* and from 5.17 onward it exists everywhere.

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2002-2020 VMware, Inc. All rights reserved.
* Copyright (c) 2002-2020 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2013, 2017, 2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2013, 2017, 2022-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -26,6 +26,9 @@
#include <linux/slab.h>
#include <linux/poll.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(6, 4, 10)
#include <net/gso.h>
#endif
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/mm.h>
@ -684,7 +687,7 @@ VNetBridgeReceiveFromVNet(VNetJack *this, // IN: jack
}
spin_unlock_irqrestore(&bridge->historyLock, flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0) && !defined(RHEL91_BACKPORTS)
netif_rx_ni(clone);
#else
netif_rx(clone);

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -135,6 +135,12 @@
# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(8, 5)
# define RHEL85_BACKPORTS 1
# endif
# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9, 1)
# define RHEL91_BACKPORTS 1
# endif
# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(9, 2)
# define RHEL92_BACKPORTS 1
# endif
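/*
 * Example (editor's note): a RHEL 9.2 kernel satisfies both version
 * checks, so RHEL91_BACKPORTS and RHEL92_BACKPORTS are defined together.
 */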
#endif
#endif /* __COMPAT_VERSION_H__ */

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2014,2016,2019,2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2014,2016,2019,2022-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -345,11 +345,11 @@ VNetNetIfReceive(VNetJack *this, // IN: jack
netIf->dev->flags)) {
goto drop_packet;
}
/* send to the host interface */
skb->dev = netIf->dev;
skb->protocol = eth_type_trans(skb, netIf->dev);
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 18, 0) && !defined(RHEL91_BACKPORTS)
netif_rx_ni(skb);
#else
netif_rx(skb);
@ -357,7 +357,7 @@ VNetNetIfReceive(VNetJack *this, // IN: jack
netIf->stats.rx_packets++;
return;
drop_packet:
dev_kfree_skb(skb);
}

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2002-2018,2020,2022 VMware, Inc. All rights reserved.
* Copyright (C) 2002-2018,2020,2022,2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -4868,8 +4868,8 @@ SetPacketByte(SMACPacket *packet, // IN: packet
ASSERT(packet);
#ifdef _WIN32
if (packet == NULL || packet->buf1 == NULL || packet->buf2 == NULL) {
return FALSE;
if (packet == NULL) {
return FALSE;
}
/* check length, be sure to handle case where offset = -1, length > 0 */
@ -4880,8 +4880,14 @@ SetPacketByte(SMACPacket *packet, // IN: packet
/* if offset starts in the first buffer, then copy from first buffer */
if (offset < packet->buf1Len) {
if (packet->buf1 == NULL) {
return FALSE;
}
((uint8*)packet->buf1)[offset] = data;
} else {
if (packet->buf2 == NULL) {
return FALSE;
}
offset -= packet->buf1Len;
((uint8*)packet->buf2)[offset] = data;
}

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2003-2022 VMware, Inc. All rights reserved.
* Copyright (c) 2003-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -61,6 +61,12 @@
#include "mul64.h"
#endif
#if defined _M_ARM64EC
#include "vm_assert.h"
#define MUL64_NO_ASM 1
#include "mul64.h"
#endif
#if defined __cplusplus
extern "C" {
#endif
@ -95,7 +101,7 @@ extern "C" {
* mssb64 MSB set (uint64) 1..64 0
*/
#ifdef _MSC_VER
#if defined(_MSC_VER) && !defined(__clang__) // Clang defines _MSC_VER on Windows
static INLINE int
lssb32_0(const uint32 value)
{
@ -773,7 +779,7 @@ RDTSC(void)
* bora/lib/vprobe/arm64/vp_emit_tc.c::VpEmit_BuiltinRDTSCWork()
* bora/modules/vmkernel/tests/core/xmapTest/xmapTest_arm64.c::XMapTest_SetupLoopCode()
*/
#if (defined(VMKERNEL) || defined(VMM)) && !defined(VMK_ARM_EL1_OR_VHE)
#if defined(VMKERNEL) && !defined(VMK_ARM_EL1_OR_VHE)
return MRS(CNTPCT_EL0);
#else
return MRS(CNTVCT_EL0);
@ -1117,7 +1123,7 @@ RoundUpPow2Asm32(uint32 value)
// if out == 2^32 then out = 1 as it is right rotate
: [in]"+r"(value),[out]"+r"(out));
return out;
#elif defined(VM_ARM_64)
#elif defined(VM_ARM_64) || defined(__wasm__)
return RoundUpPow2C32(value);
#else
uint32 out = 2;

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2021 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -57,7 +57,7 @@ extern "C" {
* constraints.
*
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
#if (defined(VMM) || defined(VMKERNEL) || defined(FROBOS) || defined(ULM))
static INLINE Bool
xtest(void)
{
@ -66,14 +66,18 @@ xtest(void)
__asm__ __volatile__("xtest\n"
"setnz %%al"
: "=a" (result) : : "cc");
#else
#elif defined (__GNUC__)
__asm__ __volatile__("xtest"
: "=@ccnz" (result) : : "cc");
#elif defined (_WIN32)
result = _xtest();
#else
#error No xtest implementation for this compiler.
#endif
return result;
}
#endif /* __GNUC__ */
#endif /* VMM || VMKERNEL || FROBOS || ULM */
/*

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2021 VMware, Inc. All rights reserved.
* Copyright (C) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -331,7 +331,7 @@ XRSTORS(const void *load, uint64 mask)
* constraints.
*
*/
#if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS))
#if (defined(VMM) || defined(VMKERNEL) || defined(FROBOS) || defined(ULM))
static INLINE Bool
xtest(void)
{
@ -340,14 +340,18 @@ xtest(void)
__asm__ __volatile__("xtest\n"
"setnz %%al"
: "=a" (result) : : "cc");
#else
#elif defined(__GNUC__)
__asm__ __volatile__("xtest"
: "=@ccnz" (result) : : "cc");
#elif defined (_WIN64)
result = _xtest();
#else
#error No xtest implementation for this compiler.
#endif
return result;
}
#endif /* __GNUC__ */
#endif /* VMM || VMKERNEL || FROBOS || ULM */
/*
*-----------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2003-2022 VMware, Inc. All rights reserved.
* Copyright (C) 2003-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -19,7 +19,7 @@
/*
* vm_basic_defs.h --
*
* Standard macros for VMware source code.
* Standard macros for VMware source code.
*/
#ifndef _VM_BASIC_DEFS_H_
@ -36,14 +36,6 @@
#include "includeCheck.h"
#include "vm_basic_types.h" // For INLINE.
/* Checks for FreeBSD, filtering out VMKERNEL. */
#if !defined(VMKERNEL) && defined(__FreeBSD__)
#define __IS_FREEBSD__ 1
#else
#define __IS_FREEBSD__ 0
#endif
#define __IS_FREEBSD_VER__(ver) (__IS_FREEBSD__ && __FreeBSD_version >= (ver))
/*
* <stddef.h> provides definitions for:
* NULL, offsetof
@ -128,11 +120,11 @@ Max(int a, int b)
#define VMW_CLAMP(x, min, max) \
((x) < (min) ? (min) : ((x) > (max) ? (max) : (x)))
#define ROUNDUP(x,y) (((x) + (y) - 1) / (y) * (y))
#define ROUNDDOWN(x,y) ((x) / (y) * (y))
#define ROUNDUPBITS(x, bits) (((uintptr_t) (x) + MASK(bits)) & ~MASK(bits))
#define ROUNDDOWNBITS(x, bits) ((uintptr_t) (x) & ~MASK(bits))
#define CEILING(x, y) (((x) + (y) - 1) / (y))
#define ROUNDUP(x,y) (((x) + (y) - 1) / (y) * (y))
#define ROUNDDOWN(x,y) ((x) / (y) * (y))
#define ROUNDUPBITS(x, bits) (((uintptr_t)(x) + MASK(bits)) & ~MASK(bits))
#define ROUNDDOWNBITS(x, bits) ((uintptr_t)(x) & ~MASK(bits))
#define CEILING(x, y) (((x) + (y) - 1) / (y))
#if defined VMKERNEL || defined VMKBOOT
# define CEIL(_a, _b) CEILING(_a, _b)
@ -157,8 +149,9 @@ Max(int a, int b)
* argument. The range 0..31 is safe.
*/
#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
#define MASK64(n) ((CONST64U(1) << (n)) - 1) /* make an n-bit mask */
#define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */
#define MASK64(n) ((CONST64U(1) << (n)) - 1) /* make an n-bit mask */
#define MASK128(n) (((uint128)1 << (n)) - 1) /* make an n-bit mask */
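/*
 * Examples (editor's note): MASK(12) == 0xfff and MASK64(40) ==
 * CONST64U(0xffffffffff); widths above 31 need MASK64/MASK128, since
 * MASK() shifts a plain int.
 */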
/*
* MASKRANGE64 makes a bit vector starting at bit lo and ending at bit hi. No
* checking for lo < hi is done.
@ -187,7 +180,9 @@ Max(int a, int b)
#define XCONC(x, y) CONC(x, y)
#define XXCONC(x, y) XCONC(x, y)
#define MAKESTR(x) #x
#ifndef XSTR
#define XSTR(x) MAKESTR(x)
#endif
/*
@ -227,6 +222,8 @@ Max(int a, int b)
#define PAGE_SHIFT PAGE_SHIFT_4KB
#elif defined __arm__
#define PAGE_SHIFT PAGE_SHIFT_4KB
#elif defined __wasm__
#define PAGE_SHIFT PAGE_SHIFT_4KB
#else
#error
#endif
@ -260,15 +257,6 @@ Max(int a, int b)
#define PAGE_NUMBER(_addr) ((uintptr_t)(_addr) / PAGE_SIZE)
#endif
#ifndef VM_PAGE_BASE
#define VM_PAGE_BASE(_addr) ((_addr) & ~(PAGE_SIZE - 1))
#endif
#ifndef VM_PAGES_SPANNED
#define VM_PAGES_SPANNED(_addr, _size) \
((((_addr) & (PAGE_SIZE - 1)) + (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT)
#endif
#ifndef BYTES_2_PAGES
#define BYTES_2_PAGES(_nbytes) ((_nbytes) >> PAGE_SHIFT)
#endif
@ -281,6 +269,16 @@ Max(int a, int b)
#define PAGES_2_BYTES(_npages) (((uint64)(_npages)) << PAGE_SHIFT)
#endif
#ifndef VM_PAGE_BASE
#define VM_PAGE_BASE(_addr) ((_addr) & ~(PAGE_SIZE - 1))
#endif
#ifndef VM_PAGES_SPANNED
#define VM_PAGES_SPANNED(_addr, _size) \
(BYTES_2_PAGES(PAGE_OFFSET(_addr) + PAGE_OFFSET(_size) + (PAGE_SIZE - 1)) + \
BYTES_2_PAGES(_size))
#endif
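/*
 * Worked example (editor's note, assuming 4 KB pages and PAGE_OFFSET(v)
 * == ((v) & (PAGE_SIZE - 1))): for _addr == 0x1fff and _size == 2 the
 * macro yields BYTES_2_PAGES(0xfff + 2 + 0xfff) + BYTES_2_PAGES(2)
 * == 2 + 0 == 2, the two straddled pages.
 */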
#ifndef KBYTES_SHIFT
#define KBYTES_SHIFT 10
#endif
@ -428,9 +426,9 @@ Max(int a, int b)
*/
#define DEPOSIT_BITS(_src,_pos,_len,_target) { \
unsigned mask = ((1 << _len) - 1); \
unsigned shiftedmask = ((1 << _len) - 1) << _pos; \
_target = (_target & ~shiftedmask) | ((_src & mask) << _pos); \
unsigned mask = ((1 << _len) - 1); \
unsigned shiftedmask = ((1 << _len) - 1) << _pos; \
_target = (_target & ~shiftedmask) | ((_src & mask) << _pos); \
}
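/*
 * Worked example (editor's note): DEPOSIT_BITS(0x5, 4, 3, t) with
 * t == 0xff computes shiftedmask == 0x70, clears bits 4..6, and ORs in
 * (0x5 & 0x7) << 4, leaving t == 0xdf.
 */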
@ -647,6 +645,12 @@ typedef int pid_t;
#define VMKERNEL_ONLY(x)
#endif
#ifdef COMP_TEST
#define vmx86_test 1
#else
#define vmx86_test 0
#endif
/*
* In MSVC, _WIN32 is defined as 1 when the compilation target is
* 32-bit ARM, 64-bit ARM, x86, or x64 (which implies _WIN64). This
@ -700,6 +704,18 @@ typedef int pid_t;
#define VMM_ONLY(x)
#endif
#ifdef VMX86_VMX
#define vmx86_vmx 1
#else
#define vmx86_vmx 0
#endif
#ifdef VMM_BOOTSTRAP
#define vmm_bootstrap 1
#else
#define vmm_bootstrap 0
#endif
#ifdef ULM
#define vmx86_ulm 1
#define ULM_ONLY(x) x
@ -776,6 +792,7 @@ typedef int pid_t;
lfMessageFont)
/* This is not intended to be thread-safe. */
#ifndef KBUILD_MODNAME
#define DO_ONCE(code) \
do { \
static MONITOR_ONLY(PERVCPU) Bool _doOnceDone = FALSE; \
@ -784,6 +801,7 @@ typedef int pid_t;
code; \
} \
} while (0)
#endif
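/*
 * Usage sketch (editor's note; Warning() is illustrative):
 * DO_ONCE(Warning("first pass\n")); executes the statement at most once
 * per static instance, subject to the thread-safety caveat above.
 */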
/*
* Bug 827422 and 838523.

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998-2022 VMware, Inc. All rights reserved.
* Copyright (c) 1998-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -39,11 +39,11 @@
/*
* Standardize MSVC arch macros to GCC arch macros.
*/
#if defined _MSC_VER && defined _M_X64
#if defined _MSC_VER && defined _M_X64 && !defined _M_ARM64EC
# define __x86_64__ 1
#elif defined _MSC_VER && defined _M_IX86
# define __i386__ 1
#elif defined _MSC_VER && defined _M_ARM64
#elif defined _MSC_VER && (defined _M_ARM64 || defined _M_ARM64EC)
# define __aarch64__ 1
#elif defined _MSC_VER && defined _M_ARM
# define __arm__ 1
@ -163,7 +163,7 @@
* - Linux userlevel uses 'long' uint64_t
* - Windows uses 'long long' uint64_t
*/
#if !defined(VMKERNEL) && !defined(DECODERLIB) && \
#if !defined(VMKERNEL) && \
defined(__linux__) && defined(__KERNEL__)
# include <linux/types.h>
# include <linux/version.h>
@ -205,7 +205,7 @@
* - VMM does not have POSIX headers
* - Windows <sys/types.h> does not define ssize_t
*/
#if defined(VMKERNEL) || defined(VMM) || defined(DECODERLIB)
#if defined(VMKERNEL) || defined(VMM)
/* Guard against FreeBSD <sys/types.h> collision. */
# if !defined(_SIZE_T_DEFINED) && !defined(_SIZE_T)
# define _SIZE_T_DEFINED
@ -290,11 +290,11 @@ typedef char Bool;
#if !defined(USING_AUTOCONF)
# if defined(__FreeBSD__) || defined(sun)
# ifndef KLD_MODULE
# if __FreeBSD_version >= 500043
# if defined(__FreeBSD__)
# if !defined(VMKERNEL)
# include <inttypes.h>
# endif
# else
# else /* sun */
# include <sys/inttypes.h>
# endif
# endif
@ -353,7 +353,7 @@ typedef int64 VmTimeVirtualClock; /* Virtual Clock kept in CPU cycles */
#define FMTPD "I"
#define FMTH "I"
#endif
#elif defined __APPLE__ || (!defined VMKERNEL && !defined DECODERLIB && \
#elif defined __APPLE__ || (!defined VMKERNEL && \
defined __linux__ && defined __KERNEL__)
/* semi-LLP64 targets; 'long' is 64-bit, but uint64_t is 'long long' */
#define FMT64 "ll"
@ -508,7 +508,7 @@ typedef uint16 UReg16;
typedef uint32 UReg32;
typedef uint64 UReg64;
#if defined(__GNUC__) && defined(__SIZEOF_INT128__)
#ifdef VM_HAS_INT128
typedef int128 Reg128;
typedef uint128 UReg128;
#endif

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 1998,2005-2012,2014-2022 VMware, Inc. All rights reserved.
* Copyright (C) 1998,2005-2012,2014-2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -56,6 +56,7 @@
*/
#define PCI_VENDOR_ID_VMWARE 0x15AD
#define PCI_DEVICE_ID_VMWARE_SBX 0x0420
#define PCI_DEVICE_ID_VMWARE_SVGA4 0x0408
#define PCI_DEVICE_ID_VMWARE_SVGA_EFI 0x0407
#define PCI_DEVICE_ID_VMWARE_SVGA3 0x0406
#define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405
@ -71,6 +72,7 @@
#define PCI_DEVICE_ID_VMWARE_UHCI 0x0774
#define PCI_DEVICE_ID_VMWARE_XHCI_0096 0x0778
#define PCI_DEVICE_ID_VMWARE_XHCI_0100 0x0779
#define PCI_DEVICE_ID_VMWARE_XHCI_0120 0x077A
#define PCI_DEVICE_ID_VMWARE_1394 0x0780
#define PCI_DEVICE_ID_VMWARE_BRIDGE 0x0790
#define PCI_DEVICE_ID_VMWARE_ROOTPORT 0x07A0
@ -259,15 +261,15 @@
#define NVME_MAX_NAMESPACES 64 /* We support 64 namespaces, the same
* as the PVSCSI controller.
*/
#define NVME_HW19_MAX_NAMESPACES 15 // HWv19 and before supports 15 namespaces
#define NVME_HW20_MAX_NAMESPACES 15 // HWv20 and before supports 15 namespaces
#define NVME_FUTURE_MAX_NAMESPACES 256 /* To support NVME to the possible 256
* disks per controller in future.
*/
/************* SCSI implementation limits ********************************/
#define SCSI_MAX_CONTROLLERS 4 // Need more than 1 for MSCS clustering
#define SCSI_MAX_DEVICES 16 // BT-958 emulates only 16
#define PVSCSI_HWV14_MAX_DEVICES 65 /* HWv14 And Later Supports 64
* + controller at ID 7
#define PVSCSI_HWV14_MAX_DEVICES 65 /* HWv14 And Later Supports 64
* + controller at ID 7
*/
#define PVSCSI_MAX_DEVICES 255 // 255 (including the controller)
#define PVSCSI_MAX_NUM_DISKS (PVSCSI_HWV14_MAX_DEVICES - 1)
@ -358,6 +360,22 @@
#define NUM_SERIAL_PORTS 32
#define NUM_PARALLEL_PORTS 3
/************* USB host controller limits ********************/
#define USB_EHCI_MAX_CONTROLLERS 1
#define USB_XHCI_MAX_CONTROLLERS 1
/*
* Per the USB specification, 127 devices can be connected. Alongside the
* user's USB devices, other device types such as the root hub, hubs, the
* keyboard and the mouse are also present; they are not exposed directly
* to users but still occupy USB ports.
*
* Although we have a 20-device limit for virtual USB mass storage on each
* controller, we can't simply put 20 here, as we must also account for the
* other devices that are necessary for functionality.
* TODO: enforce the 20-device limit from hostd
*/
#define USB_MAX_DEVICES_PER_HOST_CTRL 127
/************* Strings for Host USB Driver *******************************/
#ifdef _WIN32
@ -366,13 +384,13 @@
* Globally unique ID for the VMware device interface. Define INITGUID before including
* this header file to instantiate the variable.
*/
DEFINE_GUID(GUID_DEVICE_INTERFACE_VMWARE_USB_DEVICES,
DEFINE_GUID(GUID_DEVICE_INTERFACE_VMWARE_USB_DEVICES,
0x2da1fe75, 0xaab3, 0x4d2c, 0xac, 0xdf, 0x39, 0x8, 0x8c, 0xad, 0xa6, 0x65);
/*
* Globally unique ID for the VMware device setup class.
*/
DEFINE_GUID(GUID_CLASS_VMWARE_USB_DEVICES,
DEFINE_GUID(GUID_CLASS_VMWARE_USB_DEVICES,
0x3b3e62a5, 0x3556, 0x4d7e, 0xad, 0xad, 0xf5, 0xfa, 0x3a, 0x71, 0x2b, 0x56);
/*

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2007, 2023 VMware, Inc. All rights reserved.
* Copyright (C) 2007, 2022 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the

View File

@ -1,5 +1,5 @@
/*********************************************************
* Copyright (C) 2008 VMware, Inc. All rights reserved.
* Copyright (C) 2008, 2023 VMware, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@ -42,10 +42,12 @@ typedef struct VNetUserListener_EventNode VNetUserListener_EventNode;
struct VNetUserListener_EventNode {
VNetUserListener_EventNode *nextEvent;
VNet_EventHeader event;
union {
VNet_EventHeader header;
VNet_LinkStateEvent lse;
} event;
};
#define EVENT_NODE_HEADER_SIZE offsetof(struct VNetUserListener_EventNode, event)
typedef struct VNetUserListener {
VNetPort port; /* base port/jack */
@ -220,7 +222,7 @@ VNetUserListenerEventHandler(void *context, // IN: the user listener
VNetUserListener_EventNode *t;
/* allocate and initialize event node */
t = kmalloc(EVENT_NODE_HEADER_SIZE + e->size, GFP_ATOMIC);
t = kmalloc(sizeof *t, GFP_ATOMIC);
if (t == NULL) {
LOG(0, (KERN_DEBUG "VNetUserListenerEventHandler, out of memory\n"));
return;
@ -299,7 +301,7 @@ VNetUserListenerRead(VNetPort *port, // IN: the user listener
spin_unlock(&userListener->lock);
/* return data and free event */
n = t->event.size;
n = t->event.header.size;
if (count < n) {
n = count;
}