12861:20241101:185430.946 log level has been increased to 5 (trace)
12861:20241101:185430.946 In vmware_job_get() queue:3
12861:20241101:185430.946 End of vmware_job_get() queue:3 type:none
12855:20241101:185430.946 log level has been increased to 5 (trace)
12855:20241101:185430.946 In vmware_job_get() queue:3
12855:20241101:185430.946 End of vmware_job_get() queue:3 type:none
12857:20241101:185430.946 log level has been increased to 5 (trace)
12857:20241101:185430.946 In vmware_job_get() queue:3
12857:20241101:185430.946 End of vmware_job_get() queue:3 type:none
12859:20241101:185430.946 log level has been increased to 5 (trace)
12859:20241101:185430.946 In vmware_job_get() queue:3
12859:20241101:185430.946 End of vmware_job_get() queue:3 type:none
12861:20241101:185431.946 In vmware_job_get() queue:3
12861:20241101:185431.946 End of vmware_job_get() queue:3 type:none
12855:20241101:185431.946 In vmware_job_get() queue:3
12855:20241101:185431.946 End of vmware_job_get() queue:3 type:none
12857:20241101:185431.946 In vmware_job_get() queue:3
12857:20241101:185431.946 End of vmware_job_get() queue:3 type:none
12859:20241101:185431.946 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 6.000000 sec during 5.377107 sec]'
12859:20241101:185431.946 In vmware_job_get() queue:3
12859:20241101:185431.946 End of vmware_job_get() queue:3 type:none
12861:20241101:185432.946 In vmware_job_get() queue:3
12861:20241101:185432.947 End of vmware_job_get() queue:3 type:none
12855:20241101:185432.947 In vmware_job_get() queue:3
12855:20241101:185432.947 End of vmware_job_get() queue:3 type:none
12857:20241101:185432.947 In vmware_job_get() queue:3
12857:20241101:185432.947 End of vmware_job_get() queue:3 type:none
12859:20241101:185432.947 In vmware_job_get() queue:3
12859:20241101:185432.947 End of vmware_job_get() queue:3 type:none
12861:20241101:185433.947 In vmware_job_get() queue:3
12861:20241101:185433.947 End of vmware_job_get() queue:3 type:none
12855:20241101:185433.947 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 6.000000 sec during 5.060374 sec]'
12855:20241101:185433.947 In vmware_job_get() queue:3
12857:20241101:185433.947 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 6.000000 sec during 5.071500 sec]'
12857:20241101:185433.947 In vmware_job_get() queue:3
12857:20241101:185433.947 End of vmware_job_get() queue:3 type:none
12855:20241101:185433.947 End of vmware_job_get() queue:3 type:none
12859:20241101:185433.947 In vmware_job_get() queue:3
12859:20241101:185433.947 End of vmware_job_get() queue:3 type:none
12861:20241101:185434.947 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 6.000000 sec during 5.376940 sec]'
12861:20241101:185434.947 In vmware_job_get() queue:3
12861:20241101:185434.947 End of vmware_job_get() queue:3 type:none
12857:20241101:185434.947 In vmware_job_get() queue:3
12857:20241101:185434.947 End of vmware_job_get() queue:3 type:none
12855:20241101:185434.947 In vmware_job_get() queue:3
12855:20241101:185434.947 End of vmware_job_get() queue:3 type:none
12859:20241101:185434.947 In vmware_job_get() queue:3
12859:20241101:185434.947 End of vmware_job_get() queue:3 type:none
12861:20241101:185435.947 In vmware_job_get() queue:3
12861:20241101:185435.947 End of vmware_job_get() queue:3 type:none
12857:20241101:185435.947 In vmware_job_get() queue:3
12857:20241101:185435.947 End of vmware_job_get() queue:3 type:none
12859:20241101:185435.948 In vmware_job_get() queue:3
12859:20241101:185435.948 End of vmware_job_get() queue:3 type:none
12855:20241101:185435.948 In vmware_job_get() queue:3
12855:20241101:185435.948 End of vmware_job_get() queue:3 type:none
12861:20241101:185436.947 In vmware_job_get() queue:3
12861:20241101:185436.947 End of vmware_job_get() queue:3 type:none
12857:20241101:185436.948 In vmware_job_get() queue:3
12857:20241101:185436.948 End of vmware_job_get() queue:3 type:none
12859:20241101:185436.948 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001173 sec]'
12859:20241101:185436.948 In vmware_job_get() queue:3
12859:20241101:185436.948 End of vmware_job_get() queue:3 type:none
12855:20241101:185436.948 In vmware_job_get() queue:3
12855:20241101:185436.948 End of vmware_job_get() queue:3 type:none
12857:20241101:185437.948 In vmware_job_get() queue:3
12857:20241101:185437.948 End of vmware_job_get() queue:3 type:none
12861:20241101:185437.948 In vmware_job_get() queue:3
12861:20241101:185437.948 End of vmware_job_get() queue:3 type:none
12855:20241101:185437.948 In vmware_job_get() queue:3
12855:20241101:185437.948 End of vmware_job_get() queue:3 type:none
12859:20241101:185437.949 In vmware_job_get() queue:3
12859:20241101:185437.949 End of vmware_job_get() queue:3 type:none
12837:20241101:185438.211 received configuration data from server at "10.50.242.78", datalen 437
12857:20241101:185438.948 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001160 sec]'
12857:20241101:185438.948 In vmware_job_get() queue:3
12857:20241101:185438.948 End of vmware_job_get() queue:3 type:none
12861:20241101:185438.948 In vmware_job_get() queue:3
12861:20241101:185438.948 End of vmware_job_get() queue:3 type:none
12855:20241101:185438.949 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001613 sec]'
12855:20241101:185438.949 In vmware_job_get() queue:3
12855:20241101:185438.949 End of vmware_job_get() queue:3 type:none
12859:20241101:185438.949 In vmware_job_get() queue:3
12859:20241101:185438.949 End of vmware_job_get() queue:3 type:none
12857:20241101:185439.948 In vmware_job_get() queue:3
12857:20241101:185439.948 End of vmware_job_get() queue:3 type:none
12861:20241101:185439.949 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001681 sec]'
12861:20241101:185439.949 In vmware_job_get() queue:3
12861:20241101:185439.949 End of vmware_job_get() queue:3 type:none
12855:20241101:185439.949 In vmware_job_get() queue:3
12855:20241101:185439.949 End of vmware_job_get() queue:3 type:none
12859:20241101:185439.949 In vmware_job_get() queue:3
12859:20241101:185439.949 End of vmware_job_get() queue:3 type:none
12857:20241101:185440.949 In vmware_job_get() queue:3
12857:20241101:185440.949 End of vmware_job_get() queue:3 type:none
12861:20241101:185440.949 In vmware_job_get() queue:3
12861:20241101:185440.949 End of vmware_job_get() queue:3 type:none
12855:20241101:185440.949 In vmware_job_get() queue:3
12855:20241101:185440.949 End of vmware_job_get() queue:3 type:none
12859:20241101:185440.949 In vmware_job_get() queue:3
12859:20241101:185440.949 End of vmware_job_get() queue:3 type:none
12857:20241101:185441.949 In vmware_job_get() queue:3
12857:20241101:185441.950 End of vmware_job_get() queue:3 type:none
12861:20241101:185441.950 In vmware_job_get() queue:3
12861:20241101:185441.950 End of vmware_job_get() queue:3 type:none
12855:20241101:185441.950 In vmware_job_get() queue:3
12855:20241101:185441.950 End of vmware_job_get() queue:3 type:none
12859:20241101:185441.950 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001612 sec]'
12859:20241101:185441.950 In vmware_job_get() queue:3
12859:20241101:185441.950 End of vmware_job_get() queue:3 type:none
12857:20241101:185442.950 In vmware_job_get() queue:3
12857:20241101:185442.950 End of vmware_job_get() queue:3 type:none
12861:20241101:185442.950 In vmware_job_get() queue:3
12861:20241101:185442.950 End of vmware_job_get() queue:3 type:none
12855:20241101:185442.950 In vmware_job_get() queue:3
12855:20241101:185442.950 End of vmware_job_get() queue:3 type:none
12859:20241101:185442.950 In vmware_job_get() queue:3
12859:20241101:185442.950 End of vmware_job_get() queue:3 type:none
12857:20241101:185443.950 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001972 sec]'
12857:20241101:185443.950 In vmware_job_get() queue:3
12857:20241101:185443.950 End of vmware_job_get() queue:2 type:update_tags
12857:20241101:185443.950 In vmware_job_exec() type:update_tags
12857:20241101:185443.950 End of vmware_job_exec() type:update_tags ret:FAIL
12857:20241101:185443.950 In vmware_job_schedule() queue:2 type:update_tags
12857:20241101:185443.950 End of vmware_job_schedule() type:update_tags nextcheck:18:55:43
12857:20241101:185443.950 In vmware_job_get() queue:3
12857:20241101:185443.950 End of vmware_job_get() queue:2 type:update_conf
12857:20241101:185443.950 In vmware_job_exec() type:update_conf
12857:20241101:185443.950 In zbx_vmware_service_update() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12857:20241101:185443.950 In vmware_service_cust_query_prep() cust_queries:0
12857:20241101:185443.950 End of vmware_service_cust_query_prep() cq_values:0
12857:20241101:185443.950 In vmware_service_cust_query_prep() cust_queries:0
12857:20241101:185443.950 End of vmware_service_cust_query_prep() cq_values:0
12857:20241101:185443.950 In vmware_service_authenticate() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12861:20241101:185443.951 In vmware_job_get() queue:2
12861:20241101:185443.951 End of vmware_job_get() queue:1 type:update_perf_counters
12861:20241101:185443.951 In vmware_job_exec() type:update_perf_counters
12861:20241101:185443.951 End of vmware_job_exec() type:update_perf_counters ret:FAIL
12861:20241101:185443.951 In vmware_job_schedule() queue:1 type:update_perf_counters
12861:20241101:185443.951 End of vmware_job_schedule() type:update_perf_counters nextcheck:18:55:43
12861:20241101:185443.951 In vmware_job_get() queue:2
12861:20241101:185443.951 End of vmware_job_get() queue:2 type:none
12859:20241101:185443.951 In vmware_job_get() queue:2
12859:20241101:185443.951 End of vmware_job_get() queue:2 type:none
12855:20241101:185443.951 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002052 sec]'
12855:20241101:185443.951 In vmware_job_get() queue:2
12855:20241101:185443.951 End of vmware_job_get() queue:2 type:none
12857:20241101:185443.995 vmware_service_authenticate() SOAP response:
[SOAP LoginResponse body; XML markup was stripped in the paste. Recoverable values: session key 527e4cbc-f61f-6d4d-bc41-7019fdb3f447, user VSPHERE.LOCAL\zabbix, login/last-active time 2024-11-01T18:54:44.00167Z; the remaining locale/session/address fields run together as "enenfalse10.50.242.760"]
12857:20241101:185443.995 End of vmware_service_authenticate():SUCCEED
12857:20241101:185443.998 vmware_service_get_contents() SOAP response:
[SOAP RetrieveServiceContent body; XML markup was stripped in the paste. Recoverable values: rootFolder group-d1, "VMware vCenter Server", full name "VMware vCenter Server 8.0.3 build-24322831", VMware, Inc., version 8.0.3, build 24322831, linux-x64, apiType VirtualCenter, apiVersion 8.0.3.0, instanceUuid 9a31b4b0-64a6-48e1-919a-e9f7ca1668b6, licenseProductName "VMware VirtualCenter Server" 8.0, followed by the service manager references (propertyCollector, ViewManager, SessionManager, AuthorizationManager, PerfMgr, EventManager, TaskManager, ExtensionManager, LicenseManager, and others)]
12857:20241101:185443.998 In vmware_service_get_perf_counters()
12857:20241101:185444.022 vmware_service_get_perf_counters() SOAP response:
[SOAP PerfMgr perfCounter body; XML markup was stripped in the paste, leaving the counter definitions run together. It enumerates the vCenter performance counter metadata (counter key, description, name, group, unit, rollup type, stats levels) for the cpu, mem, disk, net, sys, power, storageAdapter, virtualDisk, datastore, storagePath, clusterServices, vmop, gpu, rescpu, vcDebugInfo and vcResources groups; the excerpt breaks off mid-entry around counter key 399]
virtual processors provisioned to the entitycorecount.provisionedCPUcpuNumbernumberlatestabsolute44400The amount of L3 cache the VM usescache.l3.occupancyCPUcpuKilobytekiloBytesaverageabsolute44401The number of virtual processors running on the hostcorecount.usageCPUcpuNumbernumberlatestabsolute44402CPU load average over the past 1 minute, sampled on every 6 secondsload.avg1minCPUcpuPercentagepercentlatestabsolute44403CPU load average over the past 5 minutes, sampled on every 6 secondsload.avg5minCPUcpuPercentagepercentlatestabsolute44404CPU load average over the past 15 minutes, sampled on every 6 secondsload.avg15minCPUcpuPercentagepercentlatestabsolute44405Total amount of memory available to the hostcapacity.provisionedMemorymemMegabytemegaByteslatestabsolute44406Percent of memory that has been reserved either through VMkernel use, by userworlds or due to VM memory reservationsreservedCapacityPctMemorymemPercentagepercentlatestabsolute44407Ratio of total requested memory and the managed memory minus 1 over the past 1 minuteovercommit.avg1minMemorymemNumbernumberlatestabsolute44408Ratio of total requested memory and the managed memory minus 1 over the past 5 minutesovercommit.avg5minMemorymemNumbernumberlatestabsolute44409Ratio of total requested memory and the managed memory minus 1 over the past 15 minutesovercommit.avg15minMemorymemNumbernumberlatestabsolute44410Total amount of machine memory on the ESXi hostphysical.totalMemorymemMegabytemegaByteslatestabsolute44411Amount of machine memory being used by everything other than VMkernelphysical.userMemorymemMegabytemegaByteslatestabsolute44412Amount of machine memory that is free on the ESXi hostphysical.freeMemorymemMegabytemegaByteslatestabsolute44413Total amount of machine memory managed by VMkernelkernel.managedMemorymemMegabytemegaByteslatestabsolute44414Mininum amount of machine memory that VMkernel likes to keep freekernel.minfreeMemorymemMegabytemegaByteslatestabsolute44415Amount of machine memory that is currently unreservedkernel.unreservedMemorymemMegabytemegaByteslatestabsolute44416Amount of physical memory that is being sharedpshare.sharedMemorymemMegabytemegaByteslatestabsolute44417Amount of machine memory that is common across World(s)pshare.commonMemorymemMegabytemegaByteslatestabsolute44418Amount of machine memory saved due to page-sharingpshare.sharedSaveMemorymemMegabytemegaByteslatestabsolute44419Current swap usageswap.currentMemorymemMegabytemegaByteslatestabsolute44420Where ESXi expects the reclaimed memory using swapping and compression to beswap.targetMemorymemMegabytemegaByteslatestabsolute44421Rate at which memory is swapped in by ESXi from diskswap.readrateMemorymemMegabytes per secondmegaBytesPerSecondaveragerate44422Rate at which memory is swapped to disk by the ESXiswap.writerateMemorymemMegabytes per secondmegaBytesPerSecondaveragerate44423Total compressed physical memoryzip.zippedMemorymemMegabytemegaByteslatestabsolute44424Saved memory by compressionzip.savedMemorymemMegabytemegaByteslatestabsolute44425Total amount of physical memory reclaimed using the vmmemctl modulesmemctl.currentMemorymemMegabytemegaByteslatestabsolute44426Total amount of physical memory ESXi would like to reclaim using the vmmemctl modulesmemctl.targetMemorymemMegabytemegaByteslatestabsolute44427Maximum amount of physical memory ESXi can reclaim using the vmmemctl modulesmemctl.maxMemorymemMegabytemegaByteslatestabsolute44428Memory reservation health state, 2->Red, 
1->Greenhealth.reservationStateMemorymemNumbernumberlatestabsolute44429Amount of Overhead memory actively usedcapacity.overheadMemorymemMegabytemegaBytesaverageabsolute44430Amount of OverheadResv memorycapacity.overheadResvMemorymemMegabytemegaBytesaverageabsolute44431Per tier consumed memory. This value is expressed in megabytescapacity.consumedMemorymemMegabytemegaByteslatestabsolute44432Per tier active memory. This value is expressed in megabytescapacity.activeMemorymemMegabytemegaByteslatestabsolute44433Current CPU power usagecapacity.usageCpuPowerpowerWattwattaverageabsolute44434Current memory power usagecapacity.usageMemPowerpowerWattwattaverageabsolute44435Current other power usagecapacity.usageOtherPowerpowerWattwattaverageabsolute44436vmkernel.downtimeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44437downtimeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44438precopy.timeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44439rttMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44440dst.migration.timeMigration of powered on VMvmotionSecondsecondlatestabsolute44441mem.sizembMigration of powered on VMvmotionMegabytemegaByteslatestabsolute44442Current number of replicated virtual machinesvmsvSphere ReplicationhbrNumbernumberlatestabsolute44443Average amount of data received per secondthroughput.hbr.inboundNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44444Average amount of data transmitted per secondthroughput.hbr.outboundNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44445Average disk read latency seen by vSphere Replicationhbr.readLatencyMSVirtual diskvirtualDiskMillisecondmillisecondlatestabsolute44446Average guest I/O stall introduced by vSphere Replicationhbr.stallLatencyMSVirtual diskvirtualDiskMillisecondmillisecondlatestabsolute44447Average latency seen by vSphere Replicationlatency.hbr.outboundNetworknetMillisecondmillisecondlatestabsolute44448Number of Lightweight Delta (LWD) snapshots takennumSnapshotsvSphere Data Protection (LWD)lwdNumbernumberlatestabsolute44449APD state of the nfs volumeapdStateNFSnfsNumbernumberlatestabsolute44450Cumulative read issue time on NFS volumereadIssueTimeNFSnfsMicrosecondmicrosecondlatestabsolute44451Cumulative write issue time on NFS volumewriteIssueTimeNFSnfsMicrosecondmicrosecondlatestabsolute44452Total reads on NFS volumetotalReadsNFSnfsNumbernumberlatestabsolute44453Total reads failed on NFS volumereadsFailedNFSnfsNumbernumberlatestabsolute44454Total writes on NFS volumetotalWritesNFSnfsNumbernumberlatestabsolute44455Total writes failed on NFS volumewritesFailedNFSnfsNumbernumberlatestabsolute44456Cumulative readTime on NFS volumereadTimeNFSnfsMicrosecondmicrosecondlatestabsolute44457Cumulative writeTime on NFS volumewriteTimeNFSnfsMicrosecondmicrosecondlatestabsolute44458Total IO requests queued in NFS volumeioRequestsQueuedNFSnfsNumbernumberlatestabsolute44459Total create calls on NFS volumetotalCreateNFSnfsNumbernumberlatestabsolute44460Total create calls failed on NFS volumecreateFailedNFSnfsNumbernumberlatestabsolute44461Number of times we hit into socket buffer out of space condition for NFS volumesocketBufferFullNFSnfsNumbernumberlatestabsolute44462Total journal transactions on VMFS volumevmfs.totalTxnDatastoredatastoreNumbernumberlatestabsolute44463Total cancelled journal transactions on VMFS volumevmfs.cancelledTxnDatastoredatastoreNumbernumberlatestabsolute44464Current APD state of the VMFS 
volumevmfs.apdStateDatastoredatastoreNumbernumberlatestabsolute44465Total apd timeout events received on the VMFS volumevmfs.apdCountDatastoredatastoreNumbernumberlatestabsolute44466vVol PE is accessiblepe.isaccessiblevVol object related statsvvolNumbernumberlatestabsolute44467Total no. of read cmds done on vVol PEpe.reads.donevVol object related statsvvolNumbernumberlatestabsolute44468Total no. of write cmds done on vVol PEpe.writes.donevVol object related statsvvolNumbernumberlatestabsolute44469Total no. of cmds done on vVol PEpe.total.donevVol object related statsvvolNumbernumberlatestabsolute44470Total no. of read cmds sent on vVol PEpe.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44471Total no. of write cmds sent on vVol PEpe.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44472Total no. of cmds sent on vVol PEpe.total.sentvVol object related statsvvolNumbernumberlatestabsolute44473No. of read cmds issued on vVol PE that failedpe.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44474No. of write cmds issued on vVol PE that failedpe.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44475Total no. of cmds issued on vVol PE that failedpe.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44476Total no. of read cmds failed on vVol PEpe.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44477Total no. of write cmds failed on vVol PEpe.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44478Total no. of cmds failed on vVol PEpe.total.failedvVol object related statsvvolNumbernumberlatestabsolute44479Cumulative latency of successful reads on vVol PEpe.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44480Cumulative latency of successful writes on vVol PEpe.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44481Cumulative latency of cmds that failed before issue on vVol PEpe.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44482Cumulative latency of all issued cmds on vVol PEpe.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44483Total no. of cancel cmds sent on vVol PEpe.cancel.sentvVol object related statsvvolNumbernumberlatestabsolute44484Total no. of cancel cmds failed on vVol PEpe.cancel.failedvVol object related statsvvolNumbernumberlatestabsolute44485Total no. of device reset cmds sent on vVol PEpe.deviceresets.sentvVol object related statsvvolNumbernumberlatestabsolute44486Total no. of device reset cmds failed on vVol PEpe.deviceresets.failedvVol object related statsvvolNumbernumberlatestabsolute44487Total no. of reset cmds sent on vVol PEpe.resets.sentvVol object related statsvvolNumbernumberlatestabsolute44488Total no. of reset cmds failed on vVol PEpe.resets.failedvVol object related statsvvolNumbernumberlatestabsolute44489Total no. of unmap cmds sent on vVol PEpe.unmaps.sentvVol object related statsvvolNumbernumberlatestabsolute44490Total no. of unmap cmds failed on vVol PEpe.unmaps.failedvVol object related statsvvolNumbernumberlatestabsolute44491Total no. of read cmds done by vVol Containercontainer.reads.donevVol object related statsvvolNumbernumberlatestabsolute44492Total no. of write cmds done by vVol Containercontainer.writes.donevVol object related statsvvolNumbernumberlatestabsolute44493Total no. of cmds done by vVol Containercontainer.total.donevVol object related statsvvolNumbernumberlatestabsolute44494Total no. 
of read cmds sent by vVol Containercontainer.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44495Total no. of write cmds sent by vVol Containercontainer.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44496Total no. of cmds sent by vVol Containercontainer.total.sentvVol object related statsvvolNumbernumberlatestabsolute44497No. of read cmds issued by vVol Container that failedcontainer.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44498No. of write cmds issued by vVol Container that failedcontainer.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44499Total no. of cmds issued by vVol Container that failedcontainer.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44500Total no. of read cmds failed by vVol Containercontainer.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44501Container:Total no. of write cmds failed by vVol Containercontainer.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44502Total no. of cmds failed by vVol Containercontainer.total.failedvVol object related statsvvolNumbernumberlatestabsolute44503Cumulative latency of successful reads by vVol Containercontainer.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44504Cumulative latency of successful writes by vVol Containercontainer.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44505Cumulative latency of cmds that failed before issue by vVol Containercontainer.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44506Cumulative latency of all issued cmds by vVol Containercontainer.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44507Total no. of read cmds done by vVol Devicedevice.reads.donevVol object related statsvvolNumbernumberlatestabsolute44508Total no. of write cmds done by vVol Devicedevice.writes.donevVol object related statsvvolNumbernumberlatestabsolute44509Total no. of cmds done by vVol Devicedevice.total.donevVol object related statsvvolNumbernumberlatestabsolute44510Total no. of read cmds sent by vVol Devicedevice.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44511Total no. of write cmds sent by vVol Devicedevice.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44512Total no. of cmds sent by vVol Devicedevice.total.sentvVol object related statsvvolNumbernumberlatestabsolute44513No. of read cmds issued by vVol Device that faileddevice.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44514No. of write cmds issued by vVol Device that faileddevice.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44515Total no. of cmds issued by vVol Device that faileddevice.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44516Total no. of read cmds failed by vVol Devicedevice.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44517Total no. of write cmds failed by vVol Devicedevice.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44518Total no. 
of cmds failed by vVol Devicedevice.total.failedvVol object related statsvvolNumbernumberlatestabsolute44519Cumulative latency of successful reads by vVol Devicedevice.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44520Cumulative latency of successful writes by vVol Devicedevice.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44521Cumulative latency of cmds that failed before issue by vVol Devicedevice.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44522Cumulative latency of all issued cmds by vVol Devicedevice.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44523Total no. of cancel cmds sent by vVol Devicedevice.cancel.sentvVol object related statsvvolNumbernumberlatestabsolute44524Total no. of cancel cmds failed by vVol Devicedevice.cancel.failedvVol object related statsvvolNumbernumberlatestabsolute44525Total no. of device reset cmds sent by vVol Devicedevice.deviceresets.sentvVol object related statsvvolNumbernumberlatestabsolute44526Total no. of device reset cmds failed by vVol Devicedevice.deviceresets.failedvVol object related statsvvolNumbernumberlatestabsolute44527Total no. of reset cmds sent by vVol Devicedevice.resets.sentvVol object related statsvvolNumbernumberlatestabsolute44528Total no. of reset cmds failed by vVol Devicedevice.resets.failedvVol object related statsvvolNumbernumberlatestabsolute44529Total no. of unmap cmds sent by vVol Devicedevice.unmaps.sentvVol object related statsvvolNumbernumberlatestabsolute44530Total no. of unmap cmds failed by vVol Devicedevice.unmaps.failedvVol object related statsvvolNumbernumberlatestabsolute44531CPU time spent waiting for swap-inswapwaitCPUcpuMillisecondmillisecondsummationdelta33532CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentnonerate44533CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentmaximumrate44534CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentminimumrate44535CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentnonerate44536CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentaveragerate23537CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentmaximumrate44538CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentminimumrate44539Total CPU capacity reserved by and available for virtual machinestotalCapacityCPUcpuMegahertzmegaHertzaverageabsolute23540Percent of time the virtual machine is unable to run because it is contending for 
access to the physical CPU(s)latencyCPUcpuPercentagepercentaveragerate23541CPU resources devoted by the ESX schedulerentitlementCPUcpuMegahertzmegaHertzlatestabsolute23542The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limitdemandCPUcpuMegahertzmegaHertzaverageabsolute23543Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraintscostopCPUcpuMillisecondmillisecondsummationdelta23544Time the virtual machine is ready to run, but is not run due to maxing out its CPU limit settingmaxlimitedCPUcpuMillisecondmillisecondsummationdelta23545Time the virtual machine was interrupted to perform system services on behalf of itself or other virtual machinesoverlapCPUcpuMillisecondmillisecondsummationdelta33546Time the virtual machine is scheduled to runrunCPUcpuMillisecondmillisecondsummationdelta23547CPU resource entitlement to CPU demand ratio (in percents)demandEntitlementRatioCPUcpuPercentagepercentlatestabsolute44548Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPUreadinessCPUcpuPercentagepercentaveragerate44549Virtual CPU usage as a percentage during the intervalusage.vcpusCPUcpuPercentagepercentaveragerate44550Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesnoneabsolute44551Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesaverageabsolute23552Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesmaximumabsolute44553Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesminimumabsolute44554Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesnoneabsolute44555Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesaverageabsolute23556Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesmaximumabsolute44557Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesminimumabsolute44558Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesnoneabsolute44559Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesaverageabsolute23560Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesmaximumabsolute44561Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesminimumabsolute44562Amount of guest physical memory that is being actively written by guest. 
Activeness is estimated by ESXiactivewriteMemorymemKilobytekiloBytesaverageabsolute23563Host physical memory reserved by ESXi, for its data structures, for running the virtual machineoverheadMaxMemorymemKilobytekiloBytesaverageabsolute23564Total reservation, available and consumed, for powered-on virtual machinestotalCapacityMemorymemMegabytemegaBytesaverageabsolute23565Amount of guest physical memory pages compressed by ESXizippedMemorymemKilobytekiloByteslatestabsolute23566Host physical memory, reclaimed from a virtual machine, by memory compression. This value is less than the value of 'Compressed' memoryzipSavedMemorymemKilobytekiloByteslatestabsolute23567Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memorylatencyMemorymemPercentagepercentaverageabsolute23568Amount of host physical memory the virtual machine deserves, as determined by ESXientitlementMemorymemKilobytekiloBytesaverageabsolute23569Threshold of free host physical memory below which ESXi will begin actively reclaiming memory from virtual machines by swapping, compression and ballooninglowfreethresholdMemorymemKilobytekiloBytesaverageabsolute23570Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesnoneabsolute44571Rate at which guest physical memory is swapped in from the host swap cachellSwapInRateMemorymemKilobytes per secondkiloBytesPerSecondaveragerate23572Rate at which guest physical memory is swapped out to the host swap cachellSwapOutRateMemorymemKilobytes per secondkiloBytesPerSecondaveragerate23573Estimate of the host physical memory, from Overhead consumed, that is actively read or written to by ESXioverheadTouchedMemorymemKilobytekiloBytesaverageabsolute44574Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesaverageabsolute44575Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesmaximumabsolute44576Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesminimumabsolute44577Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesnoneabsolute44578Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesaverageabsolute44579Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesmaximumabsolute44580Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesminimumabsolute44581Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesnoneabsolute44582Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesaverageabsolute44583Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesmaximumabsolute44584Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesminimumabsolute44585Space used for holding VMFS Pointer Blocks in memoryvmfs.pbc.sizeMemorymemMegabytemegaByteslatestabsolute44586Maximum size the VMFS Pointer Block Cache can grow tovmfs.pbc.sizeMaxMemorymemMegabytemegaByteslatestabsolute44587Amount of file blocks whose addresses are cached in the VMFS PB Cachevmfs.pbc.workingSetMemorymemTerabyteteraByteslatestabsolute44588Maximum amount of file blocks whose 
addresses are cached in the VMFS PB Cachevmfs.pbc.workingSetMaxMemorymemTerabyteteraByteslatestabsolute44589Amount of VMFS heap used by the VMFS PB Cachevmfs.pbc.overheadMemorymemKilobytekiloByteslatestabsolute44590Trailing average of the ratio of capacity misses to compulsory misses for the VMFS PB Cachevmfs.pbc.capMissRatioMemorymemPercentagepercentlatestabsolute44591Number of Storage commands issued during the collection intervalcommandsDiskdiskNumbernumbersummationdelta23592Average amount of time, in milliseconds, to read from the physical devicedeviceReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23593Average amount of time, in milliseconds, spent by VMkernel to process each Storage read commandkernelReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23594Average amount of time taken during the collection interval to process a Storage read command issued from the guest OS to the virtual machinetotalReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23595Average amount of time spent in the VMkernel queue, per Storage read command, during the collection intervalqueueReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23596Average amount of time, in milliseconds, to write to the physical devicedeviceWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23597Average amount of time, in milliseconds, spent by VMkernel to process each Storage write commandkernelWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23598Average amount of time taken during the collection interval to process a Storage write command issued by the guest OS to the virtual machinetotalWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23599Average amount of time spent in the VMkernel queue, per Storage write command, during the collection intervalqueueWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23600Average amount of time, in milliseconds, to complete a Storage command from the physical devicedeviceLatencyDiskdiskMillisecondmillisecondaverageabsolute13601Average amount of time, in milliseconds, spent by VMkernel to process each Storage commandkernelLatencyDiskdiskMillisecondmillisecondaverageabsolute23602Average amount of time spent in the VMkernel queue, per Storage command, during the collection intervalqueueLatencyDiskdiskMillisecondmillisecondaverageabsolute23603Maximum queue depthmaxQueueDepthDiskdiskNumbernumberaverageabsolute13604Average number of Storage commands issued per second during the collection intervalcommandsAveragedDiskdiskNumbernumberaveragerate23605Number of receives droppeddroppedRxNetworknetNumbernumbersummationdelta23606Number of transmits droppeddroppedTxNetworknetNumbernumbersummationdelta23607Average amount of data received per secondbytesRxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate23608Average amount of data transmitted per secondbytesTxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate23609Number of broadcast packets received during the sampling intervalbroadcastRxNetworknetNumbernumbersummationdelta23610Number of broadcast packets transmitted during the sampling intervalbroadcastTxNetworknetNumbernumbersummationdelta23611Number of multicast packets received during the sampling intervalmulticastRxNetworknetNumbernumbersummationdelta23612Number of multicast packets transmitted during the sampling intervalmulticastTxNetworknetNumbernumbersummationdelta23613Number of packets with errors received during the sampling intervalerrorsRxNetworknetNumbernumbersummationdelta23614Number of packets with errors transmitted 
during the sampling intervalerrorsTxNetworknetNumbernumbersummationdelta23615Number of frames with unknown protocol received during the sampling intervalunknownProtosNetworknetNumbernumbersummationdelta23616Average amount of data received per second by a pNicpnicBytesRxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44617Average amount of data transmitted per second through a pNicpnicBytesTxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44618Number of heartbeats issued per virtual machine during the intervalheartbeatSystemsysNumbernumberlatestabsolute44619Amount of disk space usage for each mount pointdiskUsageSystemsysPercentagepercentlatestabsolute33620Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertznonerate44621Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzaveragerate33622Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzmaximumrate44623Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzminimumrate44624Memory touched by the system resource groupresourceMemTouchedSystemsysKilobytekiloByteslatestabsolute33625Memory mapped by the system resource groupresourceMemMappedSystemsysKilobytekiloByteslatestabsolute33626Memory saved due to sharing by the system resource groupresourceMemSharedSystemsysKilobytekiloByteslatestabsolute33627Memory swapped out by the system resource groupresourceMemSwappedSystemsysKilobytekiloByteslatestabsolute33628Overhead memory consumed by the system resource groupresourceMemOverheadSystemsysKilobytekiloByteslatestabsolute33629Memory shared by the system resource groupresourceMemCowSystemsysKilobytekiloByteslatestabsolute33630Zero filled memory used by the system resource groupresourceMemZeroSystemsysKilobytekiloByteslatestabsolute33631CPU running average over 1 minute of the system resource groupresourceCpuRun1SystemsysPercentagepercentlatestabsolute33632CPU active average over 1 minute of the system resource groupresourceCpuAct1SystemsysPercentagepercentlatestabsolute33633CPU maximum limited over 1 minute of the system resource groupresourceCpuMaxLimited1SystemsysPercentagepercentlatestabsolute33634CPU running average over 5 minutes of the system resource groupresourceCpuRun5SystemsysPercentagepercentlatestabsolute33635CPU active average over 5 minutes of the system resource groupresourceCpuAct5SystemsysPercentagepercentlatestabsolute33636CPU maximum limited over 5 minutes of the system resource groupresourceCpuMaxLimited5SystemsysPercentagepercentlatestabsolute33637CPU allocation reservation (in MHz) of the system resource groupresourceCpuAllocMinSystemsysMegahertzmegaHertzlatestabsolute33638CPU allocation limit (in MHz) of the system resource groupresourceCpuAllocMaxSystemsysMegahertzmegaHertzlatestabsolute33639CPU allocation shares of the system resource groupresourceCpuAllocSharesSystemsysNumbernumberlatestabsolute33640Memory allocation reservation (in KB) of the system resource groupresourceMemAllocMinSystemsysKilobytekiloByteslatestabsolute33641Memory allocation limit (in KB) of the system resource groupresourceMemAllocMaxSystemsysKilobytekiloByteslatestabsolute33642Memory allocation shares of the system resource groupresourceMemAllocSharesSystemsysNumbernumberlatestabsolute33643Total time elapsed, in seconds, since last 
operating system boot-uposUptimeSystemsysSecondsecondlatestabsolute44644Memory consumed by the system resource groupresourceMemConsumedSystemsysKilobytekiloByteslatestabsolute44645Number of file descriptors used by the system resource groupresourceFdUsageSystemsysNumbernumberlatestabsolute44646CPU active peak over 1 minuteactpk1Resource group CPUrescpuPercentagepercentlatestabsolute33647CPU running average over 1 minuterunav1Resource group CPUrescpuPercentagepercentlatestabsolute33648CPU active average over 5 minutesactav5Resource group CPUrescpuPercentagepercentlatestabsolute33649CPU active peak over 5 minutesactpk5Resource group CPUrescpuPercentagepercentlatestabsolute33650CPU running average over 5 minutesrunav5Resource group CPUrescpuPercentagepercentlatestabsolute33651CPU active average over 15 minutesactav15Resource group CPUrescpuPercentagepercentlatestabsolute33652CPU active peak over 15 minutesactpk15Resource group CPUrescpuPercentagepercentlatestabsolute33653CPU running average over 15 minutesrunav15Resource group CPUrescpuPercentagepercentlatestabsolute33654CPU running peak over 1 minuterunpk1Resource group CPUrescpuPercentagepercentlatestabsolute33655Amount of CPU resources over the limit that were refused, average over 1 minutemaxLimited1Resource group CPUrescpuPercentagepercentlatestabsolute33656CPU running peak over 5 minutesrunpk5Resource group CPUrescpuPercentagepercentlatestabsolute33657Amount of CPU resources over the limit that were refused, average over 5 minutesmaxLimited5Resource group CPUrescpuPercentagepercentlatestabsolute33658CPU running peak over 15 minutesrunpk15Resource group CPUrescpuPercentagepercentlatestabsolute33659Amount of CPU resources over the limit that were refused, average over 15 minutesmaxLimited15Resource group CPUrescpuPercentagepercentlatestabsolute33660Group CPU sample countsampleCountResource group CPUrescpuNumbernumberlatestabsolute33661Group CPU sample periodsamplePeriodResource group CPUrescpuMillisecondmillisecondlatestabsolute33662Amount of total configured memory that is available for usememUsedManagement agentmanagementAgentKilobytekiloBytesaverageabsolute33663Sum of the memory swapped by all powered-on virtual machines on the hostswapUsedManagement agentmanagementAgentKilobytekiloBytesaverageabsolute33664Amount of Service Console CPU usagecpuUsageManagement agentmanagementAgentMegahertzmegaHertzaveragerate33665Average number of commands issued per second on the storage path during the collection intervalcommandsAveragedStorage pathstoragePathNumbernumberaveragerate33666Average number of read commands issued per second on the storage path during the collection intervalnumberReadAveragedStorage pathstoragePathNumbernumberaveragerate33667Average number of write commands issued per second on the storage path during the collection intervalnumberWriteAveragedStorage pathstoragePathNumbernumberaveragerate33668Rate of reading data on the storage pathreadStorage pathstoragePathKilobytes per secondkiloBytesPerSecondaveragerate33669Rate of writing data on the storage pathwriteStorage pathstoragePathKilobytes per secondkiloBytesPerSecondaveragerate33670The average time a read issued on the storage path takestotalReadLatencyStorage pathstoragePathMillisecondmillisecondaverageabsolute33671The average time a write issued on the storage path takestotalWriteLatencyStorage pathstoragePathMillisecondmillisecondaverageabsolute33672Average read request size in bytesreadIOSizeVirtual diskvirtualDiskNumbernumberlatestabsolute44673Average write request size 
in byteswriteIOSizeVirtual diskvirtualDiskNumbernumberlatestabsolute44674Number of seeks during the interval that were less than 64 LBNs apartsmallSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44675Number of seeks during the interval that were between 64 and 8192 LBNs apartmediumSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44676Number of seeks during the interval that were greater than 8192 LBNs apartlargeSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44677Read latency in microsecondsreadLatencyUSVirtual diskvirtualDiskMicrosecondmicrosecondlatestabsolute44678Write latency in microsecondswriteLatencyUSVirtual diskvirtualDiskMicrosecondmicrosecondlatestabsolute44679Storage I/O Control datastore maximum queue depthdatastoreMaxQueueDepthDatastoredatastoreNumbernumberlatestabsolute13680Unmapped size in MBunmapSizeDatastoredatastoreMegabytemegaBytessummationdelta44681Number of unmap IOs issuedunmapIOsDatastoredatastoreNumbernumbersummationdelta44682Current number of replicated virtual machineshbrNumVmsvSphere ReplicationhbrNumbernumberaverageabsolute44683Average amount of data received per secondhbrNetRxvSphere ReplicationhbrKilobytes per secondkiloBytesPerSecondaveragerate44684Average amount of data transmitted per secondhbrNetTxvSphere ReplicationhbrKilobytes per secondkiloBytesPerSecondaveragerate44685Average network latency seen by vSphere ReplicationhbrNetLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44686Average disk read latency seen by vSphere ReplicationhbrDiskReadLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44687Average guest I/O stall introduced by vSphere ReplicationhbrDiskStallLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44688Average amount of successful transfer time per diskhbrDiskTransferSuccessvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44689Average amount of idle time per diskhbrDiskTransferIdlevSphere ReplicationhbrMillisecondmillisecondaverageabsolute44690Average amount of data in KB successfully transferred per diskhbrDiskTransferBytesvSphere ReplicationhbrKilobytekiloBytesaverageabsolute44691Number of caches controlled by the virtual flash modulenumActiveVMDKsVirtual flash module related statistical valuesvflashModuleNumbernumberlatestabsolute44692Read IOPSreadIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44693Read throughput in kBpsreadThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44694Average read latency in msreadAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44695Max read latency in msreadMaxLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondlatestabsolute44696Cache hit rate percentagereadCacheHitRatevSAN DOM object related statistical valuesvsanDomObjPercentagepercentlatestabsolute44697Read congestionreadCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44698Write IOPSwriteIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44699Write throughput in kBpswriteThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44700Average write latency in mswriteAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44701Max write latency in mswriteMaxLatencyvSAN DOM object related statistical 
valuesvsanDomObjMillisecondmillisecondlatestabsolute44702Write congestionwriteCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44703Recovery write IOPSrecoveryWriteIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44704Recovery write through-put in kBpsrecoveryWriteThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44705Average recovery write latency in msrecoveryWriteAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44706Max recovery write latency in msrecoveryWriteMaxLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondlatestabsolute44707Recovery write congestionrecoveryWriteCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44708The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentnoneabsolute44709The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentmaximumabsolute44710The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentminimumabsolute44711The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesnoneabsolute44712The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesmaximumabsolute44713The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesminimumabsolute44714The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentnoneabsolute44715The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentaverageabsolute44716The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentmaximumabsolute44717The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentminimumabsolute44718The amount of GPU memory used in gigabytesmem.used.gbGPUgpuGigabytegigaByteslatestabsolute33719The amount of GPU memory reserved in gigabytesmem.reserved.gbGPUgpuGigabytegigaByteslatestabsolute33720The total amount of GPU memory in gigabytesmem.total.gbGPUgpuGigabytegigaByteslatestabsolute33721Persistent memory available reservation on a host.available.reservationPMEMpmemMegabytemegaByteslatestabsolute44722Persistent memory reservation managed by DRS on a host.drsmanaged.reservationPMEMpmemMegabytemegaByteslatestabsolute44723Total count of virtual CPUs in VMnumVCPUsVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44724Minimum clock speed of the vCPUs during last stats intervalvcpusMhzMinVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44725Maximum clock speed of the vCPUs during last stats intervalvcpusMhzMaxVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44726Average clock speed of the vCPUs during last stats intervalvcpusMhzMeanVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44727Actual clock speed of host CPUcpuSpeedVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44728Minimum overhead heap memory usage since the VM started runningoverheadMemSizeMinVMX Stats for VMX componentsvmxMegabytemegaByteslatestabsolute44729Maximum overhead heap memory usage since the VM started runningoverheadMemSizeMaxVMX Stats for VMX componentsvmxMegabytemegaByteslatestabsolute44730vigor.opsTotalVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44731poll.itersPerSVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44732userRpc.opsPerSVMX Stats 
for VMX componentsvmxNumbernumberlatestabsolute44
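The "adding performance counter" lines that follow register each of these counters under a path of the form group/counter[rollup] or group/counter[rollup,statstype], mapped to its numeric counter ID. A minimal parsing sketch for that line format (a hypothetical helper written for illustration, not part of Zabbix):

```python
import re

# Matches e.g. "adding performance counter cpu/usage[average,rate]:2"
COUNTER_RE = re.compile(
    r"adding performance counter "
    r"(?P<group>[^/]+)/(?P<counter>[^\[]+)"            # e.g. cpu / usage
    r"\[(?P<rollup>[^,\]]+)(?:,(?P<stats>[^\]]+))?\]"  # rollup, optional stats type
    r":(?P<id>\d+)"                                    # numeric counter ID
)

def parse_counter(line: str):
    """Return the counter path parts from a trace line, or None if it does not match."""
    m = COUNTER_RE.search(line)
    if m is None:
        return None
    return {
        "group": m.group("group"),
        "counter": m.group("counter"),
        "rollup": m.group("rollup"),
        "stats_type": m.group("stats"),  # None for the short form without a stats type
        "id": int(m.group("id")),
    }

print(parse_counter("12857:20241101:185444.029 adding performance counter cpu/usage[average,rate]:2"))
# -> {'group': 'cpu', 'counter': 'usage', 'rollup': 'average', 'stats_type': 'rate', 'id': 2}
```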
12857:20241101:185444.029 adding performance counter cpu/usage[none]:1
12857:20241101:185444.029 adding performance counter cpu/usage[none,rate]:1
12857:20241101:185444.029 adding performance counter cpu/usage[average]:2
12857:20241101:185444.029 adding performance counter cpu/usage[average,rate]:2
12857:20241101:185444.029 adding performance counter cpu/usage[minimum]:3
12857:20241101:185444.029 adding performance counter cpu/usage[minimum,rate]:3
12857:20241101:185444.029 adding performance counter cpu/usage[maximum]:4
12857:20241101:185444.029 adding performance counter cpu/usage[maximum,rate]:4
12857:20241101:185444.029 adding performance counter cpu/usagemhz[none]:5
12857:20241101:185444.029 adding performance counter cpu/usagemhz[none,rate]:5
12857:20241101:185444.029 adding performance counter cpu/usagemhz[average]:6
12857:20241101:185444.029 adding performance counter cpu/usagemhz[average,rate]:6
12857:20241101:185444.029 adding performance counter cpu/usagemhz[minimum]:7
12857:20241101:185444.029 adding performance counter cpu/usagemhz[minimum,rate]:7
12857:20241101:185444.029 adding performance counter cpu/usagemhz[maximum]:8
12857:20241101:185444.029 adding performance counter cpu/usagemhz[maximum,rate]:8
12857:20241101:185444.030 adding performance counter cpu/reservedCapacity[average]:9
12857:20241101:185444.030 adding performance counter cpu/reservedCapacity[average,absolute]:9
12857:20241101:185444.030 adding performance counter cpu/system[summation]:10
12857:20241101:185444.030 adding performance counter cpu/system[summation,delta]:10
12857:20241101:185444.030 adding performance counter cpu/wait[summation]:11
12857:20241101:185444.030 adding performance counter cpu/wait[summation,delta]:11
12857:20241101:185444.030 adding performance counter cpu/ready[summation]:12
12857:20241101:185444.030 adding performance counter cpu/ready[summation,delta]:12
12857:20241101:185444.030 adding performance counter cpu/idle[summation]:13
12857:20241101:185444.030 adding performance counter cpu/idle[summation,delta]:13
12857:20241101:185444.030 adding performance counter cpu/used[summation]:14
12857:20241101:185444.030 adding performance counter cpu/used[summation,delta]:14
12857:20241101:185444.030 adding performance counter cpu/capacity.provisioned[average]:15
12857:20241101:185444.030 adding performance counter cpu/capacity.provisioned[average,absolute]:15
12857:20241101:185444.030 adding performance counter cpu/capacity.entitlement[average]:16
12857:20241101:185444.030 adding performance counter cpu/capacity.entitlement[average,absolute]:16
12857:20241101:185444.030 adding performance counter cpu/capacity.usage[average]:17
12857:20241101:185444.030 adding performance counter cpu/capacity.usage[average,rate]:17
12857:20241101:185444.030 adding performance counter cpu/capacity.demand[average]:18
12857:20241101:185444.030 adding performance counter cpu/capacity.demand[average,absolute]:18
12857:20241101:185444.030 adding performance counter cpu/capacity.contention[average]:19
12857:20241101:185444.030 adding performance counter cpu/capacity.contention[average,rate]:19
12857:20241101:185444.030 adding performance counter cpu/corecount.provisioned[average]:20
12857:20241101:185444.031 adding performance counter cpu/corecount.provisioned[average,absolute]:20
12857:20241101:185444.031 adding performance counter cpu/corecount.usage[average]:21
12857:20241101:185444.031 adding performance counter cpu/corecount.usage[average,absolute]:21
12857:20241101:185444.031 adding performance counter cpu/corecount.contention[average]:22
12857:20241101:185444.031 adding performance counter cpu/corecount.contention[average,rate]:22
12857:20241101:185444.031 adding performance counter mem/usage[none]:23
12857:20241101:185444.031 adding performance counter mem/usage[none,absolute]:23
12857:20241101:185444.031 adding performance counter mem/usage[average]:24
12857:20241101:185444.031 adding performance counter mem/usage[average,absolute]:24
12857:20241101:185444.031 adding performance counter mem/usage[minimum]:25
12857:20241101:185444.031 adding performance counter mem/usage[minimum,absolute]:25
12857:20241101:185444.031 adding performance counter mem/usage[maximum]:26
12857:20241101:185444.031 adding performance counter mem/usage[maximum,absolute]:26
12857:20241101:185444.031 adding performance counter mem/reservedCapacity[average]:27
12857:20241101:185444.031 adding performance counter mem/reservedCapacity[average,absolute]:27
12857:20241101:185444.031 adding performance counter mem/granted[none]:28
12857:20241101:185444.031 adding performance counter mem/granted[none,absolute]:28
12857:20241101:185444.031 adding performance counter mem/granted[average]:29
12857:20241101:185444.031 adding performance counter mem/granted[average,absolute]:29
12857:20241101:185444.031 adding performance counter mem/granted[minimum]:30
12857:20241101:185444.031 adding performance counter mem/granted[minimum,absolute]:30
12857:20241101:185444.031 adding performance counter mem/granted[maximum]:31
12857:20241101:185444.031 adding performance counter mem/granted[maximum,absolute]:31
12857:20241101:185444.031 adding performance counter mem/active[none]:32
12857:20241101:185444.031 adding performance counter mem/active[none,absolute]:32
12857:20241101:185444.032 adding performance counter mem/active[average]:33
12857:20241101:185444.032 adding performance counter mem/active[average,absolute]:33
12857:20241101:185444.032 adding performance counter mem/active[minimum]:34
12857:20241101:185444.032 adding performance counter mem/active[minimum,absolute]:34
12857:20241101:185444.032 adding performance counter mem/active[maximum]:35
12857:20241101:185444.032 adding performance counter mem/active[maximum,absolute]:35
12857:20241101:185444.032 adding performance counter mem/shared[none]:36
12857:20241101:185444.032 adding performance counter mem/shared[none,absolute]:36
12857:20241101:185444.032 adding performance counter mem/shared[average]:37
12857:20241101:185444.032 adding performance counter mem/shared[average,absolute]:37
12857:20241101:185444.032 adding performance counter mem/shared[minimum]:38
12857:20241101:185444.032 adding performance counter mem/shared[minimum,absolute]:38
12857:20241101:185444.032 adding performance counter mem/shared[maximum]:39
12857:20241101:185444.032 adding performance counter mem/shared[maximum,absolute]:39
12857:20241101:185444.032 adding performance counter mem/zero[none]:40
12857:20241101:185444.032 adding performance counter mem/zero[none,absolute]:40
12857:20241101:185444.032 adding performance counter mem/zero[average]:41
12857:20241101:185444.032 adding performance counter mem/zero[average,absolute]:41
12857:20241101:185444.032 adding performance counter mem/zero[minimum]:42
12857:20241101:185444.032 adding performance counter mem/zero[minimum,absolute]:42
12857:20241101:185444.032 adding performance counter mem/zero[maximum]:43
12857:20241101:185444.032 adding performance counter mem/zero[maximum,absolute]:43
12857:20241101:185444.032 adding performance counter mem/unreserved[none]:44
12857:20241101:185444.032 adding performance counter mem/unreserved[none,absolute]:44
12857:20241101:185444.033 adding performance counter mem/unreserved[average]:45
12857:20241101:185444.033 adding performance counter mem/unreserved[average,absolute]:45
12857:20241101:185444.033 adding performance counter mem/unreserved[minimum]:46
12857:20241101:185444.033 adding performance counter mem/unreserved[minimum,absolute]:46
12857:20241101:185444.033 adding performance counter mem/unreserved[maximum]:47
12857:20241101:185444.033 adding performance counter mem/unreserved[maximum,absolute]:47
12857:20241101:185444.033 adding performance counter mem/swapused[none]:48
12857:20241101:185444.033 adding performance counter mem/swapused[none,absolute]:48
12857:20241101:185444.033 adding performance counter mem/swapused[average]:49
12857:20241101:185444.033 adding performance counter mem/swapused[average,absolute]:49
12857:20241101:185444.033 adding performance counter mem/swapused[minimum]:50
12857:20241101:185444.033 adding performance counter mem/swapused[minimum,absolute]:50
12857:20241101:185444.033 adding performance counter mem/swapused[maximum]:51
12857:20241101:185444.033 adding performance counter mem/swapused[maximum,absolute]:51
12857:20241101:185444.033 adding performance counter mem/swapunreserved[none]:52
12857:20241101:185444.033 adding performance counter mem/swapunreserved[none,absolute]:52
12857:20241101:185444.033 adding performance counter mem/swapunreserved[average]:53
12857:20241101:185444.033 adding performance counter mem/swapunreserved[average,absolute]:53
12857:20241101:185444.033 adding performance counter mem/swapunreserved[minimum]:54
12857:20241101:185444.033 adding performance counter mem/swapunreserved[minimum,absolute]:54
12857:20241101:185444.033 adding performance counter mem/swapunreserved[maximum]:55
12857:20241101:185444.033 adding performance counter mem/swapunreserved[maximum,absolute]:55
12857:20241101:185444.033 adding performance counter mem/sharedcommon[none]:56
12857:20241101:185444.033 adding performance counter mem/sharedcommon[none,absolute]:56
12857:20241101:185444.034 adding performance counter mem/sharedcommon[average]:57
12857:20241101:185444.034 adding performance counter mem/sharedcommon[average,absolute]:57
12857:20241101:185444.034 adding performance counter mem/sharedcommon[minimum]:58
12857:20241101:185444.034 adding performance counter mem/sharedcommon[minimum,absolute]:58
12857:20241101:185444.034 adding performance counter mem/sharedcommon[maximum]:59
12857:20241101:185444.034 adding performance counter mem/sharedcommon[maximum,absolute]:59
12857:20241101:185444.034 adding performance counter mem/heap[none]:60
12857:20241101:185444.034 adding performance counter mem/heap[none,absolute]:60
12857:20241101:185444.034 adding performance counter mem/heap[average]:61
12857:20241101:185444.034 adding performance counter mem/heap[average,absolute]:61
12857:20241101:185444.034 adding performance counter mem/heap[minimum]:62
12857:20241101:185444.034 adding performance counter mem/heap[minimum,absolute]:62
12857:20241101:185444.034 adding performance counter mem/heap[maximum]:63
12857:20241101:185444.034 adding performance counter mem/heap[maximum,absolute]:63
12857:20241101:185444.034 adding performance counter mem/heapfree[none]:64
12857:20241101:185444.034 adding performance counter mem/heapfree[none,absolute]:64
12857:20241101:185444.034 adding performance counter mem/heapfree[average]:65
12857:20241101:185444.034 adding performance counter mem/heapfree[average,absolute]:65
12857:20241101:185444.034 adding performance counter mem/heapfree[minimum]:66
12857:20241101:185444.034 adding performance counter mem/heapfree[minimum,absolute]:66
12857:20241101:185444.034 adding performance counter mem/heapfree[maximum]:67
12857:20241101:185444.034 adding performance counter mem/heapfree[maximum,absolute]:67
12857:20241101:185444.035 adding performance counter mem/state[latest]:68
12857:20241101:185444.035 adding performance counter mem/state[latest,absolute]:68
12857:20241101:185444.035 adding performance counter mem/swapped[none]:69
12857:20241101:185444.035 adding performance counter mem/swapped[none,absolute]:69
12857:20241101:185444.035 adding performance counter mem/swapped[average]:70
12857:20241101:185444.035 adding performance counter mem/swapped[average,absolute]:70
12857:20241101:185444.035 adding performance counter mem/swapped[minimum]:71
12857:20241101:185444.035 adding performance counter mem/swapped[minimum,absolute]:71
12857:20241101:185444.035 adding performance counter mem/swapped[maximum]:72
12857:20241101:185444.035 adding performance counter mem/swapped[maximum,absolute]:72
12857:20241101:185444.035 adding performance counter mem/swaptarget[none]:73
12857:20241101:185444.035 adding performance counter mem/swaptarget[none,absolute]:73
12857:20241101:185444.035 adding performance counter mem/swaptarget[average]:74
12857:20241101:185444.035 adding performance counter mem/swaptarget[average,absolute]:74
12857:20241101:185444.035 adding performance counter mem/swaptarget[minimum]:75
12857:20241101:185444.035 adding performance counter mem/swaptarget[minimum,absolute]:75
12857:20241101:185444.035 adding performance counter mem/swaptarget[maximum]:76
12857:20241101:185444.035 adding performance counter mem/swaptarget[maximum,absolute]:76
12857:20241101:185444.036 adding performance counter mem/swapIn[none]:77
12857:20241101:185444.036 adding performance counter mem/swapIn[none,absolute]:77
12857:20241101:185444.036 adding performance counter mem/swapIn[average]:78
12857:20241101:185444.036 adding performance counter mem/swapIn[average,absolute]:78
12857:20241101:185444.036 adding performance counter mem/swapIn[minimum]:79
12857:20241101:185444.036 adding performance counter mem/swapIn[minimum,absolute]:79
12857:20241101:185444.036 adding performance counter mem/swapIn[maximum]:80
12857:20241101:185444.036 adding performance counter mem/swapIn[maximum,absolute]:80
12857:20241101:185444.036 adding performance counter mem/swapOut[none]:81
12857:20241101:185444.036 adding performance counter mem/swapOut[none,absolute]:81
12857:20241101:185444.036 adding performance counter mem/swapOut[average]:82
12857:20241101:185444.036 adding performance counter mem/swapOut[average,absolute]:82
12857:20241101:185444.036 adding performance counter mem/swapOut[minimum]:83
12857:20241101:185444.036 adding performance counter mem/swapOut[minimum,absolute]:83
12857:20241101:185444.036 adding performance counter mem/swapOut[maximum]:84
12857:20241101:185444.036 adding performance counter mem/swapOut[maximum,absolute]:84
12857:20241101:185444.036 adding performance counter mem/swapinRate[average]:85
12857:20241101:185444.036 adding performance counter mem/swapinRate[average,rate]:85
12857:20241101:185444.036 adding performance counter mem/swapoutRate[average]:86
12857:20241101:185444.036 adding performance counter mem/swapoutRate[average,rate]:86
12857:20241101:185444.037 adding performance counter managementAgent/swapOut[average]:87
12857:20241101:185444.037 adding performance counter managementAgent/swapOut[average,rate]:87
12857:20241101:185444.037 adding performance counter managementAgent/swapIn[average]:88
12857:20241101:185444.037 adding performance counter managementAgent/swapIn[average,rate]:88
12857:20241101:185444.037 adding performance counter mem/vmmemctl[none]:89
12857:20241101:185444.037 adding performance counter mem/vmmemctl[none,absolute]:89
12857:20241101:185444.037 adding performance counter mem/vmmemctl[average]:90
12857:20241101:185444.037 adding performance counter mem/vmmemctl[average,absolute]:90
12857:20241101:185444.037 adding performance counter mem/vmmemctl[minimum]:91
12857:20241101:185444.037 adding performance counter mem/vmmemctl[minimum,absolute]:91
12857:20241101:185444.037 adding performance counter mem/vmmemctl[maximum]:92
12857:20241101:185444.037 adding performance counter mem/vmmemctl[maximum,absolute]:92
12857:20241101:185444.037 adding performance counter mem/vmmemctltarget[none]:93
12857:20241101:185444.037 adding performance counter mem/vmmemctltarget[none,absolute]:93
12857:20241101:185444.037 adding performance counter mem/vmmemctltarget[average]:94
12857:20241101:185444.037 adding performance counter mem/vmmemctltarget[average,absolute]:94
12857:20241101:185444.037 adding performance counter mem/vmmemctltarget[minimum]:95
12857:20241101:185444.037 adding performance counter mem/vmmemctltarget[minimum,absolute]:95
12857:20241101:185444.038 adding performance counter mem/vmmemctltarget[maximum]:96
12857:20241101:185444.038 adding performance counter mem/vmmemctltarget[maximum,absolute]:96
12857:20241101:185444.038 adding performance counter mem/consumed[none]:97
12857:20241101:185444.038 adding performance counter mem/consumed[none,absolute]:97
12857:20241101:185444.038 adding performance counter mem/consumed[average]:98
12857:20241101:185444.038 adding performance counter mem/consumed[average,absolute]:98
12857:20241101:185444.038 adding performance counter mem/consumed[minimum]:99
12857:20241101:185444.038 adding performance counter mem/consumed[minimum,absolute]:99
12857:20241101:185444.038 adding performance counter mem/consumed[maximum]:100
12857:20241101:185444.038 adding performance counter mem/consumed[maximum,absolute]:100
12857:20241101:185444.038 adding performance counter mem/overhead[none]:101
12857:20241101:185444.038 adding performance counter mem/overhead[none,absolute]:101
12857:20241101:185444.038 adding performance counter mem/overhead[average]:102
12857:20241101:185444.038 adding performance counter mem/overhead[average,absolute]:102
12857:20241101:185444.038 adding performance counter mem/overhead[minimum]:103
12857:20241101:185444.038 adding performance counter mem/overhead[minimum,absolute]:103
12857:20241101:185444.038 adding performance counter mem/overhead[maximum]:104
12857:20241101:185444.038 adding performance counter mem/overhead[maximum,absolute]:104
12857:20241101:185444.038 adding performance counter mem/compressed[average]:105
12857:20241101:185444.039 adding performance counter mem/compressed[average,absolute]:105
12857:20241101:185444.039 adding performance counter mem/compressionRate[average]:106
12857:20241101:185444.039 adding performance counter mem/compressionRate[average,rate]:106
12857:20241101:185444.039 adding performance counter mem/decompressionRate[average]:107
12857:20241101:185444.039 adding performance counter mem/decompressionRate[average,rate]:107
12857:20241101:185444.039 adding performance counter mem/capacity.provisioned[average]:108
12857:20241101:185444.039 adding performance counter mem/capacity.provisioned[average,absolute]:108
12857:20241101:185444.039 adding performance counter mem/capacity.entitlement[average]:109
12857:20241101:185444.039 adding performance counter mem/capacity.entitlement[average,absolute]:109
12857:20241101:185444.039 adding performance counter mem/capacity.usable[average]:110
12857:20241101:185444.039 adding performance counter mem/capacity.usable[average,absolute]:110
12857:20241101:185444.039 adding performance counter mem/capacity.usage[average]:111
12857:20241101:185444.039 adding performance counter mem/capacity.usage[average,absolute]:111
12857:20241101:185444.039 adding performance counter mem/capacity.contention[average]:112
12857:20241101:185444.039 adding performance counter mem/capacity.contention[average,rate]:112
12857:20241101:185444.039 adding performance counter mem/capacity.usage.vm[average]:113
12857:20241101:185444.039 adding performance counter mem/capacity.usage.vm[average,absolute]:113
12857:20241101:185444.039 adding performance counter mem/capacity.usage.vmOvrhd[average]:114
12857:20241101:185444.040 adding performance counter mem/capacity.usage.vmOvrhd[average,absolute]:114
12857:20241101:185444.040 adding performance counter mem/capacity.usage.vmkOvrhd[average]:115
12857:20241101:185444.040 adding performance counter mem/capacity.usage.vmkOvrhd[average,absolute]:115
12857:20241101:185444.040 adding performance counter mem/capacity.usage.userworld[average]:116
12857:20241101:185444.040 adding performance counter mem/capacity.usage.userworld[average,absolute]:116
12857:20241101:185444.040 adding performance counter mem/reservedCapacity.vm[average]:117
12857:20241101:185444.040 adding performance counter mem/reservedCapacity.vm[average,absolute]:117
12857:20241101:185444.040 adding performance counter mem/reservedCapacity.vmOvhd[average]:118
12857:20241101:185444.040 adding performance counter mem/reservedCapacity.vmOvhd[average,absolute]:118
12857:20241101:185444.040 adding performance counter mem/reservedCapacity.vmkOvrhd[average]:119
12857:20241101:185444.040 adding performance counter mem/reservedCapacity.vmkOvrhd[average,absolute]:119
12857:20241101:185444.040 adding performance counter mem/reservedCapacity.userworld[average]:120
12857:20241101:185444.040 adding performance counter mem/reservedCapacity.userworld[average,absolute]:120
12857:20241101:185444.040 adding performance counter mem/reservedCapacityPct[average]:121
12857:20241101:185444.040 adding performance counter mem/reservedCapacityPct[average,absolute]:121
12857:20241101:185444.040 adding performance counter mem/consumed.vms[average]:122
12857:20241101:185444.040 adding performance counter mem/consumed.vms[average,absolute]:122
12857:20241101:185444.041 adding performance counter mem/consumed.userworlds[average]:123
12857:20241101:185444.041 adding performance counter mem/consumed.userworlds[average,absolute]:123
12857:20241101:185444.041 adding performance counter mem/bandwidth.read[latest]:124
12857:20241101:185444.041 adding performance counter mem/bandwidth.read[latest,absolute]:124
12857:20241101:185444.041 adding performance counter mem/bandwidth.write[latest]:125
12857:20241101:185444.041 adding performance counter mem/bandwidth.write[latest,absolute]:125
12857:20241101:185444.041 adding performance counter mem/bandwidth.total[latest]:126
12857:20241101:185444.041 adding performance counter mem/bandwidth.total[latest,absolute]:126
12857:20241101:185444.041 adding performance counter mem/vm.bandwidth.read[latest]:127
12857:20241101:185444.041 adding performance counter mem/vm.bandwidth.read[latest,absolute]:127
12857:20241101:185444.041 adding performance counter mem/missrate[latest]:128
12857:20241101:185444.041 adding performance counter mem/missrate[latest,absolute]:128
12857:20241101:185444.041 adding performance counter mem/latency.read[latest]:129
12857:20241101:185444.041 adding performance counter mem/latency.read[latest,absolute]:129
12857:20241101:185444.041 adding performance counter mem/latency.write[latest]:130
12857:20241101:185444.041 adding performance counter mem/latency.write[latest,absolute]:130
12857:20241101:185444.041 adding performance counter disk/usage[none]:131
12857:20241101:185444.041 adding performance counter disk/usage[none,rate]:131
12857:20241101:185444.042 adding performance counter disk/usage[average]:132
12857:20241101:185444.042 adding performance counter disk/usage[average,rate]:132
12857:20241101:185444.042 adding performance counter disk/usage[minimum]:133
12857:20241101:185444.042 adding performance counter disk/usage[minimum,rate]:133
12857:20241101:185444.042 adding performance counter disk/usage[maximum]:134
12857:20241101:185444.042 adding performance counter disk/usage[maximum,rate]:134
12857:20241101:185444.042 adding performance counter disk/numberRead[summation]:135
12857:20241101:185444.042 adding performance counter disk/numberRead[summation,delta]:135
12857:20241101:185444.042 adding performance counter disk/numberWrite[summation]:136
12857:20241101:185444.042 adding performance counter disk/numberWrite[summation,delta]:136
12857:20241101:185444.042 adding performance counter disk/read[average]:137
12857:20241101:185444.042 adding performance counter disk/read[average,rate]:137
12857:20241101:185444.042 adding performance counter disk/write[average]:138
12857:20241101:185444.042 adding performance counter disk/write[average,rate]:138
12857:20241101:185444.042 adding performance counter disk/totalLatency[average]:139
12857:20241101:185444.042 adding performance counter disk/totalLatency[average,absolute]:139
12857:20241101:185444.042 adding performance counter disk/maxTotalLatency[latest]:140
12857:20241101:185444.042 adding performance counter disk/maxTotalLatency[latest,absolute]:140
12857:20241101:185444.043 adding performance counter disk/commandsAborted[summation]:141
12857:20241101:185444.043 adding performance counter disk/commandsAborted[summation,delta]:141
12857:20241101:185444.043 adding performance counter disk/busResets[summation]:142
12857:20241101:185444.043 adding performance counter disk/busResets[summation,delta]:142
12857:20241101:185444.043 adding performance counter disk/numberReadAveraged[average]:143
12857:20241101:185444.043 adding performance counter disk/numberReadAveraged[average,rate]:143
12857:20241101:185444.043 adding performance counter disk/numberWriteAveraged[average]:144
12857:20241101:185444.043 adding performance counter disk/numberWriteAveraged[average,rate]:144
12857:20241101:185444.043 adding performance counter disk/throughput.usage[average]:145
12857:20241101:185444.043 adding performance counter disk/throughput.usage[average,rate]:145
12857:20241101:185444.043 adding performance counter disk/throughput.contention[average]:146
12857:20241101:185444.043 adding performance counter disk/throughput.contention[average,absolute]:146
12857:20241101:185444.043 adding performance counter disk/scsiReservationConflicts[summation]:147
12857:20241101:185444.043 adding performance counter disk/scsiReservationConflicts[summation,delta]:147
12857:20241101:185444.043 adding performance counter disk/scsiReservationCnflctsPct[average]:148
12857:20241101:185444.043 adding performance counter disk/scsiReservationCnflctsPct[average,absolute]:148
12857:20241101:185444.044 adding performance counter net/usage[none]:149
12857:20241101:185444.044 adding performance counter net/usage[none,rate]:149
12857:20241101:185444.044 adding performance counter net/usage[average]:150
12857:20241101:185444.044 adding performance counter net/usage[average,rate]:150
12857:20241101:185444.044 adding performance counter net/usage[minimum]:151
12857:20241101:185444.044 adding performance counter net/usage[minimum,rate]:151
12857:20241101:185444.044 adding performance counter net/usage[maximum]:152
12857:20241101:185444.044 adding performance counter net/usage[maximum,rate]:152
12857:20241101:185444.044 adding performance counter net/packetsRx[summation]:153
12857:20241101:185444.044 adding performance counter net/packetsRx[summation,delta]:153
12857:20241101:185444.044 adding performance counter net/packetsTx[summation]:154
12857:20241101:185444.044 adding performance counter net/packetsTx[summation,delta]:154
12857:20241101:185444.044 adding performance counter net/received[average]:155
12857:20241101:185444.044 adding performance counter net/received[average,rate]:155
12857:20241101:185444.044 adding performance counter net/transmitted[average]:156
12857:20241101:185444.044 adding performance counter net/transmitted[average,rate]:156
12857:20241101:185444.044 adding performance counter net/throughput.provisioned[average]:157
12857:20241101:185444.044 adding performance counter net/throughput.provisioned[average,absolute]:157
12857:20241101:185444.044 adding performance counter net/throughput.usable[average]:158
12857:20241101:185444.045 adding performance counter net/throughput.usable[average,absolute]:158
12857:20241101:185444.045 adding performance counter net/throughput.usage[average]:159
12857:20241101:185444.045 adding performance counter net/throughput.usage[average,rate]:159
12857:20241101:185444.045 adding performance counter net/throughput.contention[summation]:160
12857:20241101:185444.045 adding performance counter net/throughput.contention[summation,delta]:160
12857:20241101:185444.045 adding performance counter net/throughput.packetsPerSec[average]:161
12857:20241101:185444.045 adding performance counter net/throughput.packetsPerSec[average,rate]:161
12857:20241101:185444.045 adding performance counter sys/uptime[latest]:162
12857:20241101:185444.045 adding performance counter sys/uptime[latest,absolute]:162
12857:20241101:185444.045 adding performance counter sys/heartbeat[summation]:163
12857:20241101:185444.045 adding performance counter sys/heartbeat[summation,delta]:163
12857:20241101:185444.045 adding performance counter power/power[average]:164
12857:20241101:185444.045 adding performance counter power/power[average,rate]:164
12857:20241101:185444.045 adding performance counter power/powerCap[average]:165
12857:20241101:185444.045 adding performance counter power/powerCap[average,absolute]:165
12857:20241101:185444.045 adding performance counter power/energy[summation]:166
12857:20241101:185444.045 adding performance counter power/energy[summation,delta]:166
12857:20241101:185444.046 adding performance counter power/capacity.usagePct[average]:167
12857:20241101:185444.046 adding performance counter power/capacity.usagePct[average,absolute]:167
12857:20241101:185444.046 adding performance counter storageAdapter/commandsAveraged[average]:168
12857:20241101:185444.046 adding performance counter storageAdapter/commandsAveraged[average,rate]:168
12857:20241101:185444.046 adding performance counter storageAdapter/numberReadAveraged[average]:169
12857:20241101:185444.046 adding performance counter storageAdapter/numberReadAveraged[average,rate]:169
12857:20241101:185444.046 adding performance counter storageAdapter/numberWriteAveraged[average]:170
12857:20241101:185444.046 adding performance counter storageAdapter/numberWriteAveraged[average,rate]:170
12857:20241101:185444.046 adding performance counter storageAdapter/read[average]:171
12857:20241101:185444.046 adding performance counter storageAdapter/read[average,rate]:171
12857:20241101:185444.046 adding performance counter storageAdapter/write[average]:172
12857:20241101:185444.046 adding performance counter storageAdapter/write[average,rate]:172
12857:20241101:185444.046 adding performance counter storageAdapter/totalReadLatency[average]:173
12857:20241101:185444.046 adding performance counter storageAdapter/totalReadLatency[average,absolute]:173
12857:20241101:185444.046 adding performance counter storageAdapter/totalWriteLatency[average]:174
12857:20241101:185444.046 adding performance counter storageAdapter/totalWriteLatency[average,absolute]:174
12857:20241101:185444.046 adding performance counter storageAdapter/maxTotalLatency[latest]:175
12857:20241101:185444.046 adding performance counter storageAdapter/maxTotalLatency[latest,absolute]:175
12857:20241101:185444.047 adding performance counter storageAdapter/throughput.cont[average]:176
12857:20241101:185444.047 adding performance counter storageAdapter/throughput.cont[average,absolute]:176
12857:20241101:185444.047 adding performance counter storageAdapter/OIOsPct[average]:177
12857:20241101:185444.047 adding performance counter storageAdapter/OIOsPct[average,absolute]:177
12857:20241101:185444.047 adding performance counter virtualDisk/numberReadAveraged[average]:178
12857:20241101:185444.047 adding performance counter virtualDisk/numberReadAveraged[average,rate]:178
12857:20241101:185444.047 adding performance counter virtualDisk/numberWriteAveraged[average]:179
12857:20241101:185444.047 adding performance counter virtualDisk/numberWriteAveraged[average,rate]:179
12857:20241101:185444.047 adding performance counter virtualDisk/read[average]:180
12857:20241101:185444.047 adding performance counter virtualDisk/read[average,rate]:180
12857:20241101:185444.047 adding performance counter virtualDisk/write[average]:181
12857:20241101:185444.047 adding performance counter virtualDisk/write[average,rate]:181
12857:20241101:185444.047 adding performance counter virtualDisk/totalReadLatency[average]:182
12857:20241101:185444.047 adding performance counter virtualDisk/totalReadLatency[average,absolute]:182
12857:20241101:185444.047 adding performance counter virtualDisk/totalWriteLatency[average]:183
12857:20241101:185444.047 adding performance counter virtualDisk/totalWriteLatency[average,absolute]:183
12857:20241101:185444.047 adding performance counter virtualDisk/throughput.cont[average]:184
12857:20241101:185444.047 adding performance counter virtualDisk/throughput.cont[average,absolute]:184
12857:20241101:185444.048 adding performance counter datastore/numberReadAveraged[average]:185
12857:20241101:185444.048 adding performance counter datastore/numberReadAveraged[average,rate]:185
12857:20241101:185444.048 adding performance counter datastore/numberWriteAveraged[average]:186
12857:20241101:185444.048 adding performance counter datastore/numberWriteAveraged[average,rate]:186
12857:20241101:185444.048 adding performance counter datastore/read[average]:187
12857:20241101:185444.048 adding performance counter datastore/read[average,rate]:187
12857:20241101:185444.048 adding performance counter datastore/write[average]:188
12857:20241101:185444.048 adding performance counter datastore/write[average,rate]:188
12857:20241101:185444.048 adding performance counter datastore/totalReadLatency[average]:189
12857:20241101:185444.048 adding performance counter datastore/totalReadLatency[average,absolute]:189
12857:20241101:185444.048 adding performance counter datastore/totalWriteLatency[average]:190
12857:20241101:185444.048 adding performance counter datastore/totalWriteLatency[average,absolute]:190
12857:20241101:185444.048 adding performance counter datastore/maxTotalLatency[latest]:191
12857:20241101:185444.048 adding performance counter datastore/maxTotalLatency[latest,absolute]:191
12857:20241101:185444.048 adding performance counter datastore/datastoreIops[average]:192
12857:20241101:185444.048 adding performance counter datastore/datastoreIops[average,absolute]:192
12857:20241101:185444.049 adding performance counter datastore/sizeNormalizedDatastoreLatency[average]:193
12857:20241101:185444.049 adding performance counter datastore/sizeNormalizedDatastoreLatency[average,absolute]:193
12857:20241101:185444.049 adding performance counter datastore/throughput.usage[average]:194
12857:20241101:185444.049 adding performance counter datastore/throughput.usage[average,absolute]:194
12857:20241101:185444.049 adding performance counter datastore/throughput.contention[average]:195
12857:20241101:185444.049 adding performance counter datastore/throughput.contention[average,absolute]:195
12857:20241101:185444.049 adding performance counter datastore/busResets[summation]:196
12857:20241101:185444.049 adding performance counter datastore/busResets[summation,delta]:196
12857:20241101:185444.049 adding performance counter datastore/commandsAborted[summation]:197
12857:20241101:185444.049 adding performance counter datastore/commandsAborted[summation,delta]:197
12857:20241101:185444.049 adding performance counter datastore/siocActiveTimePercentage[average]:198
12857:20241101:185444.049 adding performance counter datastore/siocActiveTimePercentage[average,absolute]:198
12857:20241101:185444.049 adding performance counter storagePath/throughput.cont[average]:199
12857:20241101:185444.049 adding performance counter storagePath/throughput.cont[average,absolute]:199
12857:20241101:185444.049 adding performance counter storagePath/maxTotalLatency[latest]:200
12857:20241101:185444.049 adding performance counter storagePath/maxTotalLatency[latest,absolute]:200
12857:20241101:185444.050 adding performance counter virtualDisk/throughput.usage[average]:201
12857:20241101:185444.050 adding performance counter virtualDisk/throughput.usage[average,rate]:201
12857:20241101:185444.050 adding performance counter virtualDisk/commandsAborted[summation]:202
12857:20241101:185444.050 adding performance counter virtualDisk/commandsAborted[summation,delta]:202
12857:20241101:185444.050 adding performance counter virtualDisk/busResets[summation]:203
12857:20241101:185444.050 adding performance counter virtualDisk/busResets[summation,delta]:203
12857:20241101:185444.050 adding performance counter storageAdapter/outstandingIOs[average]:204
12857:20241101:185444.050 adding performance counter storageAdapter/outstandingIOs[average,absolute]:204
12857:20241101:185444.050 adding performance counter storageAdapter/queued[average]:205
12857:20241101:185444.050 adding performance counter storageAdapter/queued[average,absolute]:205
12857:20241101:185444.050 adding performance counter storageAdapter/queueDepth[average]:206
12857:20241101:185444.050 adding performance counter storageAdapter/queueDepth[average,absolute]:206
12857:20241101:185444.050 adding performance counter storageAdapter/queueLatency[average]:207
12857:20241101:185444.050 adding performance counter storageAdapter/queueLatency[average,absolute]:207
12857:20241101:185444.050 adding performance counter storageAdapter/throughput.usag[average]:208
12857:20241101:185444.050 adding performance counter storageAdapter/throughput.usag[average,rate]:208
12857:20241101:185444.050 adding performance counter storagePath/busResets[summation]:209
12857:20241101:185444.050 adding performance counter storagePath/busResets[summation,delta]:209
12857:20241101:185444.051 adding performance counter storagePath/commandsAborted[summation]:210
12857:20241101:185444.051 adding performance counter storagePath/commandsAborted[summation,delta]:210
12857:20241101:185444.051 adding performance counter storagePath/throughput.usage[average]:211
12857:20241101:185444.051 adding performance counter storagePath/throughput.usage[average,rate]:211
12857:20241101:185444.051 adding performance counter net/throughput.usage.vm[average]:212
12857:20241101:185444.051 adding performance counter net/throughput.usage.vm[average,rate]:212
12857:20241101:185444.051 adding performance counter net/throughput.usage.nfs[average]:213
12857:20241101:185444.051 adding performance counter net/throughput.usage.nfs[average,rate]:213
12857:20241101:185444.051 adding performance counter net/throughput.usage.vmotion[average]:214
12857:20241101:185444.051 adding performance counter net/throughput.usage.vmotion[average,rate]:214
12857:20241101:185444.051 adding performance counter net/throughput.usage.ft[average]:215
12857:20241101:185444.051 adding performance counter net/throughput.usage.ft[average,rate]:215
12857:20241101:185444.051 adding performance counter net/throughput.usage.iscsi[average]:216
12857:20241101:185444.051 adding performance counter net/throughput.usage.iscsi[average,rate]:216
12857:20241101:185444.051 adding performance counter net/throughput.usage.hbr[average]:217
12857:20241101:185444.051 adding performance counter net/throughput.usage.hbr[average,rate]:217
12857:20241101:185444.051 adding performance counter power/capacity.usable[average]:218
12857:20241101:185444.051 adding performance counter power/capacity.usable[average,absolute]:218
12857:20241101:185444.052 adding performance counter power/capacity.usage[average]:219
12857:20241101:185444.052 adding performance counter power/capacity.usage[average,absolute]:219
12857:20241101:185444.052 adding performance counter power/capacity.usageIdle[average]:220
12857:20241101:185444.052 adding performance counter power/capacity.usageIdle[average,absolute]:220
12857:20241101:185444.052 adding performance counter power/capacity.usageSystem[average]:221
12857:20241101:185444.052 adding performance counter power/capacity.usageSystem[average,absolute]:221
12857:20241101:185444.052 adding performance counter power/capacity.usageVm[average]:222
12857:20241101:185444.052 adding performance counter power/capacity.usageVm[average,absolute]:222
12857:20241101:185444.052 adding performance counter power/capacity.usageStatic[average]:223
12857:20241101:185444.052 adding performance counter power/capacity.usageStatic[average,absolute]:223
12857:20241101:185444.052 adding performance counter cpu/cpuentitlement[latest]:224
12857:20241101:185444.052 adding performance counter cpu/cpuentitlement[latest,absolute]:224
12857:20241101:185444.052 adding performance counter mem/mementitlement[latest]:225
12857:20241101:185444.052 adding performance counter mem/mementitlement[latest,absolute]:225
12857:20241101:185444.052 adding performance counter clusterServices/vmDrsScore[latest]:226
12857:20241101:185444.052 adding performance counter clusterServices/vmDrsScore[latest,absolute]:226
12857:20241101:185444.053 adding performance counter clusterServices/cpufairness[latest]:227
12857:20241101:185444.053 adding performance counter clusterServices/cpufairness[latest,absolute]:227
12857:20241101:185444.053 adding performance counter clusterServices/memfairness[latest]:228
12857:20241101:185444.053 adding performance counter clusterServices/memfairness[latest,absolute]:228
12857:20241101:185444.053 adding performance counter net/throughput.pktsTx[average]:229
12857:20241101:185444.053 adding performance counter net/throughput.pktsTx[average,absolute]:229
12857:20241101:185444.053 adding performance counter net/throughput.pktsTxMulticast[average]:230
12857:20241101:185444.053 adding performance counter net/throughput.pktsTxMulticast[average,absolute]:230
12857:20241101:185444.053 adding performance counter net/throughput.pktsTxBroadcast[average]:231
12857:20241101:185444.053 adding performance counter net/throughput.pktsTxBroadcast[average,absolute]:231
12857:20241101:185444.053 adding performance counter net/throughput.pktsRx[average]:232
12857:20241101:185444.053 adding performance counter net/throughput.pktsRx[average,absolute]:232
12857:20241101:185444.053 adding performance counter net/throughput.pktsRxMulticast[average]:233
12857:20241101:185444.053 adding performance counter net/throughput.pktsRxMulticast[average,absolute]:233
12857:20241101:185444.053 adding performance counter net/throughput.pktsRxBroadcast[average]:234
12857:20241101:185444.053 adding performance counter net/throughput.pktsRxBroadcast[average,absolute]:234
12857:20241101:185444.054 adding performance counter net/throughput.droppedTx[average]:235
12857:20241101:185444.054 adding performance counter net/throughput.droppedTx[average,absolute]:235
12857:20241101:185444.054 adding performance counter net/throughput.droppedRx[average]:236
12857:20241101:185444.054 adding performance counter net/throughput.droppedRx[average,absolute]:236
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsTx[average]:237
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsTx[average,absolute]:237
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsTxMcast[average]:238
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsTxMcast[average,absolute]:238
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsTxBcast[average]:239
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsTxBcast[average,absolute]:239
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsRx[average]:240
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsRx[average,absolute]:240
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsRxMcast[average]:241
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsRxMcast[average,absolute]:241
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsRxBcast[average]:242
12857:20241101:185444.054 adding performance counter net/throughput.vds.pktsRxBcast[average,absolute]:242
12857:20241101:185444.054 adding performance counter net/throughput.vds.droppedTx[average]:243
12857:20241101:185444.054 adding performance counter net/throughput.vds.droppedTx[average,absolute]:243
12857:20241101:185444.055 adding performance counter net/throughput.vds.droppedRx[average]:244
12857:20241101:185444.055 adding performance counter net/throughput.vds.droppedRx[average,absolute]:244
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagTx[average]:245
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagTx[average,absolute]:245
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagTxMcast[average]:246
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagTxMcast[average,absolute]:246
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagTxBcast[average]:247
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagTxBcast[average,absolute]:247
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagRx[average]:248
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagRx[average,absolute]:248
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagRxMcast[average]:249
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagRxMcast[average,absolute]:249
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagRxBcast[average]:250
12857:20241101:185444.055 adding performance counter net/throughput.vds.lagRxBcast[average,absolute]:250
12857:20241101:185444.056 adding performance counter net/throughput.vds.lagDropTx[average]:251
12857:20241101:185444.056 adding performance counter net/throughput.vds.lagDropTx[average,absolute]:251
12857:20241101:185444.056 adding performance counter net/throughput.vds.lagDropRx[average]:252
12857:20241101:185444.056 adding performance counter net/throughput.vds.lagDropRx[average,absolute]:252
12857:20241101:185444.056 adding performance counter vmop/numPoweron[latest]:253
12857:20241101:185444.056 adding performance counter vmop/numPoweron[latest,absolute]:253
12857:20241101:185444.056 adding performance counter vmop/numPoweroff[latest]:254
12857:20241101:185444.056 adding performance counter vmop/numPoweroff[latest,absolute]:254
12857:20241101:185444.056 adding performance counter vmop/numSuspend[latest]:255
12857:20241101:185444.056 adding performance counter vmop/numSuspend[latest,absolute]:255
12857:20241101:185444.056 adding performance counter vmop/numReset[latest]:256
12857:20241101:185444.056 adding performance counter vmop/numReset[latest,absolute]:256
12857:20241101:185444.056 adding performance counter vmop/numRebootGuest[latest]:257
12857:20241101:185444.056 adding performance counter vmop/numRebootGuest[latest,absolute]:257
12857:20241101:185444.056 adding performance counter vmop/numStandbyGuest[latest]:258
12857:20241101:185444.056 adding performance counter vmop/numStandbyGuest[latest,absolute]:258
12857:20241101:185444.056 adding performance counter vmop/numShutdownGuest[latest]:259
12857:20241101:185444.056 adding performance counter vmop/numShutdownGuest[latest,absolute]:259
12857:20241101:185444.057 adding performance counter vmop/numCreate[latest]:260
12857:20241101:185444.057 adding performance counter vmop/numCreate[latest,absolute]:260
12857:20241101:185444.057 adding performance counter vmop/numDestroy[latest]:261
12857:20241101:185444.057 adding performance counter vmop/numDestroy[latest,absolute]:261
12857:20241101:185444.057 adding performance counter vmop/numRegister[latest]:262
12857:20241101:185444.057 adding performance counter vmop/numRegister[latest,absolute]:262
12857:20241101:185444.057 adding performance counter vmop/numUnregister[latest]:263
12857:20241101:185444.057 adding performance counter vmop/numUnregister[latest,absolute]:263
12857:20241101:185444.057 adding performance counter vmop/numReconfigure[latest]:264
12857:20241101:185444.057 adding performance counter vmop/numReconfigure[latest,absolute]:264
12857:20241101:185444.057 adding performance counter vmop/numClone[latest]:265
12857:20241101:185444.057 adding performance counter vmop/numClone[latest,absolute]:265
12857:20241101:185444.057 adding performance counter vmop/numDeploy[latest]:266
12857:20241101:185444.057 adding performance counter vmop/numDeploy[latest,absolute]:266
12857:20241101:185444.057 adding performance counter vmop/numChangeHost[latest]:267
12857:20241101:185444.057 adding performance counter vmop/numChangeHost[latest,absolute]:267
12857:20241101:185444.058 adding performance counter vmop/numChangeDS[latest]:268
12857:20241101:185444.058 adding performance counter vmop/numChangeDS[latest,absolute]:268
12857:20241101:185444.058 adding performance counter vmop/numChangeHostDS[latest]:269
12857:20241101:185444.058 adding performance counter vmop/numChangeHostDS[latest,absolute]:269
12857:20241101:185444.058 adding performance counter vmop/numVMotion[latest]:270
12857:20241101:185444.058 adding performance counter vmop/numVMotion[latest,absolute]:270
12857:20241101:185444.058 adding performance counter vmop/numSVMotion[latest]:271
12857:20241101:185444.058 adding performance counter vmop/numSVMotion[latest,absolute]:271
12857:20241101:185444.058 adding performance counter vmop/numXVMotion[latest]:272
12857:20241101:185444.058 adding performance counter vmop/numXVMotion[latest,absolute]:272
12857:20241101:185444.058 adding performance counter clusterServices/effectivecpu[average]:273
12857:20241101:185444.058 adding performance counter clusterServices/effectivecpu[average,rate]:273
12857:20241101:185444.058 adding performance counter clusterServices/effectivemem[average]:274
12857:20241101:185444.058 adding performance counter clusterServices/effectivemem[average,absolute]:274
12857:20241101:185444.058 adding performance counter cpu/totalmhz[average]:275
12857:20241101:185444.058 adding performance counter cpu/totalmhz[average,rate]:275
12857:20241101:185444.058 adding performance counter mem/totalmb[average]:276
12857:20241101:185444.058 adding performance counter mem/totalmb[average,absolute]:276
12857:20241101:185444.059 adding performance counter clusterServices/clusterDrsScore[latest]:277
12857:20241101:185444.059 adding performance counter clusterServices/clusterDrsScore[latest,absolute]:277
12857:20241101:185444.059 adding performance counter clusterServices/failover[latest]:278
12857:20241101:185444.059 adding performance counter clusterServices/failover[latest,absolute]:278
12857:20241101:185444.059 adding performance counter gpu/utilization[average]:279
12857:20241101:185444.059 adding performance counter gpu/utilization[average,absolute]:279
12857:20241101:185444.059 adding performance counter gpu/mem.used[average]:280
12857:20241101:185444.059 adding performance counter gpu/mem.used[average,absolute]:280
12857:20241101:185444.059 adding performance counter gpu/mem.reserved[latest]:281
12857:20241101:185444.059 adding performance counter gpu/mem.reserved[latest,absolute]:281
12857:20241101:185444.059 adding performance counter gpu/power.used[latest]:282
12857:20241101:185444.059 adding performance counter gpu/power.used[latest,absolute]:282
12857:20241101:185444.059 adding performance counter gpu/temperature[average]:283
12857:20241101:185444.059 adding performance counter gpu/temperature[average,absolute]:283
12857:20241101:185444.059 adding performance counter gpu/mem.total[latest]:284
12857:20241101:185444.059 adding performance counter gpu/mem.total[latest,absolute]:284
12857:20241101:185444.059 adding performance counter disk/used[latest]:285
12857:20241101:185444.059 adding performance counter disk/used[latest,absolute]:285
12857:20241101:185444.060 adding performance counter disk/provisioned[latest]:286
12857:20241101:185444.060 adding performance counter disk/provisioned[latest,absolute]:286
12857:20241101:185444.060 adding performance counter disk/capacity[latest]:287
12857:20241101:185444.060 adding performance counter disk/capacity[latest,absolute]:287
12857:20241101:185444.060 adding performance counter disk/unshared[latest]:288
12857:20241101:185444.060 adding performance counter disk/unshared[latest,absolute]:288
12857:20241101:185444.060 adding performance counter disk/actualused[latest]:289
12857:20241101:185444.060 adding performance counter disk/actualused[latest,absolute]:289
12857:20241101:185444.060 adding performance counter disk/deltaused[latest]:290
12857:20241101:185444.060 adding performance counter disk/deltaused[latest,absolute]:290
12857:20241101:185444.060 adding performance counter disk/capacity.provisioned[average]:291
12857:20241101:185444.060 adding performance counter disk/capacity.provisioned[average,absolute]:291
12857:20241101:185444.060 adding performance counter disk/capacity.usage[average]:292
12857:20241101:185444.060 adding performance counter disk/capacity.usage[average,absolute]:292
12857:20241101:185444.060 adding performance counter disk/capacity.contention[average]:293
12857:20241101:185444.060 adding performance counter disk/capacity.contention[average,absolute]:293
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationlatencystats[maximum]:294
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationlatencystats[maximum,absolute]:294
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationlatencystats[minimum]:295
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationlatencystats[minimum,absolute]:295
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationlatencystats[summation]:296
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationlatencystats[summation,absolute]:296
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationstats[maximum]:297
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationstats[maximum,absolute]:297
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationstats[minimum]:298
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationstats[minimum,absolute]:298
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationstats[summation]:299
12857:20241101:185444.061 adding performance counter vcDebugInfo/activationstats[summation,absolute]:299
12857:20241101:185444.061 adding performance counter vcResources/buffersz[average]:300
12857:20241101:185444.061 adding performance counter vcResources/buffersz[average,absolute]:300
12857:20241101:185444.061 adding performance counter vcResources/cachesz[average]:301
12857:20241101:185444.061 adding performance counter vcResources/cachesz[average,absolute]:301
12857:20241101:185444.062 adding performance counter vcResources/ctxswitchesrate[average]:302
12857:20241101:185444.062 adding performance counter vcResources/ctxswitchesrate[average,rate]:302
12857:20241101:185444.062 adding performance counter vcResources/diskreadsectorrate[average]:303
12857:20241101:185444.062 adding performance counter vcResources/diskreadsectorrate[average,rate]:303
12857:20241101:185444.062 adding performance counter vcResources/diskreadsrate[average]:304
12857:20241101:185444.062 adding performance counter vcResources/diskreadsrate[average,rate]:304
12857:20241101:185444.062 adding performance counter vcResources/diskwritesectorrate[average]:305
12857:20241101:185444.062 adding performance counter vcResources/diskwritesectorrate[average,rate]:305
12857:20241101:185444.062 adding performance counter vcResources/diskwritesrate[average]:306
12857:20241101:185444.062 adding performance counter vcResources/diskwritesrate[average,rate]:306
12857:20241101:185444.062 adding performance counter vcDebugInfo/hostsynclatencystats[maximum]:307
12857:20241101:185444.062 adding performance counter vcDebugInfo/hostsynclatencystats[maximum,absolute]:307
12857:20241101:185444.062 adding performance counter vcDebugInfo/hostsynclatencystats[minimum]:308
12857:20241101:185444.062 adding performance counter vcDebugInfo/hostsynclatencystats[minimum,absolute]:308
12857:20241101:185444.063 adding performance counter vcDebugInfo/hostsynclatencystats[summation]:309
12857:20241101:185444.063 adding performance counter vcDebugInfo/hostsynclatencystats[summation,absolute]:309
12857:20241101:185444.063 adding performance counter vcDebugInfo/hostsyncstats[maximum]:310
12857:20241101:185444.063 adding performance counter vcDebugInfo/hostsyncstats[maximum,absolute]:310
12857:20241101:185444.063 adding performance counter vcDebugInfo/hostsyncstats[minimum]:311
12857:20241101:185444.063 adding performance counter vcDebugInfo/hostsyncstats[minimum,absolute]:311
12857:20241101:185444.063 adding performance counter vcDebugInfo/hostsyncstats[summation]:312
12857:20241101:185444.063 adding performance counter vcDebugInfo/hostsyncstats[summation,absolute]:312
12857:20241101:185444.063 adding performance counter vcDebugInfo/inventorystats[maximum]:313
12857:20241101:185444.063 adding performance counter vcDebugInfo/inventorystats[maximum,absolute]:313
12857:20241101:185444.063 adding performance counter vcDebugInfo/inventorystats[minimum]:314
12857:20241101:185444.063 adding performance counter vcDebugInfo/inventorystats[minimum,absolute]:314
12857:20241101:185444.063 adding performance counter vcDebugInfo/inventorystats[summation]:315
12857:20241101:185444.063 adding performance counter vcDebugInfo/inventorystats[summation,absolute]:315
12857:20241101:185444.063 adding performance counter vcDebugInfo/lockstats[maximum]:316
12857:20241101:185444.063 adding performance counter vcDebugInfo/lockstats[maximum,absolute]:316
12857:20241101:185444.064 adding performance counter vcDebugInfo/lockstats[minimum]:317
12857:20241101:185444.064 adding performance counter vcDebugInfo/lockstats[minimum,absolute]:317
12857:20241101:185444.064 adding performance counter vcDebugInfo/lockstats[summation]:318
12857:20241101:185444.064 adding performance counter vcDebugInfo/lockstats[summation,absolute]:318
12857:20241101:185444.064 adding performance counter vcDebugInfo/lrostats[maximum]:319
12857:20241101:185444.064 adding performance counter vcDebugInfo/lrostats[maximum,absolute]:319
12857:20241101:185444.064 adding performance counter vcDebugInfo/lrostats[minimum]:320
12857:20241101:185444.064 adding performance counter vcDebugInfo/lrostats[minimum,absolute]:320
12857:20241101:185444.064 adding performance counter vcDebugInfo/lrostats[summation]:321
12857:20241101:185444.064 adding performance counter vcDebugInfo/lrostats[summation,absolute]:321
12857:20241101:185444.064 adding performance counter vcDebugInfo/miscstats[maximum]:322
12857:20241101:185444.064 adding performance counter vcDebugInfo/miscstats[maximum,absolute]:322
12857:20241101:185444.064 adding performance counter vcDebugInfo/miscstats[minimum]:323
12857:20241101:185444.064 adding performance counter vcDebugInfo/miscstats[minimum,absolute]:323
12857:20241101:185444.064 adding performance counter vcDebugInfo/miscstats[summation]:324
12857:20241101:185444.064 adding performance counter vcDebugInfo/miscstats[summation,absolute]:324
12857:20241101:185444.064 adding performance counter vcDebugInfo/morefregstats[maximum]:325
12857:20241101:185444.065 adding performance counter vcDebugInfo/morefregstats[maximum,absolute]:325
12857:20241101:185444.065 adding performance counter vcDebugInfo/morefregstats[minimum]:326
12857:20241101:185444.065 adding performance counter vcDebugInfo/morefregstats[minimum,absolute]:326
12857:20241101:185444.065 adding performance counter vcDebugInfo/morefregstats[summation]:327
12857:20241101:185444.065 adding performance counter vcDebugInfo/morefregstats[summation,absolute]:327
12857:20241101:185444.065 adding performance counter vcResources/packetrecvrate[average]:328
12857:20241101:185444.065 adding performance counter vcResources/packetrecvrate[average,rate]:328
12857:20241101:185444.065 adding performance counter vcResources/packetsentrate[average]:329
12857:20241101:185444.065 adding performance counter vcResources/packetsentrate[average,rate]:329
12857:20241101:185444.065 adding performance counter vcResources/systemcpuusage[average]:330
12857:20241101:185444.065 adding performance counter vcResources/systemcpuusage[average,rate]:330
12857:20241101:185444.065 adding performance counter vcResources/pagefaultrate[average]:331
12857:20241101:185444.065 adding performance counter vcResources/pagefaultrate[average,rate]:331
12857:20241101:185444.066 adding performance counter vcResources/physicalmemusage[average]:332
12857:20241101:185444.066 adding performance counter vcResources/physicalmemusage[average,absolute]:332
12857:20241101:185444.066 adding performance counter vcResources/priviledgedcpuusage[average]:333
12857:20241101:185444.066 adding performance counter vcResources/priviledgedcpuusage[average,rate]:333
12857:20241101:185444.066 adding performance counter vcDebugInfo/scoreboard[maximum]:334
12857:20241101:185444.066 adding performance counter vcDebugInfo/scoreboard[maximum,absolute]:334
12857:20241101:185444.066 adding performance counter vcDebugInfo/scoreboard[minimum]:335
12857:20241101:185444.066 adding performance counter vcDebugInfo/scoreboard[minimum,absolute]:335
12857:20241101:185444.066 adding performance counter vcDebugInfo/scoreboard[summation]:336
12857:20241101:185444.066 adding performance counter vcDebugInfo/scoreboard[summation,absolute]:336
12857:20241101:185444.066 adding performance counter vcDebugInfo/sessionstats[maximum]:337
12857:20241101:185444.066 adding performance counter vcDebugInfo/sessionstats[maximum,absolute]:337
12857:20241101:185444.066 adding performance counter vcDebugInfo/sessionstats[minimum]:338
12857:20241101:185444.066 adding performance counter vcDebugInfo/sessionstats[minimum,absolute]:338
12857:20241101:185444.066 adding performance counter vcDebugInfo/sessionstats[summation]:339
12857:20241101:185444.066 adding performance counter vcDebugInfo/sessionstats[summation,absolute]:339
12857:20241101:185444.066 adding performance counter vcResources/syscallsrate[average]:340
12857:20241101:185444.067 adding performance counter vcResources/syscallsrate[average,rate]:340
12857:20241101:185444.067 adding performance counter vcDebugInfo/systemstats[maximum]:341
12857:20241101:185444.067 adding performance counter vcDebugInfo/systemstats[maximum,absolute]:341
12857:20241101:185444.067 adding performance counter vcDebugInfo/systemstats[minimum]:342
12857:20241101:185444.067 adding performance counter vcDebugInfo/systemstats[minimum,absolute]:342
12857:20241101:185444.067 adding performance counter vcDebugInfo/systemstats[summation]:343
12857:20241101:185444.067 adding performance counter vcDebugInfo/systemstats[summation,absolute]:343
12857:20241101:185444.067 adding performance counter vcResources/usercpuusage[average]:344
12857:20241101:185444.067 adding performance counter vcResources/usercpuusage[average,rate]:344
12857:20241101:185444.067 adding performance counter vcDebugInfo/vcservicestats[maximum]:345
12857:20241101:185444.067 adding performance counter vcDebugInfo/vcservicestats[maximum,absolute]:345
12857:20241101:185444.067 adding performance counter vcDebugInfo/vcservicestats[minimum]:346
12857:20241101:185444.067 adding performance counter vcDebugInfo/vcservicestats[minimum,absolute]:346
12857:20241101:185444.067 adding performance counter vcDebugInfo/vcservicestats[summation]:347
12857:20241101:185444.067 adding performance counter vcDebugInfo/vcservicestats[summation,absolute]:347
12857:20241101:185444.067 adding performance counter vcResources/virtualmemusage[average]:348
12857:20241101:185444.068 adding performance counter vcResources/virtualmemusage[average,absolute]:348
12857:20241101:185444.068 adding performance counter virtualDisk/readOIO[latest]:349
12857:20241101:185444.068 adding performance counter virtualDisk/readOIO[latest,absolute]:349
12857:20241101:185444.068 adding performance counter virtualDisk/writeOIO[latest]:350
12857:20241101:185444.068 adding performance counter virtualDisk/writeOIO[latest,absolute]:350
12857:20241101:185444.068 adding performance counter virtualDisk/readLoadMetric[latest]:351
12857:20241101:185444.068 adding performance counter virtualDisk/readLoadMetric[latest,absolute]:351
12857:20241101:185444.068 adding performance counter virtualDisk/writeLoadMetric[latest]:352
12857:20241101:185444.068 adding performance counter virtualDisk/writeLoadMetric[latest,absolute]:352
12857:20241101:185444.068 adding performance counter rescpu/actav1[latest]:353
12857:20241101:185444.068 adding performance counter rescpu/actav1[latest,absolute]:353
12857:20241101:185444.068 adding performance counter datastore/datastoreReadBytes[latest]:354
12857:20241101:185444.068 adding performance counter datastore/datastoreReadBytes[latest,absolute]:354
12857:20241101:185444.068 adding performance counter datastore/datastoreWriteBytes[latest]:355
12857:20241101:185444.068 adding performance counter datastore/datastoreWriteBytes[latest,absolute]:355
12857:20241101:185444.069 adding performance counter datastore/datastoreReadIops[latest]:356
12857:20241101:185444.069 adding performance counter datastore/datastoreReadIops[latest,absolute]:356
12857:20241101:185444.069 adding performance counter datastore/datastoreWriteIops[latest]:357
12857:20241101:185444.069 adding performance counter datastore/datastoreWriteIops[latest,absolute]:357
12857:20241101:185444.069 adding performance counter datastore/datastoreReadOIO[latest]:358
12857:20241101:185444.069 adding performance counter datastore/datastoreReadOIO[latest,absolute]:358
12857:20241101:185444.069 adding performance counter datastore/datastoreWriteOIO[latest]:359
12857:20241101:185444.069 adding performance counter datastore/datastoreWriteOIO[latest,absolute]:359
12857:20241101:185444.069 adding performance counter datastore/datastoreNormalReadLatency[latest]:360
12857:20241101:185444.069 adding performance counter datastore/datastoreNormalReadLatency[latest,absolute]:360
12857:20241101:185444.069 adding performance counter datastore/datastoreNormalWriteLatency[latest]:361
12857:20241101:185444.069 adding performance counter datastore/datastoreNormalWriteLatency[latest,absolute]:361
12857:20241101:185444.069 adding performance counter datastore/datastoreReadLoadMetric[latest]:362
12857:20241101:185444.069 adding performance counter datastore/datastoreReadLoadMetric[latest,absolute]:362
12857:20241101:185444.070 adding performance counter datastore/datastoreWriteLoadMetric[latest]:363
12857:20241101:185444.070 adding performance counter datastore/datastoreWriteLoadMetric[latest,absolute]:363
12857:20241101:185444.070 adding performance counter datastore/datastoreVMObservedLatency[latest]:364
12857:20241101:185444.070 adding performance counter datastore/datastoreVMObservedLatency[latest,absolute]:364
12857:20241101:185444.070 adding performance counter disk/scsiReservationCnflctsPct[average]:365
12857:20241101:185444.070 adding performance counter disk/scsiReservationCnflctsPct[average,rate]:365
12857:20241101:185444.070 adding performance counter disk/read[latest]:366
12857:20241101:185444.070 adding performance counter disk/read[latest,absolute]:366
12857:20241101:185444.070 adding performance counter disk/readFailed[latest]:367
12857:20241101:185444.070 adding performance counter disk/readFailed[latest,absolute]:367
12857:20241101:185444.070 adding performance counter disk/write[latest]:368
12857:20241101:185444.070 adding performance counter disk/write[latest,absolute]:368
12857:20241101:185444.070 adding performance counter disk/writeFailed[latest]:369
12857:20241101:185444.070 adding performance counter disk/writeFailed[latest,absolute]:369
12857:20241101:185444.070 adding performance counter disk/commands.success[latest]:370
12857:20241101:185444.070 adding performance counter disk/commands.success[latest,absolute]:370
12857:20241101:185444.071 adding performance counter disk/commands.failed[latest]:371
12857:20241101:185444.071 adding performance counter disk/commands.failed[latest,absolute]:371
12857:20241101:185444.071 adding performance counter disk/commands.queued[latest]:372
12857:20241101:185444.071 adding performance counter disk/commands.queued[latest,absolute]:372
12857:20241101:185444.071 adding performance counter disk/commands.active[latest]:373
12857:20241101:185444.071 adding performance counter disk/commands.active[latest,absolute]:373
12857:20241101:185444.071 adding performance counter disk/state[latest]:374
12857:20241101:185444.071 adding performance counter disk/state[latest,absolute]:374
12857:20241101:185444.071 adding performance counter disk/TM.abort[latest]:375
12857:20241101:185444.071 adding performance counter disk/TM.abort[latest,absolute]:375
12857:20241101:185444.071 adding performance counter disk/TM.abortRetry[latest]:376
12857:20241101:185444.071 adding performance counter disk/TM.abortRetry[latest,absolute]:376
12857:20241101:185444.071 adding performance counter disk/TM.abortFailed[latest]:377
12857:20241101:185444.071 adding performance counter disk/TM.abortFailed[latest,absolute]:377
12857:20241101:185444.072 adding performance counter disk/TM.virtReset[latest]:378
12857:20241101:185444.072 adding performance counter disk/TM.virtReset[latest,absolute]:378
12857:20241101:185444.072 adding performance counter disk/TM.virtResetRetry[latest]:379
12857:20241101:185444.072 adding performance counter disk/TM.virtResetRetry[latest,absolute]:379
12857:20241101:185444.072 adding performance counter disk/TM.virtResetFailed[latest]:380
12857:20241101:185444.072 adding performance counter disk/TM.virtResetFailed[latest,absolute]:380
12857:20241101:185444.072 adding performance counter disk/TM.lunReset[latest]:381
12857:20241101:185444.072 adding performance counter disk/TM.lunReset[latest,absolute]:381
12857:20241101:185444.072 adding performance counter disk/TM.lunResetRetry[latest]:382
12857:20241101:185444.072 adding performance counter disk/TM.lunResetRetry[latest,absolute]:382
12857:20241101:185444.072 adding performance counter disk/TM.lunResetFailed[latest]:383
12857:20241101:185444.072 adding performance counter disk/TM.lunResetFailed[latest,absolute]:383
12857:20241101:185444.072 adding performance counter disk/TM.deviceReset[latest]:384
12857:20241101:185444.072 adding performance counter disk/TM.deviceReset[latest,absolute]:384
12857:20241101:185444.072 adding performance counter disk/TM.deviceResetRetry[latest]:385
12857:20241101:185444.072 adding performance counter disk/TM.deviceResetRetry[latest,absolute]:385
12857:20241101:185444.072 adding performance counter disk/TM.deviceResetFailed[latest]:386
12857:20241101:185444.072 adding performance counter disk/TM.deviceResetFailed[latest,absolute]:386
12857:20241101:185444.073 adding performance counter disk/TM.busReset[latest]:387
12857:20241101:185444.073 adding performance counter disk/TM.busReset[latest,absolute]:387
12857:20241101:185444.073 adding performance counter disk/TM.busResetRetry[latest]:388
12857:20241101:185444.073 adding performance counter disk/TM.busResetRetry[latest,absolute]:388
12857:20241101:185444.073 adding performance counter disk/TM.busResetFailed[latest]:389
12857:20241101:185444.073 adding performance counter disk/TM.busResetFailed[latest,absolute]:389
12857:20241101:185444.073 adding performance counter disk/latency.qavg[latest]:390
12857:20241101:185444.073 adding performance counter disk/latency.qavg[latest,absolute]:390
12857:20241101:185444.073 adding performance counter disk/latency.davg[latest]:391
12857:20241101:185444.073 adding performance counter disk/latency.davg[latest,absolute]:391
12857:20241101:185444.073 adding performance counter disk/latency.kavg[latest]:392
12857:20241101:185444.073 adding performance counter disk/latency.kavg[latest,absolute]:392
12857:20241101:185444.073 adding performance counter disk/latency.gavg[latest]:393
12857:20241101:185444.073 adding performance counter disk/latency.gavg[latest,absolute]:393
12857:20241101:185444.074 adding performance counter storageAdapter/outstandingIOs[latest]:394
12857:20241101:185444.074 adding performance counter storageAdapter/outstandingIOs[latest,absolute]:394
12857:20241101:185444.074 adding performance counter storageAdapter/queued[latest]:395
12857:20241101:185444.074 adding performance counter storageAdapter/queued[latest,absolute]:395
12857:20241101:185444.074 adding performance counter storageAdapter/queueDepth[latest]:396
12857:20241101:185444.074 adding performance counter storageAdapter/queueDepth[latest,absolute]:396
12857:20241101:185444.074 adding performance counter cpu/partnerBusyTime[average]:397
12857:20241101:185444.074 adding performance counter cpu/partnerBusyTime[average,rate]:397
12857:20241101:185444.074 adding performance counter cpu/utilization[average]:398
12857:20241101:185444.074 adding performance counter cpu/utilization[average,rate]:398
12857:20241101:185444.074 adding performance counter cpu/corecount.provisioned[latest]:399
12857:20241101:185444.074 adding performance counter cpu/corecount.provisioned[latest,absolute]:399
12857:20241101:185444.074 adding performance counter cpu/cache.l3.occupancy[average]:400
12857:20241101:185444.074 adding performance counter cpu/cache.l3.occupancy[average,absolute]:400
12857:20241101:185444.075 adding performance counter cpu/corecount.usage[latest]:401
12857:20241101:185444.075 adding performance counter cpu/corecount.usage[latest,absolute]:401
12857:20241101:185444.075 adding performance counter cpu/load.avg1min[latest]:402
12857:20241101:185444.075 adding performance counter cpu/load.avg1min[latest,absolute]:402
12857:20241101:185444.075 adding performance counter cpu/load.avg5min[latest]:403
12857:20241101:185444.075 adding performance counter cpu/load.avg5min[latest,absolute]:403
12857:20241101:185444.075 adding performance counter cpu/load.avg15min[latest]:404
12857:20241101:185444.075 adding performance counter cpu/load.avg15min[latest,absolute]:404
12857:20241101:185444.075 adding performance counter mem/capacity.provisioned[latest]:405
12857:20241101:185444.075 adding performance counter mem/capacity.provisioned[latest,absolute]:405
12857:20241101:185444.075 adding performance counter mem/reservedCapacityPct[latest]:406
12857:20241101:185444.075 adding performance counter mem/reservedCapacityPct[latest,absolute]:406
12857:20241101:185444.075 adding performance counter mem/overcommit.avg1min[latest]:407
12857:20241101:185444.075 adding performance counter mem/overcommit.avg1min[latest,absolute]:407
12857:20241101:185444.075 adding performance counter mem/overcommit.avg5min[latest]:408
12857:20241101:185444.075 adding performance counter mem/overcommit.avg5min[latest,absolute]:408
12857:20241101:185444.075 adding performance counter mem/overcommit.avg15min[latest]:409
12857:20241101:185444.075 adding performance counter mem/overcommit.avg15min[latest,absolute]:409
12857:20241101:185444.076 adding performance counter mem/physical.total[latest]:410
12857:20241101:185444.076 adding performance counter mem/physical.total[latest,absolute]:410
12857:20241101:185444.076 adding performance counter mem/physical.user[latest]:411
12857:20241101:185444.076 adding performance counter mem/physical.user[latest,absolute]:411
12857:20241101:185444.076 adding performance counter mem/physical.free[latest]:412
12857:20241101:185444.076 adding performance counter mem/physical.free[latest,absolute]:412
12857:20241101:185444.076 adding performance counter mem/kernel.managed[latest]:413
12857:20241101:185444.076 adding performance counter mem/kernel.managed[latest,absolute]:413
12857:20241101:185444.076 adding performance counter mem/kernel.minfree[latest]:414
12857:20241101:185444.076 adding performance counter mem/kernel.minfree[latest,absolute]:414
12857:20241101:185444.076 adding performance counter mem/kernel.unreserved[latest]:415
12857:20241101:185444.076 adding performance counter mem/kernel.unreserved[latest,absolute]:415
12857:20241101:185444.077 adding performance counter mem/pshare.shared[latest]:416
12857:20241101:185444.077 adding performance counter mem/pshare.shared[latest,absolute]:416
12857:20241101:185444.077 adding performance counter mem/pshare.common[latest]:417
12857:20241101:185444.077 adding performance counter mem/pshare.common[latest,absolute]:417
12857:20241101:185444.077 adding performance counter mem/pshare.sharedSave[latest]:418
12857:20241101:185444.077 adding performance counter mem/pshare.sharedSave[latest,absolute]:418
12857:20241101:185444.077 adding performance counter mem/swap.current[latest]:419
12857:20241101:185444.077 adding performance counter mem/swap.current[latest,absolute]:419
12857:20241101:185444.077 adding performance counter mem/swap.target[latest]:420
12857:20241101:185444.077 adding performance counter mem/swap.target[latest,absolute]:420
12857:20241101:185444.077 adding performance counter mem/swap.readrate[average]:421
12857:20241101:185444.077 adding performance counter mem/swap.readrate[average,rate]:421
12857:20241101:185444.077 adding performance counter mem/swap.writerate[average]:422
12857:20241101:185444.077 adding performance counter mem/swap.writerate[average,rate]:422
12857:20241101:185444.077 adding performance counter mem/zip.zipped[latest]:423
12857:20241101:185444.077 adding performance counter mem/zip.zipped[latest,absolute]:423
12857:20241101:185444.078 adding performance counter mem/zip.saved[latest]:424
12857:20241101:185444.078 adding performance counter mem/zip.saved[latest,absolute]:424
12857:20241101:185444.078 adding performance counter mem/memctl.current[latest]:425
12857:20241101:185444.078 adding performance counter mem/memctl.current[latest,absolute]:425
12857:20241101:185444.078 adding performance counter mem/memctl.target[latest]:426
12857:20241101:185444.078 adding performance counter mem/memctl.target[latest,absolute]:426
12857:20241101:185444.078 adding performance counter mem/memctl.max[latest]:427
12857:20241101:185444.078 adding performance counter mem/memctl.max[latest,absolute]:427
12857:20241101:185444.078 adding performance counter mem/health.reservationState[latest]:428
12857:20241101:185444.078 adding performance counter mem/health.reservationState[latest,absolute]:428
12857:20241101:185444.078 adding performance counter mem/capacity.overhead[average]:429
12857:20241101:185444.078 adding performance counter mem/capacity.overhead[average,absolute]:429
12857:20241101:185444.078 adding performance counter mem/capacity.overheadResv[average]:430
12857:20241101:185444.078 adding performance counter mem/capacity.overheadResv[average,absolute]:430
12857:20241101:185444.078 adding performance counter mem/capacity.consumed[latest]:431
12857:20241101:185444.078 adding performance counter mem/capacity.consumed[latest,absolute]:431
12857:20241101:185444.079 adding performance counter mem/capacity.active[latest]:432
12857:20241101:185444.079 adding performance counter mem/capacity.active[latest,absolute]:432
12857:20241101:185444.079 adding performance counter power/capacity.usageCpu[average]:433
12857:20241101:185444.079 adding performance counter power/capacity.usageCpu[average,absolute]:433
12857:20241101:185444.079 adding performance counter power/capacity.usageMem[average]:434
12857:20241101:185444.079 adding performance counter power/capacity.usageMem[average,absolute]:434
12857:20241101:185444.079 adding performance counter power/capacity.usageOther[average]:435
12857:20241101:185444.079 adding performance counter power/capacity.usageOther[average,absolute]:435
12857:20241101:185444.079 adding performance counter vmotion/vmkernel.downtime[latest]:436
12857:20241101:185444.079 adding performance counter vmotion/vmkernel.downtime[latest,absolute]:436
12857:20241101:185444.079 adding performance counter vmotion/downtime[latest]:437
12857:20241101:185444.079 adding performance counter vmotion/downtime[latest,absolute]:437
12857:20241101:185444.079 adding performance counter vmotion/precopy.time[latest]:438
12857:20241101:185444.079 adding performance counter vmotion/precopy.time[latest,absolute]:438
12857:20241101:185444.080 adding performance counter vmotion/rtt[latest]:439
12857:20241101:185444.080 adding performance counter vmotion/rtt[latest,absolute]:439
12857:20241101:185444.080 adding performance counter vmotion/dst.migration.time[latest]:440
12857:20241101:185444.080 adding performance counter vmotion/dst.migration.time[latest,absolute]:440
12857:20241101:185444.080 adding performance counter vmotion/mem.sizemb[latest]:441
12857:20241101:185444.080 adding performance counter vmotion/mem.sizemb[latest,absolute]:441
12857:20241101:185444.080 adding performance counter hbr/vms[latest]:442
12857:20241101:185444.080 adding performance counter hbr/vms[latest,absolute]:442
12857:20241101:185444.080 adding performance counter net/throughput.hbr.inbound[average]:443
12857:20241101:185444.080 adding performance counter net/throughput.hbr.inbound[average,rate]:443
12857:20241101:185444.080 adding performance counter net/throughput.hbr.outbound[average]:444
12857:20241101:185444.080 adding performance counter net/throughput.hbr.outbound[average,rate]:444
12857:20241101:185444.080 adding performance counter virtualDisk/hbr.readLatencyMS[latest]:445
12857:20241101:185444.080 adding performance counter virtualDisk/hbr.readLatencyMS[latest,absolute]:445
12857:20241101:185444.080 adding performance counter virtualDisk/hbr.stallLatencyMS[latest]:446
12857:20241101:185444.080 adding performance counter virtualDisk/hbr.stallLatencyMS[latest,absolute]:446
12857:20241101:185444.081 adding performance counter net/latency.hbr.outbound[latest]:447
12857:20241101:185444.081 adding performance counter net/latency.hbr.outbound[latest,absolute]:447
12857:20241101:185444.081 adding performance counter lwd/numSnapshots[latest]:448
12857:20241101:185444.081 adding performance counter lwd/numSnapshots[latest,absolute]:448
12857:20241101:185444.081 adding performance counter nfs/apdState[latest]:449
12857:20241101:185444.081 adding performance counter nfs/apdState[latest,absolute]:449
12857:20241101:185444.081 adding performance counter nfs/readIssueTime[latest]:450
12857:20241101:185444.081 adding performance counter nfs/readIssueTime[latest,absolute]:450
12857:20241101:185444.081 adding performance counter nfs/writeIssueTime[latest]:451
12857:20241101:185444.081 adding performance counter nfs/writeIssueTime[latest,absolute]:451
12857:20241101:185444.081 adding performance counter nfs/totalReads[latest]:452
12857:20241101:185444.081 adding performance counter nfs/totalReads[latest,absolute]:452
12857:20241101:185444.081 adding performance counter nfs/readsFailed[latest]:453
12857:20241101:185444.081 adding performance counter nfs/readsFailed[latest,absolute]:453
12857:20241101:185444.081 adding performance counter nfs/totalWrites[latest]:454
12857:20241101:185444.081 adding performance counter nfs/totalWrites[latest,absolute]:454
12857:20241101:185444.082 adding performance counter nfs/writesFailed[latest]:455
12857:20241101:185444.082 adding performance counter nfs/writesFailed[latest,absolute]:455
12857:20241101:185444.082 adding performance counter nfs/readTime[latest]:456
12857:20241101:185444.082 adding performance counter nfs/readTime[latest,absolute]:456
12857:20241101:185444.082 adding performance counter nfs/writeTime[latest]:457
12857:20241101:185444.082 adding performance counter nfs/writeTime[latest,absolute]:457
12857:20241101:185444.082 adding performance counter nfs/ioRequestsQueued[latest]:458
12857:20241101:185444.082 adding performance counter nfs/ioRequestsQueued[latest,absolute]:458
12857:20241101:185444.082 adding performance counter nfs/totalCreate[latest]:459
12857:20241101:185444.082 adding performance counter nfs/totalCreate[latest,absolute]:459
12857:20241101:185444.082 adding performance counter nfs/createFailed[latest]:460
12857:20241101:185444.082 adding performance counter nfs/createFailed[latest,absolute]:460
12857:20241101:185444.082 adding performance counter nfs/socketBufferFull[latest]:461
12857:20241101:185444.082 adding performance counter nfs/socketBufferFull[latest,absolute]:461
12857:20241101:185444.083 adding performance counter datastore/vmfs.totalTxn[latest]:462
12857:20241101:185444.083 adding performance counter datastore/vmfs.totalTxn[latest,absolute]:462
12857:20241101:185444.083 adding performance counter datastore/vmfs.cancelledTxn[latest]:463
12857:20241101:185444.083 adding performance counter datastore/vmfs.cancelledTxn[latest,absolute]:463
12857:20241101:185444.083 adding performance counter datastore/vmfs.apdState[latest]:464
12857:20241101:185444.083 adding performance counter datastore/vmfs.apdState[latest,absolute]:464
12857:20241101:185444.083 adding performance counter datastore/vmfs.apdCount[latest]:465
12857:20241101:185444.083 adding performance counter datastore/vmfs.apdCount[latest,absolute]:465
12857:20241101:185444.083 adding performance counter vvol/pe.isaccessible[latest]:466
12857:20241101:185444.083 adding performance counter vvol/pe.isaccessible[latest,absolute]:466
12857:20241101:185444.083 adding performance counter vvol/pe.reads.done[latest]:467
12857:20241101:185444.083 adding performance counter vvol/pe.reads.done[latest,absolute]:467
12857:20241101:185444.083 adding performance counter vvol/pe.writes.done[latest]:468
12857:20241101:185444.083 adding performance counter vvol/pe.writes.done[latest,absolute]:468
12857:20241101:185444.083 adding performance counter vvol/pe.total.done[latest]:469
12857:20241101:185444.083 adding performance counter vvol/pe.total.done[latest,absolute]:469
12857:20241101:185444.084 adding performance counter vvol/pe.reads.sent[latest]:470
12857:20241101:185444.084 adding performance counter vvol/pe.reads.sent[latest,absolute]:470
12857:20241101:185444.084 adding performance counter vvol/pe.writes.sent[latest]:471
12857:20241101:185444.084 adding performance counter vvol/pe.writes.sent[latest,absolute]:471
12857:20241101:185444.084 adding performance counter vvol/pe.total.sent[latest]:472
12857:20241101:185444.084 adding performance counter vvol/pe.total.sent[latest,absolute]:472
12857:20241101:185444.084 adding performance counter vvol/pe.readsissued.failed[latest]:473
12857:20241101:185444.084 adding performance counter vvol/pe.readsissued.failed[latest,absolute]:473
12857:20241101:185444.084 adding performance counter vvol/pe.writesissued.failed[latest]:474
12857:20241101:185444.084 adding performance counter vvol/pe.writesissued.failed[latest,absolute]:474
12857:20241101:185444.084 adding performance counter vvol/pe.totalissued.failed[latest]:475
12857:20241101:185444.084 adding performance counter vvol/pe.totalissued.failed[latest,absolute]:475
12857:20241101:185444.084 adding performance counter vvol/pe.reads.failed[latest]:476
12857:20241101:185444.084 adding performance counter vvol/pe.reads.failed[latest,absolute]:476
12857:20241101:185444.084 adding performance counter vvol/pe.writes.failed[latest]:477
12857:20241101:185444.084 adding performance counter vvol/pe.writes.failed[latest,absolute]:477
12857:20241101:185444.085 adding performance counter vvol/pe.total.failed[latest]:478
12857:20241101:185444.085 adding performance counter vvol/pe.total.failed[latest,absolute]:478
12857:20241101:185444.085 adding performance counter vvol/pe.read.latency[latest]:479
12857:20241101:185444.085 adding performance counter vvol/pe.read.latency[latest,absolute]:479
12857:20241101:185444.085 adding performance counter vvol/pe.write.latency[latest]:480
12857:20241101:185444.085 adding performance counter vvol/pe.write.latency[latest,absolute]:480
12857:20241101:185444.085 adding performance counter vvol/pe.issue.latency[latest]:481
12857:20241101:185444.085 adding performance counter vvol/pe.issue.latency[latest,absolute]:481
12857:20241101:185444.085 adding performance counter vvol/pe.total.latency[latest]:482
12857:20241101:185444.085 adding performance counter vvol/pe.total.latency[latest,absolute]:482
12857:20241101:185444.085 adding performance counter vvol/pe.cancel.sent[latest]:483
12857:20241101:185444.085 adding performance counter vvol/pe.cancel.sent[latest,absolute]:483
12857:20241101:185444.086 adding performance counter vvol/pe.cancel.failed[latest]:484
12857:20241101:185444.086 adding performance counter vvol/pe.cancel.failed[latest,absolute]:484
12857:20241101:185444.086 adding performance counter vvol/pe.deviceresets.sent[latest]:485
12857:20241101:185444.086 adding performance counter vvol/pe.deviceresets.sent[latest,absolute]:485
12857:20241101:185444.086 adding performance counter vvol/pe.deviceresets.failed[latest]:486
12857:20241101:185444.086 adding performance counter vvol/pe.deviceresets.failed[latest,absolute]:486
12857:20241101:185444.086 adding performance counter vvol/pe.resets.sent[latest]:487
12857:20241101:185444.086 adding performance counter vvol/pe.resets.sent[latest,absolute]:487
12857:20241101:185444.086 adding performance counter vvol/pe.resets.failed[latest]:488
12857:20241101:185444.086 adding performance counter vvol/pe.resets.failed[latest,absolute]:488
12857:20241101:185444.086 adding performance counter vvol/pe.unmaps.sent[latest]:489
12857:20241101:185444.086 adding performance counter vvol/pe.unmaps.sent[latest,absolute]:489
12857:20241101:185444.086 adding performance counter vvol/pe.unmaps.failed[latest]:490
12857:20241101:185444.086 adding performance counter vvol/pe.unmaps.failed[latest,absolute]:490
12857:20241101:185444.086 adding performance counter vvol/container.reads.done[latest]:491
12857:20241101:185444.086 adding performance counter vvol/container.reads.done[latest,absolute]:491
12857:20241101:185444.087 adding performance counter vvol/container.writes.done[latest]:492
12857:20241101:185444.087 adding performance counter vvol/container.writes.done[latest,absolute]:492
12857:20241101:185444.087 adding performance counter vvol/container.total.done[latest]:493
12857:20241101:185444.087 adding performance counter vvol/container.total.done[latest,absolute]:493
12857:20241101:185444.087 adding performance counter vvol/container.reads.sent[latest]:494
12857:20241101:185444.087 adding performance counter vvol/container.reads.sent[latest,absolute]:494
12857:20241101:185444.087 adding performance counter vvol/container.writes.sent[latest]:495
12857:20241101:185444.087 adding performance counter vvol/container.writes.sent[latest,absolute]:495
12857:20241101:185444.087 adding performance counter vvol/container.total.sent[latest]:496
12857:20241101:185444.087 adding performance counter vvol/container.total.sent[latest,absolute]:496
12857:20241101:185444.087 adding performance counter vvol/container.readsissued.failed[latest]:497
12857:20241101:185444.087 adding performance counter vvol/container.readsissued.failed[latest,absolute]:497
12857:20241101:185444.087 adding performance counter vvol/container.writesissued.failed[latest]:498
12857:20241101:185444.087 adding performance counter vvol/container.writesissued.failed[latest,absolute]:498
12857:20241101:185444.087 adding performance counter vvol/container.totalissued.failed[latest]:499
12857:20241101:185444.087 adding performance counter vvol/container.totalissued.failed[latest,absolute]:499
12857:20241101:185444.088 adding performance counter vvol/container.reads.failed[latest]:500
12857:20241101:185444.088 adding performance counter vvol/container.reads.failed[latest,absolute]:500
12857:20241101:185444.088 adding performance counter vvol/container.writes.failed[latest]:501
12857:20241101:185444.088 adding performance counter vvol/container.writes.failed[latest,absolute]:501
12857:20241101:185444.088 adding performance counter vvol/container.total.failed[latest]:502
12857:20241101:185444.088 adding performance counter vvol/container.total.failed[latest,absolute]:502
12857:20241101:185444.088 adding performance counter vvol/container.read.latency[latest]:503
12857:20241101:185444.088 adding performance counter vvol/container.read.latency[latest,absolute]:503
12857:20241101:185444.088 adding performance counter vvol/container.write.latency[latest]:504
12857:20241101:185444.088 adding performance counter vvol/container.write.latency[latest,absolute]:504
12857:20241101:185444.088 adding performance counter vvol/container.issue.latency[latest]:505
12857:20241101:185444.088 adding performance counter vvol/container.issue.latency[latest,absolute]:505
12857:20241101:185444.089 adding performance counter vvol/container.total.latency[latest]:506
12857:20241101:185444.089 adding performance counter vvol/container.total.latency[latest,absolute]:506
12857:20241101:185444.089 adding performance counter vvol/device.reads.done[latest]:507
12857:20241101:185444.089 adding performance counter vvol/device.reads.done[latest,absolute]:507
12857:20241101:185444.089 adding performance counter vvol/device.writes.done[latest]:508
12857:20241101:185444.089 adding performance counter vvol/device.writes.done[latest,absolute]:508
12857:20241101:185444.089 adding performance counter vvol/device.total.done[latest]:509
12857:20241101:185444.089 adding performance counter vvol/device.total.done[latest,absolute]:509
12857:20241101:185444.089 adding performance counter vvol/device.reads.sent[latest]:510
12857:20241101:185444.089 adding performance counter vvol/device.reads.sent[latest,absolute]:510
12857:20241101:185444.089 adding performance counter vvol/device.writes.sent[latest]:511
12857:20241101:185444.089 adding performance counter vvol/device.writes.sent[latest,absolute]:511
12857:20241101:185444.089 adding performance counter vvol/device.total.sent[latest]:512
12857:20241101:185444.089 adding performance counter vvol/device.total.sent[latest,absolute]:512
12857:20241101:185444.089 adding performance counter vvol/device.readsissued.failed[latest]:513
12857:20241101:185444.090 adding performance counter vvol/device.readsissued.failed[latest,absolute]:513
12857:20241101:185444.090 adding performance counter vvol/device.writesissued.failed[latest]:514
12857:20241101:185444.090 adding performance counter vvol/device.writesissued.failed[latest,absolute]:514
12857:20241101:185444.090 adding performance counter vvol/device.totalissued.failed[latest]:515
12857:20241101:185444.090 adding performance counter vvol/device.totalissued.failed[latest,absolute]:515
12857:20241101:185444.090 adding performance counter vvol/device.reads.failed[latest]:516
12857:20241101:185444.090 adding performance counter vvol/device.reads.failed[latest,absolute]:516
12857:20241101:185444.090 adding performance counter vvol/device.writes.failed[latest]:517
12857:20241101:185444.090 adding performance counter vvol/device.writes.failed[latest,absolute]:517
12857:20241101:185444.090 adding performance counter vvol/device.total.failed[latest]:518
12857:20241101:185444.090 adding performance counter vvol/device.total.failed[latest,absolute]:518
12857:20241101:185444.090 adding performance counter vvol/device.read.latency[latest]:519
12857:20241101:185444.090 adding performance counter vvol/device.read.latency[latest,absolute]:519
12857:20241101:185444.090 adding performance counter vvol/device.write.latency[latest]:520
12857:20241101:185444.090 adding performance counter vvol/device.write.latency[latest,absolute]:520
12857:20241101:185444.090 adding performance counter vvol/device.issue.latency[latest]:521
12857:20241101:185444.090 adding performance counter vvol/device.issue.latency[latest,absolute]:521
12857:20241101:185444.090 adding performance counter vvol/device.total.latency[latest]:522
12857:20241101:185444.090 adding performance counter vvol/device.total.latency[latest,absolute]:522
12857:20241101:185444.090 adding performance counter vvol/device.cancel.sent[latest]:523
12857:20241101:185444.090 adding performance counter vvol/device.cancel.sent[latest,absolute]:523
12857:20241101:185444.090 adding performance counter vvol/device.cancel.failed[latest]:524
12857:20241101:185444.090 adding performance counter vvol/device.cancel.failed[latest,absolute]:524
12857:20241101:185444.090 adding performance counter vvol/device.deviceresets.sent[latest]:525
12857:20241101:185444.090 adding performance counter vvol/device.deviceresets.sent[latest,absolute]:525
12857:20241101:185444.090 adding performance counter vvol/device.deviceresets.failed[latest]:526
12857:20241101:185444.090 adding performance counter vvol/device.deviceresets.failed[latest,absolute]:526
12857:20241101:185444.090 adding performance counter vvol/device.resets.sent[latest]:527
12857:20241101:185444.090 adding performance counter vvol/device.resets.sent[latest,absolute]:527
12857:20241101:185444.090 adding performance counter vvol/device.resets.failed[latest]:528
12857:20241101:185444.090 adding performance counter vvol/device.resets.failed[latest,absolute]:528
12857:20241101:185444.091 adding performance counter vvol/device.unmaps.sent[latest]:529
12857:20241101:185444.091 adding performance counter vvol/device.unmaps.sent[latest,absolute]:529
12857:20241101:185444.091 adding performance counter vvol/device.unmaps.failed[latest]:530
12857:20241101:185444.091 adding performance counter vvol/device.unmaps.failed[latest,absolute]:530
12857:20241101:185444.091 adding performance counter cpu/swapwait[summation]:531
12857:20241101:185444.091 adding performance counter cpu/swapwait[summation,delta]:531
12857:20241101:185444.091 adding performance counter cpu/utilization[none]:532
12857:20241101:185444.091 adding performance counter cpu/utilization[none,rate]:532
12857:20241101:185444.091 adding performance counter cpu/utilization[maximum]:533
12857:20241101:185444.091 adding performance counter cpu/utilization[maximum,rate]:533
12857:20241101:185444.091 adding performance counter cpu/utilization[minimum]:534
12857:20241101:185444.091 adding performance counter cpu/utilization[minimum,rate]:534
12857:20241101:185444.091 adding performance counter cpu/coreUtilization[none]:535
12857:20241101:185444.091 adding performance counter cpu/coreUtilization[none,rate]:535
12857:20241101:185444.091 adding performance counter cpu/coreUtilization[average]:536
12857:20241101:185444.091 adding performance counter cpu/coreUtilization[average,rate]:536
12857:20241101:185444.091 adding performance counter cpu/coreUtilization[maximum]:537
12857:20241101:185444.091 adding performance counter cpu/coreUtilization[maximum,rate]:537
12857:20241101:185444.091 adding performance counter cpu/coreUtilization[minimum]:538
12857:20241101:185444.091 adding performance counter cpu/coreUtilization[minimum,rate]:538
12857:20241101:185444.091 adding performance counter cpu/totalCapacity[average]:539
12857:20241101:185444.091 adding performance counter cpu/totalCapacity[average,absolute]:539
12857:20241101:185444.091 adding performance counter cpu/latency[average]:540
12857:20241101:185444.091 adding performance counter cpu/latency[average,rate]:540
12857:20241101:185444.091 adding performance counter cpu/entitlement[latest]:541
12857:20241101:185444.091 adding performance counter cpu/entitlement[latest,absolute]:541
12857:20241101:185444.091 adding performance counter cpu/demand[average]:542
12857:20241101:185444.091 adding performance counter cpu/demand[average,absolute]:542
12857:20241101:185444.091 adding performance counter cpu/costop[summation]:543
12857:20241101:185444.091 adding performance counter cpu/costop[summation,delta]:543
12857:20241101:185444.091 adding performance counter cpu/maxlimited[summation]:544
12857:20241101:185444.091 adding performance counter cpu/maxlimited[summation,delta]:544
12857:20241101:185444.091 adding performance counter cpu/overlap[summation]:545
12857:20241101:185444.091 adding performance counter cpu/overlap[summation,delta]:545
12857:20241101:185444.092 adding performance counter cpu/run[summation]:546
12857:20241101:185444.092 adding performance counter cpu/run[summation,delta]:546
12857:20241101:185444.092 adding performance counter cpu/demandEntitlementRatio[latest]:547
12857:20241101:185444.092 adding performance counter cpu/demandEntitlementRatio[latest,absolute]:547
12857:20241101:185444.092 adding performance counter cpu/readiness[average]:548
12857:20241101:185444.092 adding performance counter cpu/readiness[average,rate]:548
12857:20241101:185444.092 adding performance counter cpu/usage.vcpus[average]:549
12857:20241101:185444.092 adding performance counter cpu/usage.vcpus[average,rate]:549
12857:20241101:185444.092 adding performance counter mem/swapin[none]:550
12857:20241101:185444.092 adding performance counter mem/swapin[none,absolute]:550
12857:20241101:185444.092 adding performance counter mem/swapin[average]:551
12857:20241101:185444.092 adding performance counter mem/swapin[average,absolute]:551
12857:20241101:185444.092 adding performance counter mem/swapin[maximum]:552
12857:20241101:185444.092 adding performance counter mem/swapin[maximum,absolute]:552
12857:20241101:185444.092 adding performance counter mem/swapin[minimum]:553
12857:20241101:185444.092 adding performance counter mem/swapin[minimum,absolute]:553
12857:20241101:185444.092 adding performance counter mem/swapout[none]:554
12857:20241101:185444.092 adding performance counter mem/swapout[none,absolute]:554
12857:20241101:185444.092 adding performance counter mem/swapout[average]:555
12857:20241101:185444.092 adding performance counter mem/swapout[average,absolute]:555
12857:20241101:185444.092 adding performance counter mem/swapout[maximum]:556
12857:20241101:185444.092 adding performance counter mem/swapout[maximum,absolute]:556
12857:20241101:185444.092 adding performance counter mem/swapout[minimum]:557
12857:20241101:185444.092 adding performance counter mem/swapout[minimum,absolute]:557
12857:20241101:185444.092 adding performance counter mem/sysUsage[none]:558
12857:20241101:185444.092 adding performance counter mem/sysUsage[none,absolute]:558
12857:20241101:185444.092 adding performance counter mem/sysUsage[average]:559
12857:20241101:185444.092 adding performance counter mem/sysUsage[average,absolute]:559
12857:20241101:185444.092 adding performance counter mem/sysUsage[maximum]:560
12857:20241101:185444.092 adding performance counter mem/sysUsage[maximum,absolute]:560
12857:20241101:185444.092 adding performance counter mem/sysUsage[minimum]:561
12857:20241101:185444.092 adding performance counter mem/sysUsage[minimum,absolute]:561
12857:20241101:185444.092 adding performance counter mem/activewrite[average]:562
12857:20241101:185444.092 adding performance counter mem/activewrite[average,absolute]:562
12857:20241101:185444.092 adding performance counter mem/overheadMax[average]:563
12857:20241101:185444.092 adding performance counter mem/overheadMax[average,absolute]:563
12857:20241101:185444.093 adding performance counter mem/totalCapacity[average]:564
12857:20241101:185444.093 adding performance counter mem/totalCapacity[average,absolute]:564
12857:20241101:185444.093 adding performance counter mem/zipped[latest]:565
12857:20241101:185444.093 adding performance counter mem/zipped[latest,absolute]:565
12857:20241101:185444.093 adding performance counter mem/zipSaved[latest]:566
12857:20241101:185444.093 adding performance counter mem/zipSaved[latest,absolute]:566
12857:20241101:185444.093 adding performance counter mem/latency[average]:567
12857:20241101:185444.093 adding performance counter mem/latency[average,absolute]:567
12857:20241101:185444.093 adding performance counter mem/entitlement[average]:568
12857:20241101:185444.093 adding performance counter mem/entitlement[average,absolute]:568
12857:20241101:185444.093 adding performance counter mem/lowfreethreshold[average]:569
12857:20241101:185444.093 adding performance counter mem/lowfreethreshold[average,absolute]:569
12857:20241101:185444.093 adding performance counter mem/llSwapUsed[none]:570
12857:20241101:185444.093 adding performance counter mem/llSwapUsed[none,absolute]:570
12857:20241101:185444.093 adding performance counter mem/llSwapInRate[average]:571
12857:20241101:185444.093 adding performance counter mem/llSwapInRate[average,rate]:571
12857:20241101:185444.093 adding performance counter mem/llSwapOutRate[average]:572
12857:20241101:185444.093 adding performance counter mem/llSwapOutRate[average,rate]:572
12857:20241101:185444.093 adding performance counter mem/overheadTouched[average]:573
12857:20241101:185444.093 adding performance counter mem/overheadTouched[average,absolute]:573
12857:20241101:185444.093 adding performance counter mem/llSwapUsed[average]:574
12857:20241101:185444.093 adding performance counter mem/llSwapUsed[average,absolute]:574
12857:20241101:185444.093 adding performance counter mem/llSwapUsed[maximum]:575
12857:20241101:185444.093 adding performance counter mem/llSwapUsed[maximum,absolute]:575
12857:20241101:185444.093 adding performance counter mem/llSwapUsed[minimum]:576
12857:20241101:185444.093 adding performance counter mem/llSwapUsed[minimum,absolute]:576
12857:20241101:185444.093 adding performance counter mem/llSwapIn[none]:577
12857:20241101:185444.093 adding performance counter mem/llSwapIn[none,absolute]:577
12857:20241101:185444.093 adding performance counter mem/llSwapIn[average]:578
12857:20241101:185444.093 adding performance counter mem/llSwapIn[average,absolute]:578
12857:20241101:185444.093 adding performance counter mem/llSwapIn[maximum]:579
12857:20241101:185444.093 adding performance counter mem/llSwapIn[maximum,absolute]:579
12857:20241101:185444.093 adding performance counter mem/llSwapIn[minimum]:580
12857:20241101:185444.093 adding performance counter mem/llSwapIn[minimum,absolute]:580
12857:20241101:185444.093 adding performance counter mem/llSwapOut[none]:581
12857:20241101:185444.093 adding performance counter mem/llSwapOut[none,absolute]:581
12857:20241101:185444.094 adding performance counter mem/llSwapOut[average]:582
12857:20241101:185444.094 adding performance counter mem/llSwapOut[average,absolute]:582
12857:20241101:185444.094 adding performance counter mem/llSwapOut[maximum]:583
12857:20241101:185444.094 adding performance counter mem/llSwapOut[maximum,absolute]:583
12857:20241101:185444.094 adding performance counter mem/llSwapOut[minimum]:584
12857:20241101:185444.094 adding performance counter mem/llSwapOut[minimum,absolute]:584
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.size[latest]:585
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.size[latest,absolute]:585
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.sizeMax[latest]:586
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.sizeMax[latest,absolute]:586
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.workingSet[latest]:587
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.workingSet[latest,absolute]:587
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.workingSetMax[latest]:588
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.workingSetMax[latest,absolute]:588
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.overhead[latest]:589
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.overhead[latest,absolute]:589
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.capMissRatio[latest]:590
12857:20241101:185444.094 adding performance counter mem/vmfs.pbc.capMissRatio[latest,absolute]:590
12857:20241101:185444.094 adding performance counter disk/commands[summation]:591
12857:20241101:185444.094 adding performance counter disk/commands[summation,delta]:591
12857:20241101:185444.094 adding performance counter disk/deviceReadLatency[average]:592
12857:20241101:185444.094 adding performance counter disk/deviceReadLatency[average,absolute]:592
12857:20241101:185444.094 adding performance counter disk/kernelReadLatency[average]:593
12857:20241101:185444.094 adding performance counter disk/kernelReadLatency[average,absolute]:593
12857:20241101:185444.094 adding performance counter disk/totalReadLatency[average]:594
12857:20241101:185444.094 adding performance counter disk/totalReadLatency[average,absolute]:594
12857:20241101:185444.094 adding performance counter disk/queueReadLatency[average]:595
12857:20241101:185444.094 adding performance counter disk/queueReadLatency[average,absolute]:595
12857:20241101:185444.094 adding performance counter disk/deviceWriteLatency[average]:596
12857:20241101:185444.094 adding performance counter disk/deviceWriteLatency[average,absolute]:596
12857:20241101:185444.094 adding performance counter disk/kernelWriteLatency[average]:597
12857:20241101:185444.094 adding performance counter disk/kernelWriteLatency[average,absolute]:597
12857:20241101:185444.094 adding performance counter disk/totalWriteLatency[average]:598
12857:20241101:185444.094 adding performance counter disk/totalWriteLatency[average,absolute]:598
12857:20241101:185444.094 adding performance counter disk/queueWriteLatency[average]:599
12857:20241101:185444.094 adding performance counter disk/queueWriteLatency[average,absolute]:599
12857:20241101:185444.094 adding performance counter disk/deviceLatency[average]:600
12857:20241101:185444.095 adding performance counter disk/deviceLatency[average,absolute]:600
12857:20241101:185444.095 adding performance counter disk/kernelLatency[average]:601
12857:20241101:185444.095 adding performance counter disk/kernelLatency[average,absolute]:601
12857:20241101:185444.095 adding performance counter disk/queueLatency[average]:602
12857:20241101:185444.095 adding performance counter disk/queueLatency[average,absolute]:602
12857:20241101:185444.095 adding performance counter disk/maxQueueDepth[average]:603
12857:20241101:185444.095 adding performance counter disk/maxQueueDepth[average,absolute]:603
12857:20241101:185444.095 adding performance counter disk/commandsAveraged[average]:604
12857:20241101:185444.095 adding performance counter disk/commandsAveraged[average,rate]:604
12857:20241101:185444.095 adding performance counter net/droppedRx[summation]:605
12857:20241101:185444.095 adding performance counter net/droppedRx[summation,delta]:605
12857:20241101:185444.095 adding performance counter net/droppedTx[summation]:606
12857:20241101:185444.095 adding performance counter net/droppedTx[summation,delta]:606
12857:20241101:185444.095 adding performance counter net/bytesRx[average]:607
12857:20241101:185444.095 adding performance counter net/bytesRx[average,rate]:607
12857:20241101:185444.095 adding performance counter net/bytesTx[average]:608
12857:20241101:185444.095 adding performance counter net/bytesTx[average,rate]:608
12857:20241101:185444.095 adding performance counter net/broadcastRx[summation]:609
12857:20241101:185444.095 adding performance counter net/broadcastRx[summation,delta]:609
12857:20241101:185444.095 adding performance counter net/broadcastTx[summation]:610
12857:20241101:185444.095 adding performance counter net/broadcastTx[summation,delta]:610
12857:20241101:185444.095 adding performance counter net/multicastRx[summation]:611
12857:20241101:185444.095 adding performance counter net/multicastRx[summation,delta]:611
12857:20241101:185444.095 adding performance counter net/multicastTx[summation]:612
12857:20241101:185444.095 adding performance counter net/multicastTx[summation,delta]:612
12857:20241101:185444.095 adding performance counter net/errorsRx[summation]:613
12857:20241101:185444.095 adding performance counter net/errorsRx[summation,delta]:613
12857:20241101:185444.095 adding performance counter net/errorsTx[summation]:614
12857:20241101:185444.095 adding performance counter net/errorsTx[summation,delta]:614
12857:20241101:185444.095 adding performance counter net/unknownProtos[summation]:615
12857:20241101:185444.095 adding performance counter net/unknownProtos[summation,delta]:615
12857:20241101:185444.095 adding performance counter net/pnicBytesRx[average]:616
12857:20241101:185444.095 adding performance counter net/pnicBytesRx[average,rate]:616
12857:20241101:185444.095 adding performance counter net/pnicBytesTx[average]:617
12857:20241101:185444.095 adding performance counter net/pnicBytesTx[average,rate]:617
12857:20241101:185444.095 adding performance counter sys/heartbeat[latest]:618
12857:20241101:185444.095 adding performance counter sys/heartbeat[latest,absolute]:618
12857:20241101:185444.096 adding performance counter sys/diskUsage[latest]:619
12857:20241101:185444.096 adding performance counter sys/diskUsage[latest,absolute]:619
12857:20241101:185444.096 adding performance counter sys/resourceCpuUsage[none]:620
12857:20241101:185444.096 adding performance counter sys/resourceCpuUsage[none,rate]:620
12857:20241101:185444.096 adding performance counter sys/resourceCpuUsage[average]:621
12857:20241101:185444.096 adding performance counter sys/resourceCpuUsage[average,rate]:621
12857:20241101:185444.096 adding performance counter sys/resourceCpuUsage[maximum]:622
12857:20241101:185444.096 adding performance counter sys/resourceCpuUsage[maximum,rate]:622
12857:20241101:185444.096 adding performance counter sys/resourceCpuUsage[minimum]:623
12857:20241101:185444.096 adding performance counter sys/resourceCpuUsage[minimum,rate]:623
12857:20241101:185444.096 adding performance counter sys/resourceMemTouched[latest]:624
12857:20241101:185444.096 adding performance counter sys/resourceMemTouched[latest,absolute]:624
12857:20241101:185444.096 adding performance counter sys/resourceMemMapped[latest]:625
12857:20241101:185444.096 adding performance counter sys/resourceMemMapped[latest,absolute]:625
12857:20241101:185444.096 adding performance counter sys/resourceMemShared[latest]:626
12857:20241101:185444.096 adding performance counter sys/resourceMemShared[latest,absolute]:626
12857:20241101:185444.096 adding performance counter sys/resourceMemSwapped[latest]:627
12857:20241101:185444.096 adding performance counter sys/resourceMemSwapped[latest,absolute]:627
12857:20241101:185444.096 adding performance counter sys/resourceMemOverhead[latest]:628
12857:20241101:185444.096 adding performance counter sys/resourceMemOverhead[latest,absolute]:628
12857:20241101:185444.096 adding performance counter sys/resourceMemCow[latest]:629
12857:20241101:185444.096 adding performance counter sys/resourceMemCow[latest,absolute]:629
12857:20241101:185444.096 adding performance counter sys/resourceMemZero[latest]:630
12857:20241101:185444.096 adding performance counter sys/resourceMemZero[latest,absolute]:630
12857:20241101:185444.096 adding performance counter sys/resourceCpuRun1[latest]:631
12857:20241101:185444.096 adding performance counter sys/resourceCpuRun1[latest,absolute]:631
12857:20241101:185444.096 adding performance counter sys/resourceCpuAct1[latest]:632
12857:20241101:185444.096 adding performance counter sys/resourceCpuAct1[latest,absolute]:632
12857:20241101:185444.096 adding performance counter sys/resourceCpuMaxLimited1[latest]:633
12857:20241101:185444.096 adding performance counter sys/resourceCpuMaxLimited1[latest,absolute]:633
12857:20241101:185444.096 adding performance counter sys/resourceCpuRun5[latest]:634
12857:20241101:185444.096 adding performance counter sys/resourceCpuRun5[latest,absolute]:634
12857:20241101:185444.096 adding performance counter sys/resourceCpuAct5[latest]:635
12857:20241101:185444.096 adding performance counter sys/resourceCpuAct5[latest,absolute]:635
12857:20241101:185444.096 adding performance counter sys/resourceCpuMaxLimited5[latest]:636
12857:20241101:185444.096 adding performance counter sys/resourceCpuMaxLimited5[latest,absolute]:636
12857:20241101:185444.096 adding performance counter sys/resourceCpuAllocMin[latest]:637
12857:20241101:185444.096 adding performance counter sys/resourceCpuAllocMin[latest,absolute]:637
12857:20241101:185444.097 adding performance counter sys/resourceCpuAllocMax[latest]:638
12857:20241101:185444.097 adding performance counter sys/resourceCpuAllocMax[latest,absolute]:638
12857:20241101:185444.097 adding performance counter sys/resourceCpuAllocShares[latest]:639
12857:20241101:185444.097 adding performance counter sys/resourceCpuAllocShares[latest,absolute]:639
12857:20241101:185444.097 adding performance counter sys/resourceMemAllocMin[latest]:640
12857:20241101:185444.097 adding performance counter sys/resourceMemAllocMin[latest,absolute]:640
12857:20241101:185444.097 adding performance counter sys/resourceMemAllocMax[latest]:641
12857:20241101:185444.097 adding performance counter sys/resourceMemAllocMax[latest,absolute]:641
12857:20241101:185444.097 adding performance counter sys/resourceMemAllocShares[latest]:642
12857:20241101:185444.097 adding performance counter sys/resourceMemAllocShares[latest,absolute]:642
12857:20241101:185444.097 adding performance counter sys/osUptime[latest]:643
12857:20241101:185444.097 adding performance counter sys/osUptime[latest,absolute]:643
12857:20241101:185444.097 adding performance counter sys/resourceMemConsumed[latest]:644
12857:20241101:185444.097 adding performance counter sys/resourceMemConsumed[latest,absolute]:644
12857:20241101:185444.097 adding performance counter sys/resourceFdUsage[latest]:645
12857:20241101:185444.097 adding performance counter sys/resourceFdUsage[latest,absolute]:645
12857:20241101:185444.097 adding performance counter rescpu/actpk1[latest]:646
12857:20241101:185444.097 adding performance counter rescpu/actpk1[latest,absolute]:646
12857:20241101:185444.097 adding performance counter rescpu/runav1[latest]:647
12857:20241101:185444.097 adding performance counter rescpu/runav1[latest,absolute]:647
12857:20241101:185444.097 adding performance counter rescpu/actav5[latest]:648
12857:20241101:185444.097 adding performance counter rescpu/actav5[latest,absolute]:648
12857:20241101:185444.097 adding performance counter rescpu/actpk5[latest]:649
12857:20241101:185444.097 adding performance counter rescpu/actpk5[latest,absolute]:649
12857:20241101:185444.097 adding performance counter rescpu/runav5[latest]:650
12857:20241101:185444.097 adding performance counter rescpu/runav5[latest,absolute]:650
12857:20241101:185444.097 adding performance counter rescpu/actav15[latest]:651
12857:20241101:185444.097 adding performance counter rescpu/actav15[latest,absolute]:651
12857:20241101:185444.097 adding performance counter rescpu/actpk15[latest]:652
12857:20241101:185444.097 adding performance counter rescpu/actpk15[latest,absolute]:652
12857:20241101:185444.097 adding performance counter rescpu/runav15[latest]:653
12857:20241101:185444.097 adding performance counter rescpu/runav15[latest,absolute]:653
12857:20241101:185444.097 adding performance counter rescpu/runpk1[latest]:654
12857:20241101:185444.097 adding performance counter rescpu/runpk1[latest,absolute]:654
12857:20241101:185444.097 adding performance counter rescpu/maxLimited1[latest]:655
12857:20241101:185444.097 adding performance counter rescpu/maxLimited1[latest,absolute]:655
12857:20241101:185444.098 adding performance counter rescpu/runpk5[latest]:656
12857:20241101:185444.098 adding performance counter rescpu/runpk5[latest,absolute]:656
12857:20241101:185444.098 adding performance counter rescpu/maxLimited5[latest]:657
12857:20241101:185444.098 adding performance counter rescpu/maxLimited5[latest,absolute]:657
12857:20241101:185444.098 adding performance counter rescpu/runpk15[latest]:658
12857:20241101:185444.098 adding performance counter rescpu/runpk15[latest,absolute]:658
12857:20241101:185444.098 adding performance counter rescpu/maxLimited15[latest]:659
12857:20241101:185444.098 adding performance counter rescpu/maxLimited15[latest,absolute]:659
12857:20241101:185444.098 adding performance counter rescpu/sampleCount[latest]:660
12857:20241101:185444.098 adding performance counter rescpu/sampleCount[latest,absolute]:660
12857:20241101:185444.098 adding performance counter rescpu/samplePeriod[latest]:661
12857:20241101:185444.098 adding performance counter rescpu/samplePeriod[latest,absolute]:661
12857:20241101:185444.098 adding performance counter managementAgent/memUsed[average]:662
12857:20241101:185444.098 adding performance counter managementAgent/memUsed[average,absolute]:662
12857:20241101:185444.098 adding performance counter managementAgent/swapUsed[average]:663
12857:20241101:185444.098 adding performance counter managementAgent/swapUsed[average,absolute]:663
12857:20241101:185444.098 adding performance counter managementAgent/cpuUsage[average]:664
12857:20241101:185444.098 adding performance counter managementAgent/cpuUsage[average,rate]:664
12857:20241101:185444.098 adding performance counter storagePath/commandsAveraged[average]:665
12857:20241101:185444.098 adding performance counter storagePath/commandsAveraged[average,rate]:665
12857:20241101:185444.098 adding performance counter storagePath/numberReadAveraged[average]:666
12857:20241101:185444.098 adding performance counter storagePath/numberReadAveraged[average,rate]:666
12857:20241101:185444.098 adding performance counter storagePath/numberWriteAveraged[average]:667
12857:20241101:185444.098 adding performance counter storagePath/numberWriteAveraged[average,rate]:667
12857:20241101:185444.098 adding performance counter storagePath/read[average]:668
12857:20241101:185444.098 adding performance counter storagePath/read[average,rate]:668
12857:20241101:185444.098 adding performance counter storagePath/write[average]:669
12857:20241101:185444.098 adding performance counter storagePath/write[average,rate]:669
12857:20241101:185444.098 adding performance counter storagePath/totalReadLatency[average]:670
12857:20241101:185444.098 adding performance counter storagePath/totalReadLatency[average,absolute]:670
12857:20241101:185444.098 adding performance counter storagePath/totalWriteLatency[average]:671
12857:20241101:185444.098 adding performance counter storagePath/totalWriteLatency[average,absolute]:671
12857:20241101:185444.098 adding performance counter virtualDisk/readIOSize[latest]:672
12857:20241101:185444.098 adding performance counter virtualDisk/readIOSize[latest,absolute]:672
12857:20241101:185444.098 adding performance counter virtualDisk/writeIOSize[latest]:673
12857:20241101:185444.098 adding performance counter virtualDisk/writeIOSize[latest,absolute]:673
12857:20241101:185444.099 adding performance counter virtualDisk/smallSeeks[latest]:674
12857:20241101:185444.099 adding performance counter virtualDisk/smallSeeks[latest,absolute]:674
12857:20241101:185444.099 adding performance counter virtualDisk/mediumSeeks[latest]:675
12857:20241101:185444.099 adding performance counter virtualDisk/mediumSeeks[latest,absolute]:675
12857:20241101:185444.099 adding performance counter virtualDisk/largeSeeks[latest]:676
12857:20241101:185444.099 adding performance counter virtualDisk/largeSeeks[latest,absolute]:676
12857:20241101:185444.099 adding performance counter virtualDisk/readLatencyUS[latest]:677
12857:20241101:185444.099 adding performance counter virtualDisk/readLatencyUS[latest,absolute]:677
12857:20241101:185444.099 adding performance counter virtualDisk/writeLatencyUS[latest]:678
12857:20241101:185444.099 adding performance counter virtualDisk/writeLatencyUS[latest,absolute]:678
12857:20241101:185444.099 adding performance counter datastore/datastoreMaxQueueDepth[latest]:679
12857:20241101:185444.099 adding performance counter datastore/datastoreMaxQueueDepth[latest,absolute]:679
12857:20241101:185444.099 adding performance counter datastore/unmapSize[summation]:680
12857:20241101:185444.099 adding performance counter datastore/unmapSize[summation,delta]:680
12857:20241101:185444.099 adding performance counter datastore/unmapIOs[summation]:681
12857:20241101:185444.099 adding performance counter datastore/unmapIOs[summation,delta]:681
12857:20241101:185444.099 adding performance counter hbr/hbrNumVms[average]:682
12857:20241101:185444.099 adding performance counter hbr/hbrNumVms[average,absolute]:682
12857:20241101:185444.099 adding performance counter hbr/hbrNetRx[average]:683
12857:20241101:185444.099 adding performance counter hbr/hbrNetRx[average,rate]:683
12857:20241101:185444.099 adding performance counter hbr/hbrNetTx[average]:684
12857:20241101:185444.099 adding performance counter hbr/hbrNetTx[average,rate]:684
12857:20241101:185444.099 adding performance counter hbr/hbrNetLatency[average]:685
12857:20241101:185444.099 adding performance counter hbr/hbrNetLatency[average,absolute]:685
12857:20241101:185444.099 adding performance counter hbr/hbrDiskReadLatency[average]:686
12857:20241101:185444.099 adding performance counter hbr/hbrDiskReadLatency[average,absolute]:686
12857:20241101:185444.099 adding performance counter hbr/hbrDiskStallLatency[average]:687
12857:20241101:185444.099 adding performance counter hbr/hbrDiskStallLatency[average,absolute]:687
12857:20241101:185444.099 adding performance counter hbr/hbrDiskTransferSuccess[average]:688
12857:20241101:185444.099 adding performance counter hbr/hbrDiskTransferSuccess[average,absolute]:688
12857:20241101:185444.099 adding performance counter hbr/hbrDiskTransferIdle[average]:689
12857:20241101:185444.099 adding performance counter hbr/hbrDiskTransferIdle[average,absolute]:689
12857:20241101:185444.099 adding performance counter hbr/hbrDiskTransferBytes[average]:690
12857:20241101:185444.099 adding performance counter hbr/hbrDiskTransferBytes[average,absolute]:690
12857:20241101:185444.099 adding performance counter vflashModule/numActiveVMDKs[latest]:691
12857:20241101:185444.099 adding performance counter vflashModule/numActiveVMDKs[latest,absolute]:691
12857:20241101:185444.099 adding performance counter vsanDomObj/readIops[average]:692
12857:20241101:185444.100 adding performance counter vsanDomObj/readIops[average,rate]:692
12857:20241101:185444.100 adding performance counter vsanDomObj/readThroughput[average]:693
12857:20241101:185444.100 adding performance counter vsanDomObj/readThroughput[average,rate]:693
12857:20241101:185444.100 adding performance counter vsanDomObj/readAvgLatency[average]:694
12857:20241101:185444.100 adding performance counter vsanDomObj/readAvgLatency[average,absolute]:694
12857:20241101:185444.100 adding performance counter vsanDomObj/readMaxLatency[latest]:695
12857:20241101:185444.100 adding performance counter vsanDomObj/readMaxLatency[latest,absolute]:695
12857:20241101:185444.100 adding performance counter vsanDomObj/readCacheHitRate[latest]:696
12857:20241101:185444.100 adding performance counter vsanDomObj/readCacheHitRate[latest,absolute]:696
12857:20241101:185444.100 adding performance counter vsanDomObj/readCongestion[average]:697
12857:20241101:185444.100 adding performance counter vsanDomObj/readCongestion[average,rate]:697
12857:20241101:185444.100 adding performance counter vsanDomObj/writeIops[average]:698
12857:20241101:185444.100 adding performance counter vsanDomObj/writeIops[average,rate]:698
12857:20241101:185444.100 adding performance counter vsanDomObj/writeThroughput[average]:699
12857:20241101:185444.100 adding performance counter vsanDomObj/writeThroughput[average,rate]:699
12857:20241101:185444.100 adding performance counter vsanDomObj/writeAvgLatency[average]:700
12857:20241101:185444.100 adding performance counter vsanDomObj/writeAvgLatency[average,absolute]:700
12857:20241101:185444.100 adding performance counter vsanDomObj/writeMaxLatency[latest]:701
12857:20241101:185444.100 adding performance counter vsanDomObj/writeMaxLatency[latest,absolute]:701
12857:20241101:185444.100 adding performance counter vsanDomObj/writeCongestion[average]:702
12857:20241101:185444.100 adding performance counter vsanDomObj/writeCongestion[average,rate]:702
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteIops[average]:703
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteIops[average,rate]:703
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteThroughput[average]:704
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteThroughput[average,rate]:704
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average]:705
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average,absolute]:705
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest]:706
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest,absolute]:706
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteCongestion[average]:707
12857:20241101:185444.100 adding performance counter vsanDomObj/recoveryWriteCongestion[average,rate]:707
12857:20241101:185444.100 adding performance counter gpu/utilization[none]:708
12857:20241101:185444.100 adding performance counter gpu/utilization[none,absolute]:708
12857:20241101:185444.100 adding performance counter gpu/utilization[maximum]:709
12857:20241101:185444.100 adding performance counter gpu/utilization[maximum,absolute]:709
12857:20241101:185444.100 adding performance counter gpu/utilization[minimum]:710
12857:20241101:185444.100 adding performance counter gpu/utilization[minimum,absolute]:710
12857:20241101:185444.101 adding performance counter gpu/mem.used[none]:711
12857:20241101:185444.101 adding performance counter gpu/mem.used[none,absolute]:711
12857:20241101:185444.101 adding performance counter gpu/mem.used[maximum]:712
12857:20241101:185444.101 adding performance counter gpu/mem.used[maximum,absolute]:712
12857:20241101:185444.101 adding performance counter gpu/mem.used[minimum]:713
12857:20241101:185444.101 adding performance counter gpu/mem.used[minimum,absolute]:713
12857:20241101:185444.101 adding performance counter gpu/mem.usage[none]:714
12857:20241101:185444.101 adding performance counter gpu/mem.usage[none,absolute]:714
12857:20241101:185444.101 adding performance counter gpu/mem.usage[average]:715
12857:20241101:185444.101 adding performance counter gpu/mem.usage[average,absolute]:715
12857:20241101:185444.101 adding performance counter gpu/mem.usage[maximum]:716
12857:20241101:185444.101 adding performance counter gpu/mem.usage[maximum,absolute]:716
12857:20241101:185444.101 adding performance counter gpu/mem.usage[minimum]:717
12857:20241101:185444.101 adding performance counter gpu/mem.usage[minimum,absolute]:717
12857:20241101:185444.101 Unknown performance counter 718 type of unitInfo:gigaBytes
12857:20241101:185444.101 adding performance counter gpu/mem.used.gb[latest]:718
12857:20241101:185444.101 Unknown performance counter 718 type of unitInfo:gigaBytes
12857:20241101:185444.101 adding performance counter gpu/mem.used.gb[latest,absolute]:718
12857:20241101:185444.101 Unknown performance counter 719 type of unitInfo:gigaBytes
12857:20241101:185444.101 adding performance counter gpu/mem.reserved.gb[latest]:719
12857:20241101:185444.101 Unknown performance counter 719 type of unitInfo:gigaBytes
12857:20241101:185444.101 adding performance counter gpu/mem.reserved.gb[latest,absolute]:719
12857:20241101:185444.101 Unknown performance counter 720 type of unitInfo:gigaBytes
12857:20241101:185444.101 adding performance counter gpu/mem.total.gb[latest]:720
12857:20241101:185444.101 Unknown performance counter 720 type of unitInfo:gigaBytes
12857:20241101:185444.101 adding performance counter gpu/mem.total.gb[latest,absolute]:720
12857:20241101:185444.101 adding performance counter pmem/available.reservation[latest]:721
12857:20241101:185444.101 adding performance counter pmem/available.reservation[latest,absolute]:721
12857:20241101:185444.101 adding performance counter pmem/drsmanaged.reservation[latest]:722
12857:20241101:185444.101 adding performance counter pmem/drsmanaged.reservation[latest,absolute]:722
12857:20241101:185444.101 adding performance counter vmx/numVCPUs[latest]:723
12857:20241101:185444.101 adding performance counter vmx/numVCPUs[latest,absolute]:723
12857:20241101:185444.101 adding performance counter vmx/vcpusMhzMin[latest]:724
12857:20241101:185444.101 adding performance counter vmx/vcpusMhzMin[latest,absolute]:724
12857:20241101:185444.101 adding performance counter vmx/vcpusMhzMax[latest]:725
12857:20241101:185444.101 adding performance counter vmx/vcpusMhzMax[latest,absolute]:725
12857:20241101:185444.101 adding performance counter vmx/vcpusMhzMean[latest]:726
12857:20241101:185444.101 adding performance counter vmx/vcpusMhzMean[latest,absolute]:726
12857:20241101:185444.101 adding performance counter vmx/cpuSpeed[latest]:727
12857:20241101:185444.102 adding performance counter vmx/cpuSpeed[latest,absolute]:727
12857:20241101:185444.102 adding performance counter vmx/overheadMemSizeMin[latest]:728
12857:20241101:185444.102 adding performance counter vmx/overheadMemSizeMin[latest,absolute]:728
12857:20241101:185444.102 adding performance counter vmx/overheadMemSizeMax[latest]:729
12857:20241101:185444.102 adding performance counter vmx/overheadMemSizeMax[latest,absolute]:729
12857:20241101:185444.102 adding performance counter vmx/vigor.opsTotal[latest]:730
12857:20241101:185444.102 adding performance counter vmx/vigor.opsTotal[latest,absolute]:730
12857:20241101:185444.102 adding performance counter vmx/poll.itersPerS[latest]:731
12857:20241101:185444.102 adding performance counter vmx/poll.itersPerS[latest,absolute]:731
12857:20241101:185444.102 adding performance counter vmx/userRpc.opsPerS[latest]:732
12857:20241101:185444.102 adding performance counter vmx/userRpc.opsPerS[latest,absolute]:732
12857:20241101:185444.103 End of vmware_service_get_perf_counters():SUCCEED
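
Note: each "adding performance counter" line above records one VMware counter in the form group/counter[rollupType,statsType]:counterId (for example datastore/unmapSize[summation,delta]:680), and the "Unknown performance counter ... unitInfo:gigaBytes" warnings for IDs 718-720 suggest this collector build did not recognize the gigaBytes unit reported by vCenter for the gpu/mem.*.gb counters. As a rough, illustrative aid for reading these lines (this is not part of the Zabbix source), the following Python sketch parses them back into structured records; the regex and the field names are assumptions based only on the log format shown here.

# Illustrative only: parse "adding performance counter <group>/<name>[<rollup>(,<stattype>)]:<id>"
# lines from a Zabbix vmware collector trace log. Field names are assumptions.
import re

COUNTER_RE = re.compile(
    r"adding performance counter "
    r"(?P<group>[^/]+)/(?P<counter>[^\[]+)"
    r"\[(?P<attrs>[^\]]+)\]:(?P<id>\d+)"
)

def parse_counter_line(line):
    """Return a dict for one 'adding performance counter' line, or None if it does not match."""
    m = COUNTER_RE.search(line)
    if not m:
        return None
    attrs = m.group("attrs").split(",")
    return {
        "path": f'{m.group("group")}/{m.group("counter")}',
        "rollup": attrs[0],                                 # e.g. latest, average, summation
        "stattype": attrs[1] if len(attrs) > 1 else None,   # e.g. absolute, rate, delta
        "counter_id": int(m.group("id")),
    }

# Example, using a line from this log:
print(parse_counter_line(
    "12857:20241101:185444.099 adding performance counter "
    "datastore/unmapSize[summation,delta]:680"
))
# -> {'path': 'datastore/unmapSize', 'rollup': 'summation', 'stattype': 'delta', 'counter_id': 680}

The numeric counter IDs collected here (675-732 in this excerpt) are the identifiers a subsequent performance query against vCenter would reference, so mapping them back to their group/counter paths is mainly useful when correlating later perf-query log lines with the counters registered above.
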
12857:20241101:185444.103 In vmware_service_get_evt_severity()
12857:20241101:185444.157 vmware_service_get_evt_severity() SOAP response:
EventManagerdescriptionInformationinfoWarningwarningErrorerrorUseruserExtendedEventImport certificate successinfoImport certificate succeeded.Import certificate succeeded.Import certificate succeeded.Import certificate succeeded.ad.event.ImportCertEvent|Import certificate succeeded. <EventLongDescription id="ad.event.ImportCertEvent"> <description> Import certificate succeeded </description> </EventLongDescription> ExtendedEventImport certificate failureerrorImport certificate failed.Import certificate failed.Import certificate failed.Import certificate failed.ad.event.ImportCertFailedEvent|Import certificate failed. <EventLongDescription id="ad.event.ImportCertFailedEvent"> <description> Import certificate failed </description> </EventLongDescription> ExtendedEventJoin domain successinfoJoin domain succeeded.Join domain succeeded.Join domain succeeded.Join domain succeeded.ad.event.JoinDomainEvent|Join domain succeeded. <EventLongDescription id="ad.event.JoinDomainEvent"> <description> Join domain succeeded </description> </EventLongDescription> ExtendedEventJoin domain failureerrorJoin domain failed.Join domain failed.Join domain failed.Join domain failed.ad.event.JoinDomainFailedEvent|Join domain failed. <EventLongDescription id="ad.event.JoinDomainFailedEvent"> <description> Join domain failed </description> </EventLongDescription> ExtendedEventLeave domain successinfoLeave domain succeeded.Leave domain succeeded.Leave domain succeeded.Leave domain succeeded.ad.event.LeaveDomainEvent|Leave domain succeeded. <EventLongDescription id="ad.event.LeaveDomainEvent"> <description> Leave domain succeeded </description> </EventLongDescription> ExtendedEventLeave domain failureerrorLeave domain failed.Leave domain failed.Leave domain failed.Leave domain failed.ad.event.LeaveDomainFailedEvent|Leave domain failed. 
<EventLongDescription id="ad.event.LeaveDomainFailedEvent"> <description> Leave domain failed </description> </EventLongDescription> ExtendedEventBackup job failederrorcom.vmware.applmgmt.backup.job.failed.event|Backup job failed <EventLongDescription id="com.vmware.applmgmt.backup.job.failed.event"> <description> Backup job failed </description> <cause> <description> Backup job failed </description> <action> Check backup server connectivity and available space </action> </cause> </EventLongDescription> ExtendedEventBackup job finished successfullyinfocom.vmware.applmgmt.backup.job.finished.event|Backup job finished successfully <EventLongDescription id="com.vmware.applmgmt.backup.job.finished.event"> <description> Backup job finished successfully </description> <cause> <description> Backup job finished successfully </description> </cause> </EventLongDescription> ExtendedEventGlobal Permission created for user with role and propagation.infocom.vmware.cis.CreateGlobalPermission|Global Permission created for user {User} with role {Role} and propagation {Propagation}.EventExPermission created for user on item with role.infocom.vmware.cis.CreatePermission|Permission created for user {User} on item {DocType} with role {Role}.EventExGlobal Permission removed for user.infocom.vmware.cis.RemoveGlobalPermission|Global Permission removed for user {User}.EventExPermission removed for user on iteminfocom.vmware.cis.RemovePermission|Permission removed for user {User} on item {DocType}EventExUser attached tag(s) to object(s)com.vmware.cis.tagging.attach|User {User} attached tag(s) {Tag} to object(s) {Object}EventExUser detached tag(s) from object(s)com.vmware.cis.tagging.detach|User {User} detached tag(s) {Tag} from object(s) {Object}ExtendedEventHttpNfc service disabled - missing configurationerrorHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationcom.vmware.configuration.httpnfc.missing|HttpNfc service is disabled because of missing configuration. Please check vpxa configuration file and correct the error and reconnect host. <EventLongDescription id="com.vmware.configuration.httpnfc.missing"> <description> The HttpNfc service is disabled because of missing configuration section in vpxa.cfg. Please check vpxa configuration file and correct the error and reconnect host. </description> <cause> <description>The vpxa configuration file requires a configuration section for HttpNfc</description> <action>Please check vpxa configuration file and correct the error and reconnect host.</action> </cause> </EventLongDescription> EventExAdded Licenseinfocom.vmware.license.AddLicenseEvent|License {licenseKey} added to VirtualCenterEventExAssigned Licenseinfocom.vmware.license.AssignLicenseEvent|License {licenseKey} assigned to asset {entityName} with id {entityId}EventExDownload License Informationwarningcom.vmware.license.DLFDownloadFailedEvent|Failed to download license information from the host {hostname} due to {errorReason.@enum.com.vmware.license.DLFDownloadFailedEvent.DLFDownloadFailedReason}EventExDefault License Keys Updatedinfocom.vmware.license.DefaultLicenseKeysUpdatedEvent|Default License Keys for asset {entityName} have been updatedEventExHost License Edition Not Allowedwarningcom.vmware.license.HostLicenseEditionNotAllowedEvent|The host is licensed with {edition}. 
The license edition of vCenter Server does not support {edition}.ExtendedEventHost license or evaluation period has expiredwarningcom.vmware.license.HostLicenseExpiredEvent|Expired host license or evaluation period. <EventLongDescription id="com.vmware.license.HostLicenseExpiredEvent"> <description> Host license or evaluation period has expired. </description> <cause> <description>Expired host license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventHost time-limited license has expiredwarningcom.vmware.license.HostSubscriptionLicenseExpiredEvent|Expired host time-limited license. <EventLongDescription id="com.vmware.license.HostSubscriptionLicenseExpiredEvent"> <description> Host time-limited license has expired. </description> <cause> <description>Expired host time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> EventExLicense assignment faultsinfocom.vmware.license.LicenseAssignFailedEvent|License assignment on the host fails. Reasons: {errorMessage.@enum.com.vmware.license.LicenseAssignError}. <EventLongDescription id="com.vmware.license.LicenseAssignFailedEvent"> <description> The host license assignment succeeds on vCenter Server but can not be successfully pushed down to the host. Any license assignment to a host proceeds in two stages. In the first stage vCenter Server does preliminary checks on the license key, the license state of the host and determines if the requested assignment is valid. If so, it stores this assignment locally in its database. In the second stage, vCenter Server pushes the newly assigned license to the host. During the second stage the host might reject the assignment under certain circumstances. These circumstances usually result from a mismatch of the information available to vCenter Server and the host concerned. Any such discrepancies are notified to the user via this event. This event lists the reason because of which it was logged and also shows up as a configuration issue on the vSphere Client. </description> <cause> <description>License expiry information mismatch between vCenter Server and host</description> <action>If the system time on the machine running vCenter Server and host are not in sync then put them in sync</action> </cause> <cause> <description>The license key is a per Virtual Machine key and the number of powered on Virtual Machines is larger than the maximum limit of the key</description> <action>Use a different key with a larger capacity</action> </cause> </EventLongDescription> EventExLicense Capacity Exceededwarningcom.vmware.license.LicenseCapacityExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the license capacity ({capacity} {costUnitText})EventExLicense ExpirywarningYour host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires.com.vmware.license.LicenseExpiryEvent|Your host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires. <EventLongDescription id="com.vmware.license.LicenseExpiryEvent"> <description> If a host is assigned a temporary license (a license key with an expiry), this event is logged in order to provide users an advanced warning on the imminent expiry of the license key. The event logging starts 15 days prior to the expiry of the license key. 
This event also shows up on the host summary page as a configuration issue on the vSphere Client. </description> <cause> <description>License key is about to expire or has expired</description> <action>Assign a different license key</action> </cause> </EventLongDescription> EventExLicense User Threshold Exceededwarningcom.vmware.license.LicenseUserThresholdExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the user-defined threshold ({threshold} {costUnitText}) <EventLongDescription id="com.vmware.license.LicenseUserThresholdExceededEvent"> <description> Users can define thresholds to monitor overuse of the product license. This event is logged when the license usage threshold defined by the user for a product edition is exceeded. </description> <cause> <description> License usage of a product edition has exceeded the user-defined threshold </description> <action> Review license assignments and usage </action> </cause> </EventLongDescription> EventExRemoved Licenseinfocom.vmware.license.RemoveLicenseEvent|License {licenseKey} removed from VirtualCenterEventExUnassigned Licenseinfocom.vmware.license.UnassignLicenseEvent|License unassigned from asset {entityName} with id {entityId}ExtendedEventvCenter Server license or evaluation period has expiredwarningcom.vmware.license.VcLicenseExpiredEvent|Expired vCenter Server license or evaluation period. <EventLongDescription id="com.vmware.license.VcLicenseExpiredEvent"> <description> vCenter Server license or evaluation period has expired. </description> <cause> <description>Expired vCenter Server license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventvCenter Server time-limited license has expiredwarningcom.vmware.license.VcSubscriptionLicenseExpiredEvent|Expired vCenter Server time-limited license. <EventLongDescription id="com.vmware.license.VcSubscriptionLicenseExpiredEvent"> <description> vCenter Server time-limited license has expired. </description> <cause> <description>Expired vCenter Server time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventSome in-use features are not supported by current licensewarningcom.vmware.license.vsan.FeatureBeyondCapability|In-use vSAN features {feature} are not supported by current license.ExtendedEventHost flash capacity exceeds the licensed limit for vSANwarningcom.vmware.license.vsan.HostSsdOverUsageEvent|The capacity of the flash disks on the host exceeds the limit of the vSAN license. <EventLongDescription id="com.vmware.license.vsan.HostSsdOverUsageEvent"> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <cause> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> ExtendedEventvSAN license or evaluation period has expiredwarningcom.vmware.license.vsan.LicenseExpiryEvent|Expired vSAN license or evaluation period. <EventLongDescription id="com.vmware.license.vsan.LicenseExpiryEvent"> <description> Expired vSAN license or evaluation period. </description> <cause> <description> Expired vSAN license or evaluation period. </description> <action> Review cluster license assignments. 
</action> </cause> </EventLongDescription> ExtendedEventvSAN time-limited license has expiredwarningcom.vmware.license.vsan.SubscriptionLicenseExpiredEvent|Expired vSAN time-limited license. <EventLongDescription id="com.vmware.license.vsan.SubscriptionLicenseExpiredEvent"> <description> Expired vSAN time-limited license. </description> <cause> <description> Expired vSAN time-limited license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> EventExStorage policy associatedinfoAssociated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}com.vmware.pbm.profile.associate|Associated storage policy: {ProfileId} with entity: {EntityId}EventExStorage policy createdinfoStorage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.create|Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}EventExStorage policy deletedinfoDeleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}com.vmware.pbm.profile.delete|Deleted storage policy: {ProfileId}EventExStorage policy dissociatedinfoDissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}com.vmware.pbm.profile.dissociate|Dissociated storage policy: {ProfileId} from entity: {EntityId}EventExStorage policy updatedinfoStorage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.update|Storage policy updated for {ProfileId}. Policy name: {ProfileName}EventExStorage policy name updatedinfoStorage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}com.vmware.pbm.profile.updateName|Storage policy name updated for {ProfileId}. 
New name: {NewProfileName}EventExCertificate Manager event in SSOinfocom.vmware.sso.CertificateManager|Certificate Manager event by {userName} at {timestamp} : {description}EventExConfiguration Management event in SSOinfocom.vmware.sso.ConfigurationManagement|Configuration Management event by {userName} at {timestamp} : {description}EventExDomain Management event in SSOinfocom.vmware.sso.DomainManagement|Domain Management event by {userName} at {timestamp} : {description}EventExIdentity Source Management event in SSOinfocom.vmware.sso.IdentitySourceManagement|Identity Source Management event by {userName} at {timestamp} : {description}EventExIdentity Source LDAP Certificate is about to expireinfocom.vmware.sso.LDAPCertExpiry|Renew Identity Source LDAP Certificate: {description}EventExLockout Policy event in SSOinfocom.vmware.sso.LockoutPolicy|Lockout Policy event by {userName} at {timestamp} : {description}EventExFailed login attempt event in SSOerrorcom.vmware.sso.LoginFailure|Failed login {userName} from {userIp} at {timestamp} in SSOEventExSuccessful login attempt event in SSOinfocom.vmware.sso.LoginSuccess|Successful login {userName} from {userIp} at {timestamp} in SSOEventExLogout attempt event in SSOinfocom.vmware.sso.Logout|Logout event by {userName} from {userIp} at {timestamp} in SSOEventExPassword Policy event in SSOinfocom.vmware.sso.PasswordPolicy|Password Policy event by {userName} at {timestamp} : {description}EventExPrincipal Management event in SSOinfocom.vmware.sso.PrincipalManagement|Principal Management event by {userName} at {timestamp} : {description}EventExRole Management event in SSOinfocom.vmware.sso.RoleManagement|Role Management event by {userName} at {timestamp} : {description}EventExSTS Signing Certificates are about to expireinfocom.vmware.sso.STSCertExpiry|Renew STS Signing Certificates: {description}EventExSMTP Configuration event in SSOinfocom.vmware.sso.SmtpConfiguration|SMTP Configuration event by {userName} at {timestamp} : {description}EventExSystem Management event in SSOinfocom.vmware.sso.SystemManagement|System Management event by {userName} at {timestamp} : {description}EventExvCenter Identity event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentity|vCenter Identity event by {userName} at {timestamp} : {description}EventExvCenter Identity Providers event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentityProviders|vCenter Identity Providers event by {userName} at {timestamp} : {description}EventExvCenter Trusts event in Trustmanagementinfocom.vmware.trustmanagement.VcTrusts|vCenter Trusts event by {userName} at {timestamp} : {description}EventExIdentity Provider SSL Trust Certificate is about to expireinfocom.vmware.trustmanagement.WS1SSLCertExpiry|Renew Identity Provider SSL Trust Certificate: {description}EventExIdentity Provider Users and Groups token is about to expireinfocom.vmware.trustmanagement.WS1SyncTokenExpiry|Renew Identity Provider Users and Groups token: {description}EventExReports that a stage from autonomous cluster creation has failedwarningcom.vmware.vc.A8sCluster.CreateStageFailedEvent|Autonomous cluster creation stage: {stage} failed: {reason}EventExReports that a stage from autonomous cluster creation has completed successfullyinfocom.vmware.vc.A8sCluster.CreateStageSuccessEvent|Autonomous cluster creation stage: {stage} succeededEventExAutonomous cluster health is degraded.warningcom.vmware.vc.A8sCluster.HealthDegradedEvent|Autonomous cluster health is degraded. 
Reason: {reason}ExtendedEventAutonomous cluster is healthy.infocom.vmware.vc.A8sCluster.HealthHealthyEvent|Autonomous cluster is healthy.EventExAutonomous cluster is unhealthy.warningcom.vmware.vc.A8sCluster.HealthUnhealthyEvent|Autonomous cluster is unhealthy. Reason: {reason}ExtendedEventAuthz service is not running. Authorization data might not be synchronized.errorcom.vmware.vc.AuthzDataNotSynced|Authz service is not running. Authorization data might not be synchronized.ExtendedEventAuthz service is running. Authorization data is being synchronized.infocom.vmware.vc.AuthzDataSynced|Authz service is running. Authorization data is being synchronized.ExtendedEventEvent sequence ID reached its max value and was reset.infocom.vmware.vc.EventIdOverflow|Event sequence ID reached its max value and was reset.ExtendedEventcom.vmware.vc.FailedToApplyPermissionsEvent|ExtendedEventvSphere HA agent can reach all cluster management addressesinfoThe vSphere HA agent on the host {host.name} in cluster {computeResource.name} can reach all the cluster management addressesThe vSphere HA agent on the host {host.name} can reach all the cluster management addressesThe vSphere HA agent on this host can reach all the cluster management addressescom.vmware.vc.HA.AllHostAddrsPingable|The vSphere HA agent on the host {host.name} in cluster {computeResource.name} in {datacenter.name} can reach all the cluster management addresses <EventLongDescription id="com.vmware.vc.HA.AllHostAddrsPingable"> <description> The host is able to ping all of the vSphere HA management addresses of every other cluster host. </description> </EventLongDescription> ExtendedEventvSphere HA agent can reach all isolation addressesinfoAll vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name}All vSphere HA isolation addresses are reachable by this hostAll vSphere HA isolation addresses are reachable by hostcom.vmware.vc.HA.AllIsoAddrsPingable|All vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.AllIsoAddrsPingable"> <description> The host is able to ping all of the vSphere HA isolation addresses. </description> </EventLongDescription> ExtendedEventvSphere HA answered a lock-lost question on a virtual machinewarningvSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name}vSphere HA answered the lock-lost question on virtual machine {vm.name}vSphere HA answered the lock-lost question on this virtual machinecom.vmware.vc.HA.AnsweredVmLockLostQuestionEvent|vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmLockLostQuestionEvent"> <description> The virtual machine running on this host lost the exclusive lock of its files on disk. This will occur if another instance of this virtual machine is running on a different host. This situation can happen if a host loses access to both its storage and management networks but is not configured to shutdown its virtual machines on isolation. The virtual machines on this host will continue to run without access to their disks, while vSphere HA will start a new instance of the virtual machines on another host in the cluster. 
When the isolated host regains access to the storage network, it will try to reacquire the disk locks. This will fail since the disk locks are held by another host. The host will then issue a question on the virtual machine indicating that disk locks have been lost. vSphere HA will automatically answer this question to allow the virtual machine instance without the disk locks to power off. <description> </EventLongDescription> ExtendedEventvSphere HA answered a question from the host about terminating a virtual machinewarningvSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name}vSphere HA answered a question from host {host.name} about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating this virtual machinecom.vmware.vc.HA.AnsweredVmTerminatePDLEvent|vSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmTerminatePDLEvent"> <description> The virtual machine running on this host had a virtual disk which experienced permenant device loss. The host will issue a question if it is configured to terminate the VM automatically under such condition. This event indicates that vSphere HA answered the question. After the VM is terminated, vSphere HA will make a best effort to restart it. <description> </EventLongDescription> ExtendedEventvSphere HA disabled the automatic VM Startup/Shutdown featureinfovSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on the host {host.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature. Automatic VM restarts will interfere with HA when reacting to a host failure.com.vmware.vc.HA.AutoStartDisabled|vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Automatic VM restarts will interfere with HA when reacting to a host failure. <EventLongDescription id="com.vmware.vc.HA.AutoStartDisabled"> <description> Virtual Machine Startup/Shutdown has been disabled by HA. A host which is contained in an vSphere HA cluster is not permitted to have automatic virtual machine startup and shutdown since it may conflict with HA's attempts to relocate the virtual machines if a host fails. 
</description> </EventLongDescription> ExtendedEventvSphere HA did not reset a VM which had files on inaccessible datastore(s)warningvSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on host {host.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on this host because the VM had files on inaccessible datastore(s)vSphere HA did not reset this VM because the VM had file(s) on inaccessible datastore(s)com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore|vSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the VM had files on inaccessible datastore(s) <EventLongDescription id=" com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore"> <description> This event is logged when vSphere HA did not reset a VM affected by an inaccessible datastore. It will attempt to reset the VM after storage failure is cleared. </description> <cause> <description> The VM is affected by an inaccessible datastore due to storage connectivity loss. Resetting such a VM might cause the VM to be powered off and not restarted by vSphere HA. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA cluster contains incompatible hosts.warningvSphere HA Cluster {computeResource.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.vSphere HA Cluster contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.com.vmware.vc.HA.ClusterContainsIncompatibleHosts|vSphere HA Cluster {computeResource.name} in {datacenter.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported. <EventLongDescription id="com.vmware.vc.HA.ClusterContainsIncompatibleHosts"> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions. </description> <cause> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions, which isn't fully supported. Failover of VMs from ESX/ESXi 3.5 hosts to newer hosts is not guaranteed. </description> <action> Place ESX/ESXi 3.5 hosts into a separate vSphere HA cluster from hosts with more recent ESX versions. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA completed a failover actioninfovSphere HA completed a virtual machine failover action in cluster {computeResource.name}vSphere HA completed a virtual machine failover actioncom.vmware.vc.HA.ClusterFailoverActionCompletedEvent|vSphere HA completed a virtual machine failover action in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA initiated a failover actionwarningvSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name}vSphere HA initiated a failover action on {pendingVms} virtual machinescom.vmware.vc.HA.ClusterFailoverActionInitiatedEvent|vSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA failover operation in progressWarningvSphere HA failover operation in progress in cluster {computeResource.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMsvSphere HA failover operation in progress: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMscom.vmware.vc.HA.ClusterFailoverInProgressEvent|vSphere HA failover operation in progress in cluster {computeResource.name} in datacenter {datacenter.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMs <EventLongDescription id="com.vmware.vc.HA.ClusterFailoverInProgressEvent"> <description> This event is logged when a vSphere HA failover operation is in progress for virtual machines in the cluster. It also reports the number of virtual machines that are being restarted. There are four different categories of such VMs. (1) VMs being placed: vSphere HA is in the process of trying to restart these VMs; (2) VMs awaiting retry: a previous restart attempt failed, and vSphere HA is waiting for a timeout to expire before trying again; (3) VMs requiring additional resources: insufficient resources are available to restart these VMs. vSphere HA will retry when more resources become available (such as a host comes back on line); (4) Inaccessible vSAN VMs: vSphere HA cannot restart these vSAN VMs because they are not accessible. It will retry when there is a change in accessibility. </description> <cause> <description> vSphere HA is attempting to restart failed virtual machines in the cluster. It might be that the virtual machine restart is pending and has not yet completed. </description> <action> vSphere HA will retry the failover on another host unless the maximum number of failover attempts has been reached. A subsequent retry may succeed in powering on the virtual machine so allow the vSphere HA failover operation to be declared a success or failure. </action> </cause> <cause> <description> This event might also be generated when a required resource in the cluster becomes temporarily unavailabile due to network reconfiguration, hardware upgrade, software update, host overload, etc. which can cause vSphere HA to lose its network or storage hearbeats to certain hosts or virtual machines and mark them inaccessible. </description> <action> In many cases, this may be a temporary condition. 
If the cluster soon stabilizes to its normal condition vSphere HA will detect the host and virtual machines to be live and discard any failover attempts. In such cases, this event may be treated as a soft alarm caused by such changes. </action> </cause> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It can also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to fail over the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or trying powering on the virtual machine and investigate any returned errors. </action> <action> If the error reports that a file is locked, the VM might be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it might have been powered on by a user on a host. If any hosts have been declared dead, investigate whether a networking or storage issue is the cause. </action> <action> If the error reports that the virtual machine is in an invalid state, there might be an operation in progress that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation, that are taking a long time to complete. </action> </cause> </EventLongDescription> ExtendedEventHost connected to a vSphere HA masterinfovSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}com.vmware.vc.HA.ConnectedToMaster|vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.ConnectedToMaster"> <description> This event is logged whenever a host in a vSphere HA cluster transitions to a slave host state and establishes a connection with a master host. </description> </EventLongDescription> ExtendedEventvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}errorvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}com.vmware.vc.HA.CreateConfigVvolFailedEvent|vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. 
Error: {fault} <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolFailedEvent"> <description> vSphere HA failed to create a config vvol on the datastore </description> <cause> <description>A possible VP, host, network, or lack of resources prevented vSphere HA from creating a config vvol</description> <action>Look for errors in the environment, then re-enable vSphere HA</action> </cause> </EventLongDescription> ExtendedEventvSphere HA successfully created a configuration vVol after the previous failureinfovSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurecom.vmware.vc.HA.CreateConfigVvolSucceededEvent|vSphere HA successfully created a configuration vVol after the previous failure <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolSucceededEvent"> <description> vSphere HA successfully created a config vvol on the datastore. If there was a failed config vvol datastore configuration issue, it is being cleared </description> <cause> <description> There were no errors during creation of the config vvol on the datastore</description> </cause> </EventLongDescription> ExtendedEventvSphere HA agent is runninginfovSphere HA agent on host {host.name} in cluster {computeResource.name} is runningvSphere HA agent on host {host.name} is runningvSphere HA agent is runningcom.vmware.vc.HA.DasAgentRunningEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is running <EventLongDescription id=" com.vmware.vc.HA.DasAgentRunningEvent"> <description> This event is logged when the vSphere HA agent is running on a host. </description> <cause> <description> This event is reported after vSphere HA is configured on a host or after the vSphere HA agent on a host starts, such as after a host reboot. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected an HA cluster state version inconsistencywarningvSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name}vSphere HA detected an HA cluster state version inconsistencycom.vmware.vc.HA.DasClusterVersionInconsistentEvent|vSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasClusterVersionInconsistentEvent"> <description> This event is logged when vSphere HA cluster has a version inconsistency for cluster state(HostList, ClusterConfiguration, VM protection state). </description> <cause> <description> This situation could primarily occur if vCenter has been restored to an older backed up state causing vCenter to rollback to older version for the vSphere HA cluster state (HostList, ClusterConfiguration, VM protection state) while the hosts on the cluster have the latest version for the cluster state. As a result, protection state for VMs will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster, any new cluster configuration state will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster and if hosts were added or removed to/from this vSphere HA cluster after vCenter backup and before vCenter Restore, VMs could potentially failover to hosts not being managed by vCenter but which are still part of the HA cluster. </description> <action> Step 1. 
If hosts were added or removed to/from the vSphere HA cluster after vCenter backup and before vCenter Restore, please add or remove those respective hosts back to the vSphere HA cluster so that the list of hosts in the vSphere HA cluster is identical to the list of hosts in the cluster before vCenter was last restored. If you do not want to add hosts to the cluster, stop the vSphere HA process on the hosts that were added to vCenter after the backup. If this is not done, in case of a failure, VMs could potentially failover to hosts not being managed by vCenter but which are still part of the HA cluster. </action> <action> Step 2. Disable vSphere HA on the cluster and then re-enable vSphere HA on the cluster. This will make sure that vCenter's version for the vSphere HA cluster state(HostList, ClusterConfiguration, VM protection state) is reset with a new fault domain id for the HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a failed failover hosterrorvSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name}vSphere HA detected a possible failure of failover host {host.name}vSphere HA detected a possible failure of this failover hostcom.vmware.vc.HA.DasFailoverHostFailedEvent|vSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostFailedEvent"> <description> This event is logged when vSphere HA has detected the failure of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the failure of a failover host. A host is considered to have failed by a vSphere HA master agent if it looses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the failover host failure, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is not running and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-isolated failover hosterrorvSphere HA detected that failover host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that failover host {host.name} is network isolated from the clustervSphere HA detected that this failover host is network isolated from the clustercom.vmware.vc.HA.DasFailoverHostIsolatedEvent|Host {host.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the network isolation of a failover host. vSphere HA reports a host as isolated if there are no heartbeats received from the HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the the host's datastore heartbeats. 
</description> <action> Determine the cause of the failover host isolation, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is isolated and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-partitioned failover hostwarningvSphere HA detected that failover host {host.name} in {computeResource.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that failover host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this failover host is in a different network partition than the mastercom.vmware.vc.HA.DasFailoverHostPartitionedEvent|Failover Host {host.name} in {computeResource.name} in {datacenter.name} is in a different network partition than the master <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostPartitionedEvent"> <description> This event is logged when vSphere HA has detected a designated failover host is network partitioned. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if a vSphere HA master agent detects a failover host is network partitioned. vSphere HA reports a host as partitioned if it cannot communicate with a subset of hosts in the cluster, yet can determine that the host is alive via its datastore heartbeats. </description> <action> Determine the cause of the partitioned failover host, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if a failover host is partitioned and a host failure occurs. See the prodcut documentation for troubleshooting tips. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA agent on a failover host is unreachableerrorThe vSphere HA agent on the failover host {host.name} in {computeResource.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on the failover host {host.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on this failover host is not reachable but host responds to ICMP pingscom.vmware.vc.HA.DasFailoverHostUnreachableEvent|The vSphere HA agent on the failover host {host.name} in cluster {computeResource.name} in {datacenter.name} is not reachable but host responds to ICMP pingsEventExHost complete datastore failureerrorAll shared datastores failed on the host {hostName} in cluster {computeResource.name}All shared datastores failed on the host {hostName}All shared datastores failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent|All shared datastores failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent"> <description> A host in a Component Protection-enabled cluster has lost connectivity to all shared datastores </description> <cause> <description>Connectivity to all shared datastores has been lost</description> <action>Reconnect at least one shared datastore</action> </cause> </EventLongDescription> EventExHost complete network failureerrorAll VM networks failed on the host {hostName} in cluster {computeResource.name}All VM networks failed on the host {hostName}All VM networks failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent|All VM networks failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent"> <description> A host in a Component Protection enabled cluster has lost connectivity to all virtual machine networks </description> <cause> <description>Connectivity to all virtual machine networks has been lost</description> <action>Reconnect at least one virtual machine network</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a host failureerrorvSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name}vSphere HA detected a possible host failure of host {host.name}vSphere HA detected a possible host failure of this hostcom.vmware.vc.HA.DasHostFailedEvent|vSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostFailedEvent"> <description> This event is logged when vSphere HA detects a possible host failure. </description> <cause> <description> A host is considered to have failed by a vSphere HA master agent if it looses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the host failure, and correct. See the product documentation for troubleshooting tips. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network isolated hosterrorvSphere HA detected that host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that host {host.name} is network isolated from the clustervSphere HA detected that this host is network isolated from the clustercom.vmware.vc.HA.DasHostIsolatedEvent|vSphere HA detected that host {host.name} is isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a host. </description> <cause> <description> This event will be generated if there are no heartbeats received from the vSphere HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the the host's datastore heartbeats. </description> <action> Determine the cause of the host isolation, and correct. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA host monitoring is disabledwarningvSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name}vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabledcom.vmware.vc.HA.DasHostMonitoringDisabledEvent|vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostMonitoringDisabledEvent"> <description> This event is logged when host monitoring has been disabled in a vSphere HA cluster. </description> <cause> <description> Host monitoring is disabled, so vSphere HA will not perform any failover actions. This event is generated to inform the user that their cluster is temporarily not being protected against host or VM failures. If host or VM failures occur while host monitoring is disabled, HA will not attempt to restart the the VMs that were running on the failed hosts. Other vSphere HA features are not impacted by whether host monitoring is disabled. </description> <action> Enable host monitoring to resume hosts monitoring. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA failed to restart a network isolated virtual machineerrorvSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart this virtual machine after it was powered off in response to a network isolation eventcom.vmware.vc.HA.FailedRestartAfterIsolationEvent|vSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} after it was powered off in response to a network isolation event. 
The virtual machine should be manually powered back on.EventExRunning VMs utilization cannot satisfy the configured failover resources on the cluster.warningRunning VMs utilization cannot satisfy the configured failover resources on cluster {computeResource.name}Running VMs utilization cannot satisfy the configured failover resources on the cluster.com.vmware.vc.HA.FailoverResourcesViolationEvent|Running VMs utilization cannot satisfy the configured failover resources on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.FailoverResourcesViolationEvent"> <description> This event is logged when the total utilization of the running VMs cannot satisfy the configured failover resources on a vSphere HA admission controlled cluster. </description> <cause> <description> The total utilization of the running VMs on this cluster is unable to satisfy the configured failover resources in the cluster. This event is generated to inform the user that their cluster will be running in a compromised state during failover and would not have sufficient failover resources to ensure the optimal functioning of the VMs and their workloads. The side-effect of this situation is that VMs won't be working optimally even though we ensure required failover capacity in case of failures. Other vSphere HA features are not impacted by this and this warning doesn't affect any VM related operations like power-on, vmotion etc. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to ensure that there is sufficient failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA changed a host's heartbeat datastoresinfoDatastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on this hostcom.vmware.vc.HA.HeartbeatDatastoreChanged|Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreSelected"> <description> A datastore is selected or deselected for storage heartbeating monitored by the vSphere agent on this host. vSphere HA employs stroage heartbeating to detect host failures when there is network partition. 
</description> </EventLongDescription> EventExvSphere HA heartbeat datastore number for a host is insufficientwarningThe number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for host {host.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for this host is {selectedNum}, which is less than required: {requiredNum}com.vmware.vc.HA.HeartbeatDatastoreNotSufficient|The number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} in {datacenter.name} is {selectedNum}, which is less than required: {requiredNum} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreNotSufficient"> <description> The number of heartbeat datastores used for this host is less than required. Multiple heartbeat datastores are needed to tolerate storage failures. The host summary page will report a configuration issue in this case. To ignore the configuration issue, use the vSphere HA cluster advanced option, das.ignoreInsufficientHbDatastore. </description> <cause> <description> The host does not have sufficient number of accessible datastores that are shared among other hosts in the cluster. </description> <action> Add more shared datastores to the host or check if any of its datastore is currently inaccessible. </action> </cause> </EventLongDescription> EventExvSphere HA agent on a host has an errorwarningvSphere HA agent for host {host.name} has an error in {computeResource.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for this host has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}com.vmware.vc.HA.HostAgentErrorEvent|vSphere HA agent for host {host.name} has an error in {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason} <EventLongDescription id="com.vmware.vc.HA.AgentErrorEvent"> <description> This event is logged when the vSphere HA agent for the host has an error. </description> <action> See product documentation for troubleshooting tips. </action> </EventLongDescription> ExtendedEventvSphere HA agent is healthyinfovSphere HA agent on host {host.name} in cluster {computeResource.name} is healthyvSphere HA agent on host {host.name} is healthyvSphere HA agent is healthycom.vmware.vc.HA.HostDasAgentHealthyEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is healthy <EventLongDescription id=" com.vmware.vc.HA.HostDasAgentHealthyEvent"> <description> This event is logged when the vSphere HA agent on a host transitions to a healthy state. </description> <cause> <description> vSphere HA reports this event when the vSphere HA agent on the host is either a master or a slave that is connected to the master over the management network. </description> </cause> </EventLongDescription> EventExvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error. 
{reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}com.vmware.vc.HA.HostDasErrorEvent|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason} <EventLongDescription id="com.vmware.vc.HA.HostDasErrorEvent"> <description> The vSphere HA agent on this host has an error. The event may provide details with extra information indicating the cause of the error. </description> <cause> <description>There was an error configuring the vSphere HA agent on the host</description> <action> Look at the task details for the configure vSphere HA task that failed. That will provide more details about why the failure occurred. Address the problem and reconfigure vSphere HA on the host. </action> </cause> <cause> <description> There was a timeout while communicating with the vSphere HA agent. This can occur if there is a high rate of operations being performed on virtual machines in the cluster resulting in the vSphere HA agents not being able to process the changes fast enough. </description> <action> Verify that this is a transient problem by stopping operations on virtual machines in the cluster for a few minutes to give time to the vSphere HA agents to process all their pending messages. If this resolves the problem, consider reducing the rate of operations performed on the cluster. </action> </cause> <cause> <description>There is vSphere HA agent is in a shutdown or failed state</description> <action>Reconfigure vSphere HA on the host. If this fails, reconfigure vSphere HA on the cluster</action> </cause> </EventLongDescription> EventExvSphere HA detected a datastore failurewarningvSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of datastore {arg1} on host {host.name}vSphere HA detected a failure of datastore {arg1}com.vmware.vc.HA.HostDatastoreFailedEvent|vSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventUnsupported vSphere HA and vCloud Distributed Storage configurationerrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured on host {host.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured because vCloud Distributed Storage is enabled but the host does not support that featurecom.vmware.vc.HA.HostDoesNotSupportVsan|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in {datacenter.name} because vCloud Distributed Storage is enabled but the host does not support that featureExtendedEventHost has no vSphere HA isolation addresseserrorHost {host.name} in cluster {computeResource.name} has no isolation addresses defined as required by vSphere HAHost {host.name} has no isolation addresses defined as required by vSphere HAThis host has no isolation addresses defined as required by vSphere HAcom.vmware.vc.HA.HostHasNoIsolationAddrsDefined|Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no isolation addresses defined as required by vSphere HA. 
<EventLongDescription id="com.vmware.vc.HA.HostHasNoIsolationAddrsDefined"> <description> The host has an vSphere HA configuration issue because there were no IP addresses that vSphere HA could use for detecting network isolation. Without at least one, the host will not take any isolation response. HA, by default, will use the host's default gateway (defined in the host's networking configuration), or use the addresses that were specified in the cluster's advanced settings. </description> <action> Define a default gateway in the host's networking configuration. </action> <action> If the cluster advanced setting das.usedefaultisolationaddress is false, you must define at least one isolation address using the advanced options. </action> <action> Define one or more cluster advanced options, each containing an IP address to be pinged by vSphere HA to detect if it is network-isolated when it no longer receives communication with other hosts in the cluster. The advanced option is das.isolationAddress[n], where 'n' is a number from 1 to 9. You may specify multiple addresses. </action> </EventLongDescription> ExtendedEventvSphere HA cannot be configured on this host because there are no mounted datastores.errorvSphere HA cannot be configured on {host.name} in cluster {computeResource.name} because there are no mounted datastores.vSphere HA cannot be configured on {host.name} because there are no mounted datastores.vSphere HA cannot be configured on this host because there are no mounted datastores.com.vmware.vc.HA.HostHasNoMountedDatastores|vSphere HA cannot be configured on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because there are no mounted datastores.ExtendedEventvSphere HA requires a SSL Thumbprint for hosterrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on {host.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on this host because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for this host has been verified.com.vmware.vc.HA.HostHasNoSslThumbprint|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified. <EventLongDescription id="com.vmware.vc.HA.HostHasNoSslThumbprint"> <description> The host has an vSphere HA configuration issue because it does not have a verified ssl thumbprint. Hosts need verified SSL thumbprints for secure vSphere HA communications. </description> <action> If the host is using self-signed certificates, check that vCenter Server is configured to verify SSL certificates, and verify the thumbprints for the hosts in the vSphere HA cluster. 
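As a practical aside to the HostHasNoIsolationAddrsDefined actions above, the das.isolationAddress[n] and das.usedefaultisolationaddress advanced options can also be pushed to a cluster programmatically. The following is a minimal, hypothetical pyVmomi sketch; the vCenter address, credentials, cluster name "Cluster01" and the 192.0.2.1 address are placeholders and not values taken from this catalog:
    import ssl
    from pyVim.connect import SmartConnect, Disconnect
    from pyVmomi import vim

    ctx = ssl._create_unverified_context()          # lab use only; verify certificates in production
    si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                      pwd="secret", sslContext=ctx)
    content = si.RetrieveContent()
    view = content.viewManager.CreateContainerView(content.rootFolder,
                                                   [vim.ClusterComputeResource], True)
    cluster = next(c for c in view.view if c.name == "Cluster01")   # hypothetical cluster name

    das = vim.cluster.DasConfigInfo()
    das.option = [
        vim.option.OptionValue(key="das.usedefaultisolationaddress", value="false"),
        vim.option.OptionValue(key="das.isolationaddress1", value="192.0.2.1"),  # example address only
    ]
    spec = vim.cluster.ConfigSpecEx(dasConfig=das)
    cluster.ReconfigureComputeResource_Task(spec=spec, modify=True)  # modify=True applies the change incrementally
    Disconnect(si)
Once the reconfiguration task completes and the vSphere HA agents pick up the new options, the configuration issue reported by this event should clear.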
</action> </EventLongDescription> ExtendedEventHost is incompatible with vSphere HAerrorThe product version of host {host.name} in cluster {computeResource.name} is incompatible with vSphere HA.The product version of host {host.name} is incompatible with vSphere HA.The product version of this host is incompatible with vSphere HA.com.vmware.vc.HA.HostIncompatibleWithHA|The product version of host {host.name} in cluster {computeResource.name} in {datacenter.name} is incompatible with vSphere HA. <EventLongDescription id="com.vmware.vc.HA.HostIncompatibleWithHA"> <description> The host is in a vSphere HA cluster but its product version is incompatible with HA. </description> <action> To fix the situation the host should either be moved out of the vSphere HA cluster or upgraded to a version supporting HA. </action> </EventLongDescription> EventExvSphere HA detected a network failurewarningvSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of network {network} on host {host.name}vSphere HA detected a failure of network {network}com.vmware.vc.HA.HostNetworkFailedEvent|vSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventvSphere HA detected a network-partitioned hostwarningvSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connected in {computeResource.name}vSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this host is in a different network partition than the master to which vCenter Server is connectedcom.vmware.vc.HA.HostPartitionedFromMasterEvent|vSphere HA detected that host {host.name} is in a different network partition than the master {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HostPartitionedFromMasterEvent"> <description> This event is logged when the host is in a different partition than the master. </description> </EventLongDescription> EventExThe vSphere HA host availability state changedinfoThe vSphere HA availability state of the host {host.name} in cluster {computeResource.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of the host {host.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of this host has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}com.vmware.vc.HA.HostStateChangedEvent|The vSphere HA availability state of the host {host.name} in cluster in {computeResource.name} in {datacenter.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState} <EventLongDescription id="com.vmware.vc.HA.HostStateChangedEvent"> <description> This event is logged when the availability state of a host has changed. </description> </EventLongDescription> ExtendedEventvSphere HA agent unconfigure failed on hostwarningThere was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on host {host.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on this host. 
To solve this problem, reconnect the host to vCenter Server.com.vmware.vc.HA.HostUnconfigureError|There was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name}. To solve this problem, reconnect the host to vCenter Server. <EventLongDescription id="com.vmware.vc.HA.HostUnconfigureError"> <description> There was an error unconfiguring the vSphere HA agent on this host. </description> <cause> <description> The vSphere HA unconfiguration task failed to send the updated hostList to vSphere HA agent on the host. This condition may interfere with the vSphere HA cluster to which the host used to belong and should be corrected. </description> <action> Add the host back to a vCenter Server of version 5.0 or later. </action> </cause> </EventLongDescription> EventExA disconnected host has vSphere HA protected VMserrorHost {host.name} in cluster {computeResource.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)Host {host.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)This host is disconnected from vCenter Server, but contains {protectedVmCount} vSphere HA protected virtual machine(s)com.vmware.vc.HA.HostUnconfiguredWithProtectedVms|Host {host.name} in cluster {computeResource.name} in {datacenter.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s) <EventLongDescription id="com.vmware.vc.HA.HostUnconfiguredWithProtectedVms"> <description> This host is disconnected and contains one or more virtual machine(s) that are still protected by vSphere HA. Consequently, these virtual machines could be failed over to another host if this host should fail. </description> <cause> <description> If a vSphere HA-enabled host is disconnected and is unable to unprotect the virtual machines currently running on it (perhaps due to datastores being unavailable, or not being able to communicate with the vSphere HA master host) then these virtual machines would still be protected, but reside on the disconnected host. Also, if a virtual machine is migrated using vMotion to a vSphere HA-enabled host that is currently in the process of disconnecting, this can lead to the same result. </description> <action> To correct this situation, ensure that the host has access to the datastores used by these virtual machines, and then reconnect the host to an vSphere HA-enabled cluster. The virtual machines should become unprotected shortly after vSphere HA is configured on the host. </action> </cause> </EventLongDescription> EventExvSphere HA configured failover resources are insufficient to satisfy desired failover levelwarningInsufficient configured resources to satisfy the desired vSphere HA failover level on cluster {computeResource.name}Insufficient configured resources to satisfy the desired vSphere HA failover levelcom.vmware.vc.HA.InsufficientFailoverLevelEvent|Insufficient configured resources to satisfy the desired vSphere HA failover level on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.InsufficientFailoverLevelEvent"> <description> The cluster does not have enough failover capacity to satisfy the desired host failures to tolerate for vSphere HA. Failovers may still be performed by vSphere HA but will be on a best effort basis and configured resources may not sufficient to respect the desired host failures to tolerate. 
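The "host failures to tolerate" capacity referred to in the InsufficientFailoverLevelEvent description above maps to the cluster's vSphere HA admission control policy. A minimal, hypothetical pyVmomi sketch that reserves explicit failover capacity, assuming the cluster object was located as in the earlier sketch and using failoverLevel=1 purely as an example value:
    from pyVmomi import vim

    das = vim.cluster.DasConfigInfo()
    das.admissionControlEnabled = True
    das.admissionControlPolicy = vim.cluster.FailoverLevelAdmissionControlPolicy(failoverLevel=1)
    spec = vim.cluster.ConfigSpecEx(dasConfig=das)
    cluster.ReconfigureComputeResource_Task(spec=spec, modify=True)
Whether to reserve capacity this way or to add hosts depends on the cluster's sizing; the event description covers both options.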
</description> <cause> <description> The desired host failures to tolerate setting might not be completely respected since the cluster does not have the required failover capacity to satisfy the failover of the largest desired number of hosts. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to reserve more failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA detected an invalid master agentwarningvSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.com.vmware.vc.HA.InvalidMaster|vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised. <EventLongDescription id="com.vmware.vc.HA.InvalidMaster"> <description> A host in a vSphere HA cluster that is claiming to be a master has been determined to be invalid by another master host. This occurs when an existing master gets a message from another master in the same cluster. The existing master verifies that the other master is actually a valid master before it considers abdicating to the other master. An invalid master is an indication that there may be a compromised host on the network that is attempting to disrupt the HA cluster. The offending host should be examined to determine if it has been compromised. It's also possible that a compromised host is impersonating a valid host, so the reported host may not be the actual host that is compromised. </description> </EventLongDescription> ExtendedEventvSphere HA could not identify lock owner host on VM with duplicatesinfovSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name}vSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on this VM with duplicatescom.vmware.vc.HA.LockOwnerUnKnownForDupVms|vSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.LockOwnerUnKnownForDupVms"> <description> The vSphere HA agent could not identify the lock owner host on duplicate VMs. </description> <cause> <description> This occurs when vSphere HA fails over the VM to another host but is unable to bring down the VM on the failed host. This results in multiple instances of a VM running in the cluster if the failed host rejoins the cluster. </description> <action> The lock owner host on the duplicate VM could not be determined. 
</action> </cause> </EventLongDescription> EventExvSphere HA agent cannot reach some cluster management addressesinfovSphere HA agent on {host.name} in cluster {computeResource.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on {host.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on host cannot reach some management network addresses of other hosts: {unpingableAddrs}com.vmware.vc.HA.NotAllHostAddrsPingable|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} cannot reach some management network addresses of other hosts: {unpingableAddrs} <EventLongDescription id="com.vmware.vc.HA.NotAllIsoAddrsPingable"> <description> The vSphere HA agent on the host cannot reach some of the management network addresses of other hosts, and vSphere HA may not be able to restart VMs if a host failure occurs. </description> <cause> <description> There is a network issue preventing this host from communicating with some or all of the hosts in the cluster over their vSphere HA management networks. vSphere HA reliability is currently compromised in the cluster and failover may not reliably occur if a host or hosts should fail during this condition. </description> <action> Determine and correct the source of the communication problem. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not terminate the VM that was selected for preemptionerrorvSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name}vSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate this VM that was selected for preemptioncom.vmware.vc.HA.PreemptionFailedWithMaxRetry|vSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.PreemptionFailedWithMaxRetry"> <description> vSphere HA could not terminate the VM that was selected for preemption. </description> <cause> <description> This occurs when vSphere HA receives an InsufficientResourcesFault for a VM with a fault reason indicating the presence of a preemptible VM. vSphere HA terminates the appropriate preemptible VM to free up resources. </description> <action> Terminate the preemptible VM manually to free up resources. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA remediated duplicates of VMinfovSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of this VMcom.vmware.vc.HA.RemediatedDupVMs|vSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediatedDupVMs"> <description> The vSphere HA agent on the host remediated the duplicate VM. </description> <cause> <description> This occurs when vSphere HA fails over the VM to another host but is unable to bring down the VM on the failed host. This results in multiple instances of a VM running in the cluster if the failed host rejoins the cluster. </description> <action> The VM was kept running on the host that holds the lock on the datastore, and the VM was terminated on the rest of the hosts where it was running. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not remediate duplicates of VMwarningvSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of this VMcom.vmware.vc.HA.RemediationFailedForDupVMs|vSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediationFailedForDupVMs"> <description> The vSphere HA agent on the host could not remediate the duplicate VM. </description> <cause> <description> This occurs when vSphere HA fails over the VM to another host but is unable to bring down the VM on the failed host. This results in multiple instances of a VM running in the cluster if the failed host rejoins the cluster. </description> <action> Duplicates of the VM running on multiple hosts could not be terminated. </action> </cause> </EventLongDescription> EventExvSphere HA failed to start a Fault Tolerance secondary VM.errorvSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.com.vmware.vc.HA.StartFTSecondaryFailedEvent|vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name} in {datacenter.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out. <EventLongDescription id="com.vmware.vc.HA.StartFTSecondaryFailedEvent"> <description> vSphere HA agent failed to start a Fault Tolerance secondary VM. vSphere HA will retry until either the operation succeeds or until the maximum number of restart attempts is reached. </description> </EventLongDescription> EventExvSphere HA successfully started a Fault Tolerance secondary VM.infovSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}.com.vmware.vc.HA.StartFTSecondarySucceededEvent|vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. 
<EventLongDescription id="com.vmware.vc.HA.StartFTSecondarySucceededEvent"> <description> vSphere HA agent successfully started a Fault Tolerance secondary virtual machine. </description> </EventLongDescription> EventExvSphere HA removed a datastore from preferred heartbeat datastoreswarningvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} because the datastore is removed from inventoryvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster because the datastore is removed from inventorycom.vmware.vc.HA.UserHeartbeatDatastoreRemoved|vSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} in {datacenter.name} because the datastore is removed from inventory <EventLongDescription id="com.vmware.vc.HA.UserHeartbeatDatastoreRemoved"> <description> The datastore is removed from the set of preferred heartbeat datastores selected for this cluster. </description> <cause> <description> The datastore does not exist in the inventory. This happens when the datastore is removed from a host in the cluster manually or via a rescan. </description> <action> Choose a different datastore by reconfiguring the vSphere HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA did not perform an isolation response for vm because its VM restart priority is DisabledinfovSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} because its VM restart priorirty is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is Disabled"vSphere HA did not perform an isolation response because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledIsolationEvent|vSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledIsolationEvent"> <description> This event is logged when a host in a vSphere HA cluster was isolated and no isolation response was taken. </description> <cause> <description> The VM restart priority setting is set to disabled, so vSphere HA did not perform any action on this VM when the host became isolated. If the restart priority is disabled, HA will not attempt to restart the VM on another host, so HA will take no action for this VM on the isolated host. This event is informational only. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA did not attempt to restart vm because its VM restart priority is DisabledinfovSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is Disabled"vSphere HA did not attempt to restart vm because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledRestartEvent|vSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledRestartEvent"> <description> This event is logged when a failed VM in a vSphere HA cluster will not be restarted because its VM restart priority setting is set to disabled. </description> <cause> <description> The restart priority for the cluster or VM is disabled, so vSphere HA did not perform any action on this VM failed. This event is informational only. </description> </cause> </EventLongDescription> EventExvCenter Server cannot communicate with the master vSphere HA agentwarningvCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name}vCenter Server cannot communicate with the master vSphere HA agent on {hostname}com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent|vCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent"> <description> This event is logged when vCenter Server cannot communicate with a vSphere HA master agent. </description> <cause> <description> This event is reported when vCenter Server is not able to communicate with a vSphere HA master agent on the host, but it can communicate with other vSphere HA agents in the cluster and these are reporting the host is a master. </description> <action> Correct the networking issue that is preventing vCenter Server from communicating with the host listed in the event. This problem can occur, for example, if the physical NIC in use by this network connection has failed. </action> </cause> </EventLongDescription> ExtendedEventvCenter Server is unable to find a master vSphere HA agentwarningvCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name}vCenter Server is unable to find a master vSphere HA agentcom.vmware.vc.HA.VcCannotFindMasterEvent|vCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotFindMasterEvent"> <description> This event is logged when vCenter Server is unable to find a master vSphere HA agent. 
</description> <cause> <description> </description> <action> </action> </cause> </EventLongDescription> EventExvCenter Server connected to a vSphere HA master agentinfovCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is connected to a master HA agent running on host {hostname}com.vmware.vc.HA.VcConnectedToMasterEvent|vCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcConnectedToMasterEvent"> <description> This event is logged when vCenter Server is connected with a master vSphere HA agent. </description> </EventLongDescription> EventExvCenter Server disconnected from a master vSphere HA agentwarningvCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is disconnected from a master HA agent running on host {hostname}com.vmware.vc.HA.VcDisconnectedFromMasterEvent|vCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcDisconnectedFromMasterEvent"> <description> This event is logged when vCenter Server is disconnected from a master vSphere HA agent. </description> </EventLongDescription> ExtendedEventvSphere HA was unable to reset a VM after it exhausted the retrieserrorvSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on host {host.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on this host after {retryTimes} retriesvSphere HA was unable to reset this VM after {retryTimes} retriescom.vmware.vc.HA.VmDasResetAbortedEvent|vSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmDasResetAbortedEvent"> <description> This event is logged when vSphere HA was unable to reset a VM. </description> <cause> <description> The operation to reset the VM continued to fail. vSphere HA stopped resetting the VM after it exhausted the retries. </description> <action>Ensure that the host system is manageable, for example host agent is not hung. Check if there are no other concurrent tasks running for the VM.</action> </cause> </EventLongDescription> ExtendedEventVirtual machine failed to become vSphere HA ProtectederrorVirtual machine {vm.name} in cluster {computeResource.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.This virtual machine failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.com.vmware.vc.HA.VmNotProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure. 
<EventLongDescription id="com.vmware.vc.HA.VmNotProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation but the VM has not transitioned to vSphere HA Protected in the time period expected. This condition exists because the master vSphere HA agent has not yet persisted that the VM successfully powered on or vCenter is unaware that it did. Consequently, vSphere HA may not restart the VM after a failure. </description> <action> There are a number of reasons why a VM may remain not protected for a period of time. First, the system may be heavily loaded, in which case the transition will just take longer. Second, vCenter may be unable to communicate with the vSphere HA master agent. Examine the inventory to see if any hosts in the cluster are not responding. Third, the the management network may be partitioned, which is preventing the master that owns the VM from protecting it or reporting this information to vCenter. The cluster summary page may report a config issue in this case or hosts in the VM inventory will be reported as not responding. Finally, the vSphere HA master election is taking too long to complete. The cluster summary page will report if this situation exists. See the product documentation for additional troubleshooting tips. </action> </EventLongDescription> ExtendedEventVirtual machine is vSphere HA protectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.This virtual machine is vSphere HA Protected and HA will attempt to restart it after a failure.com.vmware.vc.HA.VmProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is vSphere HA Protected and HA will attempt to restart it after a failure. <EventLongDescription id="com.vmware.vc.HA.VmProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation and vSphere HA has persisted this fact. Consequently, vSphere HA will attempt to restart the VM after a failure. </description> </EventLongDescription> ExtendedEventVirtual machine is not vSphere HA ProtectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.This virtual machine is not vSphere HA Protected.com.vmware.vc.HA.VmUnprotectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is not vSphere HA Protected. <EventLongDescription id="com.vmware.vc.HA.VmUnprotectedEvent"> <description> The virtual machine transitioned from the vSphere HA protected to unprotected state. This transition is a result of a user powering off the virtual machine, disabling vSphere HA, disconnecting the host on which the virtual machine is running, or destroying the cluster in which the virtual machine resides. 
</description> </EventLongDescription> ExtendedEventvSphere HA has unprotected out-of-disk-space VMinfovSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected this virtual machine because it ran out of disk spacecom.vmware.vc.HA.VmUnprotectedOnDiskSpaceFull|vSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} because it ran out of disk spaceExtendedEventvSphere HA did not terminate a VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}warningvSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate this VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore|vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore} <EventLongDescription id=" com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore"> <description> This event is logged when a VM affected by an inaccessible datastore in a vSphere HA cluster was not terminated. </description> <cause> <description> VM Component Protection is configured to not terminate the VM, or vSphere HA host monitoring is disabled, or VM restart priority is disabled, or the VM is an agent VM, or there are insufficient resources to fail over the VM. For the case of insufficient resources, vSphere HA will attempt to terminate the VM when resources become available. </description> <action>Select VM Component Protection option to terminate VM</action> <action>Enable host monitoring</action> <action>Enable VM Restart priority</action> <action>Reduce resource reservations of other VMs in the cluster</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition or isolation if one exists</action> <action>If vSphere DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> </EventLongDescription> ExtendedEventDatastore {ds.name} mounted on this host was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleinfoDatastore {ds.name} mounted on host {host.name} in cluster {computeResource.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on host {host.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on this host was inaccessible. 
vSphere HA detected that the condition was cleared and the datastore is now accessiblecom.vmware.vc.HA.VmcpStorageFailureCleared|Datastore {ds.name} mounted on host {host.name} was inaccessible. The condition was cleared and the datastore is now accessible <EventLongDescription id=" com.vmware.vc.HA.VmcpStorageFailureCleared"> <description> This event is logged when a datastore connectivity was restored. The host can have the following storage access failures: All Paths Down (APD) and Permanent Device Loss (PDL). Datastore was shown as unavailable/inaccessible in storage view. </description> <cause> <description> A datastore on this host was inaccessible. The condition was cleared and the datastore is now accessible. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected that a datastore was inaccessible. This affected the VM with files on the datastorewarningvSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on host {host.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on this host was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected the VM with files on the datastorecom.vmware.vc.HA.VmcpStorageFailureDetectedForVm|vSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} in {datacenter.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastore <EventLongDescription id="com.vmware.vc.HA.VmcpStorageFailureDetectedForVm"> <description> This event is logged when a VM's files were not accessible due to a storage connectivity failure. vSphere HA will take action if VM Component Protection is enabled for the VM. </description> <cause> <description> A datastore was inaccessible due to a storage connectivity loss of All Paths Down or Permenant Device Loss. A VM was affected because it had files on the inaccessible datastore. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA was unable to terminate VM affected by an inaccessible datastore after it exhausted the retrieserrorvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on this host after {retryTimes} retriesvSphere HA was unable to terminate this VM affected by an inaccessible datastore after {retryTimes} retriescom.vmware.vc.HA.VmcpTerminateVmAborted|vSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminateVmAborted"> <description> This event is logged when vSphere HA was unable to terminate a VM affected by an inaccessible datastore. </description> <cause> <description> The operation to terminate the VM continued to fail. vSphere HA stopped terminating the VM after it exhausted the retries. </description> <action> Ensure that the host system is manageable, for example host agent is not hung. Check if there are other concurrent tasks running for the VM.</action> <action> Reset the VM if guest application is not operational after the datastore becomes accessible.</action> </cause> </EventLongDescription> ExtendedEventvSphere HA attempted to terminate a VM affected by an inaccessible datastorewarningvSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on host{host.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on this host because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate this VM because the VM was affected by an inaccessible datastorecom.vmware.vc.HA.VmcpTerminatingVm|vSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} in {datacenter.name} because the VM was affected by an inaccessible datastore <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminatingVm"> <description> This event is logged when vSphere HA attempted to terminate a VM affected by an inaccessible datastore. A VM is terminated by issuing a SIGKILL to the vmx process. </description> <cause> <description> The VM was affected by an inaccessible datastore. vSphere HA VM Component Protection attempted to terminate the VM. </description> </cause> </EventLongDescription> EventExHardware Health Status Changedinfocom.vmware.vc.HardwareSensorEvent|Sensor {sensorNumber} type {sensorType}, Description {sensorName} state {status} for {message}. 
Part Name/Number {partName} {partNumber} Manufacturer {manufacturer}EventExStatus of each Hardware Health Sensor Groupinfocom.vmware.vc.HardwareSensorGroupStatus|Hardware Sensor Status: Processor {processor}, Memory {memory}, Fan {fan}, Voltage {voltage}, Temperature {temperature}, Power {power}, System Board {systemBoard}, Battery {battery}, Storage {storage}, Other {other}ExtendedEventHost configuration is TPM encrypted.warningcom.vmware.vc.HostTpmConfigEncryptionEvent|Host configuration is TPM encrypted.EventExOperation cleanup encountered errorsinfoOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup with task {taskId} encountered errorscom.vmware.vc.OperationCleanupErrorsEvent|Operation cleanup for {vm.name} with task {taskId} encountered errorsExtendedEventThe user does not have permission to view the entity associated with this event.infocom.vmware.vc.RestrictedAccess|The user does not have permission to view the entity associated with this event.EventExFailed to register host with Intel® SGX Registration Service.errorFailed to register host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.FailedRegistration|Failed to register host {host.name} with Intel® SGX Registration Service {registrationUrl}. The service responded with {statusCode}, {errorCode}: {errorMessage}.EventExSending registration request to Intel® SGX Registration Service.infoSending registration request to Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.InitiatingRegistration|Sending registration request for host {host.name} to Intel® SGX Registration Service {registrationUrl}.EventExSuccessfully registered host with Intel® SGX Registration Service.infoSuccessfully registered host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.SuccessfulRegistration|Successfully registered host {host.name} with Intel® SGX Registration Service {registrationUrl}.EventExStateless Alarm TriggeredinfoAlarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'com.vmware.vc.StatelessAlarmTriggeredEvent|Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'ExtendedEventTrusted Host attestation failed.errorcom.vmware.vc.TaHostAttestFailEvent|Trusted Host attestation failed.ExtendedEventTrusted Host attestation passed.infocom.vmware.vc.TaHostAttestPassEvent|Trusted Host attestation passed.ExtendedEventTrusted Host attestation status unset.infocom.vmware.vc.TaHostAttestUnsetEvent|Trusted Host attestation status unset.EventExHost Time Syncronization establishedinfocom.vmware.vc.TimeSyncEvent|Time service {serviceName} has synchronized with remote time source, details: {message}.EventExHost Time Syncronization losterrorcom.vmware.vc.TimeSyncFailedEvent|Time service {serviceName} is not sychronized with the remote time source, details: {message}.ExtendedEventHost must be decommissioned when moved out of a Trusted Infrastructure cluster.errorHost {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host 
{host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.com.vmware.vc.TrustAuthority.DecommissionHost|Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.ExtendedEventHost is not configured for vSphere Trust Authority.errorHost {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.com.vmware.vc.TrustAuthority.HostNotConfigured|Host {host.name} is not configured for vSphere Trust Authority.EventExThe client certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSClientCertExpirationEvent|The client certificate for the Key Provider {keyProviderId} in the Trust Authority Host {hostName} will expire in {dayNum} day(s).EventExThe server certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSServerCertExpirationEvent|The server cetificate of key server {serverName} in the Trusted Key Provider {keyProviderId} will expire in {dayNum} day(s).ExtendedEventCertificates have changed. Trust authority cluster needs to be reconfigured.errorcom.vmware.vc.TrustAuthority.StsCertificatesChange|Certificates have changed. Trust authority cluster needs to be reconfigured.EventExvCenter Service Overall Health Changedinfocom.vmware.vc.VCHealthStateChangedEvent|vCenter Service overall health changed from '{oldState}' to '{newState}' <EventLongDescription id="com.vmware.vc.VCHealthStateChangedEvent"> <description> This event is logged when the overall health of vCenter Service has changed or become unavailable. </description> <cause> <description> The vCenter Service overall health state has changed or become unavailable </description> <action> Examine the vCenter Service health state and make sure the VimWebServices service is up and running on the vCenter Server </action> </cause> </EventLongDescription> EventExDatastore is in healthy state within the clusterinfoDatastore {dsName} is in healthy state within the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreHealthy|Datastore {dsName} is in healthy state within the cluster {computeResource.name}EventExDatastore is not accessible on the host(s)warningDatastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreInaccessible|Datastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}EventExDatastore unmount is failederrorUnmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreUnmountFailed|Unmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}EventExDatastore in desired configuration is missing on the host(s)warningDatastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}com.vmware.vc.VMCStorage.DesiredDatastoreMissing|Datastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}EventExHost(s) mounted with the datastore which is not present in desired configurationerrorHost(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired configuration on {computeResource.name}com.vmware.vc.VMCStorage.NotDesiredDatastorePresent|Host(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired 
configuration on {computeResource.name}EventExExecuting VM Instant CloneinfoExecuting Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} to {destVmName}Executing Instant Clone to {destVmName}com.vmware.vc.VmBeginInstantCloneEvent|Executing Instant Clone of {vm.name} on {host.name} to {destVmName}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneFailedInvalidDestinationEvent|Cannot clone {vm.name} as {destVmName} to invalid or non-existent destination with ID {invalidMoRef}: {fault}EventExRestarting VM CloneinfoRestarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} to {destVmName} with task {taskId}Restarting VM Clone to {destVmName} with task {taskId}com.vmware.vc.VmCloneRestartEvent|Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneToResourcePoolFailedEvent|Cannot clone {vm.name} as {destVmName} to resource pool {destResourcePool}: {fault}EventExFailed to create virtual machineerrorFailed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName}Failed to create virtual machine on {host.name}com.vmware.vc.VmCreateFailedEvent|Failed to create virtual machine {vmName} on {host.name}ExtendedEventVirtual machine disks consolidation succeeded.infoVirtual machine {vm.name} disks consolidatation succeeded on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation succeeded on {host.name}.Virtual machine {vm.name} disks consolidation succeeded.Virtual machine disks consolidation succeeded.com.vmware.vc.VmDiskConsolidatedEvent|Virtual machine {vm.name} disks consolidated successfully on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation needed.warningVirtual machine {vm.name} disks consolidatation is needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is needed on {host.name}.Virtual machine {vm.name} disks consolidation is needed.Virtual machine disks consolidation is needed.com.vmware.vc.VmDiskConsolidationNeeded|Virtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation no longer needed.infoVirtual machine {vm.name} disks consolidatation is no longer needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is no longer needed on {host.name}.Virtual machine {vm.name} disks consolidation is no longer needed.Virtual machine disks consolidation is no longer needed.com.vmware.vc.VmDiskConsolidationNoLongerNeeded|Virtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation failed.warningVirtual machine {vm.name} disks consolidation failed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation failed on {host.name}.Virtual machine {vm.name} disks consolidation failed.Virtual machine disks consolidation failed.com.vmware.vc.VmDiskFailedToConsolidateEvent|Virtual machine {vm.name} 
disks consolidation failed on {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExcom.vmware.vc.VmForkFailedInvalidDestinationEvent|EventExCannot complete Instant Clone of VMerrorCannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone to {destVmName}. Reason : {fault.msg}com.vmware.vc.VmInstantCloneFailedEvent|Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}EventExInstant Clone WarningwarningInstant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning - {warning}com.vmware.vc.VmInstantCloneWarningEvent|Instant Clone Warning for {vmName} - {warning}EventExInstant Clone of VM has completedinfoInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} has completedInstant Clone of {srcVmName} has completedcom.vmware.vc.VmInstantClonedEvent|Instant Clone of {srcVmName} on {host.name} has completedEventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.infocom.vmware.vc.VpxdMemoryUsageClearEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status}.EventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.errorcom.vmware.vc.VpxdMemoryUsageErrorEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status} (used: {usedMemory}%, soft limit: {limit}%).EventExOperation enabledinfocom.vmware.vc.authorization.MethodEnabled|The operation {MethodName} on the {EntityName} of type {EntityType} is enabled.EventExPrivilege check failedwarningPrivilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}com.vmware.vc.authorization.NoPermission|Privilege check failed for user {User} for missing permission {Permission}. 
Session user performing the check: {SessionUser}ExtendedEventErrors occurred during automatic CPVM certificate rotation.errorcom.vmware.vc.certificatemanagement.CPVMCertificateUpdateFailedEvent|Errors occurred during automatic CPVM certificate rotation.ExtendedEventCPVM successfully performed automatic certificate rotation.infocom.vmware.vc.certificatemanagement.CPVMCertificateUpdateHealthyEvent|CPVM successfully performed automatic certificate rotation.ExtendedEventErrors occurred during automatic Spherelet certificate rotation.errorcom.vmware.vc.certificatemanagement.SphereletCertificateUpdateFailedEvent|Errors occurred during automatic Spherelet certificate rotation.ExtendedEventNo errors found during automatic Spherelet certificate rotation.infocom.vmware.vc.certificatemanagement.SphereletCertificateUpdateHealthyEvent|No errors found during automatic Spherelet certificate rotation.ExtendedEventTRUSTED ROOT certificates imported successfully.infocom.vmware.vc.certificatemanagement.TrustedRootsImportedEvent|TRUSTED ROOT certificates imported successfully.ExtendedEventTRUSTED ROOT certificates imported successfully, but with warnings.warningcom.vmware.vc.certificatemanagement.TrustedRootsImportedWithWarningsEvent|TRUSTED ROOT certificates imported successfully, but with warnings.ExtendedEventvCenter Server TLS certificate replaced successfully.infocom.vmware.vc.certificatemanagement.VcCertificateReplacedEvent|vCenter Server TLS certificate replaced successfully.ExtendedEventvCenter Server TLS certificate replaced successfully, but there are warnings detected.warningcom.vmware.vc.certificatemanagement.VcCertificateReplacedWithWarningsEvent|vCenter Server TLS certificate replaced successfully, but there are warnings detected.EventExFailed to update the vCenter server certificate.warningcom.vmware.vc.certificatemanagement.VcServerCertificateUpdateFailureEvent|{cause} for the {serviceName}. Remediation suggested: {remediation}. For more details, please refer to {kbLink}.EventExCA Certificates were updated on hostinfoCA Certificates were updated on {hostname}com.vmware.vc.certmgr.HostCaCertsAndCrlsUpdatedEvent|CA Certificates were updated on {hostname}EventExHost Certificate expiration is imminentwarningHost Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpirationImminentEvent|Host Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}EventExHost Certificate is nearing expirationwarningHost Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringEvent|Host Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}EventExHost Certificate will expire soonwarningHost Certificate on {hostname} will expire soon. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringShortlyEvent|Host Certificate on {hostname} will expire soon. 
Expiration Date: {expiryDate}ExtendedEventHost Certificate Management Mode changedinfocom.vmware.vc.certmgr.HostCertManagementModeChangedEvent|Host Certificate Management Mode changed from {previousMode} to {presentMode}ExtendedEventHost Certificate Management Metadata changedinfocom.vmware.vc.certmgr.HostCertMetadataChangedEvent|Host Certificate Management Metadata changedEventExHost Certificate revokedwarningHost Certificate on {hostname} is revoked.com.vmware.vc.certmgr.HostCertRevokedEvent|Host Certificate on {hostname} is revoked.EventExHost Certificate was updatedinfoHost Certificate was updated on {hostname}, new thumbprint: {thumbprint}com.vmware.vc.certmgr.HostCertUpdatedEvent|Host Certificate was updated on {hostname}, new thumbprint: {thumbprint}EventExAdding host to cluster store failederrorAdding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.AddHostFailed|Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}EventExInitializing cluster store member cache failederrorInitializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.InitializeMemberCacheFailed|Initializing cluster store member cache failed. Fault Reason : {errorMessage}EventExRemoving host from cluster store failederrorRemoving host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.RemoveHostFailed|Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}EventExUpdating host encryption keyinfocom.vmware.vc.crypto.HostKeyUpdatedEvent|Host encryption key set to {newKey}. 
Old key: {oldKey}EventExcom.vmware.vc.crypto.IntegrityCheckFailed|EventExcom.vmware.vc.crypto.IntegrityCheckPassed|EventExCrypto operation audit eventinfocom.vmware.vc.crypto.Operation|Cryptographic operations during {description}{operation}{diskOperations}EventExFailed to update VM fileserrorFailed to update VM files on datastore {ds.name}com.vmware.vc.datastore.UpdateVmFilesFailedEvent|Failed to update VM files on datastore {ds.name} using host {hostName}EventExUpdated VM filesinfoUpdated VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatedVmFilesEvent|Updated VM files on datastore {ds.name} using host {hostName}EventExUpdating VM FilesinfoUpdating VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatingVmFilesEvent|Updating VM files on datastore {ds.name} using host {hostName}ExtendedEventLink Aggregation Control Protocol configuration is inconsistentinfoSingle Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.com.vmware.vc.dvs.LacpConfigInconsistentEvent|Single Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.ExtendedEventFault Tolerance VM restart disabledwarningvSphere HA has been disabled in cluster {computeResource.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart this VM or its Secondary VM after a failure.com.vmware.vc.ft.VmAffectedByDasDisabledEvent|vSphere HA has been disabled in cluster {computeResource.name} of datacenter {datacenter.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure. <EventLongDescription id="com.vmware.vc.ft.VmAffectedByDasDisabledEvent"> <description> When vSphere HA is disabled in a cluster, you cannot restart a Primary VM or its Secondary VM after a failure. This event is issued when vSphere HA is disabled and a Fault Tolerant virtual machine is powered on. The event alerts you of the risk to the Fault Tolerant virtual machine that results from disabling vSphere HA. 
</description> <cause> <description>vSphere HA was disabled when a Fault Tolerant virtual machine was powered on</description> <action>Re-enable vSphere HA</action> </cause> </EventLongDescription> EventExGuest operationinfoGuest operation {operationName.@enum.com.vmware.vc.guestOp} performed.com.vmware.vc.guestOperations.GuestOperation|Guest operation {operationName.@enum.com.vmware.vc.guestOp} performed on Virtual machine {vm.name}.EventExGuest operation authentication failurewarningGuest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp}.com.vmware.vc.guestOperations.GuestOperationAuthFailure|Guest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp} on Virtual machine {vm.name}.ExtendedEventvSphere HA restarted a virtual machinewarningvSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA restarted virtual machine {vm.name} on host {host.name}vSphere HA restarted virtual machine {vm.name}vSphere HA restarted this virtual machinecom.vmware.vc.ha.VmRestartedByHAEvent|vSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.ha.VmRestartedByHAEvent"> <description> The virtual machine was restarted automatically by vSphere HA on this host. This response may be triggered by a failure of the host the virtual machine was originally running on or by an unclean power-off of the virtual machine (eg. if the vmx process was killed). </description> </EventLongDescription> ExtendedEventAutostart power on failederrorPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on this host failedcom.vmware.vc.host.AutoStartPowerOnFailedEvent|Powering on virtual machines according to autostart rules on host {host.name} in datacenter {datacenter.name} failedExtendedEventAutostart rules reconfigure failederrorReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on this host failedcom.vmware.vc.host.AutoStartReconfigureFailedEvent|Reconfiguring autostart rules for virtual machines on {host.name} in datacenter {datacenter.name} failedEventExEncryption mode is enabled on host.infoEncryption mode is enabled on host.com.vmware.vc.host.Crypto.Enabled|Encryption mode is enabled on host {hostName}.EventExThe operation is not supported on hosts which have encryption disabled.errorcom.vmware.vc.host.Crypto.HostCryptoDisabled|The operation is not supported on host {hostName} because encryption is disabled.EventExHost key is being renewed because an error occurred on the key provider.warningHost key is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KMSClusterError|Host key of {hostName} is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.EventExHost key is being renewed because key was missing on the key provider.warningHost key is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. 
The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KeyMissingOnKMS|Host key of {hostName} is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. The new key is {newKey}.EventExHost requires encryption mode enabled and the key provider is not available.errorHost requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KMSClusterError|Host {hostName} requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExHost requires encryption mode enabled and the key is not available on the key provider.errorHost requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KeyMissingOnKMS|Host {hostName} requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExFailed to send keys to host because of host error.errorcom.vmware.vc.host.Crypto.SendKeyError.HostError|Failed to send keys {keys} to host {hostName}. Please check host connection.EventExHost profile {operation} failed with error: {error}.errorHost profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.com.vmware.vc.host.HPOperationFailed|Host profile {operation} failed with error: {error}.ExtendedEventHost booted from stateless cache.warningHost booted from stateless cache.Host booted from stateless cache.Host booted from stateless cache.com.vmware.vc.host.HostBootedFromStatelessCacheEvent|Host booted from stateless cache.EventExHost IP address conflict detectederrorHost IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}com.vmware.vc.host.HostIpConflictEvent|Host IP address conflict detected. 
{changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}ExtendedEventHost in Memory Mode and active DRAM usage is normalinfo{host.name} is in Memory Mode and its active DRAM usage is normal{host.name} is in Memory Mode and its active DRAM usage is normalThe host is in Memory Mode and its active DRAM usage is normalcom.vmware.vc.host.MemoryModeActiveDRAMGreen|Host {host.name} is in Memory Mode and its active DRAM usage is normalExtendedEventHost in Memory Mode and active DRAM usage is highwarningHost {host.name} is in Memory Mode and its active DRAM usage is highHost {host.name} is in Memory Mode and its active DRAM usage is highThe host is in Memory Mode and its active DRAM usage is highcom.vmware.vc.host.MemoryModeActiveDRAMYellow|Host {host.name} is in Memory Mode and its active DRAM usage is highExtendedEventNSX installation failed on host.errorNSX installation failed on host.NSX installation failed on host.NSX installation failed on host.com.vmware.vc.host.NsxInstallFailed|NSX installation failed on host.ExtendedEventNSX installation successful on host.infoNSX installation successful on host.NSX installation successful on host.NSX installation successful on host.com.vmware.vc.host.NsxInstallSuccess|NSX installation successful on host.ExtendedEventPartial maintenance mode status has changed.infoHost status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.com.vmware.vc.host.PartialMaintenanceModeStatusChanged|Host status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}com.vmware.vc.host.StatelessHPApplyEarlyBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}com.vmware.vc.host.StatelessHPApplyFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}com.vmware.vc.host.StatelessHPApplyPostBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. 
{error}EventExHost TPM attestation failederrorHost TPM attestation failed for host {host.name}: {1}Host TPM attestation failed for host {host.name}: {1}Host TPM attestation failed: {1}com.vmware.vc.host.TPMAttestationFailedEvent|Host TPM attestation failed for host {host.name} in datacenter {datacenter.name}: {1}ExtendedEventActive DRAM usage of the memory tiered host is normalinfoHost {host.name} is a memory tiered host and its active DRAM usage is normalHost {host.name} is a memory tiered host and its active DRAM usage is normalActive DRAM usage of the memory tiered host is normalcom.vmware.vc.host.TieringMemoryActiveDRAMGreen|Host {host.name} is a memory tiered host and its active DRAM usage is normalExtendedEventActive DRAM usage of the memory tiered host is highwarningHost {host.name} is a memory tiered host and its active DRAM usage is highHost {host.name} is a memory tiered host and its active DRAM usage is highActive DRAM usage of the memory tiered host is highcom.vmware.vc.host.TieringMemoryActiveDRAMYellow|Host {host.name} is a memory tiered host and its active DRAM usage is highExtendedEventNew TPM host endorsement key doesn't match the one in the DBerrorThe new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DBcom.vmware.vc.host.TpmEndorsementKeyMismatch|The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name} in datacenter {datacenter.name}ExtendedEventHost's virtual flash resource is accessible.infoHost's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.com.vmware.vc.host.clear.vFlashResource.inaccessible|Host's virtual flash resource is restored to be accessible.EventExHost's virtual flash resource usage dropped below the threshold.infoHost's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.com.vmware.vc.host.clear.vFlashResource.reachthreshold|Host's virtual flash resource usage dropped below {1}%.ExtendedEventDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.warningDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound|Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.ExtendedEventDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostswarningDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. 
Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostscom.vmware.vc.host.problem.DeprecatedVMFSVolumeFoundAfterVMFS3EOL|Deprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsExtendedEventImproved virtual disk infrastructure's catalog management turned unhealthywarningcom.vmware.vc.host.problem.VStorageObjectInfraCatalogUnhealthy|Improved virtual disk infrastructure's catalog management turned unhealthyExtendedEventImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.warningImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent|Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. <EventLongDescription id="com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent"> <description> Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. </description> <cause> <description> This is caused by creating improved virtual disk infrastructure namespaces with empty storage policy. </description> <action> Update infrastructure namespaces storage policy. </action> </cause> </EventLongDescription> ExtendedEventHost's virtual flash resource is inaccessible.warningHost's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.com.vmware.vc.host.problem.vFlashResource.inaccessible|Host's virtual flash resource is inaccessible. <EventLongDescription id="com.vmware.vc.host.problem.vFlashResource.inaccessible"> <description> Inaccessible host virtual flash resource indicates that its backing VFFS volume is inaccessible. Due to inaccessible host virtual flash resource, virtual machines with vSphere Flash Read Cache configured cannot be powered on or might experience unpredicted behavior if powered on. </description> <cause> <description> This might be caused by an unmounted VFFS volume or an APD/PDL on the VFFS volume. </description> <action> Check the backing VFFS volume connection status. For example, mount the unmounted volume or resolve the APD/PDL issues. The host virtual flash resource is accessible as long as the backing VFFS volume is accessible. 
</action> </cause> </EventLongDescription> EventExHost's virtual flash resource usage exceeds the threshold.warningHost's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.com.vmware.vc.host.problem.vFlashResource.reachthreshold|Host's virtual flash resource usage is more than {1}%.ExtendedEventVirtual flash resource is configured on the hostinfoVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostcom.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent|Virtual flash resource is configured on the hostExtendedEventVirtual flash resource is removed from the hostinfoVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostcom.vmware.vc.host.vFlash.VFlashResourceRemovedEvent|Virtual flash resource is removed from the hostEventExDefault virtual flash module is changed to {vFlashModule} on the hostinfoDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostcom.vmware.vc.host.vFlash.defaultModuleChangedEvent|Any new virtual Flash Read Cache configuration request will use {vFlashModule} as default virtual flash module. All existing virtual Flash Read Cache configurations remain unchanged. <EventLongDescription id="com.vmware.vc.host.vFlash.defaultModuleChangedEvent"> <description> The default virtual flash module has been changed. Any new virtual Flash Read Cache configuration uses the new default virtual flash module if undefined in configuration. All existing configurations will remain unchanged. </description> </EventLongDescription> ExtendedEventVirtual flash modules are loaded or reloaded on the hostinfoVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostcom.vmware.vc.host.vFlash.modulesLoadedEvent|Virtual flash modules are loaded or reloaded on the hostEventExEntity became healthyinfo{entityName} became healthycom.vmware.vc.infraUpdateHa.GreenHealthEvent|{entityName} became healthyEventExProvider has posted invalid health updateswarningProvider {providerName} has posted invalid health updatesProvider {providerName} has posted invalid health updatescom.vmware.vc.infraUpdateHa.InvalidUpdatesEvent|Provider {providerName} has posted invalid health updatesEventExProvider reported a healthy statusinfo{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}com.vmware.vc.infraUpdateHa.PostGreenHealthUpdateEvent|{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}EventExProvider reported a severely degraded statuswarning{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostRedHealthUpdateEvent|{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. 
Remediation suggested by {providerName}: {remediation}EventExProvider reported a moderately degraded statuswarning{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostYellowHealthUpdateEvent|{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}EventExEntity has entered quarantine modewarning{entityName} has entered quarantine modecom.vmware.vc.infraUpdateHa.QuarantineEvent|{entityName} has entered quarantine modeEventExEntity has exited quarantine modeinfo{entityName} has exited quarantine modecom.vmware.vc.infraUpdateHa.QuarantineRemovedEvent|{entityName} has exited quarantine modeEventExEntity became severely degradedwarning{entityName} became severely degradedcom.vmware.vc.infraUpdateHa.RedHealthEvent|{entityName} became severely degradedEventExProvider has stale updateswarningProvider {providerName} has not posted an update in {timeout} secondsProvider {providerName} has not posted an update in {timeout} secondscom.vmware.vc.infraUpdateHa.StaleUpdatesEvent|Provider {providerName} has not posted an update in {timeout} secondsEventExEntity has unknown health statewarning{entityName} has unknown health statecom.vmware.vc.infraUpdateHa.UnknownHealthEvent|{entityName} has unknown health stateEventExEntity became moderately degradedwarning{entityName} became moderately degradedcom.vmware.vc.infraUpdateHa.YellowHealthEvent|{entityName} became moderately degradedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters has failederrorvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterInstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterInstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters has failederrorvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUninstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster 
{computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} are successfulcom.vmware.vc.iofilter.FilterUninstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters has failederrorvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} and in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUpgradeFailedEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters is successfulinfovSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterUpgradeSuccessEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has succeededEventExvSphere APIs for I/O Filters (VAIO) host vendor provider registration has failed.errorvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderRegistrationFailedEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully registeredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredcom.vmware.vc.iofilter.HostVendorProviderRegistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredEventExFailed to unregister vSphere APIs for I/O Filters (VAIO) host vendor provider.errorFailed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderUnregistrationFailedEvent|Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. 
Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully unregisteredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredcom.vmware.vc.iofilter.HostVendorProviderUnregistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredExtendedEventIoFilterManager API invoked with untrusted certificate SSL trust policywarningIoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name}com.vmware.vc.iofilter.UntrustedCertificateEvent|IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventKey providers are backed up.infocom.vmware.vc.kms.crypto.AllBackedUp|All key providers are backed up.EventExKey creation failed on key provider.errorcom.vmware.vc.kms.crypto.KeyGenerateFail|Key creation failed on key provider {clusterName} with error code {errorCode}. Check log for details.EventExKey provider(s) are not backed up.errorcom.vmware.vc.kms.crypto.NotBackedUp|Key provider(s) {providerIds} are not backed up.EventExKey provider backup is suggested after it is updated.warningcom.vmware.vc.kms.crypto.NotBackedUpAfterUpdate|Key provider(s) {providerIds} are not backed up. Backup is suggested after updating a provider.EventExFailed to send keys because of key provider error.errorcom.vmware.vc.kms.crypto.SendKeyError.KMSClusterError|Failed to send keys {keys} because of KMS connection error.EventExFailed to send keys because keys are missing on key provider.errorcom.vmware.vc.kms.crypto.SendKeyError.KeyMissingOnKMS|Failed to send keys {keys} because of keys missing on key provider.EventExThe Trusted Key Provider is not available.warningcom.vmware.vc.kms.crypto.TrustAuthority.ClusterNotAvailable|The Trusted Key Provider {keyProviderId} is not available.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.ClusterUnhealthy|The Trusted Key Provider {keyProviderId} is unhealthy. Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.KmsUnhealthy|The key server {serverName} in the Trusted Key Provider {keyProviderId} is unhealthy. 
Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExKey Management Server is unreachableerrorcom.vmware.vc.kms.crypto.Unreachable|Key Management Server {serverName}({address}) is unreachableEventExRetrieved Key Management Server vendor information.infocom.vmware.vc.kms.crypto.Vendor|Key Management Server {serverName}({address}) vendor: {vendor}EventExVirtual NIC entered passthrough modeinfoNetwork passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is active on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent|Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExVirtual NIC exited passthrough modeinfoNetwork passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is inactive on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterExitedPassthroughEvent|Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExFailed to clone state for entity on extensionerrorFailed to clone state on extension {extensionName}com.vmware.vc.ovfconsumers.CloneOvfConsumerStateErrorEvent|Failed to clone state for the entity '{entityName}' on extension {extensionName}EventExFailed to retrieve OVF environment sections for VM on extensionerrorFailed to retrieve OVF environment sections from extension {extensionName}com.vmware.vc.ovfconsumers.GetOvfEnvironmentSectionsErrorEvent|Failed to retrieve OVF environment sections for VM '{vm.name}' from extension {extensionName}EventExUnable to power on VM after cloningerrorPowering on after cloning was blocked by an extension. Message: {description}com.vmware.vc.ovfconsumers.PowerOnAfterCloneErrorEvent|Powering on VM '{vm.name}' after cloning was blocked by an extension. 
Message: {description}EventExFailed to register entity on extensionerrorcom.vmware.vc.ovfconsumers.RegisterEntityErrorEvent|Failed to register entity '{entityName}' on extension {extensionName}EventExFailed to unregister entities on extensionerrorcom.vmware.vc.ovfconsumers.UnregisterEntitiesErrorEvent|Failed to unregister entities on extension {extensionName}EventExFailed to validate OVF descriptor on extensionerrorcom.vmware.vc.ovfconsumers.ValidateOstErrorEvent|Failed to validate OVF descriptor on extension {extensionName}ExtendedEventAnswer file exportedinfoAnswer file for host {host.name} has been exportedAnswer file for host {host.name} has been exportedAnswer file exportedcom.vmware.vc.profile.AnswerFileExportedEvent|Answer file for host {host.name} in datacenter {datacenter.name} has been exportedExtendedEventHost customization settings updatedinfoHost customization settings for host {host.name} has been updatedHost customization settings for host {host.name} has been updatedHost customization settings updatedcom.vmware.vc.profile.AnswerFileUpdatedEvent|Host customization settings for host {host.name} in datacenter {datacenter.name} has been updatedEventExResource pool renamedinfoResource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'com.vmware.vc.rp.ResourcePoolRenamedEvent|Resource pool '{oldName}' has been renamed to '{newName}'ExtendedEventDatastore maintenance mode operation canceledinfoThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledcom.vmware.vc.sdrs.CanceledDatastoreMaintenanceModeEvent|The datastore maintenance mode operation has been canceledExtendedEventDatastore cluster is healthyinfoDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthycom.vmware.vc.sdrs.ClearDatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} is healthyExtendedEventConfigured storage DRSinfoConfigured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.ConfiguredStorageDrsOnPodEvent|Configured storage DRS on datastore cluster {objectName}ExtendedEventDatastore cluster has datastores that belong to different SRM Consistency GroupswarningDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency Groupscom.vmware.vc.sdrs.ConsistencyGroupViolationEvent|Datastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsExtendedEventDatastore entered maintenance modeinfoDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modecom.vmware.vc.sdrs.DatastoreEnteredMaintenanceModeEvent|Datastore {ds.name} 
has entered maintenance modeExtendedEventDatastore is entering maintenance modeinfoDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modecom.vmware.vc.sdrs.DatastoreEnteringMaintenanceModeEvent|Datastore {ds.name} is entering maintenance modeExtendedEventDatastore exited maintenance modeinfoDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modecom.vmware.vc.sdrs.DatastoreExitedMaintenanceModeEvent|Datastore {ds.name} has exited maintenance modeEventExDatastore cluster has datastores shared across multiple datacenterswarningDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacenterscom.vmware.vc.sdrs.DatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersExtendedEventErrors encountered while datastore entering into maintenance modeerrorDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modecom.vmware.vc.sdrs.DatastoreMaintenanceModeErrorsEvent|Datastore {ds.name} encountered errors while entering maintenance modeExtendedEventStorage DRS disabledinfoDisabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsDisabledEvent|Disabled storage DRS on datastore cluster {objectName}EventExStorage DRS enabledinfoEnabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}com.vmware.vc.sdrs.StorageDrsEnabledEvent|Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}ExtendedEventStorage DRS invocation failederrorStorage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsInvocationFailedEvent|Storage DRS invocation failed on datastore cluster {objectName}ExtendedEventNew storage DRS recommendation generatedinfoA new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster 
{objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsNewRecommendationPendingEvent|A new storage DRS recommendation has been generated on datastore cluster {objectName}EventExDatastore cluster connected to host(s) that do not support storage DRSwarningDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRScom.vmware.vc.sdrs.StorageDrsNotSupportedHostConnectedToPodEvent|Datastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSExtendedEventPending storage recommendations were appliedinfoAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedcom.vmware.vc.sdrs.StorageDrsRecommendationApplied|All pending recommendations on datastore cluster {objectName} were appliedEventExStorage DRS migrated VM disksinfoStorage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStorageMigrationEvent|Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}EventExStorage DRS placed VM disksinfoStorage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStoragePlacementEvent|Storage DRS placed disks of VM {vm.name} on datastore {ds.name}EventExDatastore cluster createdinfoCreated datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodCreatedEvent|Created datastore cluster {objectName}EventExDatastore cluster deletedinfoRemoved datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodDestroyedEvent|Removed datastore cluster {objectName}EventExSIOC: pre-4.1 host connected to SIOC-enabled datastorewarningSIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. 
This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.com.vmware.vc.sioc.NotSupportedHostConnectedToDatastoreEvent|SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.ExtendedEventESXi VASA client certificate provision has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateProvisionFailure|ESXi VASA client certificate provision has failedExtendedEventESXi VASA client certificate provision has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateProvisionSuccess|ESXi VASA client certificate provision has succeededExtendedEventESXi VASA client certificate register to some/all VP(s) has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateRegisterFailure|ESXi VASA client certificate register to some/all VP(s) has failedExtendedEventESXi VASA client certificate register to VP(s) has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateRegisterSuccess|ESXi VASA client certificate register to VP(s) has succeededEventExSystem capability warning from storage providerwarningcom.vmware.vc.sms.LunCapabilityInitEvent|Storage provider [{providerName}] : system capability warning for {eventSubjectId} : {msgTxt}EventExSystem capability normal event from storage providerinfocom.vmware.vc.sms.LunCapabilityMetEvent|Storage provider [{providerName}] : system capability normal for {eventSubjectId}EventExSystem capability alert from storage providererrorcom.vmware.vc.sms.LunCapabilityNotMetEvent|Storage provider [{providerName}] : system capability alert for {eventSubjectId} : {msgTxt}EventExA Storage Alarm of type 'Object' cleared by the VASA providerinfocom.vmware.vc.sms.ObjectTypeAlarmClearedEvent|Storage provider [{providerName}] cleared a Storage Alarm of type 'Object' on {eventSubjectId} : {msgTxt}EventExAn alert on an object raised by the VASA providererrorcom.vmware.vc.sms.ObjectTypeAlarmErrorEvent|Storage provider [{providerName}] raised an alert type 'Object' on {eventSubjectId} : {msgTxt}EventExA warning on an object raised by the VASA providerwarningcom.vmware.vc.sms.ObjectTypeAlarmWarningEvent|Storage provider [{providerName}] raised a warning of type 'Object' on {eventSubjectId} : {msgTxt}EventExRegistering renewed VC Client Certificate failed for the VASA provider.errorcom.vmware.vc.sms.RegisterVcClientCertOnRenewalFailure|Registering renewed VC Client Certificate failed for VASA provider with url : {provider}.ExtendedEventRegistering renewed VC Client Certificate succeeded for all the VASA providers.infocom.vmware.vc.sms.RegisterVcClientCertOnRenewalSuccess|Registering renewed VC Client Certificate succeeded for all the VASA providers.EventExThin provisioning capacity threshold normal event from storage providerinfocom.vmware.vc.sms.ThinProvisionedLunThresholdClearedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold normal for {eventSubjectId}EventExThin provisioning capacity threshold alert from storage providererrorcom.vmware.vc.sms.ThinProvisionedLunThresholdCrossedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold alert for {eventSubjectId}EventExThin provisioning capacity threshold warning from storage providerwarningcom.vmware.vc.sms.ThinProvisionedLunThresholdInitEvent|Storage provider [{providerName}] : thin 
provisioning capacity threshold warning for {eventSubjectId}EventExStorage provider certificate will expire very shortlyerrorcom.vmware.vc.sms.VasaProviderCertificateHardLimitReachedEvent|Certificate for storage provider {providerName} will expire very shortly. Expiration date : {expiryDate}EventExVASA Provider certificate is renewedinfocom.vmware.vc.sms.VasaProviderCertificateRenewalEvent|VASA Provider certificate for {providerName} is renewedEventExStorage provider certificate will expire soonwarningcom.vmware.vc.sms.VasaProviderCertificateSoftLimitReachedEvent|Certificate for storage provider {providerName} will expire soon. Expiration date : {expiryDate}EventExStorage provider certificate is validinfocom.vmware.vc.sms.VasaProviderCertificateValidEvent|Certificate for storage provider {providerName} is validEventExStorage provider is connectedinfocom.vmware.vc.sms.VasaProviderConnectedEvent|Storage provider {providerName} is connectedEventExStorage provider is disconnectederrorcom.vmware.vc.sms.VasaProviderDisconnectedEvent|Storage provider {providerName} is disconnectedEventExRefreshing CA certificates and CRLs failed for some VASA providerserrorcom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsFailure|Refreshing CA certificates and CRLs failed for VASA providers with url : {providerUrls}ExtendedEventRefreshing CA certificates and CRLs succeeded for all registered VASA providers.infocom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsSuccess|Refreshing CA certificates and CRLs succeeded for all registered VASA providers.EventExOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate failed for the VASA 5.0 or greater VASA providers.errorcom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationFailure|On VMCA Root Certificate rotation, register and refresh certificates failed for VASA 5.0 or greater VASA provider : {provider}ExtendedEventOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate succeeded for all the VASA 5.0 or greater VASA providers.infocom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationSuccess|On VMCA Root Certificate rotation, register and refresh certificates succeeded for all the VASA 5.0 or greater VASA providers.EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vmName} connected to {datastore.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vmName} connected to datastore {datastore.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vmName} connected to {datastore.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vmName} connected to {datastore.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in 
{datacenter.name} compliance status is unknown from storage provider {providerName}.EventExHealth event from storage providerinfocom.vmware.vc.sms.provider.health.event|Storage provider [{providerName}] : health event for {eventSubjectId} : {msgTxt}EventExSystem event from storage providerinfocom.vmware.vc.sms.provider.system.event|Storage provider [{providerName}] : system event : {msgTxt}EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} compliance status is unknown from storage provider {providerName}.EventExProfile association/dissociation failederrorProfile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}com.vmware.vc.spbm.ProfileAssociationFailedEvent|Profile association/dissociation failed for {entityName}EventExConfiguring storage policy failed.errorConfiguring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}com.vmware.vc.spbm.ServiceErrorEvent|Configuring storage policy failed for VM {entityName}. 
Verify that SPBM service is healthy. Fault Reason : {errorMessage}ExtendedEventQuick stats is not up-to-dateinfoQuick stats on {host.name} in {computeResource.name} is not up-to-dateQuick stats on {host.name} is not up-to-dateQuick stats on {host.name} is not up-to-datecom.vmware.vc.stats.HostQuickStatesNotUpToDateEvent|Quick stats on {host.name} in {computeResource.name} in {datacenter.name} is not up-to-date <EventLongDescription id="com.vmware.vc.stats.HostQuickStatesNotUpToDateEvent"> <description> Quick stats on the host is not up-to-date. </description> <cause> <description> Quickstats on the host are not up-to-date. This is expected if the host was recently added or reconnected or VC just started up. </description> <action> No specific action needs to be taken. </action> </cause> </EventLongDescription> EventExODBC errorerrorcom.vmware.vc.stats.StatsInsertErrorEvent|Stats insertion failed for entity {entity} due to ODBC error. <EventLongDescription id="com.vmware.vc.stats.StatsInsertErrorEvent"> <description> If a set of performance statistics data insertion fails due to database related issues, this event is logged. </description> <cause> <description>Usually an attempt to insert duplicate entries causes this event</description> <action>Usually it is transient and self-healing. If not then probably the database contains rogue entries. Manually deleting the data for the particular stat provider might fix the issue</action> </cause> </EventLongDescription> EventExRoot user password expired.errorcom.vmware.vc.system.RootPasswordExpiredEvent|Root user password has expired. Log in to https://{pnid}:5480 to update the root password.EventExRoot user password is about to expire.warningcom.vmware.vc.system.RootPasswordExpiryEvent|Root user password expires in {days} days. 
Log in to https://{pnid}:5480 to update the root password.ExtendedEventFT Disabled VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection will protect this virtual machine as non-FT virtual machine because the FT state is disabledcom.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because the FT state is disabledExtendedEventFailover FT VM due to component failureinfoFT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} on host {host.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} is going to fail over to Secondary VM due to component failureFT Primary VM is going to fail over to Secondary VM due to component failurecom.vmware.vc.vcp.FtFailoverEvent|FT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is going to fail over to Secondary VM due to component failure ExtendedEventFT VM failover failederrorFT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} failed to failover to secondaryFT virtual machine {vm.name} on host {host.name} failed to failover to secondaryFT virtual machine {vm.name} failed to failover to secondaryFT virtual machine failed to failover to secondarycom.vmware.vc.vcp.FtFailoverFailedEvent|FT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to failover to secondaryExtendedEventRestarting FT secondary due to component failureinfoHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine due to component failurecom.vmware.vc.vcp.FtSecondaryRestartEvent|HA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to component failureExtendedEventFT secondary VM restart failederrorFT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} failed to restartFT Secondary VM {vm.name} on host {host.name} failed to restartFT Secondary VM {vm.name} failed to restartFT Secondary VM failed to restartcom.vmware.vc.vcp.FtSecondaryRestartFailedEvent|FT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartExtendedEventNeed secondary VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine since it has been in the needSecondary 
state too longHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects this virtual machine as non-FT virtual machine because it has been in the needSecondary state too longcom.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because it has been in the needSecondary state too longEventExVM Component Protection test endsinfoVM Component Protection test ends on host {host.name} in cluster {computeResource.name}VM Component Protection test ends on host {host.name}VM Component Protection test endscom.vmware.vc.vcp.TestEndEvent|VM Component Protection test ends on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}EventExVM Component Protection test startsinfoVM Component Protection test starts on host {host.name} in cluster {computeResource.name}VM Component Protection test starts on host {host.name}VM Component Protection test startscom.vmware.vc.vcp.TestStartEvent|VM Component Protection test starts on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventNo action on VMinfoHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} due to the feature configuration settingHA VM Component Protection did not take action due to the feature configuration settingcom.vmware.vc.vcp.VcpNoActionEvent|HA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to the feature configuration settingEventExVirtual machine lost datastore accesserrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {datastore}Virtual machine {vm.name} on host {host.name} lost access to {datastore}Virtual machine {vm.name} lost access to {datastore}Virtual machine lost access to {datastore}com.vmware.vc.vcp.VmDatastoreFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {datastore}EventExVirtual machine lost VM network accessibilityerrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {network}Virtual machine {vm.name} on host {host.name} lost access to {network}Virtual machine {vm.name} lost access to {network}Virtual machine lost access to {network}com.vmware.vc.vcp.VmNetworkFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {network}EventExVM power off hangerrorHA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} on 
host {host.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine successfully after trying {numTimes} times and will keep tryingcom.vmware.vc.vcp.VmPowerOffHangEvent|HA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} successfully after trying {numTimes} times and will keep tryingExtendedEventRestarting VM due to component failureinfoHA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failureHA VM Component Protection is restarting virtual machine due to component failurecom.vmware.vc.vcp.VmRestartEvent|HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventVirtual machine affected by component failure failed to restarterrorVirtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} failed to restartVirtual machine {vm.name} affected by component failure on host {host.name} failed to restartVirtual machine {vm.name} affected by component failure failed to restartVirtual machine affected by component failure failed to restartcom.vmware.vc.vcp.VmRestartFailedEvent|Virtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartEventExNo candidate host to restarterrorHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for this virtual machine after waiting {numSecWait} seconds and will keep tryingcom.vmware.vc.vcp.VmWaitForCandidateHostEvent|HA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} after waiting {numSecWait} seconds and will keep tryingEventExCertificate will expire soon.warningcom.vmware.vc.vecs.CertExpirationEvent|Certificate '{subject}' from '{store}' expires on {expiryDate}EventExKMS Client Certificate will expire soon.warningcom.vmware.vc.vecs.KMSClientCertExpirationEvent|KMS Client Certificate '{subject}' expires on {expiryDate}EventExKMS Server Certificate will expire soon.warningcom.vmware.vc.vecs.KMSServerCertExpirationEvent|KMS Server Certificate '{subject}' expires on {expiryDate}EventExOperation on the SSD device failederrorConfiguration on disk {disk.path} failed. 
Reason : {fault.msg}com.vmware.vc.vflash.SsdConfigurationFailedEvent|Configuration on disk {disk.path} failed. Reason : {fault.msg}EventExVirtual machine is locked because an error occurred on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KMSClusterError|Virtual machine {vmName} is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the host.errorVirtual machine is locked because keys were missing on the host {host}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnHost|Virtual machine {vmName} is locked because keys were missing on the host {host}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnKMS|Virtual machine {vmName} is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because the required Trusted Key Provider(s) is unavailable.errorVirtual machine is locked. Before unlocking, check the status of Trusted Key Provider(s) {kmsCluster} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).com.vmware.vc.vm.Crypto.VMLocked.TAKMSClusterUnavaliable|Virtual machine {vmName} is locked. 
Before unlocking, check the status of Trusted Key Provider(s) {keyProviderId} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).EventExVirtual machine is locked because Trust Authority managed key(s) are missing on the required host.errorVirtual machine is locked because Trust Authority managed key(s) are missing on host {host}.com.vmware.vc.vm.Crypto.VMLocked.TAKeyMissingOnHost|Virtual machine {vmName} is locked because Trust Authority managed key(s) {missedkeys} are missing on the required host {host}.EventExVirtual machine is unlocked.infoVirtual machine is unlocked.com.vmware.vc.vm.Crypto.VMUnlocked|Virtual machine {vmName} is unlocked.EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}com.vmware.vc.vm.DstVmClonedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was cloned from {oldMoRef}EventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}com.vmware.vc.vm.DstVmMigratedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was migrated from {oldMoRef}ExtendedEventVirtual machine PMem bandwidth usage is normalinfoVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalThe virtual machine's PMem bandwidth usage is normalcom.vmware.vc.vm.PMemBandwidthGreen|Virtual machine {vm.name}'s PMem bandwidth usage is normalExtendedEventVirtual machine PMem bandwidth usage is highwarningVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highThe virtual machine's PMem bandwidth usage is highcom.vmware.vc.vm.PMemBandwidthYellow|Virtual machine {vm.name}'s PMem bandwidth usage is highExtendedEventVirtual machine failed to power on after cloning.errorVirtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after performing cloning operation on this host.Virtual machine failed to power on after cloning.com.vmware.vc.vm.PowerOnAfterCloneErrorEvent|Virtual machine {vm.name} failed to power on after cloning on host {host.name} in datacenter {datacenter.name}EventExVirtual machine clone failederrorVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in 
{destDatacenter.name}com.vmware.vc.vm.SrcVmCloneFailedEvent|Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {oldMoRef} in {computeResource.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} on host {host.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}com.vmware.vc.vm.SrcVmClonedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} was cloned to {newMoRef}ExtendedEventVirtual machine failed to create instant clone childerrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childcom.vmware.vc.vm.SrcVmForkFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to create instant clone childEventExVirtual machine migration failederrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to migrateVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migratecom.vmware.vc.vm.SrcVmMigrateFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to migrateEventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} on {host.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}com.vmware.vc.vm.SrcVmMigratedEvent|Virtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} in {datacenter.name} was migrated to {newMoRef}ExtendedEventTemplate converted to VMinfoTemplate {vm.name} converted to VM on {host.name}Template {vm.name} converted to VM on {host.name}Template {vm.name} converted to VMConverted to VM on {host.name}com.vmware.vc.vm.TemplateConvertedToVmEvent|Template {vm.name} converted to VM on {host.name} in {datacenter.name}ExtendedEventVirtual machine tier 1 bandwidth usage is normalinfoVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalThe virtual machine's tier 1 bandwidth usage is normalcom.vmware.vc.vm.Tier1BandwidthGreen|Virtual machine {vm.name}'s tier 1 bandwidth usage is normalExtendedEventVirtual machine tier 1 bandwidth usage is highwarningVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highThe virtual machine's tier 1 bandwidth usage is highcom.vmware.vc.vm.Tier1BandwidthYellow|Virtual machine {vm.name}'s tier 1 bandwidth usage is highExtendedEventThe network adapter of VM successfully activate UPTinfoUPT on network adapter is activatedcom.vmware.vc.vm.Uptv2Active|The UPT is successfully activated on the network adapterEventExThe network adapter of VM fails to 
activate UPTwarningUPT on network adapter is not activatedcom.vmware.vc.vm.Uptv2Inactive|The UPT failed to activate on the network adapter.{details}EventExVirtual NIC reservation is not satisfiederrorReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is not satisfiedReservation of Virtual NIC {deviceLabel} is not satisfiedcom.vmware.vc.vm.VmAdapterResvNotSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is not satisfiedEventExVirtual NIC reservation is satisfiedinfoReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is satisfiedReservation of Virtual NIC {deviceLabel} is satisfiedcom.vmware.vc.vm.VmAdapterResvSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is satisfiedExtendedEventVM marked as templateinfoVM {vm.name} marked as template on {host.name}VM {vm.name} marked as template on {host.name}VM {vm.name} marked as templateMarked as template on {host.name}com.vmware.vc.vm.VmConvertedToTemplateEvent|VM {vm.name} marked as template on {host.name} in {datacenter.name}ExtendedEventPromoted disks of virtual machine successfullyinfoPromoted disks of virtual machine {vm.name} in {computeResource.name}Promoted disks of virtual machine {vm.name} on host {host.name}Promoted disks of virtual machine {vm.name}Promoted disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotedEvent|Promoted disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventPromoting disks of virtual machineinfoPromoting disks of virtual machine {vm.name} in {computeResource.name}Promoting disks of virtual machine {vm.name} on host {host.name}Promoting disks of virtual machine {vm.name}Promoting disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotingEvent|Promoting disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}EventExHot migrating virtual machine with encryptioninfoHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating from {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptioncom.vmware.vc.vm.VmHotMigratingWithEncryptionEvent|Hot migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost}, {destDatastore} in {destDatacenter} with encryptionEventExcom.vmware.vc.vm.VmMigratingWithEncryptionEvent|ExtendedEventFailed to promote disks of virtual machineinfoFailed to promote disks of virtual machine {vm.name} in {computeResource.name}Failed to promote disks of virtual machine {vm.name} on host {host.name}Failed to promote disks of virtual machine {vm.name}Failed to promote disks of virtual machine {vm.name}com.vmware.vc.vm.VmPromoteDisksFailedEvent|Failed to promote disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventReconfigure VM failed 
for {VM} on shared diskwarningReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskcom.vmware.vc.vm.VmReconfigureFailedonSharedDiskEvent|Reconfigure VM failed for {VM} on shared diskExtendedEventVirtual machine register failederrorVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on this host failedVirtual machine registration failedcom.vmware.vc.vm.VmRegisterFailedEvent|Virtual machine {vm.name} registration on {host.name} in datacenter {datacenter.name} failedEventExFailed to revert the virtual machine state to a snapshoterrorFailed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} on host {host.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine to snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateFailedToRevertToSnapshot|Failed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}EventExThe virtual machine state has been reverted to a snapshotinfoThe execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} on host {host.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateRevertedToSnapshot|The execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}EventExFault Tolerance virtual machine syncing to secondary with encryptioninfoFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM syncing to secondary on {dstHost} with encryptioncom.vmware.vc.vm.VmSyncingWithEncryptionEvent|Fault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionExtendedEventVirtual machine termination requestedinfoVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine termination requestedcom.vmware.vc.vm.VmTerminateEvent|Virtual machine {vm.name} termination requestedExtendedEventVirtual machine termination failederrorVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine termination failedcom.vmware.vc.vm.VmTerminateFailedEvent|Virtual 
machine {vm.name} termination failedEventExThe disk device is encrypted with mixed keys.warningThe disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.com.vmware.vc.vm.crypto.DiskchainUsingMixedKeys|The disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.EventExCryptographic operation failed due to insufficient disk space on datastoreerrorCryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.com.vmware.vc.vm.crypto.NoDiskSpace|Cryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.EventExcom.vmware.vc.vm.crypto.RekeyFail|ExtendedEventApplication Monitoring Is Not SupportedwarningApplication monitoring is not supported on {host.name} in cluster {computeResource.name}Application monitoring is not supported on {host.name}Application monitoring is not supportedcom.vmware.vc.vmam.AppMonitoringNotSupported|Application monitoring is not supported on {host.name} in cluster {computeResource.name} in {datacenter.name}EventExvSphere HA detected application heartbeat status changewarningvSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent|vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent"> <description> Application monitoring state changes indicate a change in the health of the application being monitored or in the application-monitoring process. A transition from gray to green occurs when application heartbeat is being enabled from within the guest. A transition to red occurs after vSphere HA didn't receive any heartbeats within 30 seconds. A transition from red to green occurs if heartbeats begin again before vSphere HA can react. A transition to gray occurs after application heartbeating is disabled from within the guest. </description> <cause> <description> Either the user initiated action from inside the guest or vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> If the state transitions to red, investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. Frequent state transitions to or from gray may indicate a problem with the application-monitoring agent. If they occur, investigate whether the enabling/disabling of monitoring is expected. 
</action> </cause> </EventLongDescription> EventExvSphere HA detected application state changewarningvSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthStateChangedEvent|vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthStateChangedEvent"> <description> Application state changes indicate that an in-guest application has posted one of the two allowed values - appStateOk or appStateNeedReset. The former indicates that the monitored application is fine, the latter causes an immediate reset if Application Monitoring is enabled for this virtual machine. </description> <cause> <description> This is an in-guest initated action. </description> <action> If vSphere HA and Application Monitoring are enabled for this virtual machine, it is reset if the state is appStateNeedReset. If the virtual machine is being migrated using vMotion the reset will be delayed until the virtual machine has reached its destination. Also, the reset will be delayed until the datastore connectivity issues are resolved. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected application heartbeat failurewarningvSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected application heartbeat failure for {vm.name} on {host.name}vSphere HA detected application heartbeat failure for {vm.name}vSphere HA detected application heartbeat failure for this virtual machinecom.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent|vSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent"> <description> vSphere HA has detected a heartbeat failure from the application-monitoring agent inside the guest. If application monitoring is enabled in vSphere the virtual machine will be reset. </description> <cause> <description> vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> Investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. </action> </cause> </EventLongDescription> EventExvCenter server replication status has changed.infocom.vmware.vc.vmdir.ReplicationStatusChangeEvent|vCenter Server Replication Status : {replicationStatus} . 
{message}EventExvCenter server replication state has changedinfocom.vmware.vc.vmdir.StateChangeEvent|vCenter Server Replication State changed to '{newState}' from '{oldState}' cause: {reason}EventExvSAN datastore {datastoreName} does not have capacityerrorvSAN datastore {datastoreName} in cluster {computeResource.name} does not have capacityvSAN datastore {datastoreName} does not have capacitycom.vmware.vc.vsan.DatastoreNoCapacityEvent|vSAN datastore {datastoreName} in cluster {computeResource.name} in datacenter {datacenter.name} does not have capacity <EventLongDescription id="com.vmware.vc.vsan.DatastoreNoCapacityEvent"> <description> vSAN datastore does not have capacity. </description> <cause> <description> This might be because no disk is configured for vSAN, local disks configured for vSAN service become inaccessible or flash disks configured for vSAN service become inaccessible. </description> <action> Check if vSAN storage configuration is correct and if the local disks and flash disks configured for vSAN service are accessible. </action> </cause> </EventLongDescription> EventExHost cannot communicate with one or more other nodes in the vSAN enabled clustererrorHost {host.name} in cluster {computeResource.name} cannot communicate with all other nodes in the vSAN enabled clusterHost {host.name} cannot communicate with all other nodes in the vSAN enabled clusterHost cannot communicate with one or more other nodes in the vSAN enabled clustercom.vmware.vc.vsan.HostCommunicationErrorEvent|Host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} cannot communicate with all other nodes in the vSAN enabled cluster <EventLongDescription id="com.vmware.vc.vsan.HostCommunicationErrorEvent"> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. </description> <cause> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. This might be caused by network partition or misconfiguration. Each host needs at least one vmnic with vSAN enabled. Those vmnics need to be on the same physical network. The host should have the vSAN service enabled. </description> <action> Check the host for vSAN service configuration, vSAN network configuration and network connection. </action> </cause> </EventLongDescription> ExtendedEventHost with vSAN service enabled is not in the vCenter clustererror{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name}{host.name} with vSAN service enabled is not in the vCenter clusterHost with vSAN service enabled is not in the vCenter clustercom.vmware.vc.vsan.HostNotInClusterEvent|{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.HostNotInClusterEvent"> <description> Host with the vSAN service enabled is not in the vCenter cluster. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN service is not enabled with the recommended interface. </description> <action> Add the host into the cluster or disable vSAN on the host. </action> </cause> </EventLongDescription> ExtendedEventHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. 
Please check recent tasks for more detailserror{host.name} is in a vSAN cluster {computeResource.name} but does not have vSAN service enabled{host.name} is in a vSAN cluster but does not have vSAN service enabledHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. Please check recent tasks for more detailscom.vmware.vc.vsan.HostNotInVsanClusterEvent|{host.name} is in a vSAN enabled cluster {computeResource.name} in datacenter {datacenter.name} but does not have vSAN service enabled <EventLongDescription id="com.vmware.vc.vsan.HostNotInVsanClusterEvent"> <description> Host is in a vSAN enabled cluster but does not have vSAN service enabled. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN is not enabled with the recommended interface or the vSAN configuration is not set up appropriately. </description> <action> Re-enable vSAN or check the vSAN configuration. </action> </cause> </EventLongDescription> EventExvSAN host vendor provider registration has failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent"> <description> Cannot deregister host vendor provider in Storage management service </description> <cause> <description>Host vendor provider deregistration failed</description> <action>Check if Storage management service is running</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider has been successfully unregisteredinfovSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredcom.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully unregistered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent"> <description> Deregistered host vendor provider from Storage management service </description> </EventLongDescription> EventExvSAN host vendor provider registration failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. 
<EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent"> <description> Cannot register host vendor provider in Storage management service </description> <cause> <description>Host vendor provider registration failed</description> <action>Check if Storage management service is running</action> <action>Check if the vendor provider on host is running</action> <action>Check if there are network connectivity issues between host and VC</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider registration succeededinfovSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredcom.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully registered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent"> <description> Registered host vendor provider in Storage management service </description> </EventLongDescription> ExtendedEventvSAN network is not configurederrorvSAN network is not configured on {host.name} in cluster {computeResource.name}vSAN network is not configured on {host.name}vSAN network is not configuredcom.vmware.vc.vsan.NetworkMisConfiguredEvent|vSAN network is not configured on {host.name}, in cluster {computeResource.name}, and in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.NetworkMisConfiguredEvent"> <description> vSAN network is not configured. </description> <cause> <description> vSAN network is not set up appropriately. vSAN datastore will not be formed as expected. </description> <action> Create at least one vmnic with vSAN enabled on the host. </action> </cause> </EventLongDescription> EventExFound another host participating in the vSAN service which is not a member of this host's vCenter clustererrorFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter cluster {computeResource.name}Found host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clusterFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clustercom.vmware.vc.vsan.RogueHostFoundEvent|Found host(s) {hostString} participating in the vSAN service in cluster {computeResource.name} in datacenter {datacenter.name} is not a member of this host's vCenter cluster <EventLongDescription id="com.vmware.vc.vsan.RogueHostFoundEvent"> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. </description> <cause> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. This might be caused by misconfiguration. </description> <action> Add the rogue host into the cluster or disable vSAN on the rogue host. </action> </cause> </EventLongDescription> EventExFailed to turn off the disk locator LEDerrorFailed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOffFailedEvent|Failed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}EventExFailed to turn on the disk locator LEDerrorFailed to turn on the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOnFailedEvent|Failed to turn on the locator LED of disk {disk.path}. 
Reason : {fault.msg}EventExvSAN cluster needs disk format upgradewarningvSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationvSAN cluster has one or more hosts for which disk format upgrade is recommended: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationcom.vmware.vc.vsan.VsanHostNeedsUpgradeEvent|vSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationEventExUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}errorUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}com.vmware.vc.vtpm.FailedProcessingVTpmCertsEvent|Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}ExtendedEventA compute policy has been createdinfocom.vmware.vcenter.compute.policies.createEvent|Compute policy {policyName} has been createdExtendedEventA compute policy has been deletedinfocom.vmware.vcenter.compute.policies.deleteEvent|Compute policy {policyName} has been deletedEventExDatabase replication state changed: sync, async or no replicationinfocom.vmware.vcha.DB.replication.state.changed|Database replication mode changed to {newState}EventExThe management interface (NIC0) IP address you specified as for the Passive node is different than the original IP address used to configure vCenter HA. You must use the same IP address.errorcom.vmware.vcha.cluster.differentFailoverIp|The management interface (NIC0) IP address you specified as {given} for the Passive node is different than the original IP address {original} used to configure vCenter HA. You must use the same IP address.EventExvCenter HA cluster mode changedinfocom.vmware.vcha.cluster.mode.changed|vCenter HA cluster mode changed to {clusterMode}ExtendedEventUnable to enable mode.errorcom.vmware.vcha.cluster.modeEnableFailed|Unable to enable mode.EventExThe hostname for a node does not map to the vCenter Server PNID.errorcom.vmware.vcha.cluster.pnidHostnameMismatch|The hostname for {nodeIp} does not map to the vCenter Server PNID. 
Review the hostname you specified during the VM clone customization step.ExtendedEventVerify if the Passive and the Witness nodes are up and reachable.errorcom.vmware.vcha.cluster.quorumNotCloned|Verify if the Passive and the Witness nodes are up and reachable.EventExUnable to SSH to a node.errorcom.vmware.vcha.cluster.sshConnectFailed|Unable to SSH to {nodeIp}.ExtendedEventvCenter HA cluster state is currently degradedwarningcom.vmware.vcha.cluster.state.degraded|vCenter HA cluster state is currently degradedExtendedEventvCenter HA cluster is destroyedinfocom.vmware.vcha.cluster.state.destroyed|vCenter HA cluster is destroyedExtendedEventvCenter HA cluster state is currently healthyinfocom.vmware.vcha.cluster.state.healthy|vCenter HA cluster state is currently healthyExtendedEventvCenter HA cluster state is currently isolatederrorcom.vmware.vcha.cluster.state.isolated|vCenter HA cluster state is currently isolatedExtendedEventUnable to get vpxd hostname.errorcom.vmware.vcha.cluster.vcFqdnUnavailable|Unable to get vpxd hostname.ExtendedEventFailover cannot proceed when cluster is in disabled modewarningcom.vmware.vcha.failover.failed.disabled.mode|Failover cannot proceed when cluster is in disabled modeExtendedEventFailover cannot proceed when cluster does not have all three nodes connectedwarningcom.vmware.vcha.failover.failed.node.lost|Failover cannot proceed when cluster does not have all three nodes connectedExtendedEventFailover cannot proceed when Passive node is not ready to takeoverwarningcom.vmware.vcha.failover.failed.passive.not.ready|Failover cannot proceed when vPostgres on Passive node is not ready to takeoverExtendedEventFailover did not succeed. Failed to flush the data to the Passive nodewarningcom.vmware.vcha.failover.flush.failed.degraded|Failover did not succeed. 
Failed to flush the data to the Passive nodeExtendedEventFailover failure is acknowledgedinfocom.vmware.vcha.failover.flush.failed.healthy|Failover failure is acknowledgedExtendedEventFailover status is unknowninfocom.vmware.vcha.failover.flush.failed.unknown|Failover status is unknownExtendedEventFailover succeededinfocom.vmware.vcha.failover.succeeded|Failover succeededEventExAppliance File replication state changedinfocom.vmware.vcha.file.replication.state.changed|Appliance {fileProviderType} is {state}EventExThis node was forcefully converted to the Active nodeinfocom.vmware.vcha.force.reset.active|Node {nodename} was forcefully converted to the Active nodeEventExOne node joined back to the clusterinfocom.vmware.vcha.node.joined|Node {nodeName} joined back to the clusterEventExOne node left the clusterwarningcom.vmware.vcha.node.left|Node {nodeName} left the clusterExtendedEventPSC HA state is currently degradedinfocom.vmware.vcha.psc.ha.health.degraded|PSC HA state is currently degradedExtendedEventPSC HA state is currently healthyinfocom.vmware.vcha.psc.ha.health.healthy|PSC HA state is currently healthyExtendedEventPSC HA state is not being monitoredinfocom.vmware.vcha.psc.ha.health.unknown|PSC HA is not monitored after vCenter HA cluster is destroyedExtendedEventVMware Directory Service health is currently degradedwarningcom.vmware.vcha.vmdir.health.degraded|VMware Directory Service health is currently degradedExtendedEventVMware Directory Service is currently healthyinfocom.vmware.vcha.vmdir.health.healthy|VMware Directory Service is currently healthyExtendedEventVMware Directory Service health is not being monitoredinfocom.vmware.vcha.vmdir.health.unknown|VMware Directory Service health is not being monitoredExtendedEventvSphere Cluster Services mode is system managed on cluster.infocom.vmware.vcls.cluster.DeploymentModeSystemManagedEvent|vSphere Cluster Services mode is system managed on cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.infocom.vmware.vcls.cluster.DrsDisabledHaDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-enabled cluster.errorcom.vmware.vcls.cluster.DrsEnabledDeployModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-enabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.DrsEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.ExtendedEventvSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.warningcom.vmware.vcls.cluster.HaEnabledDrsDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.HaEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. 
HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.ExtendedEventVSAN VASA provider became available.infocom.vmware.vcls.cluster.VsanProviderAvailableEvent|VSAN VASA provider became available.ExtendedEventTimed out waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.VsanProviderTimedoutEvent|Timed out waiting for VSAN VASA provider availability.EventExA Data Processing Unit is down.infoA Data Processing Unit is down.com.vmware.vim.dpu.down|The Data Processing Unit with id '{dpuId}' is down.EventExA Data Processing Unit has been removed from the system.infoA Data Processing Unit has been removed from the system.com.vmware.vim.dpu.removed|The Data Processing Unit with id '{dpuId}' has been removed from the system.EventExThe management state for a Data Processing Unit has changed.infoThe management state for a Data Processing Unit has changed.com.vmware.vim.dpu.state.changed|The management state for the Data Processing Unit with id '{dpuId}' has changed to '{state}'.EventExThe dpu failover ended on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.com.vmware.vim.dpuFailover.end|DPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.EventExThe dpu failover started on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.com.vmware.vim.dpuFailover.start|DPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.ExtendedEventInvalid UTF-8 string encountered.warningInvalid UTF-8 string encountered.com.vmware.vim.utf8filter.badvalue|Invalid UTF-8 string encountered.ExtendedEventSome of the disks of the virtual machine failed to load. The information present for them in the virtual machine configuration may be incompletewarningSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} failed to load. The information present for them in the virtual machine configuration may be incompletecom.vmware.vim.vm.DisksNotLoaded|Some of the disks of the virtual machine {vm.name} on host {host.name} failed to load. 
The information present for them in the virtual machine configuration may be incompleteExtendedEventSnapshot operations are not allowed due to some of the snapshot related objects failed to load.warningSnapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.com.vmware.vim.vm.SnapshotNotAllowed|Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.ExtendedEventVirtual machine reboot converted to power off because the rebootPowerOff option is enabledinfoReboot converted to power off on virtual machine {vm.name}.Reboot converted to power off.com.vmware.vim.vm.reboot.powerOff|Reboot converted to power off on virtual machine {vm.name} on {host.name} because the rebootPowerOff option is enabled.EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' bound to provider '{providerName}'EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vm|vService dependency '{dependencyName}' on '{vm.name}' bound to provider '{providerName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vApp|Created vService dependency '{dependencyName}' with type '{dependencyType}' on vApp '{targetName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vm|Created vService dependency '{dependencyName}' with type '{dependencyType}' on '{vm.name}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vApp|Destroyed vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vm|Destroyed vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vApp|Reconfigured vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vm|Reconfigured vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' unbound from provider '{providerName}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vm|vService dependency '{dependencyName}' on '{vm.name}' unbound from provider '{providerName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vApp|Updated vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vm|Updated vService dependency '{dependencyName}' on '{vm.name}'EventExvService provider registeredinfocom.vmware.vim.vsm.provider.register|vService provider '{providerName}' with type '{providerType}' registered for extension '{extensionKey}'EventExvService provider unregisteredinfocom.vmware.vim.vsm.provider.unregister|vService provider '{providerName}' with type '{providerType}' unregistered for extension '{extensionKey}'EventExvService provider 
updatedinfocom.vmware.vim.vsm.provider.update|Updating vService provider '{providerName}' registered for extension '{extensionKey}'EventExDeleting stale vdisks generated by FCD migration failed.errorcom.vmware.vslm.DeleteStaleDiskFailureEvent|Deleting stale vdisk {diskPath} and related files generated as part of FCD migration failed. Try to delete them manually.EventExRegistering vdisk as FCD at source failed during rollback of unsuccessful migration.errorcom.vmware.vslm.RegisterDiskFailed|Registering {fcdPath} with name {fcdName} as FCD at source failed during rollback of unsuccessful migration. Try to register it manually using RegisterDisk API.EventExUnregistering of vdisk at destination failed during rollback of unsuccessful migration.errorcom.vmware.vslm.UnRegisterDiskFailed|Unregistering of FCD {fcdId} failed at destination during rollback of unsuccessful migration. Reconcile of datastore {datastore} should fix inconsistencies if any.EventExConnectivity check completedinfocom.vmware.vsphere.client.security.ConnectivityCheckEvent|Connectivity check completed. Operation: {Operation}. Subscription status: {SubscriptionCheckResult}. Connectivity status: {ConnectivityCheckResult}. Access type: {AccessType}. User: {Username}ExtendedEventDatastore is accessible to all hosts under the cluster.infocom.vmware.wcp.Datastore.accessible|Datastore is accessible to all hosts under the clusterExtendedEventDatastore not accessible to all hosts under the cluster.warningcom.vmware.wcp.Datastore.inaccessible|Datastore not accessible to all hosts under the cluster.EventExRemote access for an ESXi local user account has been locked temporarilly due to multiple failed login attempts.warningesx.audit.account.locked|Remote access for ESXi local user account '{1}' has been locked for {2} seconds after {3} failed login attempts.EventExMultiple remote login failures detected for an ESXi local user account.warningesx.audit.account.loginfailures|Multiple remote login failures detected for ESXi local user account '{1}'.ExtendedEventRestoring factory defaults through DCUI.warningesx.audit.dcui.defaults.factoryrestore|The host has been restored to default factory settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventThe DCUI has been disabled.infoesx.audit.dcui.disabled|The DCUI has been disabled.ExtendedEventThe DCUI has been enabled.infoesx.audit.dcui.enabled|The DCUI has been enabled.ExtendedEventRebooting host through DCUI.warningesx.audit.dcui.host.reboot|The host is being rebooted through the Direct Console User Interface (DCUI).ExtendedEventShutting down host through DCUI.warningesx.audit.dcui.host.shutdown|The host is being shut down through the Direct Console User Interface (DCUI).ExtendedEventRestarting host agents through DCUI.infoesx.audit.dcui.hostagents.restart|The management agents on the host are being restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExLogin authentication on DCUI failederroresx.audit.dcui.login.failed|Authentication of user {1} has failed. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExDCUI login password changed.infoesx.audit.dcui.login.passwd.changed|Login password for user {1} has been changed. 
Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventFactory network settings restored through DCUI.warningesx.audit.dcui.network.factoryrestore|The host has been restored to factory network settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExRestarting network through DCUI.infoesx.audit.dcui.network.restart|A management interface {1} has been restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.esx.audit.entropy.available.low|Host is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.esx.audit.entropy.external.source.disconnected|Host is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.EventExPowering off host through esxcliwarningesx.audit.esxcli.host.poweroff.reason|The host is being powered off through esxcli. Reason for powering off: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.reboot.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.restart.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExHost acceptance level changedinfoesx.audit.esximage.hostacceptance.changed|Host acceptance level changed from {1} to {2}ExtendedEventUEFI Secure Boot enabled: Cannot skip signature checks.warningesx.audit.esximage.install.nobypasssigcheck|UEFI Secure Boot enabled: Cannot skip signature checks. Installing unsigned VIBs will prevent the system from booting. So the vib signature check will be enforced.ExtendedEventAttempting to install an image profile bypassing signing and acceptance level verification.warningesx.audit.esximage.install.nosigcheck|Attempting to install an image profile bypassing signing and acceptance level verification. This may pose a large security risk.ExtendedEventAttempting to install an image profile with validation disabled.warningesx.audit.esximage.install.novalidation|Attempting to install an image profile with validation disabled. This may result in an image with unsatisfied dependencies, file or package conflicts, and potential security violations.EventExSECURITY ALERT: Installing image profile.warningesx.audit.esximage.install.securityalert|SECURITY ALERT: Installing image profile '{1}' with {2}.EventExSuccessfully installed image profile.infoesx.audit.esximage.profile.install.successful|Successfully installed image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). 
Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully updated host to new image profile.infoesx.audit.esximage.profile.update.successful|Successfully updated host to image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully changed software on host.infoesx.audit.esximage.software.apply.succeeded|Successfully installed {1} component(s) and removed {2} component(s) on host. To see more details about the transaction, run 'esxcli software profile get'.EventExSuccessfully installed VIBs.infoesx.audit.esximage.vib.install.successful|Successfully installed {1} VIB(s), removed {2} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully removed VIBsinfoesx.audit.esximage.vib.remove.successful|Successfully removed {1} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExDPU trust validation failederroresx.audit.esxtokend.dputrust.failed|DPU: {1} trust validation failedEventExDPU was removedwarningesx.audit.esxtokend.dputrust.removed|DPU:{1} was removed.EventExDPU trust validation succeededinfoesx.audit.esxtokend.dputrust.succeeded|DPU: {1} trust validation succeeded.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Warning tripped.EventExNVDIMM: Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.lifetime.warning|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Warning tripped.EventExNVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.spareblocks|NVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.temperature|NVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).warningesx.audit.hardware.nvd.health.life.pctused|NVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).EventExNVDIMM Count of DRAM correctable ECC errors above threshold.infoesx.audit.hardware.nvd.health.module.ce|NVDIMM (handle {1}, idString {2}): Count of DRAM correctable ECC errors above threshold.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Warning tripped.EventExNVDIMM: Module Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.lifetime.warning|NVDIMM (handle {1}, idString {2}): Module Lifetime Warning tripped.EventExNVDIMM: Module Temperature Warning 
tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.temperature.warning|NVDIMM (handle {1}, idString {2}): Module Temperature Warning tripped.EventExNVDIMM: Maintenance needed.warningesx.audit.hardware.nvd.health.vmw.statusflags.maintNeeded|NVDIMM (handle {1}, idString {2}): Maintenance needed.EventExA physical disk has been inserted.infoA physical disk has been insertedesx.audit.hcm.event.disk.insertion|A physical disk has been inserted ({1}).EventExA physical disk has been removed.infoA physical disk has been removed.esx.audit.hcm.event.disk.removal|A physical disk has been removed ({1}).ExtendedEventHost has booted.infoesx.audit.host.boot|Host has booted.EventExHost experienced a crashinfoesx.audit.host.crash.reason|The crash at {1} occurred due to: {2}. More details will be available in the generated vmkernel-zdump.EventExThe host experienced a crashinfoesx.audit.host.crash.reason.available|The host experienced a crash. Reason: {1}.ExtendedEventHost experienced a crashinfoesx.audit.host.crash.reason.unavailable|Host experienced a crash. More details will be available in the generated vmkernel-zdump.EventExThe number of virtual machines registered on the host exceeded limit.warningThe number of virtual machines registered on host {host.name} in cluster {computeResource.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered on host {host.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered exceeded limit: {current} registered, {limit} is the maximum supported.esx.audit.host.maxRegisteredVMsExceeded|The number of virtual machines registered on host {host.name} in cluster {computeResource.name} in {datacenter.name} exceeded limit: {current} registered, {limit} is the maximum supported.EventExThe host has been powered offinfoesx.audit.host.poweroff.reason.available|The host has been powered off. Reason for powering off: {1}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.management|The power off at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.subsystem|The power off at {1} was requested by {2} due to: {3}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.timestamp|The power off at {1} was requested due to: {2}.ExtendedEventHost had been powered offinfoesx.audit.host.poweroff.reason.unavailable|Host had been powered off. The poweroff was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.user|The power off at {1} was requested by user/entity {2} due to: {3}.EventExThe host experienced Quick Bootinfoesx.audit.host.quickboot.reason.available|The host experienced Quick Boot. 
Reason for reboot: {1}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.management|The Quick Boot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.subsystem|The Quick Boot at {1} was requested by {2} due to: {3}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.timestamp|The Quick Boot at {1} was requested due to: {2}.ExtendedEventHost experienced Quick Bootinfoesx.audit.host.quickboot.reason.unavailable|Host experienced Quick Boot. The Quick Boot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.user|The Quick Boot at {1} was requested by user/entity {2} due to: {3}.EventExThe host has been rebootedinfoesx.audit.host.reboot.reason.available|The host has been rebooted. Reason for reboot: {1}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.management|The reboot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.subsystem|The reboot at {1} was requested by {2} due to: {3}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.timestamp|The reboot at {1} was requested due to: {2}.ExtendedEventHost had been rebootedinfoesx.audit.host.reboot.reason.unavailable|Host had been rebooted. The reboot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.user|The reboot at {1} was requested by user/entity {2} due to: {3}.ExtendedEventHost is rebooting.infoesx.audit.host.stop.reboot|Host is rebooting.ExtendedEventHost is shutting down.infoesx.audit.host.stop.shutdown|Host is shutting down.EventExPowering off host through hostdwarningesx.audit.hostd.host.poweroff.reason|The host is being powered off through hostd. Reason for powering off: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.reboot.reason|The host is being rebooted through hostd. Reason for reboot: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.restart.reason|The host is being rebooted through hostd. 
Reason for reboot: {1}, User: {2}.ExtendedEventAdministrator access to the host has been enabled.infoesx.audit.lockdownmode.disabled|Administrator access to the host has been enabled.ExtendedEventAdministrator access to the host has been disabled.infoesx.audit.lockdownmode.enabled|Administrator access to the host has been disabled.ExtendedEventList of lockdown exception users has been changed.infoesx.audit.lockdownmode.exceptions.changed|List of lockdown exception users has been changed.ExtendedEventThe host has canceled entering maintenance mode.infoesx.audit.maintenancemode.canceled|The host has canceled entering maintenance mode.ExtendedEventThe host has entered maintenance mode.infoesx.audit.maintenancemode.entered|The host has entered maintenance mode.ExtendedEventThe host has begun entering maintenance mode.infoesx.audit.maintenancemode.entering|The host has begun entering maintenance mode.ExtendedEventThe host has exited maintenance mode.infoesx.audit.maintenancemode.exited|The host has exited maintenance mode.ExtendedEventThe host has failed entering maintenance mode.erroresx.audit.maintenancemode.failed|The host has failed entering maintenance mode.EventExFirewall configuration has changed.infoesx.audit.net.firewall.config.changed|Firewall configuration has changed. Operation '{1}' for rule set {2} succeeded.ExtendedEventFirewall has been disabled.warningesx.audit.net.firewall.disabled|Firewall has been disabled.EventExFirewall has been enabled for port.infoesx.audit.net.firewall.enabled|Firewall has been enabled for port {1}.EventExPort is now protected by Firewall.infoesx.audit.net.firewall.port.hooked|Port {1} is now protected by Firewall.EventExPort is no longer protected with Firewall.warningesx.audit.net.firewall.port.removed|Port {1} is no longer protected with Firewall.EventExLACP disabledinfoesx.audit.net.lacp.disable|LACP for VDS {1} is disabled.EventExLACP eabledinfoesx.audit.net.lacp.enable|LACP for VDS {1} is enabled.EventExuplink is connectedinfoesx.audit.net.lacp.uplink.connected|LACP info: uplink {1} on VDS {2} got connected.EventExThe host has canceled entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.canceled|The host has canceled entering '{1}'.EventExThe host has entered a partial maintenance mode.infoesx.audit.partialmaintenancemode.entered|The host has entered '{1}'.EventExThe host has begun entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.entering|The host has begun entering '{1}'.EventExThe host has exited a partial maintenance mode.infoesx.audit.partialmaintenancemode.exited|The host has exited '{1}'.EventExThe host has failed entering a partial maintenance mode.erroresx.audit.partialmaintenancemode.failed|The host has failed entering '{1}'.ExtendedEventThe ESXi command line shell has been disabled.infoesx.audit.shell.disabled|The ESXi command line shell has been disabled.ExtendedEventThe ESXi command line shell has been enabled.infoesx.audit.shell.enabled|The ESXi command line shell has been enabled.ExtendedEventSSH access has been disabled.infoesx.audit.ssh.disabled|SSH access has been disabled.ExtendedEventSSH access has been enabled.infoesx.audit.ssh.enabled|SSH access has been enabled.EventExSSH session was closed.infoesx.audit.ssh.session.closed|SSH session was closed for '{1}@{2}'.EventExSSH login has failed.infoesx.audit.ssh.session.failed|SSH login has failed for '{1}@{2}'.EventExSSH session was opened.infoesx.audit.ssh.session.opened|SSH session was opened for '{1}@{2}'.EventExPowering off 
hostwarningesx.audit.subsystem.host.poweroff.reason|The host is being powered off. Reason for powering off: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.reboot.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.restart.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.ExtendedEventSupershell session has been started by a user.warningSupershell session has been started by a user.esx.audit.supershell.access|Supershell session has been started by a user.EventExTest with an int argumenterroresx.audit.test.test1d|Test with {1}EventExTest with a string argumenterroresx.audit.test.test1s|Test with {1}ExtendedEventUSB configuration has changed.infoUSB configuration has changed on host {host.name} in cluster {computeResource.name}.USB configuration has changed on host {host.name}.USB configuration has changed.esx.audit.usb.config.changed|USB configuration has changed on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExEnforcement level changed for all security domains.warningesx.audit.uw.secpolicy.alldomains.level.changed|The enforcement level for all security domains has been changed to {1}. The enforcement level must always be set to enforcing.EventExEnforcement level changed for security domain.warningesx.audit.uw.secpolicy.domain.level.changed|The enforcement level for security domain {1} has been changed to {2}. The enforcement level must always be set to enforcing.ExtendedEventExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.warningesx.audit.uw.security.User.ExecInstalledOnly.disabled|ExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.Infoesx.audit.uw.security.User.ExecInstalledOnly.enabled|ExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.EventExExecution of non-installed file prevented.warningesx.audit.uw.security.execInstalledOnly.violation|Execution of unknown (non VIB installed) binary '{1}' prevented. Unknown content can cause malware attacks similar to Ransomware.EventExExecution of non-installed file detected.warningesx.audit.uw.security.execInstalledOnly.warning|Execution of unknown (non VIB installed) binary '{1}'. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventLVM device discovered.infoesx.audit.vmfs.lvm.device.discovered|One or more LVM devices have been discovered on this host.EventExRead IO performance maybe impacted for diskinfoRead IO performance maybe impacted for disk {1}: {2}Read IO performance maybe impacted for disk {1}: {2}esx.audit.vmfs.sesparse.bloomfilter.disabled|Read IO performance maybe impacted for disk {1}: {2}EventExFile system mounted.infoesx.audit.vmfs.volume.mounted|File system {1} on volume {2} has been mounted in {3} mode on this host.EventExLVM volume un-mounted.infoesx.audit.vmfs.volume.umounted|The volume {1} has been safely un-mounted. The datastore is no longer accessible on this host.EventExvSAN device is added back successfully after MEDIUM error.infovSAN device {1} is added back successfully after MEDIUM error. 
Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.devicerebuild|vSAN device {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExvSAN diskgroup is rebuilt successfully after MEDIUM error.infovSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.diskgrouprebuild|vSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExFound components with invalid metadatawarning{1} components found with invalid metadata on disk {2} {3}esx.audit.vob.vsan.lsom.foundInvalidMetadataComp|{1} components found with invalid metadata on disk {2} {3}EventExvSAN storagepool is added back successfully after MEDIUM error.infovSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.storagepoolrebuild|vSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExTest with both int and sting arguments.infoesx.audit.vobdtestcorrelator.test|Test with both string: {2} and int: {1}.ExtendedEventvSAN clustering services have been enabled.infovSAN clustering and directory services have been enabled.esx.audit.vsan.clustering.enabled|vSAN clustering and directory services have been enabled.ExtendedEventvSAN virtual NIC has been added.infovSAN virtual NIC has been added.esx.audit.vsan.net.vnic.added|vSAN virtual NIC has been added.ExtendedEventvSAN network configuration has been removed.errorvSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.esx.audit.vsan.net.vnic.deleted|vSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.EventExvSAN RDMA changed for vmknic.infovSAN RDMA changed for vmknic {1}.esx.audit.vsan.rdma.changed|vSAN RDMA changed for vmknic {1}.ExtendedEventHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445warningHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445esx.audit.weak.ssl.protocol|Weak SSL protocols found and disabled. Please refer to KB article: KB 1234567ExtendedEventA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.infoA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.esx.clear.coredump.configured|A vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.ExtendedEventAt least one coredump target has been configured. Host core dumps will be saved.infoAt least one coredump target has been configured. Host core dumps will be saved.esx.clear.coredump.configured2|At least one coredump target has been configured. Host core dumps will be saved.EventExNVDIMM Energy Source is sufficiently charged.infoesx.clear.hardware.nvd.health.module.es.charged|NVDIMM (handle {1}, idString {2}): Energy Source is sufficiently charged.EventExRestored network connectivity to portgroupsinfoesx.clear.net.connectivity.restored|Network connectivity restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExRestored Network Connectivity to DVPortsinfoesx.clear.net.dvport.connectivity.restored|Network connectivity restored on DVPorts: {1}. 
Physical NIC {2} is up.EventExRestored Network Redundancy to DVPortsinfoesx.clear.net.dvport.redundancy.restored|Uplink redundancy restored on DVPorts: {1}. Physical NIC {2} is up recently.EventExlag transition upinfoesx.clear.net.lacp.lag.transition.up|LACP info: LAG {1} on VDS {2} is up.EventExuplink transition upinfoesx.clear.net.lacp.uplink.transition.up|LACP info: uplink {1} on VDS {2} is moved into link aggregation group.EventExuplink is unblockedinfoesx.clear.net.lacp.uplink.unblocked|LACP info: uplink {1} on VDS {2} is unblocked.EventExRestored uplink redundancy to portgroupsinfoesx.clear.net.redundancy.restored|Uplink redundancy restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExLink state upinfoesx.clear.net.vmnic.linkstate.up|Physical NIC {1} linkstate is up.EventExStorage Device I/O Latency has improvedinfoesx.clear.psastor.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.psastor.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.psastor.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExScsi Device I/O Latency has improvedinfoesx.clear.scsi.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.scsi.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.scsi.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExExited the All Paths Down stateinfoesx.clear.storage.apd.exit|Device or filesystem with identifier {1} has exited the All Paths Down state.EventExRestored connectivity to storage deviceinfoesx.clear.storage.connectivity.restored|Connectivity to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored path redundancy to storage deviceinfoesx.clear.storage.redundancy.restored|Path redundancy to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored connection to NFS serverinfoesx.clear.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExNFS volume I/O Latency has improvedinfoesx.clear.vmfs.nfs.volume.io.latency.improved|NFS volume {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExvSAN device has come online.infovSAN device {1} has come online.esx.clear.vob.vsan.pdl.online|vSAN device {1} has come online.EventExTest with both int and sting arguments.infoesx.clear.vobdtestcorrelator.test|Test with both string: {1} {3} and int: {2}.ExtendedEventvSAN clustering services have now been enabled.infovSAN clustering and directory services have now been enabled.esx.clear.vsan.clustering.enabled|vSAN clustering and directory services have now been enabled.ExtendedEventvSAN now has at least one active network configuration.infovSAN now has a usable network configuration. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.network.available|vSAN now has a usable network configuration. 
Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExA previously reported vmknic now has a valid IP.infovmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.vmknic.ready|vmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExVVol container has come online.infoesx.clear.vvol.container.online|VVol container {1} has come online.EventExA 3rd party component on ESXi has reported an error.erroresx.problem.3rdParty.error|A 3rd party component, {1}, running on ESXi has reported an error. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.info|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.information|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported a warning.warningesx.problem.3rdParty.warning|A 3rd party component, {1}, running on ESXi has reported a warning related to a problem. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA corrected memory error occurrederroresx.problem.apei.bert.memory.error.corrected|A corrected memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA fatal memory error occurrederroresx.problem.apei.bert.memory.error.fatal|A fatal memory error occurred in the last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA recoverable memory error occurrederroresx.problem.apei.bert.memory.error.recoverable|A recoverable memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA corrected PCIe error occurrederroresx.problem.apei.bert.pcie.error.corrected|A corrected PCIe error occurred in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA fatal PCIe error occurrederroresx.problem.apei.bert.pcie.error.fatal|Platform encounterd a fatal PCIe error in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA recoverable PCIe error occurrederroresx.problem.apei.bert.pcie.error.recoverable|A recoverable PCIe error occurred in last boot. 
The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExAn application running on ESXi host has crashed and core file creation failed.warningesx.problem.application.core.dumpFailed|An application ({1}) running on ESXi host has crashed ({2} time(s) so far), but core dump creation failed.EventExAn application running on ESXi host has crashed and a core file was created.warningesx.problem.application.core.dumped|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). A core file might have been created at {3}.EventExAn application running on ESXi host has crashed and an encrypted core file was created.warningesx.problem.application.core.dumped.encrypted|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). An encrypted core file using keyId {3} might have been created at {4}.ExtendedEventCritical failure detected during boot, please refer to KB 93107.errorA critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.esx.problem.boot.failure.detected|A critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.ExtendedEventSystem clock no longer synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.lostsync|system clock no longer synchronized to upstream time serversExtendedEventSystem clock synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.sync|system clock synchronized to upstream time serversExtendedEventSystem clock lost synchronization to upstream time serverswarningesx.problem.clock.correction.adjtime.unsync|system clock lost synchronization to upstream time serversEventExApplication system changed clock, synchronization lostwarningesx.problem.clock.correction.changed|{1} stepped system clock to {2}.{3}, synchronization lostEventExAllowed system clock update with large time changewarningesx.problem.clock.correction.delta.allowed|Clock stepped to {1}.{2}, but delta {3} > {4} secondsEventExFailed system clock update with large time changeerroresx.problem.clock.correction.delta.failed|Clock step to {1}.{2} failed, delta {3} > {4} seconds, number of large corrections > {5}EventExAllowed system clock update with large time change, but number of future updates limitedwarningesx.problem.clock.correction.delta.warning|Clock stepped to {1}.{2}, but delta {3} > {4} seconds, {5}/{6} large correctionsEventExSystem clock stepped, lost synchronizationwarningesx.problem.clock.correction.step.unsync|system clock stepped to {1}.{2}, lost synchronizationEventExSystem clock maximum number of large corrections changedwarningesx.problem.clock.parameter.set.maxLargeCorrections|system clock max number of correction set to {1}EventExSystem clock maximum negative phase correction changedwarningesx.problem.clock.parameter.set.maxNegPhaseCorrection|system clock max negative phase correction set to {1}EventExSystem clock maximum positive phase correction changedwarningesx.problem.clock.parameter.set.maxPosPhaseCorrection|system clock max positive phase correction set to {1}EventExSystem clock count of number of large corrections changedwarningesx.problem.clock.parameter.set.numLargeCorrections|system clock number of large correction set to {1}EventExSystem clock VOB report interval 
changedwarningesx.problem.clock.parameter.set.vobReportInterval|system clock max number of correction set to {1}ExtendedEventSystem clock state has been resetwarningesx.problem.clock.state.reset|system clock state has been resetEventExThe storage capacity of the coredump targets is insufficient to capture a complete coredump.warningThe storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.esx.problem.coredump.capacity.insufficient|The storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.EventExThe free space available in default coredump copy location is insufficient to copy new coredumps.warningThe free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.esx.problem.coredump.copyspace|The free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.EventExThe given partition has insufficient amount of free space to extract the coredump.warningThe given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.esx.problem.coredump.extraction.failed.nospace|The given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.ExtendedEventNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.warningNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured|No vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.ExtendedEventNo coredump target has been configured. Host core dumps cannot be saved.warningNo coredump target has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured2|No coredump target has been configured. Host core dumps cannot be saved.ExtendedEventDRAM ECC not enabled. Please enable it in BIOS.erroresx.problem.cpu.amd.mce.dram.disabled|DRAM ECC not enabled. Please enable it in BIOS.ExtendedEventNot all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. erroresx.problem.cpu.intel.ioapic.listing.error|Not all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. ExtendedEventMCE monitoring will be disabled as an unsupported CPU was detected. Please consult the ESX HCL for information on supported hardware.erroresx.problem.cpu.mce.invalid|MCE monitoring will be disabled as an unsupported CPU was detected. 
Please consult the ESX HCL for information on supported hardware.EventExHigh number of corrected errors on a page.infoesx.problem.cpu.page.correctederrors.high|High number of corrected errors on host physical page number {1}EventExDisabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.erroresx.problem.cpu.smp.ht.invalid|Disabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.EventExFound {1} PCPUs, but only using {2} of them due to specified limit.erroresx.problem.cpu.smp.ht.numpcpus.max|Found {1} PCPUs, but only using {2} of them due to specified limit.EventExDisabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.erroresx.problem.cpu.smp.ht.partner.missing|Disabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.EventExError copying ConfigStore from backup.errorError copying ConfigStore from backup.esx.problem.cs.createstore.copy.backup.error|Error copying ConfigStore from backup {1}.ExtendedEventFailed an operation on the ConfigStore database.errorFailed an operation on the ConfigStore database.esx.problem.cs.db.operation.error|Failed an operation on the ConfigStore database.ExtendedEventFailed to setup desired configuration.errorFailed to setup desired configuration.esx.problem.cs.desired.config.error|Failed to setup desired configuration.ExtendedEventError cleaning up Datafile store.errorError cleaning up Datafile store.esx.problem.cs.dfs.cleanup.error|Error cleaning up Datafile store.ExtendedEventDataFile store cannot be restored.errorDataFile store cannot be restored.esx.problem.cs.dfs.restore.error|DataFile store cannot be restored.EventExError processing schema file.errorError processing schema file.esx.problem.cs.schema.file.error|Error processing schema file {1}.EventExInvalid metadata in schema file.errorInvalid metadata in schema file.esx.problem.cs.schema.metadata.error|Invalid metadata in schema file {1}.EventExVibId validation failed for schema file.errorVibId validation failed for schema file.esx.problem.cs.schema.validation.error|VibId validation failed for schema file {1}.EventExError in upgrading config.errorError in upgrading config.esx.problem.cs.upgrade.config.error|Error in upgrading config {1}.EventExUnable to obtain a DHCP lease.erroresx.problem.dhclient.lease.none|Unable to obtain a DHCP lease on interface {1}.EventExNo expiry time on offered DHCP lease.erroresx.problem.dhclient.lease.offered.noexpiry|No expiry time on offered DHCP lease from {1}.EventExThe maintenance mode state for some Data Processing Units may be out of sync with the host.warningThe maintenance mode state for some Data Processing Units may be out of sync with the host.esx.problem.dpu.maintenance.sync.failed|The maintenance mode state for Data Processing Units with ids '{dpus}' may be out of sync with the host.EventExSome drivers need special notice.warningDriver for device {1} is {2}. Please refer to KB article: {3}.esx.problem.driver.abnormal|Driver for device {1} is {2}. Please refer to KB article: {3}.EventExHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.esx.problem.entropy.config.error|Host is configured with external entropy source. 
Entropy daemon has become non functional because of an {1} change. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.empty|Host is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.inmemory.empty|Host is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.EventExCould not install image profile.erroresx.problem.esximage.install.error|Could not install image profile: {1}EventExHost doesn't meet image profile hardware requirements.erroresx.problem.esximage.install.invalidhardware|Host doesn't meet image profile '{1}' hardware requirements: {2}EventExCould not stage image profile.erroresx.problem.esximage.install.stage.error|Could not stage image profile '{1}': {2}ExtendedEventThe host can not support the applied EVC mode.warningesx.problem.evc.incompatible|The host can not support the applied EVC mode.EventExSkipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.device.invalid|Skipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.EventExSkipping interrupt routing entry with bad device pin: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.pin.invalid|Skipping interrupt routing entry with bad device pin: {1}. 
This is a BIOS bug.EventExFPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.clear|FPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.creditstall|FPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.devicespecific|FPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.lostcredit|FPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.oversubscription|FPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.devicespecific|FPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.timeout|FPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.unabletoroute|FPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.unknown|FPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.linkintegrity.devicespecific|FPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidCRC|FPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidtransmissionword|FPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link failure: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.linkfailure|FPIN FC link failure: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsignal|FPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsynchronization|FPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.primitivesequenceprotocolerror|FPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.uncorrectableFECerror|FPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN 
{2}.infoesx.problem.hardware.fpin.fc.linkintegrity.unknown|FPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.clear|FPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.creditstall|FPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.devicespecific|FPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.lostcredit|FPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.oversubscription|FPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.EventExIOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.erroresx.problem.hardware.ioapic.missing|IOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.ExtendedEventFailed to communicate with the BMC. IPMI functionality will be unavailable on this system.erroresx.problem.hardware.ipmi.bmc.bad|Failed to communicate with the BMC. IPMI functionality will be unavailable on this system.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Error tripped.EventExNVDIMM: Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.lifetime.error|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Error tripped.EventExNVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.erroresx.problem.hardware.nvd.health.lastshutdownstatus|NVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.EventExNVDIMM Configuration error detected.erroresx.problem.hardware.nvd.health.module.config.error|NVDIMM (handle {1}, idString {2}): Configuration error detected.EventExNVDIMM Controller failure detected.erroresx.problem.hardware.nvd.health.module.ctlr.fail|NVDIMM (handle {1}, idString {2}): Controller failure detected. Access to the device and its capabilities are lost.EventExNVDIMM Controller firmware error detected.erroresx.problem.hardware.nvd.health.module.ctlr.fw.error|NVDIMM (handle {1}, idString {2}): Controller firmware error detected.EventExNVDIMM Energy Source still charging.warningesx.problem.hardware.nvd.health.module.es.charging|NVDIMM (handle {1}, idString {2}): Energy Source still charging but does not have sufficient charge to support a backup. 
Persistency is temporarily lost for the device.EventExNVDIMM Energy Source failure detected.erroresx.problem.hardware.nvd.health.module.es.fail|NVDIMM (handle {1}, idString {2}): Energy Source failure detected. Persistency is lost for the device.EventExNVDIMM Previous ARM operation failed.warningesx.problem.hardware.nvd.health.module.ops.arm.fail|NVDIMM (handle {1}, idString {2}): Previous ARM operation failed.EventExNVDIMM Previous ERASE operation failed.warningesx.problem.hardware.nvd.health.module.ops.erase.fail|NVDIMM (handle {1}, idString {2}): Previous ERASE operation failed.EventExThe Platform flush failed. The restored data may be inconsistent.erroresx.problem.hardware.nvd.health.module.ops.flush.fail|NVDIMM (handle {1}, idString {2}): The Platform flush failed. The restored data may be inconsistent.EventExNVDIMM Last RESTORE operation failed.erroresx.problem.hardware.nvd.health.module.ops.restore.fail|NVDIMM (handle {1}, idString {2}): Last RESTORE operation failed.EventExNVDIMM Previous SAVE operation failed.erroresx.problem.hardware.nvd.health.module.ops.save.fail|NVDIMM (handle {1}, idString {2}): Previous SAVE operation failed.EventExNVDIMM Count of DRAM uncorrectable ECC errors above threshold.warningesx.problem.hardware.nvd.health.module.uce|NVDIMM (handle {1}, idString {2}): Count of DRAM uncorrectable ECC errors above threshold.EventExNVDIMM Vendor specific error.erroresx.problem.hardware.nvd.health.module.vendor.error|NVDIMM (handle {1}, idString {2}): Vendor specific error.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Error tripped.EventExNVDIMM: Module Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.lifetime.error|NVDIMM (handle {1}, idString {2}): Module Lifetime Error tripped.EventExNVDIMM: Module Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.temperature.error|NVDIMM (handle {1}, idString {2}): Module Temperature Error tripped.EventExNVDIMM: All data may be lost in the event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInPowerLoss|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of power loss.EventExNVDIMM: All data may be lost in the event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInShutdown|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of shutdown.EventExNVDIMM: Subsequent reads may fail or return invalid data and subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossNow|NVDIMM (handle {1}, idString {2}): Subsequent reads may fail or return invalid data and subsequent writes may not persist.EventExNVDIMM: Performance degraded.erroresx.problem.hardware.nvd.health.vmw.statusflags.perfDegraded|NVDIMM (handle {1}, idString {2}): Performance degraded.EventExNVDIMM: Write persistency loss may happen in event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInPowerLoss|NVDIMM (handle {1}, idString {2}): Write persistency loss may happen in event of power loss.EventExNVDIMM: Write persistency loss may happen in event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInShutdown|NVDIMM (handle 
{1}, idString {2}): Write persistency loss may happen in event of shutdown.EventExNVDIMM: Subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossNow|NVDIMM (handle {1}, idString {2}): Subsequent writes may not persist.ExtendedEventTPM 2.0 device detected but a connection cannot be established.warningesx.problem.hardware.tpm2.connection|TPM 2.0 device detected but a connection cannot be established.ExtendedEventTPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.nosha256|TPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.ExtendedEventTPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.notis|TPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.ExtendedEventUnable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.warningesx.problem.hardware.tpm2.ownership|Unable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.ExtendedEventesx.problem.hardware.tpm2.provisioning|EventExA physical disk has a predictive failure.warningA physical disk has a predictive failure.esx.problem.hcm.event.disk.predictive.failure|A physical disk has a predictive failure ({1}).ExtendedEventAn unread host kernel core dump has been found.warningesx.problem.host.coredump|An unread host kernel core dump has been found.EventExHostd crashed and a core file was created.warningesx.problem.hostd.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. This might have caused connections to the host to be dropped.EventExHostd crashed and an encrypted core file was created.warningesx.problem.hostd.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventThis host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.infoesx.problem.hyperthreading.unmitigated|This host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.ExtendedEventSome of the config entries in the VM inventory were skipped because they are invalid.warningesx.problem.inventory.invalidConfigEntries|Some of the config entries in the VM inventory were skipped because they are invalid.EventExAn iofilter installed on the host has stopped functioning.errorIOFilter {1} has stopped functioning due to an unrecoverable error. Reason: {2}esx.problem.iofilter.disabled|IOFilter {1} has stopped functioning due to an unrecoverable error. 
Reason: {2}EventExStorage I/O Control version mismatchinfoesx.problem.iorm.badversion|Host {1} cannot participate in Storage I/O Control(SIOC) on datastore {2} because the version number {3} of the SIOC agent on this host is incompatible with number {4} of its counterparts on other hosts connected to this datastore.EventExUnmanaged workload detected on SIOC-enabled datastoreinfoesx.problem.iorm.nonviworkload|An unmanaged I/O workload is detected on a SIOC-enabled datastore: {1}.EventExThe metadata store has degraded on one of the hosts in the cluster.errorThe metadata store has degraded on host {1}.esx.problem.metadatastore.degraded|The metadata store has degraded on host {1}.ExtendedEventThe metadata store is healthy.infoThe metadata store is healthy.esx.problem.metadatastore.healthy|The metadata store is healthy.ExtendedEventFailed to create default migration heapwarningesx.problem.migrate.vmotion.default.heap.create.failed|Failed to create default migration heap. This might be the result of severe host memory pressure or virtual address space exhaustion. Migration might still be possible, but will be unreliable in cases of extreme host memory pressure.EventExError with migration listen socketerroresx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown|The ESXi host's vMotion network server encountered an error while monitoring incoming network connections. Shutting down listener socket. vMotion might not be possible with this host until vMotion is manually re-enabled. Failure status: {1}EventExThe max_vfs module option has been set for at least one module.warningSetting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.esx.problem.module.maxvfs.set|Setting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.EventExLost Network Connectivityerroresx.problem.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Connectivity to DVPortserroresx.problem.net.dvport.connectivity.lost|Lost network connectivity on DVPorts: {1}. Physical NIC {2} is down.EventExNetwork Redundancy Degraded on DVPortswarningesx.problem.net.dvport.redundancy.degraded|Uplink redundancy degraded on DVPorts: {1}. Physical NIC {2} is down.EventExLost Network Redundancy on DVPortswarningesx.problem.net.dvport.redundancy.lost|Lost uplink redundancy on DVPorts: {1}. Physical NIC {2} is down.EventExNo IPv6 TSO supporterroresx.problem.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid fenceId configuration on dvPorterroresx.problem.net.fence.port.badfenceid|VMkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: invalid fenceId.EventExMaximum number of fence networks or portserroresx.problem.net.fence.resource.limited|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: maximum number of fence networks or ports have been reached.EventExSwitch fence property is not seterroresx.problem.net.fence.switch.unavailable|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: dvSwitch fence property is not set.EventExFirewall configuration operation failed. 
The changes were not applied.erroresx.problem.net.firewall.config.failed|Firewall configuration operation '{1}' failed. The changes were not applied to rule set {2}.EventExAdding port to Firewall failed.erroresx.problem.net.firewall.port.hookfailed|Adding port {1} to Firewall failed.EventExFailed to set gatewayerroresx.problem.net.gateway.set.failed|Cannot connect to the specified gateway {1}. Failed to set it.EventExNetwork memory pool thresholdwarningesx.problem.net.heap.belowthreshold|{1} free size dropped below {2} percent.EventExlag transition downwarningesx.problem.net.lacp.lag.transition.down|LACP warning: LAG {1} on VDS {2} is down.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse|LACP error: No peer response on uplink {1} for VDS {2}.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse.2|LACP error: No peer response on VDS {1}.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.incompatible|LACP error: Current teaming policy on VDS {1} is incompatible, supported is IP hash only.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.linkstatus|LACP error: Current teaming policy on VDS {1} is incompatible, supported link failover detection is link status only.EventExuplink is blockedwarningesx.problem.net.lacp.uplink.blocked|LACP warning: uplink {1} on VDS {2} is blocked.EventExuplink is disconnectedwarningesx.problem.net.lacp.uplink.disconnected|LACP warning: uplink {1} on VDS {2} got disconnected.EventExuplink duplex mode is differenterroresx.problem.net.lacp.uplink.fail.duplex|LACP error: Duplex mode across all uplink ports must be full, VDS {1} uplink {2} has different mode.EventExuplink speed is differenterroresx.problem.net.lacp.uplink.fail.speed|LACP error: Speed across all uplink ports must be same, VDS {1} uplink {2} has different speed.EventExAll uplinks must be activeerroresx.problem.net.lacp.uplink.inactive|LACP error: All uplinks on VDS {1} must be active.EventExuplink transition downwarningesx.problem.net.lacp.uplink.transition.down|LACP warning: uplink {1} on VDS {2} is moved out of link aggregation group.EventExInvalid vmknic specified in /Migrate/Vmknicwarningesx.problem.net.migrate.bindtovmk|The ESX advanced configuration option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Update the configuration option with a valid vmknic. Alternatively, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExUnsupported vMotion network latency detectedwarningesx.problem.net.migrate.unsupported.latency|ESXi has detected {1}ms round-trip vMotion network latency between host {2} and {3}. High latency vMotion networks are supported only if both ESXi hosts have been configured for vMotion latency tolerance.EventExFailed to apply for free portserroresx.problem.net.portset.port.full|Portset {1} has reached the maximum number of ports ({2}). Cannot apply for any more free ports.EventExVlan ID of the port is invaliderroresx.problem.net.portset.port.vlan.invalidid|{1} VLANID {2} is invalid. 
VLAN ID must be between 0 and 4095.EventExTry to register an unsupported portset classwarningesx.problem.net.portset.unsupported.psclass|{1} is not a VMware supported portset class, the relevant module must be unloaded.EventExVirtual NIC connection to switch failedwarningesx.problem.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningesx.problem.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Redundancywarningesx.problem.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExRSPAN src session conflict with teamingerroresx.problem.net.rspan.teaming.uplink.io.conflict|Failed to set RSPAN src session {1} on portset {2} due to it disallows uplink I/O which conflicts with {3} teaming policy {4}.EventExThe teaming policy has an invalid uplinkerroresx.problem.net.teaming.policy.invalid.uplink|Failed to update teaming policy {1} on portset {2} due to an invalid uplink {3} which disallows normal I/O.EventExFailed to set MTU on an uplinkwarningesx.problem.net.uplink.mtu.failed|VMkernel failed to set the MTU value {1} on the uplink {2}.EventExA duplicate IP address was detected on a vmknic interfacewarningesx.problem.net.vmknic.ip.duplicate|A duplicate IP address was detected for {1} on the interface {2}. The current owner is {3}.EventExLink state downwarningesx.problem.net.vmnic.linkstate.down|Physical NIC {1} linkstate is down.EventExLink state unstablewarningesx.problem.net.vmnic.linkstate.flapping|Taking down physical NIC {1} because the link is unstable.EventExNic Watchdog Resetwarningesx.problem.net.vmnic.watchdog.reset|Uplink {1} has recovered from a transient failure due to watchdog timeoutEventExNTP daemon stopped. Time correction out of bounds.erroresx.problem.ntpd.clock.correction.error|NTP daemon stopped. Time correction {1} > {2} seconds. Manually set the time and restart ntpd.EventExOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.osdata.partition.full|OSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212ExtendedEventConfigured OSData cannot be found. Please refer to KB article: KB 87212.warningConfigured OSData cannot be found. Please refer to KB article: KB 87212.esx.problem.osdata.path.notfound|Configured OSData cannot be found. 
Please refer to KB article: KB 87212.EventExVirtual machine killed as it kept using a corrupted memory page.erroresx.problem.pageretire.mce.injected|Killing virtual machine with config path {1} because at least {2} uncorrectable memory error machine check exceptions were injected for guest physical page {3} but the virtual machine's operating system kept using the page.EventExA virtual machine was killed as it kept using a corrupted memory page.errorThe virtual machine was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.esx.problem.pageretire.mce.injected.2|{1} was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.EventExMemory page retirement requested by platform firmware.infoesx.problem.pageretire.platform.retire.request|Memory page retirement requested by platform firmware. FRU ID: {1}. Refer to System Hardware Log: {2}EventExNumber of host physical memory pages that have been selected for retirement but could not yet be retired is high.warningesx.problem.pageretire.selectedbutnotretired.high|Number of host physical memory pages that have been selected for retirement but could not yet be retired is high: ({1})EventExNumber of host physical memory pages selected for retirement exceeds threshold.warningesx.problem.pageretire.selectedmpnthreshold.host.exceeded|Number of host physical memory pages that have been selected for retirement ({1}) exceeds threshold ({2}).ExtendedEventNo memory to allocate APD Eventwarningesx.problem.psastor.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExStorage Device close failed.warningesx.problem.psastor.device.close.failed|"Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.psastor.device.detach.failed|Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.psastor.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExStorage Device I/O Latency going highwarningesx.problem.psastor.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExPlugin's isLocal entry point failedwarningesx.problem.psastor.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.psastor.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.psastor.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.psastor.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExDevice has been turned off administratively.infoesx.problem.psastor.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. 
Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.psastor.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.psastor.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExMaximum number of storage pathserroresx.problem.psastor.psastorpath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.psastor.unsupported.plugin.type|Storage Device Allocation not supported for plugin type {1}EventExFailed to delete resource group.warningFailed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.esx.problem.resourcegroup.delete.failed|Failed to delete resource groups with names '{rgnames}'.EventExFailed to Set the Virtual Machine's Latency Sensitivitywarningesx.problem.sched.latency.abort|Unable to apply latency-sensitivity setting to virtual machine {1}. No valid placement on the host.EventExNo Cache Allocation Resourcewarningesx.problem.sched.qos.cat.noresource|Unable to support cache allocation for virtual machine {1}. Out of resources.EventExNo Cache Allocation Supportwarningesx.problem.sched.qos.cat.notsupported|Unable to support L3 cache allocation for virtual machine {1}. No processor capabilities.EventExNo Cache Monitoring Resourcewarningesx.problem.sched.qos.cmt.noresource|Unable to support cache monitoring for virtual machine {1}. Out of resources.EventExNo Cache Monitoring Supportwarningesx.problem.sched.qos.cmt.notsupported|Unable to support L3 cache monitoring for virtual machine {1}. No processor capabilities.ExtendedEventScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.warningScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.esx.problem.scratch.on.usb|Scratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.EventExScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.scratch.partition.full|Scratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212EventExSize of scratch partition is too small.warningSize of scratch partition {1} is too small. 
Recommended scratch partition size is {2} MiB.esx.problem.scratch.partition.size.small|Size of scratch partition {1} is too small. Recommended scratch partition size is {2} MiB.EventExNo scratch partition has been configured.warningNo scratch partition has been configured. Recommended scratch partition size is {} MiB.esx.problem.scratch.partition.unconfigured|No scratch partition has been configured. Recommended scratch partition size is {} MiB.ExtendedEventNo memory to allocate APD Eventwarningesx.problem.scsi.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExScsi Device close failed.warningesx.problem.scsi.device.close.failed|"Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.scsi.device.detach.failed|Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExFailed to attach filter to device.warningesx.problem.scsi.device.filter.attach.failed|Failed to attach filters to device '%s' during registration. Plugin load failed or the filter rules are incorrect.EventExInvalid XCOPY request for devicewarningesx.problem.scsi.device.invalid.xcopy.request|Invalid XCOPY request for device {1}. Host {2}, Device {3}, Plugin {4}, {5} sense, sense.key = {6}, sense.asc = {7}, sense.ascq = {8}: {9}EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.scsi.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExFailed to obtain INQUIRY data from the devicewarningesx.problem.scsi.device.io.inquiry.failed|Failed to get standard inquiry for device {1} from Plugin {2}.ExtendedEventScsi device queue parameters incorrectly set.warningesx.problem.scsi.device.io.invalid.disk.qfull.value|QFullSampleSize should be bigger than QFullThreshold. LUN queue depth throttling algorithm will not function as expected. Please set the QFullSampleSize and QFullThreshold disk configuration values in ESX correctly.EventExScsi Device I/O Latency going highwarningesx.problem.scsi.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExQErr cannot be changed on device. Please change it manually on the device if possible.warningesx.problem.scsi.device.io.qerr.change.config|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. The system is not configured to change the QErr setting of device. The QErr value supported by system is 0x{3}. Please check the SCSI ChangeQErrSetting configuration value for ESX.EventExScsi Device QErr setting changedwarningesx.problem.scsi.device.io.qerr.changed|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. 
The device was originally configured to the supported QErr setting of 0x{3}, but this has been changed and could not be changed back.EventExPlugin's isLocal entry point failedwarningesx.problem.scsi.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.scsi.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.scsi.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.scsi.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExFailed to apply NMP SATP option during device discovery.warningesx.problem.scsi.device.nmp.satp.option.failed|Invalid config parameter: \"{1}\" provided in the nmp satp claimrule, this setting was not applied while claiming the path {2}EventExDevice has been turned off administratively.infoesx.problem.scsi.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.scsi.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExThin Provisioned Device Nearing Capacitywarningesx.problem.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.scsi.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExvVol PE path going out of vVol-incapable adaptererroresx.problem.scsi.scsipath.badpath.unreachpe|Sanity check failed for path {1}. The path is to a vVol PE, but it goes out of adapter {2} which is not PE capable. Path dropped.EventExCannot safely determine vVol PEerroresx.problem.scsi.scsipath.badpath.unsafepe|Sanity check failed for path {1}. Could not safely determine if the path is to a vVol PE. Path dropped.EventExMaximum number of storage pathserroresx.problem.scsi.scsipath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.scsi.unsupported.plugin.type|Scsi Device Allocation not supported for plugin type {1}ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. 
Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.infoesx.problem.sgx.addpackage|Support for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.infoesx.problem.sgx.htenabled|Support for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.esx.problem.slp.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.EventExAll paths are downwarningesx.problem.storage.apd.start|Device or filesystem with identifier {1} has entered the All Paths Down state.EventExAll Paths Down timed out, I/Os will be fast failedwarningesx.problem.storage.apd.timeout|Device or filesystem with identifier {1} has entered the All Paths Down Timeout state after being in the All Paths Down state for {2} seconds. I/Os will now be fast failed.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.devicepor|Frequent PowerOn Reset Unit Attentions are occurring on device {1}. This might indicate a storage problem. Affected datastores: {2}EventExLost Storage Connectivityerroresx.problem.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.pathpor|Frequent PowerOn Reset Unit Attentions are occurring on path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExFrequent State Changes of Storage Pathinfoesx.problem.storage.connectivity.pathstatechanges|Frequent path state changes are occurring for path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExiSCSI discovery target login connection problemerroresx.problem.storage.iscsi.discovery.connect.error|iSCSI discovery to {1} on {2} failed. The iSCSI Initiator could not establish a network connection to the discovery address.EventExiSCSI Discovery target login errorerroresx.problem.storage.iscsi.discovery.login.error|iSCSI discovery to {1} on {2} failed. The Discovery target returned a login error of: {3}.EventExiSCSI iSns Discovery errorerroresx.problem.storage.iscsi.isns.discovery.error|iSCSI iSns discovery to {1} on {2} failed. 
({3} : {4}).EventExiSCSI Target login connection problemerroresx.problem.storage.iscsi.target.connect.error|Login to iSCSI target {1} on {2} failed. The iSCSI initiator could not establish a network connection to the target.EventExiSCSI Target login errorerroresx.problem.storage.iscsi.target.login.error|Login to iSCSI target {1} on {2} failed. Target returned login error of: {3}.EventExiSCSI target permanently removederroresx.problem.storage.iscsi.target.permanently.lost|The iSCSI target {2} was permanently removed from {1}.EventExiSCSI target was permanently removederroresx.problem.storage.iscsi.target.permanently.removed|The iSCSI target {1} was permanently removed from {2}.EventExDegraded Storage Path Redundancywarningesx.problem.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. Affected datastores: {3}.EventExLost Storage Path Redundancywarningesx.problem.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.esx.problem.swap.systemSwap.isPDL.cannot.remove|System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.EventExSystem swap was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningesx.problem.swap.systemSwap.isPDL.cannot.remove.2|System swap was affected by the PDL of {1} and was removed. System swap has been reconfigured.EventExSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure|System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.EventExSystem swap was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningesx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2|System swap was affected by the PDL of {1}. It was removed but the subsequent reconfiguration failed.ExtendedEventSystem logging is not configured.warningSystem logging is not configured on host {host.name}.esx.problem.syslog.config|System logging is not configured on host {host.name}. Please check Syslog options for the host under Configuration -> Software -> Advanced Settings in vSphere client.ExtendedEventSystem logs are stored on non-persistent storage.warningSystem logs on host {host.name} are stored on non-persistent storage.esx.problem.syslog.nonpersistent|System logs on host {host.name} are stored on non-persistent storage. 
Consult product documentation to configure a syslog server or a scratch partition.ExtendedEventTest with no argumentserroresx.problem.test.test0|Test with no argumentsEventExTest with both int and string argumentserroresx.problem.test.test2|Test with both {1} and {2}ExtendedEventUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.warningUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.esx.problem.unsupported.tls.protocols|Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.EventExA VFAT filesystem is full.erroresx.problem.vfat.filesystem.full.other|The VFAT filesystem {1} (UUID {2}) is full.EventExA VFAT filesystem, being used as the host's scratch partition, is full.erroresx.problem.vfat.filesystem.full.scratch|The host's scratch partition, which is the VFAT filesystem {1} (UUID {2}), is full.EventExConfigstore is reaching its critical size limit. Please refer to the KB 93362 for more details.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.EventExA ramdisk has a very high usage. Please refer to the KB 93362 for more details.warningRamdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.ExtendedEventAn operation on the root filesystem has failed.erroresx.problem.visorfs.failure|An operation on the root filesystem has failed.EventExThe root filesystem's file table is full.erroresx.problem.visorfs.inodetable.full|The root filesystem's file table is full. As a result, the file {1} could not be created by the application '{2}'.EventExA ramdisk is full.erroresx.problem.visorfs.ramdisk.full|The ramdisk '{1}' is full. 
As a result, the file {2} could not be written.EventExA ramdisk's file table is full.erroresx.problem.visorfs.ramdisk.inodetable.full|The file table of the ramdisk '{1}' is full. As a result, the file {2} could not be created by the application '{3}'.EventExConfig store is reaching its critical size limit.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.EventExA ramdisk has a very high usage.warningRamdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left.EventExA VM could not fault in a page. The VM is terminated as further progress is impossible.erroresx.problem.vm.kill.unexpected.fault.failure|The VM using the config file {1} could not fault in a guest physical page from the hypervisor level swap file at {2}. The VM is terminated as further progress is impossible.EventExA virtual machine could not fault in a page. It is terminated as further progress is impossible.errorThe virtual machine could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleesx.problem.vm.kill.unexpected.fault.failure.2|{1} could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleEventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire.64|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA virtual machine contained a host physical page that was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.errorThe virtual machine contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2|{1} contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.noSwapResponse|The VM using the config file {1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA virtual machine did not respond to swap actions. 
It is terminated as further progress is impossible.errorThe virtual machine did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.esx.problem.vm.kill.unexpected.noSwapResponse.2|{1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA VM is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.erroresx.problem.vm.kill.unexpected.vmtrack|The VM using the config file {1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.errorThe virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.esx.problem.vm.kill.unexpected.vmtrack.2|{1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA user world daemon of a virtual machine could not fault in a page. The VM is terminated as further progress is impossible.errorThe user world daemon of this virtual machine could not fault in a page. The virtual machine is terminated as further progress is impossible.esx.problem.vm.kill.unexpected.vmx.fault.failure.2|The user world daemon of {1} could not fault in a page. The virtual machine is terminated as further progress is impossible.EventExMulti-extent ATS-only VMFS Volume unable to use ATSerroresx.problem.vmfs.ats.incompatibility.detected|Multi-extent ATS-only volume '{1}' ({2}) is unable to use ATS because HardwareAcceleratedLocking is disabled on this host: potential for introducing filesystem corruption. Volume should not be used from other hosts.EventExDevice Backing VMFS has lost ATS Supporterroresx.problem.vmfs.ats.support.lost|ATS-Only VMFS volume '{1}' not mounted. Host does not support ATS or ATS initialization has failed.EventExVMFS Locked By Remote Hosterroresx.problem.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.erroresx.problem.vmfs.extent.offline|An attached device {1} may be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system came onlineinfoesx.problem.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Heartbeat Corruption Detected.erroresx.problem.vmfs.heartbeat.corruptondisk|At least one corrupt on-disk heartbeat region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Volume Connectivity Restoredinfoesx.problem.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfoesx.problem.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. 
Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterroresx.problem.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerroresx.problem.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Volume will remain in read-only metadata mode with limited write support until journal can be created.EventExTrying to acquire lock on an already locked file. - File descriptionerror{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}esx.problem.vmfs.lock.busy.filedesc|{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}EventExTrying to acquire lock on an already locked file. FilenameerrorLock(s) held on file {1} by other host(s).esx.problem.vmfs.lock.busy.filename|Lock(s) held on file {1} by other host(s).EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk.v2|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected.errorInconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.lockmode.inconsistency.detected|Inconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExFailed to mount NFS volumeerroresx.problem.vmfs.nfs.mount.failed|NFS mount failed for {1}:{2} volume {3}. Status: {4}EventExLost connection to NFS servererroresx.problem.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExvmknic configured for NFS has been removedwarningesx.problem.vmfs.nfs.vmknic.removed|vmknic {1} removed, NFS{2} datastore {3} configured with the vmknic will be inaccessible.EventExNFS volume average I/O Latency has exceeded configured threshold for the current configured periodwarningesx.problem.vmfs.nfs.volume.io.latency.exceed.threshold.period|NFS volume {1} average I/O latency {2}(us) has exceeded threshold {3}(us) for last {4} minutesEventExNFS volume I/O Latency going highwarningesx.problem.vmfs.nfs.volume.io.latency.high|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExNFS volume I/O Latency exceeding thresholdwarningesx.problem.vmfs.nfs.volume.io.latency.high.exceed.threshold|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds. 
Exceeded threshold {4} microsecondsEventExNo space on NFS volume.warningesx.problem.vmfs.nfs.volume.no.space|{1}: No space on NFS volume.EventExVMFS Resource Corruption Detectederroresx.problem.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected on spanned volume.errorInconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanned.lockmode.inconsistency.detected|Inconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.EventExIncompatible VMFS span state detected.errorIncompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanstate.incompatibility.detected|Incompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExRemote logging host has become unreachable.erroresx.problem.vmsyslogd.remote.failure|The host "{1}" has become unreachable. Remote logging to this host has stopped.ExtendedEventLogging to storage has failed.erroresx.problem.vmsyslogd.storage.failure|Logging to storage has failed. Logs are no longer being stored locally on this host.EventExThe configured log directory cannot be used. The default directory will be used instead.erroresx.problem.vmsyslogd.storage.logdir.invalid|The configured log directory {1} cannot be used. The default directory {2} will be used instead.EventExLog daemon has failed for an unexpected reason.erroresx.problem.vmsyslogd.unexpected|Log daemon has failed for an unexpected reason: {1}EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.errorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.EventExvSAN detected LSN mismatch in mirrorswarningvSAN detected LSN mismatch in mirrors for object {1}.esx.problem.vob.vsan.dom.lsnmismatcherror|vSAN detected LSN mismatch in mirrors for object {1}.EventExResync encountered no space errorwarningResync encountered no space error for component {1} on disk {2}.esx.problem.vob.vsan.dom.nospaceduringresync|Resync encountered no space error for component {1} on disk {2}. Resync will resume once space is freed up on this disk. 
Need around {3}MB to resync the component on this diskEventExResync is delayed.warningResync is delayed for component {1} on disk {2} for object {3}.esx.problem.vob.vsan.dom.resyncdecisiondelayed|Resync is delayed for component {1} on disk {2} until data availability is regained for object {3} on the remote site.EventExResync timed outwarningResync timed out for component {2} on disk {3}.esx.problem.vob.vsan.dom.resynctimeout|Resync timed out as no progress was made in {1} minute(s) for component {2} on disk {3}. Resync will be tried again for this component. The remaining resync is around {4}MB.EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskerrorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskunrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.unrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.EventExNVMe critical health warning for disk. The disk's backup device has failed.errorNVMe critical health warning for disk {1}. The disk's backup device has failed.esx.problem.vob.vsan.lsom.backupfailednvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's backup device has failed.EventExOffline event on component.warningOffline event issued for component: {1}, flag: {2}, reason: {3}.esx.problem.vob.vsan.lsom.componentoffline|Offline event issued for component: {1}, flag: {2}, reason: {3}.EventExvSAN Node: Near node component count limit.warningvSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).esx.problem.vob.vsan.lsom.componentthreshold|vSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).EventExEvacuation has failed for device and it will be retried by DDH.errorEvacuation has failed for device {1} and it will be retried by DDH.esx.problem.vob.vsan.lsom.ddhEvacFailed|Evacuation has failed for device {1} and it will be retried by DDH.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.devicerepair|vSAN device {1} is being repaired due to I/O failures, and will be out of service until the repair is complete. If the device is part of a dedup disk group, the entire disk group will be out of service until the repair is complete.EventExvSAN device has high latency. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithhighlatency|vSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.EventExvSAN device smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} smart health status is impending failure. 
It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithsmartfailure|vSAN device {1} smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.EventExvSAN device is under permanent failure.errorvSAN device {1} is under permanent failure.esx.problem.vob.vsan.lsom.diskerror|vSAN device {1} is under permanent failure.EventExFailed to create a new disk group.errorFailed to create new disk group {1}. The system has reached the maximum amount of disk groups allowed {2} for the current amount of memory {3}. Add more memory.esx.problem.vob.vsan.lsom.diskgrouplimit|Failed to create new disk group {1}. The system has reached the maximum amount of disk groups allowed {2} for the current amount of memory {3}. Add more memory.EventExvSAN diskgroup log is congested.errorvSAN diskgroup {1} log is congestedesx.problem.vob.vsan.lsom.diskgrouplogcongested|vSAN diskgroup {1} log is congested.EventExvSAN disk group is under congestion. It will be remediated. No action is needed.warningvSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.esx.problem.vob.vsan.lsom.diskgroupundercongestion|vSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.EventExFailed to add disk to disk group.errorFailed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.esx.problem.vob.vsan.lsom.disklimit2|Failed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.EventExvSAN device is under propagated error.errorvSAN device {1} is under propagated erroresx.problem.vob.vsan.lsom.diskpropagatederror|vSAN device {1} is under propagated error.EventExvSAN device is under propagated permanent error.errorvSAN device {1} is under propagated permanent erroresx.problem.vob.vsan.lsom.diskpropagatedpermerror|vSAN device {1} is under propagated permanent error.EventExvSAN device is unhealthy.errorvSAN device {1} is unhealthyesx.problem.vob.vsan.lsom.diskunhealthy|vSAN device {1} is unhealthy.EventExEvacuation failed for device due to insufficient resources and it will be retried.errorEvacuation failed for device {1} due to insufficient resources and it will be retried.esx.problem.vob.vsan.lsom.evacFailedInsufficientResources|Evacuation failed for device {1} due to insufficient resources and it will be retried. Please make resources available for evacuation.EventExDeleted invalid metadata component.warningDeleted invalid metadata component: {1}.esx.problem.vob.vsan.lsom.invalidMetadataComponent|Deleted invalid metadata component: {1}.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.metadataURE|vSAN device {1} encountered an unrecoverable read error. This disk will be evacuated and rebuilt. If the device is part of a dedup disk group, the entire disk group will be evacuated and rebuilt.EventExNVMe disk critical health warning for disk. Disk is now read only.errorNVMe critical health warning for disk {1}. Disk is now read only.esx.problem.vob.vsan.lsom.readonlynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1} is: The NVMe disk has become read only.EventExNVMe critical health warning for disk. 
The disk has become unreliable.errorNVMe critical health warning for disk {1}. The disk has become unreliable.esx.problem.vob.vsan.lsom.reliabilitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk has become unreliable.EventExNVMe critical health warning for disk. The disk's spare capacity is below threshold.errorNVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.esx.problem.vob.vsan.lsom.sparecapacitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.storagepoolURE|vSAN device {1} encountered an unrecoverable read error. This disk will be rebuilt.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.storagepoolrepair|vSAN device {1} is being repaired due to I/O failures and will be out of service until the repair is complete.EventExNo response for I/O on vSAN device.errorNo response for I/O on vSAN device {1}.esx.problem.vob.vsan.lsom.storagepoolstuckio|No response for I/O on vSAN device {1}.EventExvSAN device detected suspended I/Os.errorvSAN device {1} detected suspended I/Os.esx.problem.vob.vsan.lsom.stuckio|vSAN device {1} detected suspended I/Os. Taking the host out of service to avoid affecting the vSAN cluster.EventExvSAN device detected stuck I/O error.errorvSAN device {1} detected stuck I/O error.esx.problem.vob.vsan.lsom.stuckiooffline|vSAN device {1} detected stuck I/O error. Marking the device as offline.EventExvSAN device is under propagated stuck I/O error.errorvSAN device {1} is under propagated stuck I/O error.esx.problem.vob.vsan.lsom.stuckiopropagated|vSAN device {1} is under propagated stuck I/O error. Marking the device as offline.EventExvSAN device detected I/O timeout error.errorvSAN device {1} detected I/O timeout error.esx.problem.vob.vsan.lsom.stuckiotimeout|vSAN device {1} detected I/O timeout error. This may lead to stuck I/O.EventExNVMe critical health warning for disk. The disk's temperature is beyond threshold.errorNVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.esx.problem.vob.vsan.lsom.temperaturenvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.EventExvSAN device has gone offline.errorvSAN device {1} has gone offline.esx.problem.vob.vsan.pdl.offline|vSAN device {1} has gone offline.EventExA ZDOM object is paused due to continuous fail-stops.warningZDOM object {1} is paused on host {2}, numFailStops={3}.esx.problem.vob.vsan.zdom.failstoppaused|ZDOM object {1} is paused on host {2}, numFailStops={3}.ExtendedEventTest with no arguments.infoesx.problem.vobdtestcorrelator.test.0|Test with no argumentsEventExTest with int argument.infoesx.problem.vobdtestcorrelator.test.1d|Test with int argument: {1}EventExTest with string argument.infoesx.problem.vobdtestcorrelator.test.1s|Test with string argument: {1}EventExTest with huge string argument.infoesx.problem.vobdtestcorrelator.test.hugestr|Test with huge string argument: {1}EventExVpxa crashed and a core file was created.warningesx.problem.vpxa.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. 
This might have caused connections to the host to be dropped.EventExVpxa crashed and an encrypted core file was created.warningesx.problem.vpxa.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventvSAN clustering services have been disabled.warningvSAN clustering and directory services have been disabled and will no longer be available.esx.problem.vsan.clustering.disabled|vSAN clustering and directory services have been disabled and will no longer be available.EventExData component found on witness host.warningData component {1} found on witness host is ignored.esx.problem.vsan.dom.component.datacomponent.on.witness.host|Data component {1} found on witness host is ignored.EventExvSAN Distributed Object Manager failed to initializewarningvSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.esx.problem.vsan.dom.init.failed.status|vSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.EventExOne or more disks exceed its/their warning usage of estimated endurance threshold.infoOne or more disks exceed its/their warning usage of estimated endurance threshold.esx.problem.vsan.health.ssd.endurance|Disks {Disk Name} in Cluster {Cluster Name} have exceeded warning usage of their estimated endurance threshold {Disk Percentage Threshold}, currently at {Disk Percentage Used} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds the estimated endurance threshold.errorOne of the disks exceeds the estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.error|Disks {1} have exceeded their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds 90% of its estimated endurance threshold.warningOne of the disks exceeds 90% of its estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.warning|Disks {1} have exceeded 90 percent usage of their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.errorOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.esx.problem.vsan.health.vsanesa.pdl|Disk {1} is detected with PDL in vSAN ESA Cluster. Please check the host for further details.EventExvSAN device Memory/SSD congestion has changed.infoLSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.esx.problem.vsan.lsom.congestionthreshold|LSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.EventExA vmknic added to vSAN network configuration doesn't have a valid IP. 
Network is not ready.errorvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configurations and therefore the vSAN node doesn't have network connectivity.esx.problem.vsan.net.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configurations and therefore the vSAN node doesn't have network connectivity.ExtendedEventvSAN doesn't have any redundancy in its network configuration.warningvSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.lost|vSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.ExtendedEventvSAN is operating on reduced network redundancy.warningvSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.reduced|vSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.ExtendedEventvSAN doesn't have any network configuration for use.errorvSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.esx.problem.vsan.no.network.connectivity|vSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.EventExA vmknic added to vSAN network configuration doesn't have a valid IP. It will not be in use.warningvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed that may cause problems.esx.problem.vsan.vmknic.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed that may cause problems.EventEx Failed to add shared virtual disk. Maximum count reachederroresx.problem.vscsi.shared.vmdk.add.failure.max.count|Failed to add shared virtual disk. Maximum number of shared vmdks supported per ESX host is {1}EventExNo free slots availableerroresx.problem.vscsi.shared.vmdk.no.free.slot.available|No free slot available. Maximum number of virtual machines supported in MSCS cluster is {1}EventExFailed to power on virtual machines on shared VMDK with running virtual machineerroresx.problem.vscsi.shared.vmdk.virtual.machine.power.on.failed|Two or more virtual machines (\"{1}\" and \"{2}\") sharing the same virtual disk are not allowed to be Powered-On on the same host.EventExVVol container has gone offline.erroresx.problem.vvol.container.offline|VVol container {1} has gone offline: isPEAccessible {2}, isVPAccessible {3}.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. 
Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.EventExCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated.thirdPartyProv|CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. 
Please refer to KB 95798 for more details.EventExApplication consistent sync completed.infoApplication consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Application consistent sync completed for virtual machine {vm.name} on host {host.name}.Application consistent sync completed for virtual machine {vm.name}.Application consistent sync completed.hbr.primary.AppQuiescedDeltaCompletedEvent|Application consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)ExtendedEventConnection to VR Server restored.infoConnection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Connection to VR Server restored for virtual machine {vm.name} on host {host.name}.Connection to VR Server restored for virtual machine {vm.name}.Connection to VR Server restored.hbr.primary.ConnectionRestoredToHbrServerEvent|Connection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExSync stopped.warningSync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped: {reason.@enum.hbr.primary.ReasonForDeltaAbort}hbr.primary.DeltaAbortedEvent|Sync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}EventExSync completed.infoSync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync completed for virtual machine {vm.name} on host {host.name}.Sync completed for virtual machine {vm.name}.Sync completed.hbr.primary.DeltaCompletedEvent|Sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventSync started.infoSync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync started by {userName} for virtual machine {vm.name} on host {host.name}.Sync started by {userName} for virtual machine {vm.name}.Sync started by {userName}.hbr.primary.DeltaStartedEvent|Sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFile system consistent sync completed.infoFile system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.File system consistent sync completed for virtual machine {vm.name} on host {host.name}.File system consistent sync completed for virtual machine {vm.name}.File system consistent sync completed.hbr.primary.FSQuiescedDeltaCompletedEvent|File system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)EventExFailed to start sync.errorFailed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name} on host {host.name}: 
{reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartDeltaEvent|Failed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExFailed to start full sync.errorFailed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name} on host {host.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartSyncEvent|Failed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExDisk replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}hbr.primary.InvalidDiskReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}EventExVirtual machine replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}hbr.primary.InvalidVmReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}ExtendedEventVR Server does not support network compression.warningVR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support network compression for virtual machine {vm.name} on host {host.name}.VR Server does not support network compression for virtual machine {vm.name}.VR Server does not support network 
compression.hbr.primary.NetCompressionNotOkForServerEvent|VR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server supports network compression.infoVR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server supports network compression for virtual machine {vm.name} on host {host.name}.VR Server supports network compression for virtual machine {vm.name}.VR Server supports network compression.hbr.primary.NetCompressionOkForServerEvent|VR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExNo connection to VR Server.warningNo connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server: {reason.@enum.hbr.primary.ReasonForNoServerConnection}hbr.primary.NoConnectionToHbrServerEvent|No connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}EventExVR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}errorVR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}hbr.primary.NoProgressWithHbrServerEvent|VR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}ExtendedEventPrepare Delta Time exceeds configured RPO.warningPrepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name}.Prepare Delta Time exceeds configured RPO.hbr.primary.PrepareDeltaTimeExceedsRpoEvent|Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventQuiescing is not supported for this virtual machine.warningQuiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing is not supported for virtual machine {vm.name} on host {host.name}.Quiescing is not supported for virtual machine {vm.name}.Quiescing is not supported for this virtual machine.hbr.primary.QuiesceNotSupported|Quiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server is compatible with the configured RPO.infoVR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in 
cluster {computeResource.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name}.VR Server is compatible with the configured RPO.hbr.primary.RpoOkForServerEvent|VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server does not support the configured RPO.warningVR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server does not support the configured RPO for virtual machine {vm.name}.VR Server does not support the configured RPO.hbr.primary.RpoTooLowForServerEvent|VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFull sync completed.infoFull sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync completed for virtual machine {vm.name} on host {host.name}.Full sync completed for virtual machine {vm.name}.Full sync completed.hbr.primary.SyncCompletedEvent|Full sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventFull sync started.infoFull sync started for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync started for virtual machine {vm.name} on host {host.name}.Full sync started for virtual machine {vm.name}.Full sync started.hbr.primary.SyncStartedEvent|Full sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventReplication paused.infoReplication paused for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Replication paused for virtual machine {vm.name} on host {host.name}.Replication paused for virtual machine {vm.name}.Replication paused.hbr.primary.SystemPausedReplication|Replication paused by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.warningQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.hbr.primary.UnquiescedDeltaCompletedEvent|Quiescing failed or the virtual machine is powered off. 
Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).EventExReplication configuration changed.infoReplication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} on host {host.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).hbr.primary.VmReplicationConfigurationChangedEvent|Replication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).AccountCreatedEventAccount createdinfoAn account was createdAccount {spec.id} was created on host {host.name} <EventLongDescription id="vim.event.AccountCreatedEvent"> <description> An account has been created on the host </description> </EventLongDescription> AccountRemovedEventAccount removedinfoAccount {account} was removedAccount {account} was removed on host {host.name} <EventLongDescription id="vim.event.AccountRemovedEvent"> <description> An account has been removed from the host </description> </EventLongDescription> AccountUpdatedEventAccount updatedinfoAccount {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}' <EventLongDescription id="vim.event.AccountUpdatedEvent"> <description> An account has been updated on the host </description> </EventLongDescription> AdminPasswordNotChangedEventAdministrator password not changedinfoThe default password for the root user has not been changedThe default password for the root user on the host {host.name} has not been changed <EventLongDescription id="vim.event.AdminPasswordNotChangedEvent"> <description> The default password for the Administrator user on the host has not been changed </description> <cause> <description> You have not changed the password for the Administrator user on the host so the default password is still active </description> <action> Change the password for the Administrator user on the host </action> </cause> </EventLongDescription> AlarmAcknowledgedEventAlarm acknowledgedinfoAcknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}'Acknowledged alarm '{alarm.name}' on {entity.name}AlarmActionTriggeredEventAlarm action triggeredinfoAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarmClearedEventAlarm clearedinfoManually cleared alarm 
'{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}AlarmCreatedEventAlarm createdinfoCreated alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}'Created alarm '{alarm.name}' on {entity.name}AlarmEmailCompletedEventAlarm email sentinfoAlarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}AlarmEmailFailedEventCannot send alarm emailerrorAlarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to} <EventLongDescription id="vim.event.AlarmEmailFailedEvent"> <description> An error occurred while sending email notification of a triggered alarm </description> <cause> <description>Failed to send email for a triggered alarm</description> <action>Check the vCenter Server SMTP settings for sending email notifications</action> </cause> </EventLongDescription> AlarmEvent<Alarm Event>info<internal>AlarmReconfiguredEventAlarm reconfiguredinfoReconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}'Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}. <EventLongDescription id="vim.event.AlarmReconfiguredEvent"> <description> An alarm has been reconfigured </description> <cause> <description>A user has reconfigured an alarm</description> </cause> </EventLongDescription> AlarmRemovedEventAlarm removedinfoRemoved alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}'Removed alarm '{alarm.name}' on {entity.name}AlarmScriptCompleteEventAlarm script completedinfoAlarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}AlarmScriptFailedEventAlarm script not completederrorAlarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg} <EventLongDescription id="vim.event.AlarmScriptFailedEvent"> <description> The vCenter Server logs this event if an error occurs while running a script after an alarm triggers. </description> <cause> <description>There was an error running the script</description> <action>Fix the script or failure condition</action> </cause> </EventLongDescription> AlarmSnmpCompletedEventAlarm SNMP trap sentinfoAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarmSnmpFailedEventAlarm SNMP trap not senterrorAlarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg} <EventLongDescription id="vim.event.AlarmSnmpFailedEvent"> <description> The vCenter Server logs this event if an error occurs while sending an SNMP trap when an alarm triggers. </description> <cause> <description>An SNMP trap could not be sent for a triggered alarm</description> <action>Check the vCenter Server SNMP settings. 
Make sure that the vCenter Server network can handle SNMP packets.</action> </cause> </EventLongDescription> AlarmStatusChangedEventAlarm status changedinfoAlarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}AllVirtualMachinesLicensedEventAll virtual machines are licensedinfoAll running virtual machines are licensedAlreadyAuthenticatedSessionEventAlready authenticatedinfoUser cannot logon since the user is already logged onAuthorizationEvent<Authorization Event>info<internal>BadUsernameSessionEventInvalid user nameerrorCannot login {userName}@{ipAddress} <EventLongDescription id="vim.event.BadUsernameSessionEvent"> <description> A user attempted to log in with an unknown or invalid username </description> <cause> <description> The username is unknown to the system </description> <action> Use a username that is included in the system user directory </action> <action> On Linux, verify that the user directory is correctly configured </action> <action> If you are using Active Directory, check the health of the domain controller </action> </cause> <cause> <description> The user provided an invalid password </description> <action> Supply the correct password </action> </cause> </EventLongDescription> CanceledHostOperationEventCanceled host operationinfoThe operation performed on host {host.name} was canceledThe operation performed on host {host.name} was canceledThe operation was canceledThe operation performed on host {host.name} in {datacenter.name} was canceled <EventLongDescription id="vim.event.CanceledHostOperationEvent"> <description> An operation performed on the host was canceled </description> <cause> <description> A previous event in the sequence of events will provide more information about the cause of this cancellation </description> </cause> </EventLongDescription> ClusterComplianceCheckedEventChecked cluster for complianceinfoChecked cluster {computeResource.name} for complianceCluster was checked for compliance with profile {profile.name}Checked cluster for compliance <EventLongDescription id="vim.event.ClusterComplianceCheckedEvent"> <description> The cluster was checked for compliance with a cluster profile </description> <cause> <description> The user initiated a compliance check on the cluster against a cluster profile </description> </cause> <cause> <description> A scheduled has initiated a compliance check for the cluster against a cluster profile </description> </cause> </EventLongDescription> ClusterCreatedEventCluster createdinfoCreated cluster {computeResource.name}Created in folder {parent.name}Created cluster {computeResource.name} in {datacenter.name}ClusterDestroyedEventCluster deletedinfoRemoved cluster {computeResource.name}Removed clusterRemoved cluster {computeResource.name} in datacenter {datacenter.name}ClusterEvent<Cluster Event>info<internal>ClusterOvercommittedEventCluster overcommittederrorInsufficient capacity in cluster {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource 
configurationInsufficient capacity in cluster {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.ClusterOvercommittedEvent"> <description> The cumulative CPU and/or memory resources of all hosts in the cluster are not adequate to satisfy the resource reservations of all virtual machines in the cluster </description> <cause> <description>You attempted to power on a virtual machine bypassing vCenter Server. This condition occurs when you attempt the power on using the vSphere Client directly connected to the host.</description> <action>In a DRS cluster, do not power on virtual machines bypassing vCenter Server</action> </cause> <cause> <description>A host was placed in Maintenance, Standby, or Disconnected Mode</description> <action>Bring any host in Maintenance, Standby, or Disconnected mode out of these modes</action> </cause> </EventLongDescription> ClusterReconfiguredEventCluster reconfiguredinfoReconfigured cluster {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Cluster reconfiguredReconfigured cluster {computeResource.name} in datacenter {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ClusterReconfiguredEvent"> <description> The cluster configuration was changed. The cluster configuration includes information about the DRS, DPM, EVC and vSphere HA settings of the cluster. All DRS rules are also stored in the cluster configuration. Editing the cluster configuration may trigger an invocation of DRS and/or enabling/disabling of vSphere HA on each host in the cluster. </description> </EventLongDescription> ClusterStatusChangedEventCluster status changedinfoConfiguration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.ClusterStatusChangedEvent"> <description> The cluster status has changed. This status is the status of the root resource pool that encompasses the entire cluster. A cluster status change may be accompanied by the removal of a configuration issue if one was previously detected. A cluster status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The cluster status changed to yellow</description> <action>Add more resources (more hosts), or reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The cluster status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> CustomFieldDefAddedEventCustom field definition addedinfoCreated new custom field definition {name}CustomFieldDefEvent<Custom Field Definition Event>info<internal>CustomFieldDefRemovedEventCustom field definition removedinfoRemoved field definition {name}CustomFieldDefRenamedEventCustom field definition renamedinfoRenamed field definition from {name} to {newName}CustomFieldEvent<Custom Field Event>info<internal>CustomFieldValueChangedEventCustom field value changedinfoChanged custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} in {datacenter.name} from '{prevState}' to '{value}'CustomizationEvent<Customization Event>info<internal>CustomizationFailed<An error occurred during customization>infoAn error occurred during customization, Reason: {reason.@enum.CustomizationFailed.ReasonCode}An error occurred during customization on VM {vm.name}, Reason: {reason.@enum.CustomizationFailed.ReasonCode}. 
See customization log at {logLocation} on the guest OS for details.CustomizationLinuxIdentityFailedCustomization Linux Identity FailederrorAn error occurred while setting up Linux identity. See log file '{logLocation}' on guest OS for details. <EventLongDescription id="vim.event.CustomizationLinuxIdentityFailed"> <description> The guest operating system Linux distribution is not supported by the customization scripts. Please refer to the VMware vSphere Compatibility Matrix for the list of the supported Linux distributions. </description> <cause> <description> Customization of the target guest operating system Linux distribution is not supported. </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> CustomizationNetworkSetupFailedCannot complete customization network setuperrorAn error occurred while setting up network properties of the guest OS. See the log file {logLocation} in the guest OS for details. <EventLongDescription id="vim.event.CustomizationNetworkSetupFailed"> <description> The customization scripts failed to set the parameters in the corresponding configuration files for Linux or in the Windows registry </description> <cause> <description> The Customization Specification contains an invalid host name or domain name </description> <action> Review the guest operating system log files for this event for more details </action> <action> Provide a valid host name for the target guest operating system. The name must comply with the host name and domain name definitions in RFC 952, 1035, 1123, 2181. </action> </cause> <cause> <description> Could not find a NIC with the MAC address specified in the Customization Package </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that there was no change in the virtual NIC MAC address between the creation of the Customization Package and its deployment. Deployment occurs during the first boot of the virtual machine after customization has been scheduled. </action> </cause> <cause> <description> The customization code needs read/write permissions for certain configuration files. These permissions were not granted to the 'root' account on Linux or to the account used by the VMware Tools Service on the Windows guest operating system. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Grant read/write permissions to the 'root' account for Linux or to the account used by the VMware Tools Service on the Windows guest operating system and the registry keys that need to be modified by the customization code </action> </cause> </EventLongDescription> CustomizationStartedEventStarted customizationinfoStarted customization of VM {vm.name}. Customization log located at {logLocation} in the guest OS.CustomizationSucceededCustomization succeededinfoCustomization of VM {vm.name} succeeded. Customization log located at {logLocation} in the guest OS.CustomizationSysprepFailedCannot complete customization SyspreperrorThe version of Sysprep {sysprepVersion} provided for customizing VM {vm.name} does not match the version of guest OS {systemVersion}. See the log file {logLocation} in the guest OS for more information. 
<EventLongDescription id="vim.event.CustomizationSysprepFailed"> <description> The sysprep files in the folder corresponding to the selected target guest operating system are not compatible with the actual version of the guest operation system </description> <cause> <description> The sysprep files in the folder corresponding to the target guest operating system (for example Windows XP) are for a different guest operating system (for example Windows 2003) </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> <cause> <description> The sysprep files in the folder corresponding to the guest operating system are for a different Service Pack, for example the guest operating system is Windows XP SP2 and but the sysprep files are for Windows XP SP1. </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> </EventLongDescription> CustomizationUnknownFailureUnknown customization errorerrorAn error occurred while customizing VM {vm.name}. For details reference the log file {logLocation} in the guest OS. <EventLongDescription id="vim.event.CustomizationUnknownFailure"> <description> The customization component failed to set the required parameters inside the guest operating system </description> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the registry keys used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on registry keys used by the customization code </action> </cause> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the files and folders used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on the files and folders used by the customization code </action> </cause> <cause> <description> On Linux, an invalid or unsupported time zone is passed to the customization scripts and the time zone configuration failed as a result </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that a supported time zone is passed in Customization Specification. 
</action> </cause> <cause> <description> On Linux, the guest operating system 'root' account does not have read/write permissions for the configuration files that the customization scripts need to modify ('/etc/hosts') </description> <action> Grant read/write permissions for the configuration files to the guest operating system 'root' account </action> </cause> <cause> <description> To enable guest customization on Linux, in case open-vm-tools are used, you must also install the deployPkg plug-in. </description> <action> Follow kb.vmware.com/kb/2075048 to install the open-vm-tools deployPkg plug-in. </action> </cause> <cause> <description> Customization of the target guest operating system is not supported </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> DVPortgroupCreatedEventdvPort group createdinfodvPort group {net.name} was added to switch {dvs}.dvPort group {net.name} in {datacenter.name} was added to switch {dvs.name}.DVPortgroupDestroyedEventdvPort group deletedinfodvPort group {net.name} was deleted.dvPort group {net.name} in {datacenter.name} was deleted.DVPortgroupEventdvPort group eventinfodvPort group eventdvPort group eventDVPortgroupReconfiguredEventdvPort group reconfiguredinfodvPort group {net.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}dvPort group {net.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DVPortgroupRenamedEventdvPort group renamedinfodvPort group {oldName} was renamed to {newName}.dvPort group {oldName} in {datacenter.name} was renamed to {newName}DasAdmissionControlDisabledEventvSphere HA admission control disabledinfovSphere HA admission control disabled for cluster {computeResource.name}vSphere HA admission control disabledvSphere HA admission control disabled for cluster {computeResource.name} in {datacenter.name}DasAdmissionControlEnabledEventvSphere HA admission control enabledinfovSphere HA admission control enabled for cluster {computeResource.name}vSphere HA admission control enabledvSphere HA admission control enabled for cluster {computeResource.name} in {datacenter.name}DasAgentFoundEventvSphere HA agent foundinfoRe-established contact with a primary host in this vSphere HA clusterDasAgentUnavailableEventvSphere HA agent unavailableerrorUnable to contact a primary vSphere HA agent in cluster {computeResource.name}Unable to contact a primary vSphere HA agentUnable to contact a primary vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasAgentUnavailableEvent"> <description> vCenter Server is not able to contact any good primary hosts in the vSphere HA cluster. vSphere HA protection may not be available for virtual machines running in the cluster. In addition, you cannot enable or reconfigure vSphere HA on hosts in the cluster until contact between vCenter Server and a good primary host is restored. </description> <cause> <description> There was a network outage, and all hosts show up in the inventory as "not responding" </description> <action>Restore the network</action> </cause> <cause> <description>All the primary hosts in the cluster failed</description> <action> If the failed primary hosts cannot be restored, disable vSphere HA on the cluster, wait for the Unconfigure vSphere HA tasks to complete on all hosts, and re-enable vSphere HA on the cluster </action> </cause> </EventLongDescription> DasClusterIsolatedEventAll vSphere HA hosts isolatederrorAll hosts in the vSphere HA cluster {computeResource.name} were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster {computeResource.name} in {datacenter.name} were isolated from the network. Check the network configuration for proper network redundancy in the management network.DasDisabledEventvSphere HA disabled for clusterinfovSphere HA disabled for cluster {computeResource.name}vSphere HA disabled for this clustervSphere HA disabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasDisabledEvent"> <description> vSphere HA has been disabled on this host due to a user action. vSphere HA is disabled when a host is disconnected from vCenter Server or placed into maintenance or standby mode. Virtual machines on other hosts in the cluster will not be failed over to this host in the event of a host failure. In addition, if the host is disconnected, any virtual machines running on this host will not be failed if the host fails. 
Further, no attempt will be made by vSphere HA VM and Application Monitoring to reset VMs. </description> </EventLongDescription> DasEnabledEventvSphere HA enabled for clusterinfovSphere HA enabled for cluster {computeResource.name}vSphere HA enabled for this clustervSphere HA enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasEnabledEvent"> <description> vSphere HA has been enabled on this host due to a user action. vSphere HA is enabled when a host is added to or moved into a vSphere HA cluster or when vSphere HA is enabled on a cluster. If the host was already in a vSphere HA cluster, vSphere HA will be enabled when the host is reconnected to vCenter Server or brought out of maintenance or standby mode. vSphere HA will attempt to protect any VMs that are running on the host at the time that HA is enabled on it. </description> </EventLongDescription> DasHostFailedEventvSphere HA host failederrorA possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name} in cluster {computeResource.name} in {datacenter.name}DasHostIsolatedEventvSphere HA host isolatedwarningHost {isolatedHost.name} has been isolated from cluster {computeResource.name}Host {isolatedHost.name} has been isolatedHost has been isolated from clusterHost {isolatedHost.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasHostIsolatedEvent"> <description> vSphere HA detected that the host is network isolated. When a host is in this state, vSphere HA applies the power-off or shutdown host isolation response to virtual machines running on the host, and continues to monitor the virtual machines that are left powered on. While a host is in this state, vSphere HA's ability to restart virtual machines after a failure is impacted. vSphere HA only powers off or shuts down a virtual machine if the agent on the host determines that a master agent is responsible for the virtual machine. </description> <cause> <description> A host is network isolated if both of the following conditions are met: (1) isolation addresses have been configured and the host is unable to ping them; (2) the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <action> Resolve the networking problem that is preventing the host from pinging its isolation addresses and communicating with other hosts. Ensure that there is redundancy in the management networks used by vSphere HA. With redundancy, vSphere HA is able to communicate over more than one path thus reducing the chance of a host becoming isolated. 
</action> </cause> </EventLongDescription> DatacenterCreatedEventDatacenter createdinfoCreated in folder {parent.name}Created datacenter {datacenter.name}Created datacenter {datacenter.name} in folder {parent.name}DatacenterEvent<Datacenter Event>info<internal>DatacenterRenamedEventDatacenter renamedinfoRenamed datacenterRenamed datacenter from {oldName} to {newName}Renamed datacenter from {oldName} to {newName}DatastoreCapacityIncreasedEventDatastore capacity increasedinfoDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytesDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytes in {datacenter.name}DatastoreDestroyedEventDatastore deletedinfoRemoved unconfigured datastore {datastore.name}Removed unconfigured datastore {datastore.name}DatastoreDiscoveredEventDatastore discoveredinfoDiscovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name}Discovered datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreDiscoveredEvent"> <description> A datastore was discovered on a host </description> <cause> <description> A host that has access to this datastore was added to the datacenter </description> </cause> <cause> <description> The storage backing this datastore was unmasked to a host in the datacenter </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host and the datastore was visible on at least one other host in the datacenter prior to this operation. </description> </cause> </EventLongDescription> DatastoreDuplicatedEventDatastore duplicatederrorMultiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detectedMultiple datastores named {datastore} detected on host {host.name} in {datacenter.name}DatastoreEvent<Datastore Event>info<internal>DatastoreFileCopiedEventFile or directory copied to datastoreinfoCopy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Copy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileDeletedEventFile or directory deletedinfoDeletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Deletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileEvent<Datastore File Event>info<internal>DatastoreFileMovedEventFile or directory moved to datastoreinfoMove of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Move of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from 
'{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreIORMReconfiguredEventReconfigured Storage I/O Control on datastoreinfoReconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}DatastorePrincipalConfiguredDatastore principal configuredinfoConfigured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal}Configured datastore principal {datastorePrincipal} on host {host.name} in {datacenter.name}DatastoreRemovedOnHostEventDatastore removed from hostinfoRemoved datastore {datastore.name} from {host.name}Removed datastore {datastore.name}Removed datastore {datastore.name} from {host.name} in {datacenter.name}DatastoreRenamedEventDatastore renamedinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name}DatastoreRenamedOnHostEventDatastore renamed from hostinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreRenamedOnHostEvent"> <description> A datastore was renamed on a host managed by vCenter Server </description> <cause> <description> vCenter Server discovered datastore on a host and renamed the datastore because it already exists in the vCenter Server inventory under a different name. vCenter Server might also have renamed the datastore because the name conflicts with another datastore in the same datacenter. </description> </cause> </EventLongDescription> DrsDisabledEventDRS disabledinfoDisabled DRS on cluster {computeResource.name}Disabled DRSDisabled DRS on cluster {computeResource.name} in datacenter {datacenter.name}DrsEnabledEventDRS enabledinfoEnabled DRS on cluster {computeResource.name} with automation level {behavior}Enabled DRS with automation level {behavior}Enabled DRS on {computeResource.name} with automation level {behavior} in {datacenter.name}DrsEnteredStandbyModeEventDRS entered standby modeinfoDRS put {host.name} into standby modeDRS put {host.name} into standby modeDRS put the host into standby modeDRS put {host.name} into standby modeDrsEnteringStandbyModeEventDRS entering standby modeinfoDRS is putting {host.name} into standby modeDRS is putting {host.name} into standby modeDRS is putting the host into standby modeDRS is putting {host.name} into standby modeDrsExitStandbyModeFailedEventDRS cannot exit the host out of standby modeerrorDRS cannot move {host.name} out of standby modeDRS cannot move {host.name} out of standby modeDRS cannot move the host out of standby modeDRS cannot move {host.name} out of standby mode <EventLongDescription id="vim.event.DrsExitStandbyModeFailedEvent"> <description> DPM failed to power on a host in standby mode. DPM tried to power on a host using IPMI, iLO or Wake-on-LAN protocol, but the host did not power on. 
</description> <cause> <description>DPM could not communicate with the BMC on the host</description> <action>Verify the IPMI/iLO credentials entered in vCenter Server</action> <action>Verify that LAN access is enabled in the BMC</action> </cause> <cause> <description>The vMotion NIC on the host does not support Wake-on-LAN</description> <action>Select a vMotion NIC that supports Wake-on-LAN</action> </cause> </EventLongDescription> DrsExitedStandbyModeEventDRS exited standby modeinfoDRS moved {host.name} out of standby modeDRS moved {host.name} out of standby modeDRS moved the host out of standby modeDRS moved {host.name} out of standby modeDrsExitingStandbyModeEventDRS exiting standby modeinfoDRS is moving {host.name} out of standby modeDRS is moving {host.name} out of standby modeDRS is moving the host out of standby modeDRS is moving {host.name} out of standby modeDrsInvocationFailedEventDRS invocation not completederrorDRS invocation not completedDRS invocation not completedDRS invocation not completed <EventLongDescription id="vim.event.DrsInvocationFailedEvent"> <description> A DRS invocation failed to complete successfully. This condition can occur for a variety of reasons, some of which may be transient. </description> <cause> <description>An error was encountered during a DRS invocation</description> <action>Disable and re-enable DRS</action> </cause> </EventLongDescription> DrsRecoveredFromFailureEventDRS has recovered from the failureinfoDRS has recovered from the failureDRS has recovered from the failureDRS has recovered from the failureDrsResourceConfigureFailedEventCannot complete DRS resource configurationerrorUnable to apply DRS resource settings on host. {reason.msg}. This can significantly reduce the effectiveness of DRS.Unable to apply DRS resource settings on host {host.name} in {datacenter.name}. {reason.msg}. This can significantly reduce the effectiveness of DRS. <EventLongDescription id="vim.event.DrsResourceConfigureFailedEvent"> <description> The DRS resource settings could not be successfully applied to a host in the cluster. This condition is typically transient. </description> <cause> <description>DRS resource settings could not be applied to a host.</description> <action>DRS generates resource settings that map the cluster values to the host. However, in this case, the values could not be successfully applied to the host. This is typically a transient error caused by delayed synchronization from DRS to the host. If this condition persists, enable debug logging in vpxa and contact VMware Support. 
</action> </cause> </EventLongDescription> DrsResourceConfigureSyncedEventDRS resource configuration synchronizedinfoResource configuration specification returns to synchronization from previous failureResource configuration specification returns to synchronization from previous failure on host '{host.name}' in {datacenter.name}DrsRuleComplianceEventVM is now compliant with DRS VM-Host affinity rulesinfo{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} is now compliant with DRS VM-Host affinity rulesvirtual machine on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} in {datacenter.name} is now compliant with DRS VM-Host affinity rulesDrsRuleViolationEventVM is violating a DRS VM-Host affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} is violating a DRS VM-Host affinity rulevirtual machine on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host affinity ruleDrsSoftRuleViolationEventThe VM is violating a DRS VM-Host soft affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} is violating a DRS VM-Host soft affinity rulevirtual machine on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host soft affinity ruleDrsVmMigratedEventDRS VM migratedinfoDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name}DRS migrated {vm.name} from {sourceHost.name} to {host.name}DRS migrated {vm.name} from {sourceHost.name}Migrated from {sourceHost.name} to {host.name} by DRSDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmMigratedEvent"> <description> A virtual machine was migrated based on a DRS recommendation. The recommendation might have been made to achieve better load balancing in the cluster or to evacuate a host in the cluster that is being put into Standby or Maintenance Mode. 
</description> <cause> <description>DRS recommended the migration of a virtual machine</description> </cause> </EventLongDescription> DrsVmPoweredOnEventDRS VM powered oninfoDRS powered on {vm.name} on {host.name}DRS powered on {vm.name} on {host.name}DRS powered on {vm.name}DRS powered on the virtual machine on {host.name}DRS powered on {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmPoweredOnEvent"> <description> A virtual machine was powered on by the user and DRS chose a host for the virtual machine based on the current cluster load distribution combined with the virtual machine's resource requirements </description> <cause> <description>DRS chose a host for a virtual machine that was being powered on</description> </cause> </EventLongDescription> DuplicateIpDetectedEventDuplicate IP detectedinfoVirtual machine {macAddress} has a duplicate IP {duplicateIP}Virtual machine {macAddress} on host {host.name} has a duplicate IP {duplicateIP}DvpgImportEventImport Operation eventinfoImport operation with type {importType} was performed on {net.name}Import operation with type {importType} was performed on {net.name}DvpgRestoreEventRestore Operation eventinfoRestore operation was performed on {net.name}Restore operation was performed on {net.name}DvsCreatedEventvSphere Distributed Switch createdinfoA vSphere Distributed Switch {dvs.name} was createdA vSphere Distributed Switch {dvs.name} was created in {datacenter.name}.DvsDestroyedEventvSphere Distributed Switch deletedinfovSphere Distributed Switch {dvs.name} was deleted.vSphere Distributed Switch {dvs.name} in {datacenter.name} was deleted.DvsEventvSphere Distributed Switch eventinfovSphere Distributed Switch eventvSphere Distributed Switch eventDvsHealthStatusChangeEventHealth check status of the switch changed.infoHealth check status changed in vSphere Distributed Switch {dvs.name} on host {host.name}Health check status changed in vSphere Distributed Switch {dvs.name}Health check status was changed in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}DvsHostBackInSyncEventThe vSphere Distributed Switch configuration on the host was synchronized with that of the vCenter Server.infoThe vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.DvsHostJoinedEventHost joined the vSphere Distributed SwitchinfoThe host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name}.The host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostLeftEvent Host left vSphere Distributed SwitchinfoThe host {hostLeft.name} left the vSphere Distributed Switch {dvs.name}.The host {hostLeft.name} left the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostStatusUpdatedHost status changed on the vSphere Distributed SwitchinfoThe host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name}.The host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostWentOutOfSyncEventThe vSphere Distributed Switch configuration on the host differed from that of the vCenter Server.warningThe vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server. 
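The DrsVmMigratedEvent and DrsVmPoweredOnEvent entries above are catalog definitions, not occurrences. To see which virtual machines DRS actually moved or placed, the vCenter event history can be queried through the vSphere API. The following is a minimal pyvmomi sketch; the hostname, credentials and the unverified-TLS context are placeholder assumptions, not values taken from this log.

# Minimal pyvmomi sketch: list recent occurrences of the DRS event types
# defined above. vcenter.example.com and the credentials are assumptions.
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim
import ssl

ctx = ssl._create_unverified_context()  # lab use only; verify certificates in production
si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="secret", sslContext=ctx)
try:
    filter_spec = vim.event.EventFilterSpec(
        eventTypeId=["DrsVmMigratedEvent", "DrsVmPoweredOnEvent"],  # keys from the catalog above
        maxCount=50,
    )
    for event in si.content.eventManager.QueryEvents(filter_spec):
        # fullFormattedMessage is the rendered form of the message templates above
        print(event.createdTime, event.fullFormattedMessage)
finally:
    Disconnect(si)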
<EventLongDescription id="vim.event.DvsHostWentOutOfSyncEvent"> <description> The vSphere Distributed Switch configuration on the host differed from that of the vCenter Server </description> <cause> <description> The host was not connected to the vCenter Server when updates were sent </description> </cause> <cause> <description> vCenter Server failed to push the vSphere Distributed Switch configuration to the host in the past</description> </cause> </EventLongDescription> DvsImportEventImport Operation eventinfoImport operation with type {importType} was performed on {dvs.name}Import operation with type {importType} was performed on {dvs.name}DvsMergedEventvSphere Distributed Switch mergedinfovSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name}.vSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name} in {datacenter.name}.DvsPortBlockedEventdvPort blockedinfoThe dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortConnectedEventdvPort connectedinfoThe dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortCreatedEventdvPort createdinfoNew ports were created in the vSphere Distributed Switch {dvs.name}.New ports were created in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDeletedEventdvPort deletedinfoPorts were deleted in the vSphere Distributed Switch {dvs.name}.Deleted ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDisconnectedEventdvPort disconnectedinfoThe dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortEnteredPassthruEventdvPort in passthrough modeinfoThe dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortExitedPassthruEventdvPort not in passthrough modeinfoThe dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortJoinPortgroupEventA dvPort was moved into the dvPort group.infoThe dvPort {portKey} was moved into the dvPort group {portgroupName}.The dvPort {portKey} was moved into the dvPort group {portgroupName} in {datacenter.name}.DvsPortLeavePortgroupEventA dvPort was moved out of the dvPort group.infoThe dvPort {portKey} was moved out of the dvPort group {portgroupName}.The dvPort {portKey} was moved out of the dvPort group {portgroupName} in {datacenter.name}.DvsPortLinkDownEventdvPort link was downinfoThe dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortLinkUpEventdvPort link was upinfoThe dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortReconfiguredEventdvPort reconfiguredinfoPorts were reconfigured in the vSphere Distributed Switch {dvs.name}.
Ports changed {portKey}.
Changes are {configChanges}Reconfigured ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.
Ports changed {portKey}.
Changes are {configChanges}DvsPortRuntimeChangeEventdvPort runtime information changed.infoThe dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortUnblockedEventdvPort unblockedinfoThe dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortVendorSpecificStateChangeEventdvPort vendor specific state changed.infoThe dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsReconfiguredEventvSphere Distributed Switch reconfiguredinfoThe vSphere Distributed Switch {dvs.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}The vSphere Distributed Switch {dvs.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DvsRenamedEventvSphere Distributed Switch renamedinfoThe vSphere Distributed Switch {oldName} was renamed to {newName}.The vSphere Distributed Switch {oldName} in {datacenter.name} was renamed to {newName}.DvsRestoreEventRestore Operation eventinfoRestore operation was performed on {dvs.name}Restore operation was performed on {dvs.name}DvsUpgradeAvailableEventAn upgrade for the vSphere Distributed Switch is available.infoAn upgrade for vSphere Distributed Switch {dvs.name} is available. An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is available.DvsUpgradeInProgressEventAn upgrade for the vSphere Distributed Switch is in progress.infoAn upgrade for vSphere Distributed Switch {dvs.name} is in progress.An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is in progress.DvsUpgradeRejectedEventCannot complete the upgrade for the vSphere Distributed SwitchinfoAn upgrade for vSphere Distributed Switch {dvs.name} was rejected.Cannot complete an upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name}DvsUpgradedEventThe vSphere Distributed Switch was upgraded.infovSphere Distributed Switch {dvs.name} was upgraded.vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} was upgraded.EnteredMaintenanceModeEventEntered maintenance modeinfoHost {host.name} in {datacenter.name} has entered maintenance modeHost {host.name} in {datacenter.name} has entered maintenance modeEnter maintenance mode completed. All virtual machine operations are disabledHost {host.name} in {datacenter.name} has entered maintenance modeEnteredStandbyModeEventEntered standby modeinfoEntered standby modeThe host {host.name} is in standby modeEnteringMaintenanceModeEventEntering maintenance modeinfoHost {host.name} has started to enter maintenance modeHost {host.name} has started to enter maintenance modeStarted to enter maintenance mode. 
Waiting for virtual machines to shut down, suspend, or migrateHost {host.name} in {datacenter.name} has started to enter maintenance modeEnteringStandbyModeEventEntering standby modeinfoEntering standby modeThe host {host.name} is entering standby modeErrorUpgradeEventUpgrade errorerror{message} <EventLongDescription id="vim.event.ErrorUpgradeEvent"> <description> An error occurred during agent upgrade </description> </EventLongDescription> Event<Event>info<internal>ExitMaintenanceModeEventExit maintenance modeinfoHost {host.name} has exited maintenance modeHost {host.name} has exited maintenance modeExited maintenance modeHost {host.name} in {datacenter.name} has exited maintenance modeExitStandbyModeFailedEventCannot exit standby modeerrorCould not exit standby modeThe host {host.name} could not exit standby modeExitedStandbyModeEventExited standby modeinfoExited standby modeThe host {host.name} is no longer in standby modeExitingStandbyModeEventExiting standby modeinfoExiting standby modeThe host {host.name} is exiting standby modeFailoverLevelRestoredvSphere HA failover resources are sufficientinfoSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name}Sufficient resources are available to satisfy vSphere HA failover levelSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name} in {datacenter.name}GeneralEventGeneral eventinfoGeneral event: {message}GeneralHostErrorEventHost errorerrorError detected on {host.name}: {message}Error detected on {host.name}: {message}{message}Error detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostErrorEvent"> <description> An error occurred on the host </description> <cause> <description> The agent cannot send heartbeats because of a networking related failure on host </description> </cause> <cause> <description> The agent failed to update the configuration file on host </description> </cause> <cause> <description> The agent failed to save the configuration file to disk on host </description> </cause> <cause> <description> The provisioning module failed to load. As a result, all provisioning operations will fail on host. 
</description> </cause> </EventLongDescription> GeneralHostInfoEventHost informationinfoIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostInfoEvent"> <description> A general information event occurred on the host </description> </EventLongDescription> GeneralHostWarningEventHost warningwarningIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostWarningEvent"> <description> A general warning event occurred on the host </description> <cause> <description> Virtual machine creation might fail because the agent was unable to retrieve virtual machine creation options from the host </description> </cause> </EventLongDescription> GeneralUserEventUser eventuserUser logged event: {message} <EventLongDescription id="vim.event.GeneralUserEvent"> <description> A general user event occurred on the host </description> <cause> <description> A user initiated an action on the host </description> </cause> </EventLongDescription> GeneralVmErrorEventVM errorerrorError detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name}: {message}{message} on {host.name}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmErrorEvent"> <description> An error occurred on the virtual machine </description> </EventLongDescription> GeneralVmInfoEventVM informationinfoIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmInfoEvent"> <description> A general information event occurred on the virtual machine </description> </EventLongDescription> GeneralVmWarningEventVM warningwarningIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmWarningEvent"> <description> A general warning event occurred on the virtual machine </description> </EventLongDescription> GhostDvsProxySwitchDetectedEventThe vSphere Distributed Switch corresponding to the proxy switches on the host does not exist in vCenter Server or does not contain this host.infoThe vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host does not exist in vCenter Server or does not contain this host.The vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host {host.name} does not exist in vCenter Server or does not contain this host. 
<EventLongDescription id="vim.event.GhostDvsProxySwitchDetectedEvent"> <description> vCenter Server found a vSphere Distributed Switch proxy switch on the host that does not match any vSphere Distributed Switch in vCenter Server </description> <cause> <description> The vSphere Distributed Switch corresponding to the vSphere Distributed Switch proxy switch on the host was deleted while host was disconnected from the vCenter Server </description> </cause> <cause> <description> The host is no longer a member of the vSphere Distributed Switch that the proxy switch in the host corresponds to </description> </cause> </EventLongDescription> GhostDvsProxySwitchRemovedEventA ghost proxy switch on the host was resolved.infoA ghost proxy switch {switchUuid} on the host was resolved.A ghost proxy switch {switchUuid} on the host {host.name} was resolved.GlobalMessageChangedEventMessage changedinfoThe message changed: from '{prevMessage}' to '{message}'HealthStatusChangedEventStatus changeinfo{componentName} status changed from {oldStatus} to {newStatus}HostAddFailedEventCannot add hosterrorCannot add host {hostname}Cannot add host {hostname}Cannot add host {hostname} to datacenter {datacenter.name} <EventLongDescription id="vim.event.HostAddFailedEvent"> <description> Adding a host failed </description> </EventLongDescription> HostAddedEventHost AddedinfoAdded host {host.name}Added host {host.name}Added host {host.name} to datacenter {datacenter.name}HostAdminDisableEventHost administrator access disabledwarningAdministrator access to the host is disabledAdministrator access to the host {host.name} is disabled <EventLongDescription id="vim.event.HostAdminDisableEvent"> <description> Host permissions have been changed so that only the account used for vCenter Server operations has Administrator permissions </description> <cause> <description> This condition occurs when vCenter Server removes all other Administrator access to the host because the host has been placed in Lockdown Mode. The host can be managed by vCenter Server only and Only vCenter Server can re-enable Administrator access for other accounts. 
</description> </cause> </EventLongDescription> HostAdminEnableEventHost administrator access enabledwarningAdministrator access to the host has been restoredAdministrator access to the host {host.name} has been restored <EventLongDescription id="vim.event.HostAdminEnableEvent"> <description> vCenter Server has restored Administrator permissions for host user accounts whose permissions were disabled by Lockdown Mode </description> <cause> <description> This condition occurs when vCenter Server restores Administrator access to host user accounts that lost their Administrator permissions when the host was placed in Lockdown Mode </description> </cause> </EventLongDescription> HostCnxFailedAccountFailedEventCannot connect host and configure management accounterrorCannot connect {host.name}: cannot configure management accountCannot connect {host.name}: cannot configure management accountCannot connect: cannot configure management accountCannot connect {host.name} in {datacenter.name}: cannot configure management account <EventLongDescription id="vim.event.HostCnxFailedAccountFailedEvent"> <description> Could not connect to the host because setting up a management account failed </description> <cause> <description> The account used by vCenter Server to manage the host could not be configured </description> </cause> </EventLongDescription> HostCnxFailedAlreadyManagedEventCannot connect host - already managederrorCannot connect {host.name}: already managed by {serverName}Cannot connect {host.name}: already managed by {serverName}Cannot connect: already managed by {serverName}Cannot connect {host.name} in {datacenter.name}: already managed by {serverName} <EventLongDescription id="vim.event.HostCnxFailedAlreadyManagedEvent"> <description> Could not connect to the host because it is already being managed by a different vCenter Server instance. 
</description> <cause> <description> The host is already being managed by a different vCenter Server instance </description> <action> Remove the host from the inventory for the other vCenter Server instance </action> <action> Force the addition of the host to the current vCenter Server instance </action> </cause> </EventLongDescription> HostCnxFailedBadCcagentEventCannot connect host - incorrect CcagenterrorCannot connect {host.name} : server agent is not respondingCannot connect {host.name} : server agent is not respondingCannot connect: server agent is not respondingCannot connect host {host.name} in {datacenter.name} : server agent is not responding <EventLongDescription id="vim.event.HostCnxFailedBadCcagentEvent"> <description> Could not connect to the host because the host agent did not respond </description> <cause> <description> No response was received from the host agent </description> <action> Restart the host agent on the ESX/ESXi host </action> </cause> </EventLongDescription> HostCnxFailedBadUsernameEventCannot connect host - incorrect user nameerrorCannot connect {host.name}: incorrect user name or passwordCannot connect {host.name}: incorrect user name or passwordCannot connect: incorrect user name or passwordCannot connect {host.name} in {datacenter.name}: incorrect user name or password <EventLongDescription id="vim.event.HostCnxFailedBadUsernameEvent"> <description> Could not connect to the host due to an invalid username and password combination </description> <cause> <description> Invalid username and password combination </description> <action> Use the correct username and password </action> </cause> </EventLongDescription> HostCnxFailedBadVersionEventCannot connect host - incompatible versionerrorCannot connect {host.name}: incompatible versionCannot connect {host.name}: incompatible versionCannot connect: incompatible versionCannot connect {host.name} in {datacenter.name}: incompatible version <EventLongDescription id="vim.event.HostCnxFailedBadVersionEvent"> <description> Could not connect to the host due to an incompatible vSphere Client version </description> <cause> <description> The version of the vSphere Client is incompatible with the ESX/ESXi host so the connection attempt failed </description> <action> Download and use a compatible vSphere Client version to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedCcagentUpgradeEventCannot connect host - Ccagent upgradeerrorCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect: did not install or upgrade vCenter agent serviceCannot connect host {host.name} in {datacenter.name}. Did not install or upgrade vCenter agent service. 
<EventLongDescription id="vim.event.HostCnxFailedCcagentUpgradeEvent"> <description> Could not connect to the host because a host agent upgrade or installation is in process </description> <cause> <description> The host agent is being upgraded or installed on the host </description> <action> Wait for the host agent upgrade or installation to complete </action> </cause> </EventLongDescription> HostCnxFailedEventCannot connect hosterrorCannot connect host {host.name}: error connecting to hostCannot connect host {host.name}: error connecting to hostCannot connect: error connecting to hostCannot connect {host.name} in {datacenter.name}: error connecting to host <EventLongDescription id="vim.event.HostCnxFailedEvent"> <description> Could not connect to the host due to an unspecified condition </description> <cause> <description> Unknown cause of failure </description> </cause> </EventLongDescription> HostCnxFailedNetworkErrorEventCannot connect host - network errorerrorCannot connect {host.name}: network errorCannot connect {host.name}: network errorCannot connect: network errorCannot connect {host.name} in {datacenter.name}: network error <EventLongDescription id="vim.event.HostCnxFailedNetworkErrorEvent"> <description> Could not connect to the host due to a network error </description> <cause> <description> A Network error occurred while connecting to the host </description> <action> Verify that host networking is configured correctly </action> </cause> </EventLongDescription> HostCnxFailedNoAccessEventCannot connect host - no accesserrorCannot connect {host.name}: account has insufficient privilegesCannot connect {host.name}: account has insufficient privilegesCannot connect: account has insufficient privilegesCannot connect host {host.name} in {datacenter.name}: account has insufficient privileges <EventLongDescription id="vim.event.HostCnxFailedNoAccessEvent"> <description> Could not connect to the host due to insufficient account privileges </description> <cause> <description> The account used to connect to host does not have host access privileges </description> <action> Use an account that has sufficient privileges to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedNoConnectionEventCannot connect host - no connectionerrorCannot connect {host.name}Cannot connect {host.name}Cannot connect to hostCannot connect host {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostCnxFailedNoConnectionEvent"> <description> Could not connect to the host because the host is not in the network </description> <cause> <description> The host that you are attempting to connect to is not present in the network </description> <action> Verify that host networking is configured correctly and the host is connected to the same network as vCenter Server </action> </cause> </EventLongDescription> HostCnxFailedNoLicenseEventCannot connect host - no licenseerrorCannot connect {host.name}: not enough CPU licensesCannot connect {host.name}: not enough CPU licensesCannot connect: not enough CPU licensesCannot connect {host.name} in {datacenter.name}: not enough CPU licenses <EventLongDescription id="vim.event.HostCnxFailedNoLicenseEvent"> <description> Could not connect to the host due to a licensing issue </description> <cause> <description> There are not enough licenses to add the host to the vCenter Server inventory. This event is accompanied by a fault that specifies the missing licenses required to add the host successfully. 
</description> <action> Add the necessary licenses to vCenter Server and try adding the host again </action> </cause> </EventLongDescription> HostCnxFailedNotFoundEventCannot connect host - host not founderrorCannot connect {host.name}: incorrect host nameCannot connect {host.name}: incorrect host nameCannot connect: incorrect host nameCannot connect {host.name} in {datacenter.name}: incorrect host name <EventLongDescription id="vim.event.HostCnxFailedNotFoundEvent"> <description> Could not connect to the host because vCenter Server could not resolve the host name </description> <cause> <description> Unable to resolve the host name of the host </description> <action> Verify that the correct host name has been supplied for the host </action> <action> Configure the host to use a known-good (resolvable) host name </action> <action> Add the host name to the DNS server </action> </cause> </EventLongDescription> HostCnxFailedTimeoutEventCannot connect host - time-outerrorCannot connect {host.name}: time-out waiting for host responseCannot connect {host.name}: time-out waiting for host responseCannot connect: time-out waiting for host responseCannot connect {host.name} in {datacenter.name}: time-out waiting for host response <EventLongDescription id="vim.event.HostCnxFailedTimeoutEvent"> <description> Could not connect to the host because the connection attempt timed out </description> <cause> <description> A timeout occurred while attempting to connect to the host </description> </cause> </EventLongDescription> HostComplianceCheckedEventChecked host for complianceinfoHost {host.name} checked for compliance with profile {profile.name}Host {host.name} checked for compliance with profile {profile.name}Checked host for compliance with profile {profile.name}Host {host.name} checked for compliance. 
<EventLongDescription id="vim.event.HostComplianceCheckedEvent"> <description> The host was checked for compliance with a host profile </description> <cause> <description> The user initiated a compliance check on the host against a host profile </description> </cause> <cause> <description> A scheduled task initiated a compliance check for the host against a host profile </description> </cause> </EventLongDescription> HostCompliantEventHost compliant with profileinfoHost is in compliance with the attached profile.Host {host.name} is in compliance with the attached profileHostConfigAppliedEventHost configuration changes applied to hostinfoHost configuration changes applied to {host.name}Host configuration changes applied to {host.name}Host configuration changes applied.Host configuration changes applied.HostConnectedEventHost connectedinfoConnected to {host.name}Connected to {host.name}Established a connectionConnected to {host.name} in {datacenter.name}HostConnectionLostEventHost connection losterrorHost {host.name} is not respondingHost {host.name} is not respondingHost is not respondingHost {host.name} in {datacenter.name} is not responding <EventLongDescription id="vim.event.HostConnectionLostEvent"> <description> Connection to the host has been lost </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostDasDisabledEventvSphere HA agent disabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is disabledvSphere HA agent on {host.name} is disabledvSphere HA agent on this host is disabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is disabledHostDasDisablingEventDisabling vSphere HAinfovSphere HA is being disabled on {host.name}vSphere HA is being disabled on {host.name}Disabling vSphere HAvSphere HA is being disabled on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}HostDasEnabledEventvSphere HA agent enabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is enabledvSphere HA agent on {host.name} is enabledvSphere HA agent on this host is enabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is enabledHostDasEnablingEventEnabling host vSphere HA agentwarningEnabling vSphere HA agent on {host.name}Enabling vSphere HA agent on {host.name}Enabling vSphere HA agentEnabling vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.HostDasEnablingEvent"> <description> vSphere HA is being enabled on this host. 
</description> </EventLongDescription> HostDasErrorEventvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error {message}: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}HostDasEvent<Host vSphere HA Event>info<internal>HostDasOkEventvSphere HA agent configuredinfovSphere HA agent on host {host.name} is configured correctlyvSphere HA agent on host {host.name} is configured correctlyvSphere HA agent is configured correctlyvSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} is configured correctlyHostDisconnectedEventHost disconnectedinfoDisconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from host. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name} in {datacenter.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}HostEnableAdminFailedEventCannot restore administrator permissions to hosterrorCannot restore some administrator permissions to the hostCannot restore some administrator permissions to the host {host.name}HostEvent<Host Event>info<internal>HostExtraNetworksEventHost has extra vSphere HA networkserrorHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostExtraNetworksEvent"> <description> The host being added to the vSphere HA cluster has more management networks than existing hosts in the cluster. When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, be on the same subnets. This helps facilitate the pairing of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnets of the joining member that are not present on the existing member. </description> <cause> <description> The host has extra networks missing on an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. 
For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for the cluster. </action> </cause> </EventLongDescription> HostGetShortNameFailedEventCannot get short host nameerrorCannot complete command 'hostname -s' or returned incorrect name formatCannot complete command 'hostname -s' on host {host.name} or returned incorrect name format <EventLongDescription id="vim.event.HostGetShortNameFailedEvent"> <description> The hostname -s command has failed on the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostInAuditModeEventHost is in audit mode.infoHost is running in audit mode.Host {host.name} is running in audit mode. The host's configuration will not be persistent across reboots.HostInventoryFullEventHost inventory fullerrorMaximum ({capacity}) number of hosts allowed for this edition of vCenter Server has been reached <EventLongDescription id="vim.event.HostInventoryFullEvent"> <description> The vCenter Server Foundation license key currently allows only three hosts to be added to the inventory. Adding extra hosts results in errors and the logging of this event. </description> <cause> <description>Attempting to add more hosts than the number allowed by the license key assigned to vCenter Server</description> <action>Assign vCenter Server a license key that allows more hosts or has no host limit</action> </cause> </EventLongDescription> HostInventoryUnreadableEventHost Inventory UnreadableinfoThe virtual machine inventory file is damaged or unreadable.The virtual machine inventory file on host {host.name} is damaged or unreadable.HostIpChangedEventHost IP changedinfoIP address changed from {oldIP} to {newIP}IP address of the host {host.name} changed from {oldIP} to {newIP} <EventLongDescription id="vim.event.HostIpChangedEvent"> <description> The IP address of the host was changed </description> <cause> <description> The IP address of the host was changed through vCenter Server </description> </cause> <cause> <description> The IP address of the host was changed through the host </description> </cause> </EventLongDescription> HostIpInconsistentEventHost IP inconsistenterrorConfiguration of host IP address is inconsistent: address resolved to {ipAddress} and {ipAddress2}Configuration of host IP address is inconsistent on host {host.name}: address resolved to {ipAddress} and {ipAddress2}HostIpToShortNameFailedEventHost IP to short name not completederrorCannot resolve IP address to short nameCannot resolve IP address to short name on host {host.name} <EventLongDescription id="vim.event.HostIpToShortNameFailedEvent"> <description> The host's IP address could not be resolved to a short name </description> <cause> <description>The host or DNS records are improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostIsolationIpPingFailedEventvSphere HA isolation address unreachableerrorvSphere HA agent on host {host.name} in cluster {computeResource.name} could not reach isolation address: 
{isolationIp}vSphere HA agent on host {host.name} could not reach isolation address: {isolationIp}vSphere HA agent on this host could not reach isolation address: {isolationIp}vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} could not reach isolation address: {isolationIp} <EventLongDescription id="vim.event.HostIsolationIpPingFailedEvent"> <description> vSphere HA was unable to ping one or more of the isolation IP addresses. The inability to ping the addresses may cause HA to incorrectly declare the host as network isolated. A host is declared as isolated if it cannot ping the configured isolation addresses and the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <cause> <description>Could not ping the isolation address</description> <action>Correct the cause of the failure to ping the address</action> <action> Use advanced options to change the addresses used by vSphere HA for determining if a host is network isolated. By default, the isolation address is the default gateway of the management network. You can override the default using advanced options, or specify additional addresses to use for determining if a host is network isolated. Set the das.useDefaultIsolationAddress advanced option to "false" if you prefer that vSphere HA not use the default gateway as the isolation address. Specify the das.isolationAddress[X] advanced option for each isolation address that you want to specify. The new values take effect when vSphere HA is reconfigured for each host. </action> </cause> </EventLongDescription> HostLicenseExpiredEventHost license expirederrorA host license for {host.name} has expired <EventLongDescription id="vim.event.HostLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of host licenses on the license server and uses this event to notify you of any host licenses that are about to expire </description> <cause> <description>Host licenses on the license server are about to expire</description> <action>Update the license server to get a new version of the host license</action> </cause> </EventLongDescription> HostLocalPortCreatedEventA host local port is created to recover from management network connectivity loss.infoA host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic}.A host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic} on the host {host.name}.HostMissingNetworksEventHost is missing vSphere HA networkserrorHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostMissingNetworksEvent"> <description> The host being added to the vSphere HA cluster has fewer management networks than existing hosts in the cluster. 
When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, have common subnets. This helps facilitate the pairing of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnets of the existing member that are not present on the joining member. </description> <cause> <description> The host does not have networks compatible with an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. After you change the host's network configuration, reconfigure vSphere HA for this host. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for this cluster. </action> </cause> </EventLongDescription> HostMonitoringStateChangedEventvSphere HA host monitoring state changedinfovSphere HA host monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'HostNoAvailableNetworksEventHost has no available networks for vSphere HA communicationerrorHost {host.name} in cluster {computeResource.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}This host currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips} <EventLongDescription id="vim.event.HostNoAvailableNetworksEvent"> <description> The host being added to the vSphere HA cluster has no management networks available for vSphere HA cluster communication. The advanced option das.allowNetwork[X] is set, but no port group names match the advanced option for this host. 
</description> <cause> <description> The host has no port groups that match the names used by the advanced options to control which port groups vSphere HA uses </description> <action> Delete the advanced options das.allowNetwork[X] to allow vSphere HA to select the default management port groups </action> <action> Correct the names of the port groups specified in the advanced options to match those to be used by vSphere HA for this host </action> <action> Specify additional das.allowNetwork[X] advanced options to match the port group names for this host </action> </cause> </EventLongDescription> HostNoHAEnabledPortGroupsEventHost has no port groups enabled for vSphere HAerrorHost {host.name} in cluster {computeResource.name} has no port groups enabled for vSphere HA communication.Host {host.name} has no port groups enabled for vSphere HA communication.This host has no port groups enabled for vSphere HA communication.Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no port groups enabled for vSphere HA communication. <EventLongDescription id="vim.event.HostNoHAEnabledPortGroupsEvent"> <description> vSphere HA has determined that there are no management networks available on the host for vSphere HA inter-agent communication. </description> <cause> <description> The host has no vSphere HA management networks available </description> <action> If this event is observed when the host is being added to a vSphere HA cluster, change the host's network configuration to enable vSphere HA traffic on one or more port groups. By default, vSphere HA will use the Service Console port groups on ESX and ESXi hosts, the port groups with the Management Traffic checkbox selected. If vSphere HA was already configured on the host, it is possible that the host's network settings have changed and invalidated the management network configuration. Review the settings to make sure the port groups configured for management network still exist on the host and for ESXi the Management Traffic option is enabled. Reconfigure vSphere HA on the host after fixing any configuration issues. </action> </cause> </EventLongDescription> HostNoRedundantManagementNetworkEventNo redundant management network for hostwarningHost {host.name} in cluster {computeResource.name} currently has no management network redundancyHost {host.name} currently has no management network redundancyThis host currently has no management network redundancyHost {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no management network redundancy <EventLongDescription id="vim.event.HostNoRedundantManagementNetworkEvent"> <description> vSphere HA has determined that there is only one path for vSphere HA management traffic, resulting in a single point of failure. Best practices require more than one path for vSphere HA to use for heartbeats and cluster communication. A host with a single path is more likely to be declared dead, network partitioned or isolated after a network failure. If declared dead, vSphere HA will not respond if the host subsequently actually fails, while if declared isolated, vSphere HA may apply the isolation response thus impacting the uptime of the virtual machines running on it. 
</description> <cause> <description>There is only one port group available for vSphere HA communication</description> <action>Configure another Service Console port group on the ESX host</action> <action> Configure another port group on the ESXi host by selecting the "Management Traffic" check box </action> <action> Use NIC teaming on the management port group to allow ESX or ESXi to direct management traffic out of more than one physical NIC in case of a path failure </action> <action> If you accept the risk of not having redundancy for vSphere HA communication, you can eliminate the configuration issue by setting the das.ignoreRedundantNetWarning advanced option to "true" </action> </cause> </EventLongDescription> HostNonCompliantEventHost non-compliant with profileerrorHost is not in compliance with the attached profile.Host {host.name} is not in compliance with the attached profile <EventLongDescription id="vim.event.HostNonCompliantEvent"> <description> The host does not comply with the host profile </description> <cause> <description> The host is not in compliance with the attached profile </description> <action> Check the Summary tab for the host in the vSphere Client to determine the possible cause(s) of noncompliance </action> </cause></EventLongDescription> HostNotInClusterEventHost not in clustererrorNot a cluster member in {datacenter.name}Host {host.name} is not a cluster member in {datacenter.name}HostOvercommittedEventHost resource overcommittederrorInsufficient capacity in host {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource configurationInsufficient capacity in host {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.HostOvercommittedEvent"> <description> A host does not have sufficient CPU and/or memory capacity to satisfy its resource configuration. The host has its own admission control, so this condition should never occur. 
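The remediation text above repeatedly points at vSphere HA advanced options (das.allowNetwork[X], das.isolationAddress[X], das.useDefaultIsolationAddress, das.ignoreRedundantNetWarning). As a rough illustration only, such options can be set per cluster through the vSphere API; the sketch below assumes an already connected pyvmomi ServiceInstance si (obtained as in the earlier sketch) and a cluster named "Prod", and the option keys and values shown are examples rather than recommendations.

# Rough pyvmomi sketch: push vSphere HA advanced options to a cluster.
# `si` is an existing ServiceInstance; the cluster name, isolation address
# and option values are illustrative assumptions.
from pyVmomi import vim

def find_cluster(si, name):
    # Walk the inventory with a container view and return the first matching cluster
    view = si.content.viewManager.CreateContainerView(
        si.content.rootFolder, [vim.ClusterComputeResource], True)
    try:
        return next((c for c in view.view if c.name == name), None)
    finally:
        view.DestroyView()

cluster = find_cluster(si, "Prod")
spec = vim.cluster.ConfigSpecEx(
    dasConfig=vim.cluster.DasConfigInfo(
        option=[
            # same keys as the das.isolationAddress[X] / das.ignoreRedundantNetWarning
            # options referenced in the event descriptions above
            vim.option.OptionValue(key="das.isolationaddress0", value="10.0.0.1"),
            vim.option.OptionValue(key="das.ignoreRedundantNetWarning", value="true"),
        ]
    )
)
# modify=True merges the options into the existing cluster configuration
task = cluster.ReconfigureComputeResource_Task(spec, modify=True)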
</description> <cause> <description>A host has insufficient capacity for its resource configuration</description> <action>If you encounter this condition, contact VMware Support </action> </cause> </EventLongDescription> HostPrimaryAgentNotShortNameEventHost primary agent not specified as short nameerrorPrimary agent {primaryAgent} was not specified as a short namePrimary agent {primaryAgent} was not specified as a short name to host {host.name} <EventLongDescription id="vim.event.HostPrimaryAgentNotShortNameEvent"> <description> The primary agent is not specified in short name format </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostProfileAppliedEventHost profile appliedinfoProfile configuration applied to the hostProfile is applied on the host {host.name}HostReconnectionFailedEventCannot reconnect hosterrorCannot reconnect to {host.name}Cannot reconnect to {host.name}Cannot reconnectCannot reconnect to {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostReconnectionFailedEvent"> <description> Could not reestablish a connection to the host </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostRemovedEventHost removedinfoRemoved host {host.name}Removed host {host.name}Removed from inventoryRemoved host {host.name} in {datacenter.name}HostShortNameInconsistentEventHost short name inconsistenterrorHost names {shortName} and {shortName2} both resolved to the same IP address. Check the host's network configuration and DNS entries <EventLongDescription id="vim.event.HostShortNameInconsistentEvent"> <description> The name resolution check on the host returns different names for the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShortNameToIpFailedEventHost short name to IP not completederrorCannot resolve short name {shortName} to IP addressCannot resolve short name {shortName} to IP address on host {host.name} <EventLongDescription id="vim.event.HostShortNameToIpFailedEvent"> <description> The short name of the host can not be resolved to an IP address </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShutdownEventHost shut downinfoShut down of {host.name}: {reason}Shut down of {host.name}: {reason}Shut down of host: {reason}Shut down of {host.name} in {datacenter.name}: {reason}HostSpecificationChangedEventHost specification is changed on vCenterinfoHost specification of host {host.name} is changed on vCenter.Host specification of host {host.name} is changed on vCenter.Host specification is changed.Host specification of host {host.name} is changed on vCenter.HostSpecificationRequireEventPull host specification from host to vCenterinfoPull host specification of host {host.name} to vCenter.Pull host specification of host {host.name} to vCenter.Pull host specification to vCenter.Pull host specification of host {host.name} to vCenter.HostSpecificationUpdateEventHost specification is changed on hostinfoHost specification is changed on host {host.name}.Host specification is changed on host {host.name}.Host 
specification is changed.Host specification is changed on host {host.name}.HostStatusChangedEventHost status changedinfoConfiguration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.HostStatusChangedEvent"> <description> The host status has changed. This status is the status of the root resource pool that encompasses the entire host. A host status change may be accompanied by the removal of a configuration issue if one was previously detected. A host status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The host status changed to yellow</description> <action>Reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The host status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> HostSubSpecificationDeleteEventDelete host sub specification {subSpecName}infoDelete host sub specification {subSpecName} of host {host.name}.Delete host sub specification {subSpecName} of host {host.name}.Delete host sub specification.Delete host sub specification {subSpecName} of host {host.name}.HostSubSpecificationUpdateEventHost sub specification {hostSubSpec.name} is changed on hostinfoHost sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed.Host sub specification {hostSubSpec.name} is changed on host {host.name}.HostSyncFailedEventCannot synchronize hosterrorCannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. 
{reason.msg} <EventLongDescription id="vim.event.HostSyncFailedEvent"> <description> Failed to sync with the vCenter Agent on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> HostUpgradeFailedEventHost upgrade failederrorCannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostUpgradeFailedEvent"> <description> Failed to connect to the host due to an installation or upgrade issue </description> </EventLongDescription> HostUserWorldSwapNotEnabledEventThe userworld swap is not enabled on the hostwarningThe userworld swap is not enabled on the hostThe userworld swap is not enabled on the host {host.name}HostVnicConnectedToCustomizedDVPortEventSome host vNICs were reconfigured to use dvPorts with port level configuration, which might be different from the dvPort group.infoHost vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.Host {host.name} vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.HostWwnChangedEventHost WWN changedwarningWWNs are changedWWNs are changed for {host.name}HostWwnConflictEventHost WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {host.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.HostWwnConflictEvent"> <description> The WWN (World Wide Name) of this host conflicts with the WWN of another host or virtual machine </description> <cause> <description> The WWN of this host conflicts with WWN of another host </description> </cause> <cause> <description> The WWN of this host conflicts with WWN of another virtual machine</description> </cause> </EventLongDescription> IncorrectHostInformationEventIncorrect host informationerrorInformation needed to acquire the correct set of licenses not providedHost {host.name} did not provide the information needed to acquire the correct set of licenses <EventLongDescription id="vim.event.IncorrectHostInformationEvent"> <description> The host did not provide the information needed to acquire the correct set of licenses </description> <cause> <description> The cpuCores, cpuPackages or hostType information on the host is not valid </description> </cause> <cause> <description> The host information is not available because host was added as disconnected </description> </cause> </EventLongDescription> InfoUpgradeEventInformation upgradeinfo{message}InsufficientFailoverResourcesEventvSphere HA failover resources are insufficienterrorInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name}Insufficient resources to satisfy vSphere HA failover levelInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.InsufficientFailoverResourcesEvent"> <description> The cluster does not have enough unreserved capacity to satisfy the level configured for vSphere HA admission control. 
Failovers may still be performed by vSphere HA but will be on a best effort basis. </description> <cause> <description> If the "number of host failures to tolerate" policy is configured and a few virtual machines have a much higher CPU or memory reservation than the other virtual machines, vSphere HA admission control can be excessively conservative to ensure that there are enough unfragmented resources if a host fails. </description> <action> Use similar CPU and memory reservations for all virtual machines in the cluster. If this is not possible, consider using a different vSphere HA admission control policy, such as reserving a percentage of cluster resource for failover. Alternatively, you can use advanced options to specify a cap for the slot size. See the vSphere Availability Guide for details. </action> </cause> <cause> <description> Hosts with vSphere HA agent errors are not good candidates for providing failover capacity in the cluster, and their resources are not considered for vSphere HA admission control purposes. If many hosts have an vSphere HA agent error, vCenter Server generates this event. </description> <action> Check the event log of the hosts to determine the cause of the vSphere HA agent errors. After addressing any configuration issues, reconfigure vSphere HA on the affected hosts or on the cluster. </action> </cause> </EventLongDescription> InvalidEditionEventInvalid editionerrorThe license edition '{feature}' is invalid <EventLongDescription id="vim.event.InvalidEditionEvent"> <description> vCenter Server attempted to acquire an undefined feature from the license server </description> <cause> <description>Any operation that requires a feature license such as vMotion, DRS, vSphere HA might result in this event if that feature is not defined on the license server</description> <action>Verify that the feature in question is present on the license server</action> </cause> </EventLongDescription> EventExLicense downgradewarningLicense downgradeLicense downgradeLicense downgradevim.event.LicenseDowngradedEvent|License downgrade: {licenseKey} removes the following features: {lostFeatures} <EventLongDescription id="vim.event.LicenseDowngradedEvent"> <description> The installed license reduces the set of available features. Some of the features, previously available, will not be accessible with the new license. </description> <cause> <description>The license has been replaced.</description> <action>Revert to the license previously installed if it is not already expired.</action> <action>Contact VMware in order to obtain new license with the required features.</action> </cause> </EventLongDescription> LicenseEvent<License Event>info<internal>LicenseExpiredEventLicense expirederrorLicense {feature.featureName} has expiredLicenseNonComplianceEventInsufficient licenses.errorLicense inventory is not compliant. Licenses are overused <EventLongDescription id="vim.event.LicenseNonComplianceEvent"> <description> vCenter Server does not strictly enforce license usage. Instead, it checks for license overuse periodically. If vCenter Server detects overuse, it logs this event and triggers an alarm. 
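One of the remedies suggested for InsufficientFailoverResourcesEvent is switching to a percentage-based admission control policy. The sketch below shows that change with pyVmomi; it is an illustration under the same placeholder names as the earlier sketches, and the 25% figures are arbitrary examples, so check the class and property names against your pyVmomi version.

# Switch vSphere HA admission control to "percentage of cluster resources reserved".
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()
si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="secret", sslContext=ctx)
content = si.RetrieveContent()

view = content.viewManager.CreateContainerView(content.rootFolder, [vim.ClusterComputeResource], True)
cluster = next(c for c in view.view if c.name == "Cluster01")
view.DestroyView()

policy = vim.cluster.FailoverResourcesAdmissionControlPolicy(
    cpuFailoverResourcesPercent=25, memoryFailoverResourcesPercent=25)
spec = vim.cluster.ConfigSpecEx(
    dasConfig=vim.cluster.DasConfigInfo(admissionControlEnabled=True,
                                        admissionControlPolicy=policy))
task = cluster.ReconfigureComputeResource_Task(spec=spec, modify=True)
print("Admission control update task:", task.info.key)
Disconnect(si)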
</description> <cause> <description>Overuse of licenses</description> <action>Check the license reports through the vSphere Client and reduce the number of entities using the license key or add a new license key with a greater capacity</action> </cause> </EventLongDescription> LicenseRestrictedEventUnable to acquire licenses due to a restriction on the license servererrorUnable to acquire licenses due to a restriction in the option file on the license server. <EventLongDescription id="vim.event.LicenseRestrictedEvent"> <description> vCenter Server logs this event if it is unable to check out a license from the license server due to restrictions in the license file </description> <cause> <description>License file in the license server has restrictions that prevent check out</description> <action>Check the license file and remove any restrictions that you can</action> </cause> </EventLongDescription> LicenseServerAvailableEventLicense server availableinfoLicense server {licenseServer} is availableLicenseServerUnavailableEventLicense server unavailableerrorLicense server {licenseServer} is unavailable <EventLongDescription id="vim.event.LicenseServerUnavailableEvent"> <description> vCenter Server tracks the license server state and logs this event if the license server has stopped responding. </description> <cause> <description>License server is not responding and not available to vCenter Server</description> <action>Verify that the license server is running. If it is, check the connectivity between vCenter Server and the license server.</action> </cause> </EventLongDescription> LocalDatastoreCreatedEventLocal datastore createdinfoCreated local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl})Created local datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}LocalTSMEnabledEventESXi Shell is enabledinfoESXi Shell for the host has been enabledESXi Shell for the host {host.name} has been enabledLockerMisconfiguredEventLocker misconfiguredwarningDatastore {datastore} which is configured to back the locker does not existLockerReconfiguredEventLocker reconfiguredinfoLocker was reconfigured from {oldDatastore} to {newDatastore} datastoreMigrationErrorEventMigration errorerrorUnable to migrate {vm.name} from {host.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate from {host.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine from the source host failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationEvent<Migration Event>info<internal>MigrationHostErrorEventMigration host errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to host {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} to 
{dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine to the destination host or datastore failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationHostWarningEventMigration host warningwarningMigration of {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination host or datastore is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceErrorEventMigration resource errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Cannot migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceErrorEvent"> <description> A virtual machine failed to migrate due to incompatibilities with target resource pool </description> <cause> <description>Migrating a virtual machine to the destination host or datastore is not possible due to incompatibilities with the target resource pool. 
</description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceWarningEventMigration resource warningwarningMigration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination resource pool is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationWarningEventMigration warningwarningMigration of {vm.name} from {host.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration from {host.name}: {fault.msg}Migration of {vm.name} from {host.name} in {datacenter.name}: {fault.msg}MtuMatchEventThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to the physical NIC.infoThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}MtuMismatchEventThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to the physical NIC.errorThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}NASDatastoreCreatedEventNAS datastore createdinfoCreated NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl})Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}NetworkRollbackEventNetwork configuration on the host {host.name} is rolled back as it disconnects the 
host from vCenter server.errorNetwork configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.NoAccessUserEventNo access for usererrorCannot login user {userName}@{ipAddress}: no permission <EventLongDescription id="vim.event.NoAccessUserEvent"> <description> A user could not log in due to insufficient access permission </description> <cause> <description> The user account has insufficient access permission </description> <action> Log in with a user account that has the necessary access permissions or grant additional access permissions to the current user </action> </cause> </EventLongDescription> NoDatastoresConfiguredEventNo datastores configuredinfoNo datastores have been configuredNo datastores have been configured on the host {host.name}NoLicenseEventNo licenseerrorA required license {feature.featureName} is not reserved <EventLongDescription id="vim.event.NoLicenseEvent"> <description> vCenter Server logs this event if it fails to acquire a feature from the license server for an unknown reason. </description> <cause> <description>Acquiring a feature license fails for an unknown reason</description> <action>Verify that the license server has the license for the feature</action> </cause> </EventLongDescription> NoMaintenanceModeDrsRecommendationForVMNo maintenance mode DRS recommendation for the VMinfoUnable to automatically migrate {vm.name}Unable to automatically migrate from {host.name}Unable to automatically migrate {vm.name} from {host.name} <EventLongDescription id="vim.event.NoMaintenanceModeDrsRecommendationForVM"> <description> DRS failed to generate a vMotion recommendation for a virtual machine on a host entering Maintenance Mode. This condition typically occurs because no other host in the DRS cluster is compatible with the virtual machine. Unless you manually migrate or power off this virtual machine, the host will be unable to enter Maintenance Mode. </description> <cause> <description>DRS failed to evacuate a powered on virtual machine</description> <action>Manually migrate the virtual machine to another host in the cluster</action> <action>Power off the virtual machine</action> <action>Bring any hosts in Maintenance Mode out of that mode</action> <action>Cancel the task that is making the host enter Maintenance Mode </action> </cause> </EventLongDescription> NonVIWorkloadDetectedOnDatastoreEventUnmanaged workload detected on SIOC-enabled datastoreinfoAn unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.NotEnoughResourcesToStartVmEventInsufficient resources for vSphere HA to start the VM. Reason: {reason.@enum.fdm.placementFault}warningInsufficient resources to fail over {vm.name} in {computeResource.name}. vSphere HA will retry the fail over when enough resources are available. 
Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over this virtual machine. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name} in {computeResource.name} that resides in {datacenter.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault} <EventLongDescription id="vim.event.NotEnoughResourcesToStartVmEvent"> <description> This event is issued by vSphere HA when the master agent was not able to fail over a virtual machine to one of its compatible hosts. This condition is due to one or more of the causes listed below. When this condition occurs, vSphere HA will report a cause for it in the event summary, but note that additional causes might exist. It is more likely to occur if vSphere HA admission control is disabled or more hosts fail than were provisioned for. When a virtual machine cannot be placed, vSphere HA will retry placing it when the cluster state changes. Also, if vSphere DRS is enabled, it will be invoked to try to defragment the cluster or bring hosts out of Standby Mode. </description> <cause> <description> A virtual machine has bandwidth reservations for CPU, memory, vFlash cache, or virtual NICs. There was no compatible host that had enough resources to satisfy the reservations. </description> <action>Decrease the virtual machine resource reservation</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition if one exists</action> <action>If DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> <cause> <description> The cluster has vSAN enabled, and one or more hosts that contribute storage to the cluster is inaccessible, preventing vSphere HA from powering on the virtual machine. This applies to virtual machines that have one or more files on a vSAN datastore. </description> <action>Bring online any failed hosts or resolve a network partition if one exists that involves hosts that contribute storage to the vSAN cluster</action> </cause> <cause> <description>One or more datastores that are associated with a virtual machine are inaccessible by any compatible host in the cluster.</description> <action>Bring online any non-responding host that mounts the virtual machine datastores</action> <action>Fix the all-paths-down (APD) or permanent-device-loss (PDL) issues.</action> </cause> <cause> <description>vSphere HA is enforcing virtual machine to virtual machine anti-affinity rules, and the rule cannot be satisfied. </description> <action>Add more hosts to cluster</action> <action>Bring online any non-responding host or resolve a network partition if one exists</action> <action>Remove any anti-affinity rules that are restricting the placement</action> </cause> <cause> <description>The number of VMs that can run on each host is limited. 
There is no host that can power on the VM without exceeding the limit.</description> <action>Increase the limit if you have set the limitVmsPerESXHost HA advanced option.</action> <action>Bring online any non-responding host or add new hosts to the cluster</action> </cause> </EventLongDescription> OutOfSyncDvsHostThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.warningThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.The vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.PermissionAddedEventPermission addedinfoPermission created for {principal} on {entity.name}, role is {role.name}, propagation is {propagate.@enum.auth.Permission.propagate}PermissionEvent<Permission Event>info<internal>PermissionRemovedEventPermission removedinfoPermission rule removed for {principal} on {entity.name}PermissionUpdatedEventPermission updatedinfoPermission changed for '{principal}' on '{entity.name}'.
Role changed from '{prevRole.name}' to role '{role.name}'. Propagate changed from '{prevPropagate.@enum.auth.Permission.propagate}' to '{propagate.@enum.auth.Permission.propagate}'.ProfileAssociatedEventProfile attached to hostinfoProfile {profile.name} has been attached.Profile {profile.name} has been attached.Profile {profile.name} has been attached with the host.Profile {profile.name} attached.ProfileChangedEventProfile was changedinfoProfile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.ProfileCreatedEventProfile createdinfoProfile is created.ProfileDissociatedEventProfile detached from hostinfoProfile {profile.name} has been detached.Profile {profile.name} has been detached. Profile {profile.name} has been detached from the host.Profile {profile.name} detached.ProfileEventinfo<internal>ProfileReferenceHostChangedEventThe profile reference host was changedinfoProfile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.ProfileRemovedEventProfile removedinfoProfile {profile.name} was removed.Profile {profile.name} was removed.Profile was removed.RecoveryEventRecovery completed on the host.infoThe host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the management virtual NIC {vnic} by connecting to a new port {portKey} on the vSphere Distributed Switch {dvsUuid}.RemoteTSMEnabledEventSSH is enabledinfoSSH for the host has been enabledSSH for the host {host.name} has been enabledResourcePoolCreatedEventResource pool createdinfoCreated resource pool {resourcePool.name} in compute-resource {computeResource.name}Created resource pool {resourcePool.name}Created resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}ResourcePoolDestroyedEventResource pool deletedinfoRemoved resource pool {resourcePool.name} on {computeResource.name}Removed resource pool {resourcePool.name}Removed resource pool {resourcePool.name} on {computeResource.name} in {datacenter.name}ResourcePoolEvent<Resource Pool Event>info<internal>ResourcePoolMovedEventResource pool movedinfoMoved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name} in {datacenter.name}ResourcePoolReconfiguredEventResource pool reconfiguredinfoUpdated configuration for {resourcePool.name} in compute-resource {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration on {resourcePool.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration for {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ResourcePoolReconfiguredEvent"> <description> The resource pool configuration changed. The resource pool configuration includes information about the resource reservations of the resource pool and the resource reservations of its children. </description> </EventLongDescription> ResourceViolatedEventResource usage exceeds configurationerrorResource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name}'Resource usage exceeds configuration on resource pool {resourcePool.name}Resource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.ResourceViolatedEvent"> <description> The cumulative CPU and/or memory resource consumption of all virtual machines in the resource pool exceeds the resource pool configuration </description> <cause> <description>You attempted to move a virtual machine from one resource pool into another bypassing vCenter Server. This condition occurs when you attempt the move using the vSphere Client directly connected to the host. </description> <action>In a DRS cluster, do not move and power on a virtual machine bypassing vCenter Server</action> </cause> </EventLongDescription> RoleAddedEventRole addedinfoNew role {role.name} createdRoleEvent<Role Event>info<internal>RoleRemovedEventRole removedinfoRole {role.name} removedRoleUpdatedEventRole updatedinfoRole modified.
Previous name: {prevRoleName}, new name: {role.name}.
Added privileges: {privilegesAdded}.
Removed privileges: {privilegesRemoved}.RollbackEventHost Network operation rolled backinfoThe Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.The operation {methodName} on the host {hostName} disconnected the host and was rolled back .The Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.ScheduledTaskCompletedEventScheduled task completedinfoTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} completed successfullyTask {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} completed successfullyScheduledTaskCreatedEventScheduled task createdinfoCreated task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name}Created task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskEmailCompletedEventSent scheduled task emailinfoTask {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} sent email to {to}ScheduledTaskEmailFailedEventScheduled task email not senterrorTask {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot send email to {to}: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskEmailFailedEvent"> <description> An error occurred while sending email notification that a scheduled task is running </description> <cause> <description>Failed to send email for the scheduled task</description> <action>Check the vCenter Server SMTP settings for sending emails</action> </cause> </EventLongDescription> ScheduledTaskEvent<Scheduled Task Event>info<internal>ScheduledTaskFailedEventCannot complete scheduled taskerrorTask {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot be completed: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskFailedEvent"> <description> An error occurred while running a scheduled task </description> <cause> <description>Failed to run a scheduled task</description> <action>Correct the failure condition</action> </cause> </EventLongDescription> ScheduledTaskReconfiguredEventScheduled task reconfiguredinfoReconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name}Reconfigured task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.ScheduledTaskRemovedEventScheduled task removedinfoRemoved task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name}Removed task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskStartedEventScheduled task startedinfoRunning task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name}Running task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ServerLicenseExpiredEventServer license expirederrorA vCenter Server license has expiredServerStartedSessionEventServer started sessioninfovCenter startedSessionEvent<Session Event>info<internal>SessionTerminatedEventSession stoppedinfoA session for user '{terminatedUsername}' has stopped <EventLongDescription id="vim.event.SessionTerminatedEvent"> <description> A session has been terminated </description> </EventLongDescription> ExtendedEventThe time-limited license on the host has expired.warningThe time-limited license on host {host.name} has expired.The time-limited license on host {host.name} has expired.The time-limited license on the host has expired.vim.event.SubscriptionLicenseExpiredEvent|The time-limited license on host {host.name} has expired. To comply with the EULA, renew the license at http://my.vmware.comTaskEventTask eventinfoTask: {info.descriptionId}TaskTimeoutEventTask time-outinfoTask: {info.descriptionId} time-out <EventLongDescription id="vim.event.TaskTimeoutEvent"> <description> A task has been cleaned up because it timed out </description> </EventLongDescription> TeamingMatchEventTeaming configuration in the vSphere Distributed Switch matches the physical switch configurationinfoTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration in {datacenter.name}. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TeamingMisMatchEventTeaming configuration in the vSphere Distributed Switch does not match the physical switch configurationerrorTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration in {datacenter.name}. 
Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TemplateBeingUpgradedEventUpgrading templateinfoUpgrading template {legacyTemplate}TemplateUpgradeEvent<Template Upgrade Event>info<internal>TemplateUpgradeFailedEventCannot upgrade templateinfoCannot upgrade template {legacyTemplate} due to: {reason.msg}TemplateUpgradedEventTemplate upgradedinfoTemplate {legacyTemplate} upgrade completedTimedOutHostOperationEventHost operation timed outwarningThe operation performed on host {host.name} timed outThe operation performed on host {host.name} timed outThe operation timed outThe operation performed on {host.name} in {datacenter.name} timed out <EventLongDescription id="vim.event.TimedOutHostOperationEvent"> <description> An operation performed on the host has timed out </description> <cause> <description> A previous event in the sequence of events will provide information on the reason for the timeout </description> </cause> </EventLongDescription> UnlicensedVirtualMachinesEventUnlicensed virtual machinesinfoThere are {unlicensed} unlicensed virtual machines on host {host} - there are only {available} licenses availableUnlicensedVirtualMachinesFoundEventUnlicensed virtual machines foundinfo{unlicensed} unlicensed virtual machines found on host {host}UpdatedAgentBeingRestartedEventRestarting updated agentinfoThe agent is updated and will soon restartThe agent on host {host.name} is updated and will soon restartUpgradeEvent<Upgrade Event>info<internal>UplinkPortMtuNotSupportEventNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.errorNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortMtuSupportEventAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.infoAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.All VLAN MTU setting on the external physical switch allows the vSphere Distributed Switch max MTU size packets passing on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}All VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanTrunkedEventThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch.infoThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.The 
configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.The configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanUntrunkedEventNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch.errorNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UserAssignedToGroupUser assigned to groupinfoUser {userLogin} was added to group {group}UserLoginSessionEventUser logininfoUser {userName}@{ipAddress} logged in as {userAgent}UserLogoutSessionEventUser logoutinfoUser {userName}@{ipAddress} logged out (login time: {loginTime}, number of API invocations: {callCount}, user agent: {userAgent})UserPasswordChangedUser password changedinfoPassword was changed for account {userLogin}Password was changed for account {userLogin} on host {host.name}UserUnassignedFromGroupUser removed from groupinfoUser {userLogin} removed from group {group}UserUpgradeEventUser upgradeuser{message} <EventLongDescription id="vim.event.UserUpgradeEvent"> <description> A general user event occurred due to an upgrade </description> </EventLongDescription> VMFSDatastoreCreatedEventVMFS datastore createdinfoCreated VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl})Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}VMFSDatastoreExpandedEventVMFS datastore expandedinfoExpanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name}Expanded VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExpandedEvent"> <description> An existing extent in a VMFS volume was grown to increase its capacity </description> <cause> <description> A user or system action caused an extent of an existing VMFS datastore to be grown. Only extents with free space immediately after them are expandable. As a result, the action filled the available adjacent capacity on the LUN. 
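The MTU, VLAN trunking and teaming events above are raised by the vSphere Distributed Switch health check, which has to be enabled before the {healthResult.*} fields are populated. A hedged pyVmomi sketch follows; the switch name "DSwitch01" and connection details are placeholders, and the one-minute interval is only an example.

# Enable the VLAN/MTU and teaming health checks on a vSphere Distributed Switch.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()
si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="secret", sslContext=ctx)
content = si.RetrieveContent()

view = content.viewManager.CreateContainerView(content.rootFolder,
                                               [vim.dvs.VmwareDistributedVirtualSwitch], True)
dvs = next(d for d in view.view if d.name == "DSwitch01")
view.DestroyView()

checks = [
    vim.dvs.VmwareDistributedVirtualSwitch.VlanMtuHealthCheckConfig(enable=True, interval=1),
    vim.dvs.VmwareDistributedVirtualSwitch.TeamingHealthCheckConfig(enable=True, interval=1),
]
task = dvs.UpdateDVSHealthCheckConfig_Task(healthCheckConfig=checks)
print("Health check config task:", task.info.key)
Disconnect(si)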
</description> </cause> </EventLongDescription> VMFSDatastoreExtendedEventVMFS datastore extendedinfoExtended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name}Extended VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExtendedEvent"> <description> An existing VMFS volume was extended to increase its capacity </description> <cause> <description> A user or system action caused the datastore to be extended with a partition on a LUN to increase its capacity. </description> </cause> </EventLongDescription> VMotionLicenseExpiredEventvMotion license expirederrorA vMotion license for {host.name} has expired <EventLongDescription id="vim.event.VMotionLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of vMotion licenses on the license server and uses this event to notify you of any vMotion licenses that are about to expire </description> <cause> <description>vMotion licenses on the license server are about to expire</description> <action>Update the license server to get a fresher version of the vMotion license</action> </cause> </EventLongDescription> VcAgentUninstallFailedEventCannot uninstall vCenter agenterrorCannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name} in {datacenter.name}. {reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUninstallFailedEvent"> <description> An attempt to uninstall the vCenter Agent failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUninstalledEventvCenter agent uninstalledinfovCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalledvCenter agent has been uninstalled from {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUninstalledEvent"> <description> The vCenter Agent has been uninstalled from host </description> </EventLongDescription> VcAgentUpgradeFailedEventCannot complete vCenter agent upgradeerrorCannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name} in {datacenter.name}. 
{reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUpgradeFailedEvent"> <description> A vCenter Agent upgrade attempt failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUpgradedEventvCenter agent upgradedinfovCenter agent has been upgraded on {host.name}vCenter agent has been upgraded on {host.name}vCenter agent has been upgradedvCenter agent has been upgraded on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUpgradedEvent"> <description> The vCenter Agent has been upgraded on the host </description> </EventLongDescription> VimAccountPasswordChangedEventVIM account password changedinfoVIM account password changedVIM account password was changed on host {host.name} <EventLongDescription id="vim.event.VimAccountPasswordChangedEvent"> <description> The password for the Vim account user on the host has been changed. This account is created by vCenter Server and used to manage the host. </description> <cause> <description> vCenter Server periodically changes the password of the Vim account that it uses to manage the host </description> </cause> </EventLongDescription> VmAcquiredMksTicketEventVM acquired MKS ticketinfoRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} has been openedRemote console has been opened for this virtual machine on {host.name}Remote console to {vm.name} on {host.name} in {datacenter.name} has been opened <EventLongDescription id="vim.event.VmAcquiredMksTicketEvent"> <description> Successfully acquired MKS Ticket for the virtual machine </description> <cause> <description> The MKS Ticket used to connect to the virtual machine remote console has been successfully acquired. </description> </cause> </EventLongDescription> VmAcquiredTicketEventVM acquired ticketinfoA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket of type {ticketType.@enum.VirtualMachine.TicketType} has been acquired.A ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} on {host.name} in {datacenter.name} has been acquiredVmAutoRenameEventVM auto renameinfoInvalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name}. Renamed from {oldName} to {newName}Conflicting or invalid virtual machine name detected. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name} in {datacenter.name}. Renamed from {oldName} to {newName} <EventLongDescription id="vim.event.VmAutoRenameEvent"> <description> The virtual machine was renamed because of possible name conflicts with another virtual machine </description> <cause> <description>The virtual machine might have been added to the vCenter Server inventory while scanning the datastores of hosts added to the inventory. During such an action, the newly-added virtual machine's name might have been found to be in conflict with a virtual machine name already in the inventory. To resolve this, vCenter Server renames the newly-added virtual machine. 
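VmAcquiredTicketEvent and VmAcquiredMksTicketEvent above are logged when a console ticket is requested for a virtual machine. A minimal pyVmomi sketch of such a request is shown below; the VM name "vm01" and connection details are placeholders.

# Request a browser-based MKS console ticket for a VM; vCenter records the
# corresponding VmAcquiredTicketEvent / VmAcquiredMksTicketEvent.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()
si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="secret", sslContext=ctx)
content = si.RetrieveContent()

view = content.viewManager.CreateContainerView(content.rootFolder, [vim.VirtualMachine], True)
vm = next(v for v in view.view if v.name == "vm01")
view.DestroyView()

ticket = vm.AcquireTicket(ticketType="webmks")
print("wss://%s:%s/ticket/%s" % (ticket.host, ticket.port, ticket.ticket))
Disconnect(si)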
</description> </cause> </EventLongDescription> VmBeingClonedEventVM being clonedinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}VmBeingClonedNoFolderEventVM being cloned to a vAppinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on host {host.name}, {ds.name} in {datacenter.name} to {destName} on host {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}VmBeingCreatedEventCreating VMinfoCreating {vm.name} on {host.name}, {ds.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {ds.name} in {datacenter.name}Creating VM on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmBeingDeployedEventDeploying VMinfoDeploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} from template {srcTemplate.name}Deploying VM on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} in {datacenter.name} from template {srcTemplate.name} <EventLongDescription id="vim.event.VmBeingDeployedEvent"> <description> A virtual machine is being created from a template </description> <cause> <description> A user action prompted a virtual machine to be created from this template. 
</description> </cause> </EventLongDescription> VmBeingHotMigratedEventVM is hot migratinginfoMigrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingHotMigratedEvent"> <description> A powered-on virtual machine is being migrated with vMotion </description> <cause> <description> A user action might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> <cause> <description> A DRS recommendation might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> </EventLongDescription> VmBeingMigratedEventVM migratinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingMigratedEvent"> <description> Changing the host on which the virtual machine is executing </description> <cause> <description> A user action caused the virtual machine to be migrated to a different host </description> </cause> </EventLongDescription> VmBeingRelocatedEventVM relocatinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingRelocatedEvent"> <description> The virtual machine execution and/or storage is being relocated </description> <cause> <description> A user action might have caused the virtual machine's execution and/or storage to be changed </description> </cause> </EventLongDescription> VmCloneEvent<VM Clone Event>info<internal><internal><internal><internal><internal>VmCloneFailedEventCannot complete VM cloneerrorFailed to clone {vm.name} on {host.name}, {ds.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in 
{destFolder.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmCloneFailedEvent"> <description> Cloning a virtual machine failed </description> <cause> <description> An error prevented the virtual machine from being cloned </description> </cause> </EventLongDescription> VmClonedEventVM clonedinfo{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {ds.name} in {datacenter.name}{sourceVm.name} cloned to {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmConfigMissingEventVM configuration missinginfoConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} cannot be foundConfiguration file cannot be foundConfiguration file for {vm.name} on {host.name} in {datacenter.name} cannot be found <EventLongDescription id="vim.event.VmConfigMissingEvent"> <description> One or more configuration files for the virtual machine cannot be found </description> <cause> <description> The datastore on which this virtual machine resides may be inaccessible </description> <action> Check the connectivity of the datastore on which this virtual machine resides. If the datastore has a backing LUN, check to see if there are any transient disk failures. </action> </cause> </EventLongDescription> VmConnectedEventVM connectedinfoHost is connectedVirtual machine {vm.name} is connected <EventLongDescription id="vim.event.VmConnectedEvent"> <description> The virtual machine is in a connected state in the inventory and vCenter Server can access it </description> <cause> <description> A user or system action that resulted in operations such as creating, registering, cloning or deploying a virtual machine gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> A user or system action that resulted in operations such as adding or reconnecting a host gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> The state of the virtual machine's host changed from Not Responding to Connected and the host gave vCenter Server access to the virtual machine </description> </cause> </EventLongDescription> VmCreatedEventVM createdinfoNew virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {ds.name} in {datacenter.name}Virtual machine created on {host.name}, {ds.name} in {datacenter.name}Created virtual machine {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmDasBeingResetEventvSphere HA is resetting VMinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. 
Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode} <EventLongDescription id="vim.event.VmDasBeingResetEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA has been configured, the virtual machine might be reset because the VMware Tools heartbeat or application heartbeat status turned red. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasBeingResetWithScreenshotEventvSphere HA enabled VM reset with screenshotinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}{vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}. <EventLongDescription id="vim.event.VmDasBeingResetWithScreenshotEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA is configured, this condition can occur because the VMware Tools heartbeat or the application heartbeat status turned red. The event contains the location of the screenshot taken of the guest console before it was reset. You can use this information to determine the cause of the heartbeat failure. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> Check the screenshot image to see if the cause was a guest operating system failure. If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. 
You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasResetFailedEventvSphere HA cannot reset VMwarningvSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA cannot reset {vm.name} on {host.name}vSphere HA cannot reset {vm.name}vSphere HA cannot reset this virtual machinevSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmDasResetFailedEvent"> <description> vSphere HA attempted to reset the virtual machine because of a heartbeat failure from VMware Tools or a guest application, depending on how vSphere HA was configured. However, the reset operation failed. </description> <cause> <description> The most likely reason for the reset failure is that the virtual machine was running another task at the time the reset was initiated. </description> <action>Check to see whether the virtual machine requires attention and reset it manually if necessary.</action> </cause> </EventLongDescription> VmDasUpdateErrorEventVM vSphere HA update errorerrorUnable to update vSphere HA agents given the state of {vm.name}VmDasUpdateOkEventCompleted VM DAS updateinfovSphere HA agents have been updated with the current state of the virtual machineVmDateRolledBackEventVM date rolled backerrorDisconnecting all hosts as the date of virtual machine {vm.name} has been rolled backVmDeployFailedEventCannot deploy VM from templateerrorFailed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmDeployFailedEvent"> <description> Failed to deploy a virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine failed to deploy. This condition can occur if there is not enough disk space, the host or virtual machine loses its network connection, the host is disconnected, and so on. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. 
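The VM Monitoring guidance above (lowering the "monitoring sensitivity" so vSphere HA waits longer after heartbeats are lost before resetting a VM) can also be applied programmatically. A hedged pyVmomi sketch; the vCenter address, credentials and cluster name are placeholders, and the interval values are illustrative rather than recommendations:

    from pyVim.connect import SmartConnect, Disconnect
    from pyVmomi import vim

    si = SmartConnect(host='vcenter.example.local', user='user', pwd='secret')
    try:
        content = si.RetrieveContent()
        view = content.viewManager.CreateContainerView(
            content.rootFolder, [vim.ClusterComputeResource], True)
        cluster = next(c for c in view.view if c.name == 'MyCluster')  # placeholder name
        view.Destroy()

        # Wait longer after VMware Tools heartbeats stop before vSphere HA resets the VM.
        tools = vim.cluster.VmToolsMonitoringSettings(
            enabled=True,
            failureInterval=120,      # seconds without heartbeats before action
            maxFailures=3,
            maxFailureWindow=86400)
        spec = vim.cluster.ConfigSpecEx(
            dasConfig=vim.cluster.DasConfigInfo(
                defaultVmSettings=vim.cluster.DasVmSettings(
                    vmToolsMonitoringSettings=tools)))
        cluster.ReconfigureComputeResource_Task(spec, modify=True)
    finally:
        Disconnect(si)
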
</action> </cause> </EventLongDescription> VmDeployedEventVM deployedinfoTemplate {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name} <EventLongDescription id="vim.event.VmDeployedEvent"> <description> A virtual machine has been created from the specified template </description> <cause> <description> A user action caused a virtual machine to be created from the template </description> </cause> <cause> <description> A scheduled task caused a virtual machine to be created from the template </description> </cause> </EventLongDescription> VmDisconnectedEventVM disconnectedinfo{vm.name} on host {host.name} is disconnected{vm.name} on host {host.name} is disconnected{vm.name} is disconnected{host.name} is disconnected{vm.name} on host {host.name} in {datacenter.name} is disconnectedVmDiscoveredEventVM discoveredinfoDiscovered {vm.name} on {host.name}Discovered {vm.name} on {host.name}Discovered {vm.name}Discovered on {host.name}Discovered {vm.name} on {host.name} in {datacenter.name}VmDiskFailedEventCannot create VM diskerrorCannot create virtual disk {disk} <EventLongDescription id="vim.event.VmDiskFailedEvent"> <description> Failed to create a virtual disk for the virtual machine for reasons described in the event message </description> <cause> <description> A virtual disk was not created for the virtual machine. This condition can occur if the operation failed to access the disk, the disk did not have enough space, you do not have permission for the operation, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that disk is accessible, has enough space, and that the permission settings allow the operation. </action> </cause> </EventLongDescription> VmEmigratingEventVM emigratinginfoMigrating {vm.name} off host {host.name}Migrating {vm.name} off host {host.name}Migrating {vm.name} off hostMigrating off host {host.name}Migrating {vm.name} off host {host.name} in {datacenter.name}VmEndRecordingEventEnd a recording sessioninfoEnd a recording sessionEnd a recording session on {vm.name}VmEndReplayingEventEnd a replay sessioninfoEnd a replay sessionEnd a replay session on {vm.name}VmEvent<VM Event>info<internal>VmFailedMigrateEventCannot migrate VMerrorCannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} to {destHost.name}, {destDatastore.name}Cannot migrate to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmFailedMigrateEvent"> <description> Failed to migrate the virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine did not migrate. This condition can occur if vMotion IPs are not configured, the source and destination hosts are not accessible, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that the vMotion IPs are configured on source and destination hosts, the hosts are accessible, and so on. 
</action> </cause> </EventLongDescription> VmFailedRelayoutEventCannot complete VM relayout.errorCannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name}: {reason.msg}Cannot complete relayout for this virtual machine on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedRelayoutEvent"> <description> Failed to lay out a virtual machine </description> <cause> <description> An attempt to lay out a virtual machine on disk failed for reasons described in the event message. This condition can occur for any of several reasons, for example inability to access the disk. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. </action> </cause> </EventLongDescription> VmFailedRelayoutOnVmfs2DatastoreEventCannot complete VM relayout on Vmfs2 datastoreerrorCannot complete relayout due to disks on a VMFS2 volumeCannot complete relayout for virtual machine {vm.name} which has disks on a VMFS2 volume. <EventLongDescription id="vim.event.VmFailedRelayoutOnVmfs2DatastoreEvent"> <description> Failed to migrate a virtual machine on VMFS2 datastore </description> <cause> <description> An attempt to migrate a virtual machine failed because the virtual machine still has disk(s) on a VMFS2 datastore. VMFS2 datastores are read-only for ESX 3.0 and later hosts. </description> <action> Upgrade the datastore(s) from VMFS2 to VMFS3 </action> </cause> </EventLongDescription> VmFailedStartingSecondaryEventvCenter cannot start the Fault Tolerance secondary VMerrorvCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason} <EventLongDescription id="vim.event.VmFailedStartingSecondaryEvent"> <description> vCenter Server could not start the Secondary VM because of an error </description> <cause> <description> The remote host is incompatible for Secondary VM. For instance, this condition can occur when the host does not have access to the virtual machine's network or datastore. </description> <action>Ensure that the hosts in the cluster are compatible for FT</action> </cause> <cause> <description>Login to a remote host failed. If the host has been newly added to the inventory or just rebooted, it might take some time for SSL thumbprints to be propagated to the hosts. 
</description> <action>If the problem persists, disconnect and re-connect the host.</action> </cause> <cause> <description>Registration of the Secondary VM on the remote host failed</description> <action>Determine whether the remote host has access to the datastore that the FT virtual machine resides on</action> </cause> <cause> <description>An error occurred while starting the Secondary VM</description> <action>Determine the cause of the migration error. vCenter Server will try to restart the Secondary VM if it can.</action> </cause> </EventLongDescription> VmFailedToPowerOffEventCannot power off the VM.errorCannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name}. {reason.msg}Cannot power off: {reason.msg}Cannot power off {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOffEvent"> <description>The virtual machine failed to power off</description> <cause> <description> The virtual machine might be performing concurrent operations </description> <action>Complete the concurrent operations and retry the power-off operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility. </description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToPowerOnEventCannot power on the VM.errorCannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOnEvent"> <description> The virtual machine failed to power on </description> <cause> <description> Virtual machine power-on attempts can fail because the virtual machine is already in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the power-on failure and fix the problem. </action> </cause> </EventLongDescription> VmFailedToRebootGuestEventVM cannot reboot the guest OS.errorCannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot the guest OS for {vm.name} on {host.name} in {datacenter.name}. {reason.msg} <EventLongDescription id="vim.event.VmFailedToRebootGuestEvent"> <description> The guest operating system on the virtual machine failed to reboot. </description> <cause> <description> Guest operating system reboot failures can occur because the virtual machine is not in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the reboot failure and fix the problem. 
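Several entries above (failed power-off, power-on and guest reboot) direct the reader to the {reason.msg} carried by the event. A hedged pyVmomi sketch that pulls recent error-severity events for one VM so those messages can be inspected; the vCenter address, credentials and VM name are placeholders:

    from pyVim.connect import SmartConnect, Disconnect
    from pyVmomi import vim

    si = SmartConnect(host='vcenter.example.local', user='user', pwd='secret')
    try:
        content = si.RetrieveContent()
        view = content.viewManager.CreateContainerView(
            content.rootFolder, [vim.VirtualMachine], True)
        vm = next(v for v in view.view if v.name == 'my-vm')  # placeholder name
        view.Destroy()

        spec = vim.event.EventFilterSpec(
            entity=vim.event.EventFilterSpec.ByEntity(entity=vm, recursion='self'),
            category=['error'])
        for ev in content.eventManager.QueryEvents(spec):
            print(ev.createdTime, type(ev).__name__, ev.fullFormattedMessage)
    finally:
        Disconnect(si)
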
</action> </cause> </EventLongDescription> VmFailedToResetEventCannot reset VMerrorCannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name}: {reason.msg}Cannot suspend: {reason.msg}Cannot suspend {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToResetEvent"> <description>The virtual machine failed to reset</description> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere client and respond to the question or prompt </action> </cause> <cause> <description>There might not be enough available licenses to perform this operation.</description> <action> Obtain the required licenses and retry the reset operation </action> </cause> <cause> <description> Concurrent operations might be executing on the virtual machine </description> <action>Complete the concurrent operations and retry the reset operation</action> </cause> <cause> <description> The host on which the virtual machine is running is entering maintenance mode </description> <action> Wait until the host exits maintenance mode and retry the operation </action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToShutdownGuestEventCannot shut down the guest OSerrorCannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}{vm.name} cannot shut down the guest OS on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToShutdownGuestEvent"> <description> Guest operating system shutdown failed for the virtual machine </description> <cause> <description> Guest operating system shutdown can fail if VMware Tools is not installed in the virtual machine. </description> <action>Install VMware Tools.</action> </cause> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere Client and respond to the question or prompt </action> </cause> <cause> <description> Concurrent operations might be running on the virtual machine </description> <action>Complete the concurrent operations and retry the shutdown operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToStandbyGuestEventVM cannot standby the guest OSerrorCannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. 
{reason.msg}{vm.name} cannot standby the guest OS on {host.name} in {datacenter.name}: {reason.msg}VmFailedToSuspendEventCannot suspend VMerrorCannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name}: {reason.msg}Cannot suspend: {reason.msg}Cannot suspend {vm.name} on {host.name} in {datacenter.name}: {reason.msg}VmFailedUpdatingSecondaryConfigvCenter cannot update the Fault Tolerance secondary VM configurationerrorvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name}vCenter cannot update the Fault Tolerance secondary VM configurationvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFailedUpdatingSecondaryConfig"> <description> After a failover, the new Primary VM failed to update the configuration of the Secondary VM </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFailoverFailedvSphere HA virtual machine failover unsuccessfulwarningvSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over this virtual machine. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg} <EventLongDescription id="vim.event.VmFailoverFailed"> <description> vSphere HA did not failover this virtual machine. The event includes the details of the fault that was generated when vSphere HA attempted the failover. vSphere HA will retry the failover on another host unless the maximum number of failover attempts has been exceeded. In many cases, the retry will succeed. </description> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It may also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to failover the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or try powering on the virtual machine and investigate any returned errors. 
</action> <action> If the error reports that a file is locked, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. If any hosts have been declared dead, investigate whether a networking/storage issue may be the cause. </action> <action> If, however, the error reports that the virtual machine is in an invalid state, there may be an in-progress operation that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation that is taking a long time to complete. </action> </cause> </EventLongDescription> VmFaultToleranceStateChangedEventVM Fault Tolerance state changedinfoFault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state on {vm.name} on host {host.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState} <EventLongDescription id="vim.event.VmFaultToleranceStateChangedEvent"> <description> The Fault Tolerance state of the virtual machine changed </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceTurnedOffEventVM Fault Tolerance turned offinfoFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name}Fault Tolerance protection has been turned off for {vm.name} on host {host.name}Fault Tolerance protection has been turned off for {vm.name}Fault Tolerance protection has been turned off for this virtual machineFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFaultToleranceTurnedOffEvent"> <description> All Secondary VMs have been removed and Fault Tolerance protection is turned off for this virtual machine. </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceVmTerminatedEventFault Tolerance VM terminatedinfoThe Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} has been terminated. 
{reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason} <EventLongDescription id="vim.event.VmFaultToleranceVmTerminatedEvent"> <description> A Primary VM or Secondary VM became inactive </description> <cause> <description> The Secondary VM became inactive because its operations are no longer synchronized with those of the Primary VM</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Secondary VM became inactive because a hardware or network failure caused the Primary VM to lose the Primary-to-Secondary connection</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Fault Tolerant VM became inactive due to a partial hardware failure on the physical host</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> A user stopped the Fault Tolerant VM</description> <action>The remaining Fault Tolerant VM takes over as the Primary VM. vSphere HA will attempt to restart the Secondary VM.</action> </cause> </EventLongDescription> VmGuestOSCrashedEventGuest operating system crashederror{vm.name} on {host.name}: Guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.{vm.name}: Guest operating system has crashed.This virtual machine's guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.VmGuestRebootEventGuest rebootinfoGuest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name}Guest OS rebootGuest OS reboot for {vm.name} on {host.name} in {datacenter.name}VmGuestShutdownEventGuest OS shut downinfoGuest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name}Guest OS shut downGuest OS shut down for {vm.name} on {host.name} in {datacenter.name}VmGuestStandbyEventGuest standbyinfoGuest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name}Guest OS standbyGuest OS standby for {vm.name} on {host.name} in {datacenter.name}VmHealthMonitoringStateChangedEventvSphere HA VM monitoring state changedinfovSphere HA VM monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'VmInstanceUuidAssignedEventAssign a new instance UUIDinfoAssign a new instance UUID ({instanceUuid})Assign a new instance UUID ({instanceUuid}) to {vm.name} <EventLongDescription id="vim.event.VmInstanceUuidAssignedEvent"> <description>The virtual machine was assigned a new vCenter Server-specific instance UUID </description> <cause> <description> The user who created the virtual machine did not specify a vCenter Server-specific instance UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
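The instance UUID discussed above is the vCenter Server-specific identifier, distinct from the BIOS UUID; both can be read from the virtual machine's config. A short hedged sketch that reuses a connected ServiceInstance `si` obtained as in the earlier sketches (connection details are assumed):

    from pyVmomi import vim

    content = si.RetrieveContent()   # `si` from an earlier SmartConnect sketch (assumed)
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.VirtualMachine], True)
    for vm in view.view:
        if vm.config is not None:    # config can be unset for inaccessible VMs
            print(vm.name, 'instanceUuid:', vm.config.instanceUuid, 'biosUuid:', vm.config.uuid)
    view.Destroy()
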
</description> </cause> </EventLongDescription> VmInstanceUuidChangedEventInstance UUID ChangedinfoThe instance UUID has been changed from ({oldInstanceUuid}) to ({newInstanceUuid})The instance UUID of {vm.name} has been changed from ({oldInstanceUuid}) to ({newInstanceUuid}) <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine has changed </description> <cause> <description> A user action resulted in a change to the vCenter Server-specific instance UUID of the virtual machine </description> </cause> <cause> <description> vCenter Server changed the instance UUID of the virtual machine because it detected a conflict </description> </cause> </EventLongDescription> VmInstanceUuidConflictEventInstance UUIDs conflicterrorThe instance UUID ({instanceUuid}) conflicts with the instance UUID assigned to {conflictedVm.name}The instance UUID ({instanceUuid}) of {vm.name} conflicts with the instance UUID assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine conflicted with that of another virtual machine. </description> <cause> <description> Virtual machine instance UUID conflicts can occur if you copy virtual machine files manually without using vCenter Server. </description> </cause> </EventLongDescription> VmMacAssignedEventVM MAC assignedinfoNew MAC address ({mac}) assigned to adapter {adapter}New MAC address ({mac}) assigned to adapter {adapter} for {vm.name}VmMacChangedEventVM MAC changedwarningChanged MAC address from {oldMac} to {newMac} for adapter {adapter}Changed MAC address from {oldMac} to {newMac} for adapter {adapter} for {vm.name} <EventLongDescription id="vim.event.VmMacChangedEvent"> <description> The virtual machine MAC address has changed </description> <cause> <description> A user action changed the virtual machine MAC address </description> </cause> <cause> <description> vCenter changed the virtual machine MAC address because it detected a MAC address conflict </description> </cause> </EventLongDescription> VmMacConflictEventVM MAC conflicterrorThe MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmMacConflictEvent"> <description> The virtual machine MAC address conflicts with that of another virtual machine </description> <cause> <description> This virtual machine's MAC address is the same as that of another virtual machine. Refer to the event details for more information on the virtual machine that caused the conflict. 
</description> </cause> </EventLongDescription> VmMaxFTRestartCountReachedvSphere HA reached maximum Secondary VM restart count.warningvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxFTRestartCountReached"> <description> The system reached the maximum restart limit in its attempt to restart a Secondary VM </description> <cause> <description>The system exceeded the number of allowed restart attempts for the Secondary VM when it tried to reestablish Fault Tolerance</description> <action>Check the causes for the restart failures and fix them. Then disable and re-enable Fault Tolerance protection.</action> </cause> </EventLongDescription> VmMaxRestartCountReachedvSphere HA reached maximum VM restart countwarningvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name}because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart this VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxRestartCountReached"> <description> vSphere HA has reached the maximum number of failover attempts for this virtual machine and has not been able to restart it. No further failover attempts will be made. By default vSphere HA attempts to failover a virtual machine 5 times. </description> <cause> <description> Failover can fail for a number of reasons including that the configuration file of the virtual machine is corrupt or one or more of the virtual machines datastores are not accessible by any host in the cluster due to an all paths down condition. In addition, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. </description> <action> To determine why previous failover attempts failed, search the events that are logged for the VM for occurrences of the event vSphere HA reports when a failover fails. These events will report the reason for the failed failover. vSphere HA events can be located by searching for the phrase 'vSphere HA'. To determine whether any issues still exist, try to manually power on the virtual machine. If power-on fails, investigate the error that is returned. But, if the power-on remains pending for a long time, investigate whether an all paths down condition exists. Also, if any hosts have been declared dead, investigate whether a networking or storage issue may be the cause. 
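The action text above suggests retrying the power-on manually and inspecting the returned error. A hedged sketch of that retry, assuming a `vm` object already resolved as in the earlier sketches:

    from pyVim.task import WaitForTask
    from pyVmomi import vmodl

    try:
        WaitForTask(vm.PowerOnVM_Task())   # `vm` resolved earlier (assumed)
        print('power-on succeeded')
    except vmodl.MethodFault as fault:
        print('power-on failed:', fault.msg)
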
</action> </cause> </EventLongDescription> VmMessageErrorEventVM error messageerrorError message on {vm.name} on {host.name}: {message}Error message on {vm.name} on {host.name}: {message}Error message on {vm.name}: {message}Error message from {host.name}: {message}Error message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageErrorEvent"> <description> An error message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this error occurred </description> </cause> </EventLongDescription> VmMessageEventVM information messageinfoMessage on {vm.name} on {host.name}: {message}Message on {vm.name} on {host.name}: {message}Message on {vm.name}: {message}Message from {host.name}: {message}Message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageEvent"> <description> An information message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on the messages from the virtual machine </description> </cause> </EventLongDescription> VmMessageWarningEventVM warning messagewarningWarning message on {vm.name} on {host.name}: {message}Warning message on {vm.name} on {host.name}: {message}Warning message on {vm.name}: {message}Warning message from {host.name}: {message}Warning message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageWarningEvent"> <description> A warning message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this warning was issued </description> </cause> </EventLongDescription> VmMigratedEventVM migratedinfoVirtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmMigratedEvent"> <description> The virtual machine's host was changed successfully </description> <cause> <description> A user action caused the virtual machine to be successfully migrated to a different host </description> </cause> </EventLongDescription> VmNoCompatibleHostForSecondaryEventNo compatible host for the Fault Tolerance secondary VMerrorNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name}No compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name}No compatible host for the Fault Tolerance secondary VM {vm.name}No compatible host for the Fault Tolerance secondary VMNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription 
id="vim.event.VmNoCompatibleHostForSecondaryEvent"> <description> No compatible host was found when trying to place a Secondary VM </description> <cause> <description>There was no compatible host available to place a Secondary VM</description> <action>Resolve the incompatibilities and retry the operation</action> </cause> </EventLongDescription> VmNoNetworkAccessEventVM No Network AccesswarningNot all networks are accessible by {destHost.name}Not all networks for {vm.name} are accessible by {destHost.name}VmOrphanedEventVM orphanedwarning{vm.name} does not exist on {host.name}{vm.name} does not exist on {host.name}{vm.name} does not existVirtual machine does not exist on {host.name}{vm.name} does not exist on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmOrphanedEvent"> <description> The virtual machine does not exist on the host with which it is associated </description> <cause> <description> The virtual machine was deleted while its host was disconnected from vCenter Server. </description> </cause> </EventLongDescription> VmPowerOffOnIsolationEventvSphere HA powered off VM on isolated hostinfovSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off this virtual machine on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmPowerOffOnIsolationEvent"> <description> vSphere HA powered off this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmPoweredOffEventVM powered offinfo{vm.name} on {host.name} is powered off{vm.name} on {host.name} is powered off{vm.name} is powered offVirtual machine on {host.name} is powered off{vm.name} on {host.name} in {datacenter.name} is powered offVmPoweredOnEventVM powered oninfo{vm.name} on {host.name} has powered on{vm.name} on {host.name} has powered on{vm.name} has powered onVirtual machine on {host.name} has powered on{vm.name} on {host.name} in {datacenter.name} has powered onVmPoweringOnWithCustomizedDVPortEventVirtual machine powered on with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.infoVirtual machine powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.Virtual machine {vm.name} powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.VmPrimaryFailoverEventFault Tolerance VM failovererrorFault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name} in {datacenter.name}. 
{reason.@enum.VirtualMachine.NeedSecondaryReason}VmReconfiguredEventVM reconfiguredinfoReconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured virtual machine.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name} in {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}VmRegisteredEventVM registeredinfoRegistered {vm.name} on {host.name}Registered {vm.name} on {host.name} in {datacenter.name}Registered {vm.name}Registered VM on {host.name} in {datacenter.name}Registered {vm.name} on {host.name} in {datacenter.name}VmRelayoutSuccessfulEventVM relayout completedinfoRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} completedRelayout of the virtual machine completedRelayout of {vm.name} on {host.name} in {datacenter.name} completedVmRelayoutUpToDateEventVM relayout up-to-dateinfo{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} is in the correct format and relayout is not necessaryIn the correct format and relayout is not necessary{vm.name} on {host.name} in {datacenter.name} is in the correct format and relayout is not necessaryVmReloadFromPathEventVirtual machine reloaded from pathinfo{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} reloaded from new configuration {configPath}.Virtual machine on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.VmReloadFromPathFailedEventVirtual machine not reloaded from patherror{vm.name} on {host.name} could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from path {configPath}.{vm.name} could not be reloaded from {configPath}.This virtual machine could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from {configPath}. <EventLongDescription id="vim.event.VmReloadFromPathFailedEvent"> <description> Reloading the virtual machine from a new datastore path failed </description> <cause> <description>The destination datastore path was inaccessible or invalid </description> <action>Use a valid destination datastore path </action> </cause> <cause> <description>The virtual machine is in an invalid state </description> <action>Check the virtual machine's power state. 
If the virtual machine is powered on, power it off </action> </cause> <cause> <description>The virtual machine is enabled for Fault Tolerance </description> <action>Disable Fault Tolerance for the virtual machine and retry the operation </action> </cause> </EventLongDescription> VmRelocateFailedEventFailed to relocate VMerrorFailed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmRelocateFailedEvent"> <description> Virtual machine relocation to a different host or datastore failed </description> <cause> <description> Virtual machine relocation can fail for a number of reasons, including network outages, insufficient disk space, and so on </description> <action> Consider the task related to this event, evaluate the failure reason, and take action accordingly </action> </cause> </EventLongDescription> VmRelocateSpecEvent<VM Relocate Spec Event>info<internal><internal><internal><internal><internal>VmRelocatedEventVM relocatedinfoVirtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmRelocatedEvent"> <description> The virtual machine execution and/or storage was successfully relocated </description> <cause> <description> A user action caused the virtual machine's execution and/or storage to be successfully changed </description> </cause> </EventLongDescription> VmRemoteConsoleConnectedEventVM remote console connectedinfoRemote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name}Remote console connectedRemote console connected to {vm.name} on host {host.name}VmRemoteConsoleDisconnectedEventVM remote console disconnectedinfoRemote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name}Remote console connectedRemote console disconnected from {vm.name} on host {host.name}VmRemovedEventVM removedinfoRemoved {vm.name} on {host.name}Removed {vm.name} on {host.name}Removed {vm.name}RemovedRemoved {vm.name} on {host.name} from {datacenter.name}VmRenamedEventVM renamedwarningRenamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName} in 
{datacenter.name}VmRequirementsExceedCurrentEVCModeEventVirtual machine is using features that exceed the capabilities of the host's current EVC mode.warningFeature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of this host's current EVC mode.Feature requirements of this virtual machine exceed capabilities of this host's current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.VmResettingEventVM resettinginfo{vm.name} on {host.name} is reset{vm.name} on {host.name} is reset{vm.name} is resetVirtual machine on {host.name} is reset{vm.name} on {host.name} in {datacenter.name} is resetVmResourcePoolMovedEventVM resource pool movedinfoMoved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name} in {datacenter.name}VmResourceReallocatedEventVM resource reallocatedinfoResource allocation changed
Modified:
{configChanges.modified}Changed resource allocation for {vm.name}
Modified:
{configChanges.modified}VmRestartedOnAlternateHostEventVM restarted on alternate hostinfoVirtual machine {vm.name} was restarted on this host since {sourceHost.name} failedVirtual machine was restarted on {host.name} since {sourceHost.name} failedVirtual machine {vm.name} was restarted on {host.name} since {sourceHost.name} failedVmResumingEventVM resuminginfo{vm.name} on {host.name} is resuming{vm.name} on {host.name} is resuming{vm.name} is resumingVirtual machine on {host.name} is resuming{vm.name} on {host.name} in {datacenter.name} is resumingVmSecondaryAddedEventFault Tolerance secondary VM addedinfoA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name}A Fault Tolerance secondary VM has been added for {vm.name} on host {host.name}A Fault Tolerance secondary VM has been added for {vm.name}A Fault Tolerance secondary VM has been added for this VMA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryDisabledBySystemEventvCenter disabled Fault ToleranceerrorvCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the Secondary VM could not be powered On. <EventLongDescription id="vim.event.VmSecondaryDisabledBySystemEvent"> <description> vCenter Server disabled a Secondary VM because it could not power on the Secondary VM </description> <cause> <description>vCenter Server failed to power on the Secondary VM </description> <action>Check the reason in the event message for more details, fix the failure, and re-enable Fault Tolerance protection to power on the Secondary VM.</action> </cause> </EventLongDescription> VmSecondaryDisabledEventDisabled Fault Tolerance secondary VMinfoDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Disabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Disabled Fault Tolerance secondary VM for {vm.name}Disabled Fault Tolerance secondary VM for this virtual machineDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryEnabledEventEnabled Fault Tolerance secondary VMinfoEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Enabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Enabled Fault Tolerance secondary VM for {vm.name}Enabled Fault Tolerance secondary VM for this VMEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryStartedEventStarted Fault Tolerance secondary VMinfoStarted Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Started Fault Tolerance secondary VM for {vm.name} on host {host.name}Started Fault Tolerance secondary VM for {vm.name}Started Fault Tolerance secondary VM for this virtual machineStarted Fault Tolerance secondary VM for {vm.name} 
on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmShutdownOnIsolationEventvSphere HA shut down VM on isolated hostinfovSphere HA shut down {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down this virtual machine on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} was shut down on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation} <EventLongDescription id="vim.event.VmShutdownOnIsolationEvent"> <description> vSphere HA shut down this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmStartRecordingEventStart a recording sessioninfoStart a recording sessionStart a recording session on {vm.name}VmStartReplayingEventStart a replay sessioninfoStart a replay sessionStart a replay session on {vm.name}VmStartingEventVM startinginfo{vm.name} on {host.name} is starting{vm.name} on {host.name} is starting{vm.name} is startingVirtual machine is starting{vm.name} on {host.name} in {datacenter.name} is startingVmStartingSecondaryEventStarting Fault Tolerance secondary VMinfoStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Starting Fault Tolerance secondary VM for {vm.name} on host {host.name} in clusterStarting Fault Tolerance secondary VM for {vm.name}Starting Fault Tolerance secondary VM for this virtual machineStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmStaticMacConflictEventVM static MAC conflicterrorThe static MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The static MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name}VmStoppingEventVM stoppinginfo{vm.name} on {host.name} is stopping{vm.name} on {host.name} is stopping{vm.name} is stoppingVirtual machine is stopping{vm.name} on {host.name} in {datacenter.name} is stoppingVmSuspendedEventVM suspendedinfo{vm.name} on {host.name} is suspended{vm.name} on {host.name} is suspended{vm.name} is suspendedVirtual machine is suspended{vm.name} on {host.name} in {datacenter.name} is suspendedVmSuspendingEventVM being suspendedinfo{vm.name} on {host.name} is being suspended{vm.name} on {host.name} is being suspended{vm.name} is being suspendedVirtual machine is being suspended{vm.name} on {host.name} in {datacenter.name} is being suspendedVmTimedoutStartingSecondaryEventStarting the Fault Tolerance secondary VM timed outerrorStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in 
{datacenter.name} timed out within {timeout} ms <EventLongDescription id="vim.event.VmTimedoutStartingSecondaryEvent"> <description> An attempt to start a Secondary VM timed out. </description> <cause> <description>A user attempted to turn on or enable Fault Tolerance, triggering the start of the Secondary VM. The start operation timed out and, as a result, vCenter Server disables Fault Tolerance. </description> <action>Fix any problems and re-enable Fault Tolerance protection</action> </cause> <cause> <description>The secondary VM was started in response to a failure, but the start attempt timed out</description> <action> vSphere HA will attempt to power on the Secondary VM</action> </cause> </EventLongDescription> VmUnsupportedStartingEventVM unsupported guest OS is startingwarningUnsupported guest OS {guestId} for {vm.name}Unsupported guest OS {guestId} for {vm.name} on {host.name}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name}Unsupported guest OS {guestId}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUnsupportedStartingEvent"> <description> Attempting to power on a virtual machine that has an unsupported guest operating system </description> <cause> <description> A user action initiated a virtual machine power-on operation, but the virtual machine has an unsupported guest operating system. </description> </cause> </EventLongDescription> VmUpgradeCompleteEventVM upgrade completeinfoVirtual machine compatibility upgraded to {version.@enum.vm.hwVersion}VmUpgradeFailedEventCannot upgrade VMerrorCannot upgrade virtual machine compatibility.VmUpgradingEventUpgrading VMinfoUpgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} in {datacenter.name} to {version.@enum.vm.hwVersion} <EventLongDescription id="vim.event.VmUpgradingEvent"> <description>The virtual hardware on this virtual machine is being upgraded</description> <cause> <description>A user-initiated action triggered an upgrade of the virtual machine hardware</description> </cause> <cause> <description>A scheduled task started an upgrade of the virtual machine hardware</description> </cause> </EventLongDescription> VmUuidAssignedEventVM UUID assignedinfoAssigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name}Assigned new BIOS UUID ({uuid})Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidAssignedEvent"> <description>The virtual machine was assigned a new BIOS UUID</description> <cause> <description>The user who created the virtual machine did not specify a BIOS UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmUuidChangedEventVM UUID ChangedwarningChanged BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name}BIOS UUID was changed from {oldUuid} to {newUuid}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidChangedEvent"> <description>The virtual machine BIOS UUID has changed</description> <cause> <description> A user changed the virtual machine BIOS UUID directly on the host </description> </cause> </EventLongDescription> VmUuidConflictEventVM UUID ConflicterrorBIOS ID ({uuid}) conflicts with that of {conflictedVm.name}BIOS ID ({uuid}) of {vm.name} conflicts with that of {conflictedVm.name}VmVnicPoolReservationViolationClearEventVirtual NIC Network Resource Pool Reservation Violation Clear eventinfoThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedVmVnicPoolReservationViolationRaiseEventVirtual NIC Network Resource Pool Reservation Violation eventinfoThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedVmWwnAssignedEventVM WWN assignedinfoNew WWNs assignedNew WWNs assigned to {vm.name} <EventLongDescription id="vim.event.VmWwnAssignedEvent"> <description> The virtual machine was assigned a WWN (World Wide Name) </description> <cause> <description>The virtual machine was assigned a WWN because it was created with an RDM (Raw Device Mappings) disk or was reconfigured to access an RDM disk </description> </cause> </EventLongDescription> VmWwnChangedEventVM WWN changedwarningWWNs are changedWWNs are changed for {vm.name} <EventLongDescription id="vim.event.VmWwnChangedEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine was changed </description> <cause> <description>The virtual machine was assigned a new WWN, possibly due to a conflict caused by another virtual machine being assigned the same WWN </description> </cause> </EventLongDescription> VmWwnConflictEventVM WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {vm.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.VmWwnConflictEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine has a conflict </description> <cause> <description>The WWN assigned to this virtual machine was the same as that of a different virtual machine. </description> <action> Check the event details for more information on the conflict and correct the problem. </action>\</cause> </EventLongDescription> WarningUpgradeEventWarning upgradewarning{message}IScsiBootFailureEventBoot from iSCSI failed.warningBooting from iSCSI failed.Booting from iSCSI failed with an error. See the VMware Knowledge Base for information on configuring iBFT networking.EventExLost Network Connectivityerrorvprob.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. 
Affected portgroups:{3}.EventExNo IPv6 TSO supporterrorvprob.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid vmknic specified in /Migrate/Vmknicwarningvprob.net.migrate.bindtovmk|The ESX advanced config option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Please update the config option with a valid vmknic or, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExVirtual NIC connection to switch failedwarningvprob.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningvprob.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. {3} uplinks still up. Affected portgroups:{4}.EventExLost Network Redundancywarningvprob.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExThin Provisioned Device Nearing Capacitywarningvprob.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold.EventExLost Storage Connectivityerrorvprob.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExDegraded Storage Path Redundancywarningvprob.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. {3} remaining active paths. Affected datastores: {4}.EventExLost Storage Path Redundancywarningvprob.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExVMFS Locked By Remote Hosterrorvprob.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.errorvprob.vmfs.extent.offline|An attached device {1} might be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system is online.infovprob.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Volume Connectivity Restoredinfovprob.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfovprob.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterrorvprob.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerrorvprob.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Opening volume in read-only metadata mode with limited write support.EventExVMFS Lock Corruption Detectederrorvprob.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). 
Other regions of the volume may be damaged too.EventExLost connection to NFS servererrorvprob.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExRestored connection to NFS serverinfovprob.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExVMFS Resource Corruption Detectederrorvprob.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExCopied Library Iteminfocom.vmware.cl.CopyLibraryItemEvent|Copied Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}({sourceLibraryItemId}), source Library {sourceLibraryName}.EventExFailed to copy Library Itemerrorcom.vmware.cl.CopyLibraryItemFailEvent|Failed to copy Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}, source Library {sourceLibraryName}.EventExCreated Libraryinfocom.vmware.cl.CreateLibraryEvent|Created Library {libraryName}EventExFailed to create Libraryerrorcom.vmware.cl.CreateLibraryFailEvent|Failed to create Library {libraryName}EventExCreated Library Iteminfocom.vmware.cl.CreateLibraryItemEvent|Created Library Item {libraryItemName} in Library {libraryName}.EventExFailed to create Library Itemerrorcom.vmware.cl.CreateLibraryItemFailEvent|Failed to create Library Item {libraryItemName} in Library {libraryName}.EventExDeleted Libraryinfocom.vmware.cl.DeleteLibraryEvent|Deleted Library {libraryName}EventExFailed to delete Libraryerrorcom.vmware.cl.DeleteLibraryFailEvent|Failed to delete Library {libraryName}EventExDeleted Library Iteminfocom.vmware.cl.DeleteLibraryItemEvent|Deleted Library Item {libraryItemName} in Library {libraryName}.EventExFailed to delete Library Itemerrorcom.vmware.cl.DeleteLibraryItemFailEvent|Failed to delete Library Item {libraryItemName} in Library {libraryName}.EventExPublished Libraryinfocom.vmware.cl.PublishLibraryEvent|Published Library {libraryName}EventExFailed to publish Libraryerrorcom.vmware.cl.PublishLibraryFailEvent|Failed to publish Library {libraryName}EventExPublished Library Iteminfocom.vmware.cl.PublishLibraryItemEvent|Published Library Item {libraryItemName} in Library {libraryName}EventExFailed to publish Library Itemerrorcom.vmware.cl.PublishLibraryItemFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName}EventExPublished Library Item to Subscriptioninfocom.vmware.cl.PublishLibraryItemSubscriptionEvent|Published Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library Item to Subscriptionerrorcom.vmware.cl.PublishLibraryItemSubscriptionFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExPublished Library to Subscriptioninfocom.vmware.cl.PublishLibrarySubscriptionEvent|Published Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library to Subscriptionerrorcom.vmware.cl.PublishLibrarySubscriptionFailEvent|Failed to publish Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExCreated 
Subscriptioninfocom.vmware.cl.SubscriptionCreateEvent|Created subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to create Subscriptionerrorcom.vmware.cl.SubscriptionCreateFailEvent|Failed to create subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExDeleted Subscriptioninfocom.vmware.cl.SubscriptionDeleteEvent|Deleted subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to delete Subscriptionerrorcom.vmware.cl.SubscriptionDeleteFailEvent|Failed to delete subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExUpdated Subscriptioninfocom.vmware.cl.SubscriptionUpdateEvent|Updated subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to update Subscriptionerrorcom.vmware.cl.SubscriptionUpdateFailEvent|Failed to update subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExSynchronized Libraryinfocom.vmware.cl.SyncLibraryEvent|Synchronized Library {libraryName}EventExFailed to Synchronize Libraryerrorcom.vmware.cl.SyncLibraryFailEvent|Failed to Synchronize Library {libraryName}EventExSynchronized Library Iteminfocom.vmware.cl.SyncLibraryItemEvent|Synchronized Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Itemerrorcom.vmware.cl.SyncLibraryItemFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Iteminfocom.vmware.cl.SyncNfcFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}. 
Failure may be due to a network error or a host entering maintenance mode.EventExUpdated Libraryinfocom.vmware.cl.UpdateLibraryEvent|Updated Library {libraryName}EventExFailed to update Libraryerrorcom.vmware.cl.UpdateLibraryFailEvent|Failed to update Library {libraryName}EventExUpdated Library Iteminfocom.vmware.cl.UpdateLibraryItemEvent|Updated Library Item {libraryItemName} in Library {libraryName}.EventExFailed to update Library Itemerrorcom.vmware.cl.UpdateLibraryItemFailEvent|Failed to update Library Item {libraryItemName} in Library {libraryName}.EventExCould not locate Library Item file on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryItemFileOnRestoreEvent|File '{fileName}' in Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library Item folder on the storage backing after restorecom.vmware.cl.restore.DeletedLibraryItemOnRestoreEvent|Folder for Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library folder on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryOnRestoreEvent|Library '{libraryName}' folder could not be located on the storage backing after restoreEventExCould not locate Library Item content after restorecom.vmware.cl.restore.MissingLibraryItemContentOnRestoreEvent|The content of Library Item '{libraryItemName}' could not be located on storage after restoreEventExNew Library Item file found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemFileOnRestoreEvent|New Library Item file '{fileName}' found on the storage backing for Library Item '{libraryItemName}' after restore. Path to the file on storage: '{filePath}'EventExNew Library Item folder found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemOnRestoreEvent|New Library Item folder '{itemFolderName}' found on the storage backing for Library '{libraryName}' after restore. 
Path to the item folder on storage: '{itemFolderPath}'ExtendedEventCancel LWD snapshotinfoCancelling LWD snapshotcom.vmware.dp.events.cancelsnapshot|Cancelling LWD snapshotExtendedEventLWD snapshot is cancelledinfoLWD snapshot is cancelledcom.vmware.dp.events.cancelsnapshotdone|LWD snapshot is cancelledExtendedEventFailed to cancel LWD snapshoterrorFailed to cancel LWD snapshotcom.vmware.dp.events.cancelsnapshotfailed|Failed to cancel LWD snapshotExtendedEventPerform 'commit' phase of LWD-based restoreinfoPerforming 'commit' phase of LWD-based restorecom.vmware.dp.events.commitrestore|Performing 'commit' phase of LWD-based restoreExtendedEvent'commit' phase of LWD-based restore is completedinfo'commit' phase of LWD-based restore is completedcom.vmware.dp.events.commitrestoredone|'commit' phase of LWD-based restore is completedExtendedEvent'commit' phase of LWD-based restore failederror'commit' phase of LWD-based restore failedcom.vmware.dp.events.commitrestorefailed|'commit' phase of LWD-based restore failedExtendedEventEnabling protection services on hosts in the clusterinfoEnabling protection services on hosts in the clusterEnabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservices|Enabling protection services on hosts in the clusterExtendedEventFinished enabling protection services on hosts in the clusterinfoFinished enabling protection services on hosts in the clusterFinished enabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesdone|Finished enabling protection services on hosts in the clusterExtendedEventFailed to enable protection services on hosts in the clustererrorFailed to enable protection services on hosts in the clusterFailed to enable protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesfailed|Failed to enable protection services on hosts in the clusterExtendedEventPerform 'prepare' phase of LWD-based restoreinfoPerforming 'prepare' phase of LWD-based restorecom.vmware.dp.events.preparerestore|Perform 'prepare' phase of LWD restoreExtendedEvent'prepare' phase of LWD-based restore is completedinfo'prepare' phase of LWD-based restore is completedcom.vmware.dp.events.preparerestoredone|'prepare' phase of LWD-based restore is completedExtendedEvent'prepare' phase of LWD-based restore failederror'prepare' phase of LWD-based restore failedcom.vmware.dp.events.preparerestorefailed|'prepare' phase of LWD-based restore failedExtendedEventEnable LWD data protectioninfoEnabling LWD data protectioncom.vmware.dp.events.protect|Enabling LWD data protectionExtendedEventLWD data protection enabledinfoLWD data protection enabledcom.vmware.dp.events.protectdone|LWD data protection enabledExtendedEventFailed to enable LWD data protectionerrorFailed to enable LWD data protectioncom.vmware.dp.events.protectfailed|Failed to enable LWD data protectionExtendedEventQuerying entity for protection infoinfoQuerying entity for protection infocom.vmware.dp.events.queryprotectedentityinfo|Querying entity for protection infoExtendedEventFinished querying entity for protection infoinfoFinished querying entity for protection infocom.vmware.dp.events.queryprotectedentityinfodone|Finished querying entity for protection infoExtendedEventFailed to query entity for protection infoerrorFailed to query entity for protection infocom.vmware.dp.events.queryprotectedentityinfofailed|Failed to query entity for protection infoExtendedEventRetire LWD snapshotinfoRetiring LWD 
snapshotcom.vmware.dp.events.retiresnapshot|Retiring LWD snapshotExtendedEventLWD snapshot is retiredinfoLWD snapshot is retiredcom.vmware.dp.events.retiresnapshotdone|LWD snapshot is retiredExtendedEventFailed to retire LWD snapshoterrorFailed to retire LWD snapshotcom.vmware.dp.events.retiresnapshotfailed|Failed to retire LWD snapshotExtendedEventTake LWD application-consistent snapshotinfoTaking LWD application-consistent snapshotcom.vmware.dp.events.snapshot.applicationconsistent|Taking LWD application-consistent snapshotExtendedEventTake LWD crash-consistent snapshotinfoTaking LWD crash-consistent snapshotcom.vmware.dp.events.snapshot.crashconsistent|Taking LWD crash-consistent snapshotExtendedEventTake LWD metadata-only snapshotinfoTaking LWD metadata-only snapshotcom.vmware.dp.events.snapshot.metadataonly|Taking LWD metadata-only snapshotExtendedEventTake LWD VSS application-consistent snapshotinfoTaking LWD VSS application-consistent snapshotcom.vmware.dp.events.snapshot.vssappconsistent|Taking LWD VSS application-consistent snapshotExtendedEventLWD application-consistent snapshot takeninfoLWD application-consistent snapshot takencom.vmware.dp.events.snapshotdone.applicationconsistent|LWD application-consistent snapshot takenExtendedEventLWD crash-consistent snapshot takeninfoLWD crash-consistent snapshot takencom.vmware.dp.events.snapshotdone.crashconsistent|LWD crash-consistent snapshot takenExtendedEventLWD metadata-only snapshot takeninfoLWD metadata-only snapshot takencom.vmware.dp.events.snapshotdone.metadataonly|LWD metadata-only snapshot takenExtendedEventLWD VSS application-consistent snapshot takeninfoLWD VSS application-consistent snapshot takencom.vmware.dp.events.snapshotdone.vssappconsistent|LWD VSS application-consistent snapshot takenExtendedEventLWD application-consistent snapshot failederrorLWD application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.applicationconsistent|LWD application-consistent snapshot failedExtendedEventLWD crash-consistent snapshot failederrorLWD crash-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.crashconsistent|LWD crash-consistent snapshot failedExtendedEventLWD metadata-only snapshot failederrorLWD metadata-only snapshot failedcom.vmware.dp.events.snapshotfailed.metadataonly|LWD metadata-only snapshot failedExtendedEventLWD VSS application-consistent snapshot failederrorLWD VSS application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.vssappconsistent|LWD VSS application-consistent snapshot failedExtendedEventPerform LWD snapshot syncinfoPerforming LWD snapshot synccom.vmware.dp.events.sync|Performing LWD snapshot syncExtendedEventLWD snapshot sync is completedinfoLWD snapshot sync is completedcom.vmware.dp.events.syncdone|LWD snapshot sync is completedExtendedEventLWD snapshot sync failederrorLWD snapshot sync failedcom.vmware.dp.events.syncfailed|LWD snapshot sync failedExtendedEventDisable LWD data protectioninfoDisabling LWD data protectioncom.vmware.dp.events.unprotect|Disabling LWD data protectionExtendedEventLWD data protection disabledinfoLWD data protection disabledcom.vmware.dp.events.unprotectdone|LWD data protection disabledExtendedEventFailed to disable LWD data protectionerrorFailed to disable LWD data protectioncom.vmware.dp.events.unprotectfailed|Failed to disable LWD data protectionEventExDeployed entity from Content Libraryinfocom.vmware.ovfs.DeployEvent|Deployed entity from Library Item {libraryItemName} in Library {libraryName}EventExFailed to deploy entity from 
Content Libraryerrorcom.vmware.ovfs.DeployFailEvent|Failed to deploy entity from Library Item {libraryItemName} in Library {libraryName}EventExCloned entity to Content Libraryinfocom.vmware.ovfs.ExportEvent|Cloned entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExFailed to clone entity to Content Libraryerrorcom.vmware.ovfs.ExportFailEvent|Failed to clone entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExinfocom.vmware.rbd.activateRuleSet|Activate Rule SetEventExwarningcom.vmware.rbd.fdmPackageMissing|A host in a HA cluster does not have the 'vmware-fdm' package in its image profileEventExwarningcom.vmware.rbd.hostProfileRuleAssocEvent|A host profile associated with one or more active rules was deleted.EventExerrorcom.vmware.rbd.hostScriptFailure|An error encountered while running a user defined script: {scriptName} on the host: {ip}. Status: {status}EventExwarningcom.vmware.rbd.ignoreMachineIdentity|Ignoring the AutoDeploy.MachineIdentity event, since the host is already provisioned through Auto DeployEventExinfocom.vmware.rbd.pxeBootNoImageRule|Unable to PXE boot host since it does not match any rulesEventExinfocom.vmware.rbd.pxeBootUnknownHost|PXE Booting unknown hostEventExinfocom.vmware.rbd.pxeProfileAssoc|Attach PXE ProfileEventExinfocom.vmware.rbd.scriptBundleAssoc|Script Bundle Name: {name} attached to moref {moref}, entity-id {entity-id}EventExerrorcom.vmware.rbd.vmcaCertGenerationFailureEvent|Failed to generate host certificates using VMCAEventExCreated Harbor registryinfocom.vmware.registry.HarborCreateEvent|Created Harbor registry {registryName} on cluster {clusterId}.EventExFailed to create Harbor registryerrorcom.vmware.registry.HarborCreateFailEvent|Failed to create Harbor registry {registryName} on cluster {clusterId}.EventExDeleted Harbor registryinfocom.vmware.registry.HarborDeleteEvent|Deleted Harbor registry {registryName} on cluster {clusterId}.EventExFailed to delete Harbor registryerrorcom.vmware.registry.HarborDeleteFailEvent|Failed to delete Harbor registry {registryName} on cluster {clusterId}.EventExCreated Harbor projectinfocom.vmware.registry.HarborProjectCreateEvent|Created Harbor project {projectName} for registry {registryId}.EventExFailed to create Harbor projecterrorcom.vmware.registry.HarborProjectCreateFailEvent|Failed to create Harbor project {projectName} for registry {registryId}.EventExDeleted Harbor projectinfocom.vmware.registry.HarborProjectDeleteEvent|Deleted Harbor project {projectName} for registry {registryId}.EventExFailed to delete Harbor projecterrorcom.vmware.registry.HarborProjectDeleteFailEvent|Failed to delete Harbor project {projectName} for registry {registryId}.EventExCreated Harbor project memberinfocom.vmware.registry.HarborProjectMemberCreateEvent|Created Harbor project member {memberName} for project {projectName}.EventExFailed to create Harbor project membererrorcom.vmware.registry.HarborProjectMemberCreateFailEvent|Failed to create Harbor project member {memberName} for project {projectName}.EventExDeleted Harbor project memberinfocom.vmware.registry.HarborProjectMemberDeleteEvent|Deleted Harbor project member {memberName} from project {projectName}.EventExFailed to delete Harbor project membererrorcom.vmware.registry.HarborProjectMemberDeleteFailEvent|Failed to delete Harbor project member {memberName} from project {projectName}.EventExUpdated Harbor project memberinfocom.vmware.registry.HarborProjectMemberUpdateEvent|Updated Harbor project member 
{memberName} for project {projectName}.EventExFailed to update Harbor project membererrorcom.vmware.registry.HarborProjectMemberUpdateFailEvent|Failed to update Harbor project member {memberName} for project {projectName}.EventExPurged Harbor projectinfocom.vmware.registry.HarborProjectPurgeEvent|Purged Harbor project {projectName} for registry {registryId}.EventExFailed to purge Harbor projecterrorcom.vmware.registry.HarborProjectPurgeFailEvent|Failed to purge Harbor project {projectName} for registry {registryId}.EventExRestoring Harbor registryinfocom.vmware.registry.HarborRestoreEvent|Restoring Harbor registry {registryName} on cluster {clusterId}.EventExFailed to restore Harbor registryerrorcom.vmware.registry.HarborRestoreFailEvent|Failed to restore Harbor registry {registryName} on cluster {clusterId}.EventExRestored Harbor registryinfocom.vmware.registry.HarborRestoreSuccessEvent|Restored Harbor registry {registryName} on cluster {clusterId}.ExtendedEventProactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!errorcom.vmware.vc.proactivehdw.DbError|Proactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!EventExProactive hardware management: Host is disabled with proactive hardware management.warningcom.vmware.vc.proactivehdw.Disabled|Host is disabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: Host is enabled with proactive hardware management.infocom.vmware.vc.proactivehdw.Enabled|Host is enabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: received a failure health update from vendor.errorcom.vmware.vc.proactivehdw.Failure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: Polled health updates from HSM are discarded due to health update response content size limit being exceeded.warningcom.vmware.vc.proactivehdw.HealthUpdatesResponseLimitExceed|Proactive hardware management: Polled health updates from HSM {VendorDisplayName} are discarded due to health update response content size limit being exceeded. Refer to vSAN health logs for more details.EventExProactive hardware management: Some health updates from HSM are discarded due to validation failures.warningcom.vmware.vc.proactivehdw.HealthUpdatesValidationFail|Proactive hardware management: Some health updates from HSM {VendorDisplayName} are discarded due to validation failures. Refer to vSAN health logs for more details.EventExProactive hardware management: Error occurred when posting host-level event for unregistration of HSMerrorcom.vmware.vc.proactivehdw.HostEventPostFailed|Proactive hardware management: After HSM {VendorDisplayName} was unregistered an internal error prevented a host event from posting. 
The following hosts are affected: {AffectedHosts}.EventExProactive hardware management: Failed to contact an HSMerrorcom.vmware.vc.proactivehdw.HsmCommunicationError|Proactive hardware management: Failed to contact HSM with vendor: {VendorDisplayName}.EventExProactive hardware management: Error occured in poll HSM requesterrorcom.vmware.vc.proactivehdw.HsmRequestError|Proactive hardware management: Internal error occurred during polling HSM from vendor {VendorDisplayName}.EventExProactive hardware management: HSM is unregistered.infocom.vmware.vc.proactivehdw.HsmUnregistration|Proactive hardware management: HSM is unregistered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: received a predictive failure health update from vendor.warningcom.vmware.vc.proactivehdw.PredictiveFailure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: HSM is unregistered but with a failure in removing resource bundle.errorcom.vmware.vc.proactivehdw.ResourceBundleCleanupError|Proactive hardware management: HSM from {VendorDisplayName} is unregistered but with a failure in removing resource bundle - likely the resource bundle is currently in use. Please refer to vSAN health logs for the underlying cause and perform manual clean up on the resource bundle.EventExProactive hardware management: Failed to create/update subscription for HSM due to a communication error with HSMerrorcom.vmware.vc.proactivehdw.SubscriptionHsmCommError|Proactive hardware management: Failed to create/update subscription for HSM {VendorDisplayName} due to a communication error with HSM.EventExProactive hardware management: Failed to create/update subscription for HSM due to internal errorerrorcom.vmware.vc.proactivehdw.SubscriptionInternalError|Proactive hardware management: Failed to perform subscription create/update for HSM {VendorDisplayName} due to an internal error. Please refer to the vSAN health logs for more details.EventExProactive hardware management: A new HSM is registered.infocom.vmware.vc.proactivehdw.registration.NewRegistration|Proactive hardware management: A new HSM is registered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: HSM registration is updated.infocom.vmware.vc.proactivehdw.registration.UpdateSuccess|Proactive hardware management: The registration information on the following HSM: '{VendorDisplayName}' has been updated. Here are its supported health update infos: '{EnabledHealthUpdateInfos}'ExtendedEventinfocom.vmware.vcIntegrity.CancelTask|Canceling task on [data.name].ExtendedEventinfocom.vmware.vcIntegrity.CheckNotification|Successfully downloaded notifications. New notifications: [data.Notifications]ExtendedEventerrorcom.vmware.vcIntegrity.CheckNotificationFailed|Could not download notifications.ExtendedEventerrorcom.vmware.vcIntegrity.CheckPXEBootHostFailure|Cannot determine whether host {host.name} is PXE booted. 
The host will be excluded for the current operation.ExtendedEventwarningcom.vmware.vcIntegrity.ClusterConfigurationOutOfCompliance|Hosts in Cluster [data.resource] are out of compliance.ExtendedEventerrorcom.vmware.vcIntegrity.ClusterOperationCancelledDueToCertRefresh|In-flight VUM task on Cluster [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventwarningcom.vmware.vcIntegrity.CriticallyLowDiskSpace|VMware vSphere Lifecycle Manager is critically low on storage space! Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.DisableToolsRemediateOnReboot|Successfully disabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.DisableToolsRemediateOnRebootFailed|Could not disable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.DownloadAlert|VMware vSphere Lifecycle Manager download alert (critical/total): ESX [data.esxCritical]/[data.esxTotal]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadFailedPatchBinary|Could not download patch packages for following patches: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestPackage|Successfully downloaded guest patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestPackageFailed|Could not download guest patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixPackage|Successfully downloaded guest patch packages for UNIX. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixPackageFailed|Could not download guest patch packages for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixUpdate|Successfully downloaded guest patch definitions for UNIX. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixUpdateFailed|Could not download guest patch definitions for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUpdate|Successfully downloaded guest patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUpdateFailed|Could not download guest patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostPackage|Successfully downloaded host patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostPackageFailed|Could not download host patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostUpdate|Successfully downloaded host patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostUpdateFailed|Could not download host patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.EnableToolsRemediateOnReboot|Successfully enabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.EnableToolsRemediateOnRebootFailed|Could not enable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.FailToLock|There are running tasks for the entity [data.name] that cannot finish within a specific time. The operation will stop.ExtendedEventcom.vmware.vcIntegrity.FtFailedEvent|ExtendedEventerrorcom.vmware.vcIntegrity.GADvdMountError|VMware vSphere Lifecycle Manager Guest Agent could not access the DVD drive on {vm.name}. 
Verify that a DVD drive is available and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAError|An internal error occurred in communication with VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Verify that the VM is powered on and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAInstallFailed|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Make sure that the VM is powered on.ExtendedEventinfocom.vmware.vcIntegrity.GAInstalled|VMware vSphere Lifecycle Manager Guest Agent successfully installed on {vm.name}.ExtendedEventerrorcom.vmware.vcIntegrity.GARuntimeError|An unknown internal error occurred during the required operation on {vm.name}. Check the logs for more details and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GATimeout|VMware vSphere Lifecycle Manager Guest Agent could not respond in time on {vm.name}. Verify that the VM is powered on and that the Guest Agent is running.ExtendedEventwarningcom.vmware.vcIntegrity.HostConfigurationOutOfCompliance|Configuration of Host [data.resource] is out of compliance.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallClose|Close [data.name] firewall ports.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallOpen|Open [data.name] firewall ports.ExtendedEventerrorcom.vmware.vcIntegrity.HostOperationCancelledDueToCertRefresh|In-flight VUM task on Host [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportCancelled|Host patch offline bundle upload is canceled by user.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportSuccess|[data.numBulletins] new bulletins uploaded successfully through offline bundle.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchBundleImportUnknownError|Host patch offline bundle upload did not succeed.ExtendedEventcom.vmware.vcIntegrity.HostPatchInputRecalledFailure|ExtendedEventcom.vmware.vcIntegrity.HostPatchPrerequisiteRecalledFailure|ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be remediated. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry remediation operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be remediated. Remove either of the patch from the baseline and retry the remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be staged. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be staged. 
Remove either of the patch from the baseline and retry the stage operation.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmFtEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster and contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmScanEvent|Cannot scan host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmStageEvent|Cannot stage host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtDiffPatchesEvent|Host {host.name} has FT enabled VMs. If you apply different patches to hosts in a cluster, FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtEvent|Cannot remediate host {host.name} because it contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtPairEvent|Host {host.name} has FT enabled VMs. The host on which the Secondary VMs reside is not selected for remediation. As a result FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedHacEvent|Cannot remediate host {host.name} because it is a part of a HA admission control enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedPxeUpgradeUnsupported|Upgrade operations are not supported on host {host.name} because it is PXE booted.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedRemovableDeviceEvent|Cannot remediate host {host.name} because it has VMs with a connected removable device. Disconnect all removable devices before remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorEsxFileDownload|Host [data.name] cannot download files from the VMware vSphere Lifecycle Manager patch store. Check the network connectivity and firewall setup, and verify that the host can access the configured patch store.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorNotInstallable|The selected patches [data.arg1] cannot be installed on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateConflictDependencies|The patches selected for remediation on the host [data.name] depend on other patches that have conflicts.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDefault|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDeviceAttached|Remediation did not succeed for [data.name]. The host has virtual machines [data.arg1] with connected removable media devices. This prevents the host from entering maintenance mode. Disconnect the removable devices and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateEnterMmode|Remediation did not succeed for [data.name]. The host could not enter maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateExitMmode|Remediation did not succeed for [data.name]. The host could not exit maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReboot|Remediation did not succeed for [data.name]. The host did not reboot after remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRebootReconnect|Remediation did not succeed for [data.name]. 
VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect after a reboot.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReconnect|Remediation did not succeed for [data.name]. VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRestoreVm|Remediation did not succeed for [data.name]. Restoring the power state or device connection state for one or more virtual machines on the host did not succeed.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateMetadataCorrupt|Remediation did not succeed for [data.name]. The patch metadata is corrupted. This might be caused by an invalid format of metadata content. You can try to re-download the patches.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateVibDownload|Remediation did not succeed for [data.name]. There were errors while downloading one or more software packages. Check the VMware vSphere Lifecycle Manager network connectivity settings.ExtendedEventcom.vmware.vcIntegrity.HostUpdateErrorVsanHealthCheckFailed|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeAgentDeployFailure|Cannot deploy upgrade agent on host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailBootDiskSize|The boot disk has a size of [data.found] MiB, the minimum requirement of the upgrade image is [data.expected] MiB.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailConflictingVibs|The upgrade contains conflicting VIBs. Remove the conflicting VIBs or use Image Builder to create a custom upgrade ISO image that contains the newer versions of the conflicting VIBs, and try to upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreakageUnsure|Cannot determine whether the upgrade breaks Cisco Nexus 1000V virtual network switch feature on the host. If the host does not have the feature, you can ignore this warning.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreaks|Cisco Nexus 1000V virtual network switch feature installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailEESXInsufficientSpaceForImage|Cannot create a ramdisk of size [data.expected]MB to store the upgrade image. Check if the host has sufficient memory.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailESXInsufficientSpaceForImage|Upgrade requires at least [data.expected]MB free space on boot partition to store the upgrade image, only [data.found]MB found. Retry after freeing up sufficient space or perform a CD-based installation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailHostHardwareMismatch|The upgrade is not supported on the host hardware. The upgrade ISO image contains VIBs that failed the host hardware compatibility check.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPInImage|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V software package [data.expected] installed on the host. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPUSupportedHost|The host is managing a DPU(s) and is a part of vLCM baselines-managed cluster, which is not supported. 
Move the host to vLCM image-managed cluster and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleHostAcceptance|The upgrade package is not compatible with the host. Use an upgrade package that meets the host's acceptance level or change the host's acceptance level to match that of the upgrade package.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePartitionLayout|The host cannot be upgraded due to incompatible partition layout.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePasswords|The passwords cannot be migrated because the password encryption scheme is incompatible.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleSphereletVersion|Spherelet version [data.found] is not compatible with ESXi 8.0 and later version. Please upgrade your WCP cluster to install a compatible Spherelet version, or remove Spherelet if the host is not in a WCP cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleWithDvsCP|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientEntropyCache|Storage entropy cache is not full. A full entropy cache is required for upgrade. Refer to KB 89854 for steps on how to refill the cache.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientMemory|Insufficient memory found on the host: [data.expected]MB required, [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientSpaceForConfig|Upgrade requires at least [data.expected]MB free space on a local VMFS datastore, only [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailLockerSpaceAvail|The system has insufficient locker space for the image profile.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPBreaksDvsCP|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image that is compatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPInImage|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image [data.found]. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingGunzipChecksumVibs|These VIB(s) on the host do not have the required sha-256 gunzip checksum for their payloads: [data.found]. This will prevent VIB security verification and secure boot from functioning properly. Please remove these VIBs and check with your vendor for a replacement of these VIBs.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeBootBank|The system image on the attached iso lacks a storage driver for the installed bootbank.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeNic|The system image on the attached iso lacks a NIC driver for the management network traffic.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMD5RootPassword|The root password is not using MD5 hashing, causing it to be authenticated up to only 8 characters. 
For instructions on how to correct this, see VMware KB 1024500 at http://kb.vmware.com/kb/1024500.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMinCpuCores|New ESXi version requires a minimum of [data.expected] processor cores.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoVt|Processor does not support hardware virtualization or it is disabled in BIOS. Virtual machine performance may be slow.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNonVmwareSoftware|The software modules [data.found] found on the host are not part of the upgrade image. These modules will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNvdsToCvdsMigration|ESXi host is not ready for NSX-T vSphere Distributed Switch (VDS) migration included with this ESXi upgrade. Please run Upgrade Readiness Tool (URT) from the NSX-T Manager managing this host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNxbitEnabled|No eXecute (NX) bit is not enabled on the host. New ESXi version requires a CPU with NX/XD bit supported and enabled.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPendingReboot|Host software configuration requires a reboot. Reboot the host and try upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPowerPathBreaks|EMC PowerPath module [data.found] installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailRRFTVMsPresent|Legacy FT is not compatible with upgraded version. Disable legacy FT.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailScriptInitFailed|Host upgrade validity checks are not successful.ExtendedEventcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailTbootRequired|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnknown|The upgrade precheck script returned unknown error.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedConfig|Error in ESX configuration file (esx.conf).ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedDevices|Unsupported devices [data.found] found on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedHostVersion|Host version [data.found] is not supported for upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedLongMode|Host CPU is unsupported. New ESXi version requires a 64-bit CPU with support for LAHF/SAHF instructions in long mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedSHA1Cert|SHA-1 signature found in host certificate {data.cert} with subject {data.subject}. Support for certificates with weak signature algorithm SHA-1 has been removed in ESXi 8.0. To proceed with upgrade, replace it with a SHA-2 signature based certificate. Refer to release notes and KB 89424 for more details.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedTPMVersion|TPM 1.2 device detected. Support for TPM version 1.2 is discontinued. Installation may proceed, but may cause the system to behave unexpectedly.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailVFATCorruption|A problem with one or more vFAT bootbank partitions was detected. 
Please refer to KB 91136 and run dosfsck on bootbank partitions.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeProgressAborted|Host upgrade installer stopped.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressAuth|Host upgrade in progress: Configuring authentication.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressBootloader|Host upgrade in progress: Boot setup.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressClearpart|Host upgrade in progress: Clearing partitions.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressComplete|Host upgrade installer completed.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressKeyboard|Host upgrade in progress: Setting keyboard.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressLanguage|Host upgrade in progress: Setting language.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMigrating|Host upgrade in progress: Migrating ESX v3 configuration to ESX v4.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMount|Host upgrade in progress: Mounting file systems.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressNetworking|Host upgrade in progress: Installing network configuration.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPackages|Host upgrade in progress: Installing packages.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartphys|Host upgrade in progress: Partitioning physical hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartvirt|Host upgrade in progress: Partitioning virtual hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPostscript|Host upgrade in progress: Running postinstallation script.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressRootpass|Host upgrade in progress: Setting root passwordExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressTimezone|Host upgrade in progress: Setting timezone.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressUnknown|Host upgrade in progress.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeRunScriptFailure|Cannot run upgrade script on host.ExtendedEventerrorcom.vmware.vcIntegrity.ImageRecommendationGenerationError|The image recommendation generation failed.ExtendedEventinfocom.vmware.vcIntegrity.ImageRecommendationGenerationFinished|The image recommendation generation finished.ExtendedEventerrorcom.vmware.vcIntegrity.IncompatibleTools|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name} because VMware Tools is not installed or is of an incompatible version. 
The required version is [data.requiredVersion] and the installed version is [data.installedVersion].ExtendedEventinfocom.vmware.vcIntegrity.InstallAddOnUpdate|The following additional patches are included to resolve a conflict for installation on [data.entityName]: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestion|To resolve a conflict for installation on [data.entityName], the following additional patches might need to be included in the baseline: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestionNotFound|VMware vSphere Lifecycle Manager could not find patches to resolve the conflict for installation on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdate|Installation of patches [data.updateId] started on host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdateComplete|Installation of patches succeeded on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.InstallUpdateError|Could not install patches on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.LinuxOffLineScanNotSupported|Cannot scan [data.name] for patches. Scan of powered off or suspended Linux VMs is not supported.ExtendedEventwarningcom.vmware.vcIntegrity.LowDiskSpace|VMware vSphere Lifecycle Manager is running out of storage space. Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.MetadataCorrupted|Patch definition for [data.name] is corrupt. Check the logs for more details. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.MetadataNotFound|Patch definitions for [data.name] are missing. Download patch definitions first.ExtendedEventerrorcom.vmware.vcIntegrity.NoRequiredLicense|There is no VMware vSphere Lifecycle Manager license for [data.name] for the required operation.ExtendedEventinfocom.vmware.vcIntegrity.NotificationCriticalInfoAlert|VMware vSphere Lifecycle Manager informative notification (critical) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationDownloadAlert|VMware vSphere Lifecycle Manager notification download alertExtendedEventinfocom.vmware.vcIntegrity.NotificationImportantInfoAlert|VMware vSphere Lifecycle Manager informative notification (important) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationModerateInfoAlert|VMware vSphere Lifecycle Manager informative notification (moderate) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallAlert|VMware vSphere Lifecycle Manager recall alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallFixAlert|VMware vSphere Lifecycle Manager recall fix alertExtendedEventerrorcom.vmware.vcIntegrity.OperationCancelledDueToCertRefresh|In-flight VUM task on [data.name] is cancelled due to VC TLS certificate replacement. 
For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventcom.vmware.vcIntegrity.PXEBootedHostEvent|ExtendedEventinfocom.vmware.vcIntegrity.PackageImport|Package [data.name] is successfully imported.ExtendedEventerrorcom.vmware.vcIntegrity.PackageImportFailure|Import of package: [data.name] did not succeed.ExtendedEventinfocom.vmware.vcIntegrity.RebootHostComplete|Host [data.entityName] is successfully rebooted.ExtendedEventerrorcom.vmware.vcIntegrity.RebootHostError|Cannot reboot host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostStart|Start rebooting host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostWait|Waiting for host [data.entityName] to reboot.ExtendedEventerrorcom.vmware.vcIntegrity.ReconfigureClusterFailedEvent|VMware vSphere Lifecycle Manager could not restore HA admission control/DPM settings for cluster {computeResource.name} to their original values. These settings have been changed for patch installation. Check the cluster settings and restore them manually.ExtendedEventinfocom.vmware.vcIntegrity.Remediate|Remediation succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedHost|Could not remediate {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedVm|Could not remediate {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDpmDisableHost|Could not remediate host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.RemediateFailed|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostInvalidPowerState|Cannot remediate the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostOnUnsupportedHost|Could not remediate {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediateOrphanedVm|Could not remediate orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.RemediateStart|Remediating object [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateVmOnUnsupportedHost|Could not remediate {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediationStatusEvent|Current progress of remediation: [data.noOfSucceededHosts] hosts completed successfully, [data.noOfFailedHosts] hosts completed with errors, [data.noOfHostsBeingRemediatedCurrently] hosts are being remediated, [data.noOfWaitingHosts] hosts are waiting to start remediation, and [data.noOfRetryHosts] hosts could not enter maintenance mode and are waiting to retry.ExtendedEventinfocom.vmware.vcIntegrity.Scan|Successfully scanned [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanCancelled|Scanning of [data.name] is canceled by user.ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedHost|Could not scan {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedVm|Could not scan {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDpmDisableHost|Could not scan host {host.name} because its power state is invalid. 
The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.ScanFailed|Could not scan [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostInvalidPowerState|Cannot scan the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostOnUnsupportedHost|Could not scan {host.name} for patches because it is of unsupported version [data.version].ExtendedEventwarningcom.vmware.vcIntegrity.ScanMissingUpdate|Found a missing patch: [data.message] when scanning [data.name]. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.ScanOrphanedVm|Could not scan orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.ScanStart|Scanning object [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.ScanUnsupportedVolume|{vm.name} contains an unsupported volume [data.volumeLabel]. Scan results for this VM might be incomplete.ExtendedEventerrorcom.vmware.vcIntegrity.ScanVmOnUnsupportedHost|Could not scan {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventerrorcom.vmware.vcIntegrity.SequentialRemediateFailedEvent|An error occurred during the sequential remediation of hosts in cluster {computeResource.name}. Check the related events for more details.ExtendedEventinfocom.vmware.vcIntegrity.SkipSuspendedVm|Suspended VM {vm.name} has been skipped.ExtendedEventinfocom.vmware.vcIntegrity.Stage|Staging succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.StageDisconnectedHost|Could not stage patches to {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageDpmDisableHost|Could not stage patches to host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.StageFailed|Staging did not succeed for [data.name][data.message].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostInvalidPowerState|Cannot stage patches to the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostOnUnsupportedHost|Could not stage patches to {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.StageStart|Staging patches to host [data.name].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdate|Started staging of patches [data.updateId] on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdateComplete|Staging of patch to [data.entityName] succeeded.ExtendedEventerrorcom.vmware.vcIntegrity.StageUpdateError|Cannot stage patch [data.updateId] to [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.SysprepDisabled|Sysprep is disabled during the remediation.ExtendedEventinfocom.vmware.vcIntegrity.SysprepEnabled|Sysprep settings are restored.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepHandleFailure|Cannot access the sysprep settings for VM {vm.name}. Retry the operation after disabling sysprep for the VM.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepNotFound|Cannot locate the sysprep settings for VM {vm.name}. For Windows 7 and Windows 2008 R2, offline VM remediation is supported only if the system volume is present in the primary disk partition. 
Retry the operation after disabling sysprep for the VM.ExtendedEventinfocom.vmware.vcIntegrity.ToolsRemediate|VMware Tools upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ToolsRemediateFailed|VMware Tools upgrade did not succeed for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.ToolsScan|Successfully scanned [data.name] for VMware Tools upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.ToolsScanFailed|Could not scan [data.name] for VMware Tools upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsScanInstallNotSupported|VMware Tools is not installed on [data.name]. VMware vSphere Lifecycle Manager supports upgrading only an existing VMware Tools installation.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeRemediateSkippedOnHost|VMware Tools upgrade was not performed on {vm.name}. VMware Tools upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade is not supported for virtual appliances.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeScanSkippedOnHost|VMware Tools upgrade scan was not performed on {vm.name}. VMware Tools upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedHostRemediateSpecialVMEvent|The host [data.name] has a VM [data.vm] with VMware vSphere Lifecycle Manager or VMware vCenter Server installed. The VM must be moved to another host for the remediation to proceed.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedLinuxAction|Action is not supported for Linux VM/VA {vm.name}. VMware Tools is not installed or the machine cannot start.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedOs|Scan or remediation is not supported on [data.name] because of unsupported OS [data.os].ExtendedEventinfocom.vmware.vcIntegrity.UnsupportedPXEBootHost|Scanning, remediation, and staging are not supported on PXE booted ESXi hosts.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedSpecialVMEvent|VM [data.name] has either VMware vSphere Lifecycle Manager or VMware vCenter Server installed. This VM will be ignored for scan and remediation.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedVaAction|Action is not supported for offline or suspended virtual appliance {vm.name}. ExtendedEventerrorcom.vmware.vcIntegrity.VAAutoUpdateOn|Auto update is set to ON for virtual appliance [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADiscovery|Successfully discovered virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADiscoveryFailed|Could not discover virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadGenericFailure|Could not download virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadInvalidUrl|[data.name] is not a valid virtual appliance download URL.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadMetadataFailure|Could not download virtual appliance upgrade metadata for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADownloadSuccess|Successfully downloaded virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VARepositoryAddressNotSet|No repository address is set for virtual appliance [data.name]. 
The appliance does not support updates by vCenter Server.ExtendedEventinfocom.vmware.vcIntegrity.VAScan|Successfully scanned [data.name] for VA upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VAScanFailed|Could not scan [data.name] for VA upgrades.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeRemediate|Virtual Hardware upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateFailed|Could not perform Virtual Hardware upgrade on [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateSkippedOnHost|Virtual Hardware upgrade was not performed for {vm.name}. Virtual Hardware upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade is not supported for virtual appliances.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeScan|Successfully scanned [data.name] for Virtual Hardware upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeScanFailed|Could not scan [data.name] for Virtual Hardware upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeScanSkippedOnHost|Virtual Hardware upgrade scan was not performed for {vm.name}. Virtual Hardware upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotInstalled|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not installed. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotLatest|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not the latest version supported by the host. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnknown|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is unknown. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnmanaged|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is not managed by VMware vSphere. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsAutoUpgradeUnsupported|The version of VMware Tools installed in {vm.name} does not support automatic upgrade. Upgrade VMware Tools manually.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsNotRunning|Error while waiting for VMware Tools to respond. Verify that VMware Tools is running in VM {vm.name}.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForInstall|Patch [data.inputBulletin] was excluded from the remediation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the remediation. You can also add the baselines to a baseline group for convenience and perform the remediation.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForStage|Patch [data.inputBulletin] was excluded from the stage operation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the stage operation. 
You can also add the baselines to a baseline group for convenience and perform the stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.VmDevicesRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original removable device connection settings for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the settings for the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmMigrationFailedEvent|Cannot migrate VM {vm.name} from [data.srcHost] to [data.destHost].ExtendedEventerrorcom.vmware.vcIntegrity.VmPowerRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original power state for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the original power state of the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmotionCompatibilityCheckFailedEvent|Cannot check compatibility of the VM {vm.name} for migration with vMotion to host [data.hostName].EventExAgency createdinfocom.vmware.vim.eam.agency.create|{agencyName} created by {ownerName}EventExAgency destroyedinfocom.vmware.vim.eam.agency.destroyed|{agencyName} removed from the vSphere ESX Agent ManagerEventExAgency state changedinfocom.vmware.vim.eam.agency.goalstate|{agencyName} changed goal state from {oldGoalState} to {newGoalState}EventExAgency status changedinfocom.vmware.vim.eam.agency.statusChanged|Agency status changed from {oldStatus} to {newStatus}EventExAgency reconfiguredinfocom.vmware.vim.eam.agency.updated|Configuration updated {agencyName}EventExCluster Agent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterPowerOn|Cluster Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterProvisioning|Cluster Agent VM {vm.name} has been provisioned. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM is about to be powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailablePrePowerOn|Cluster Agent VM {vm.name} is about to be powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent added to hostinfocom.vmware.vim.eam.agent.created|Agent added to host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyed|Agent removed from host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyedNoHost|Agent removed from host ({agencyName})EventExAgent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterPowerOn|Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterProvisioning|Agent VM {vm.name} has been provisioned. 
Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent status changedinfocom.vmware.vim.eam.agent.statusChanged|Agent status changed from {oldStatus} to {newStatus}EventExAgent VM is deletedinfocom.vmware.vim.eam.agent.task.deleteVm|Agent VM {vmName} is deleted on host {host.name} ({agencyName})EventExAgent VM is provisionedinfocom.vmware.vim.eam.agent.task.deployVm|Agent VM {vm.name} is provisioned on host {host.name} ({agencyName})EventExAgent VM powered offinfocom.vmware.vim.eam.agent.task.powerOffVm|Agent VM {vm.name} powered off, on host {host.name} ({agencyName})EventExAgent VM powered oninfocom.vmware.vim.eam.agent.task.powerOnVm|Agent VM {vm.name} powered on, on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibInstalled|Agent installed VIB {vib} on host {host.name} ({agencyName})EventExVIB uninstalledinfocom.vmware.vim.eam.agent.task.vibUninstalled|Agent uninstalled VIB {vib} on host {host.name} ({agencyName})EventExwarningcom.vmware.vim.eam.issue.agencyDisabled|Agency is disabledEventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentOVF|Unable to access agent OVF package at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentVib|Unable to access agent VIB module at {url} ({agencyName})EventExcom.vmware.vim.eam.issue.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInMaintenanceMode|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterResources|Cluster Agent VM cannot be powered on due to insufficient resources on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterSpace|Cluster Agent VM on cluster {computeResource.name} cannot be provisioned due to insufficient space on cluster datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.invalidConfig|Cluster Agent VM {vm.name} on cluster {computeResource.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmDatastore|Cluster Agent VM datastore(s) {customAgentVmDatastoreName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmNetwork|Cluster Agent VM network(s) {customAgentVmNetworkName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.ovfInvalidProperty|OVF environment used to provision cluster Agent VM on cluster {computeResource.name} has one or more invalid properties ({agencyName})EventExcom.vmware.vim.eam.issue.cluster.agent.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotDeployed|Cluster Agent VM is missing on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotRemoved|Cluster Agent VM {vm.name} is provisioned when it should be removed ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOff|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOn|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmSuspended|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be 
powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostInMaintenanceMode|Agent cannot complete an operation since the host {host.name} is in maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.hostInStandbyMode|Agent cannot complete an operation since the host {host.name} is in standby mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostNotReachable|Host {host.name} must be powered on and connected to complete agent operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.immediateHostRebootRequired|Host {host.name} must be rebooted immediately to unblock agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.incompatibleHostVersion|Agent is not deployed due to incompatible host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientResources|Agent cannot be provisioned due to insufficient resources on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientSpace|Agent on {host.name} cannot be provisioned due to insufficient space on datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotDeleteSoftware|Cannot remove the Baseline associated with agency {agencyName} from VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotStageSoftware|The software defined by agency {agencyName} cannot be staged in VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.vUMUnavailable|VMware Update Manager was unavailable during agency {agencyName} operationsEventExerrorcom.vmware.vim.eam.issue.invalidConfig|Agent VM {vm.name} on host {host.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmDatastore|No agent datastore configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmNetwork|No agent network configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmDatastore|Agent datastore(s) {customAgentVmDatastoreName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmNetwork|Agent network(s) {customAgentVmNetworkName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmDatastore|Agent datastore cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmNetwork|Agent network(s) cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidFormat|OVF used to provision agent on host {host.name} has invalid format ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidProperty|OVF environment used to provision agent on host {host.name} has one or more invalid properties ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotConfigureSolutions|The required solutions defined by agency {agencyName} cannot be configured in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotUploadDepot|Software defined by agency {agencyName} cannot be uploaded in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.inaccessibleDepot|Unable to access software defined by agency {agencyName}EventExerrorcom.vmware.vim.eam.issue.personality.agency.invalidDepot|Software defined by agency {agencyName} contains invalid vSphere Lifecycle Manager related 
metadataEventExerrorcom.vmware.vim.eam.issue.personality.agency.pMUnavailable|vSphere Lifecycle Manager was unavailable during agency {agencyName} operationsEventExinfocom.vmware.vim.eam.issue.personality.agent.awaitingPMRemediation|Agent requires application of configured solutions through vSphere Lifecycle Manager on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agent.blockedByAgencyOperation|Agency issues related to vSphere Lifecycle Manager require resolution to unblock host {host.name} ({agencyName})EventExinfocom.vmware.vim.eam.issue.resolved|Issue {type} resolved (key {key})EventExerrorcom.vmware.vim.eam.issue.vibCannotPutHostInMaintenanceMode|Cannot put host into maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.vibCannotPutHostOutOfMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.vibDependenciesNotMetByHost|VIB module dependencies for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibInvalidFormat|Invalid format for VIB module at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibNotInstalled|VIB module for agent is not installed/removed on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequirementsNotMetByHost|VIB system requirements for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostInMaintenanceMode|Host must be put into maintenance mode to complete agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostReboot|Host {host.name} must be rebooted to complete agent VIB installation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualInstallation|VIB {vib} requires manual installation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualUninstallation|VIB {vib} requires manual uninstallation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmCorrupted|Agent VM {vm.name} on host {host.name} is corrupted ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmDeployed|Agent VM {vm.name} is provisioned on host {host.name} when it should be removed ({agencyName})EventExcom.vmware.vim.eam.issue.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.vmNotDeployed|Agent VM is missing on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmOrphaned|Orphaned agent VM {vm.name} on host {host.name} detected ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOff|Agent VM {vm.name} on host {host.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOn|Agent VM {vm.name} on host {host.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmRequiresHostOutOfMaintenanceMode|Agent cannot deploy Agent VM since the host {host.name} is in maintenance mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmSuspended|Agent VM {vm.name} on host {host.name} is expected to be powered on but is suspended ({agencyName})ExtendedEventInvalid loginwarningcom.vmware.vim.eam.login.invalid|Failed login to vSphere ESX Agent ManagerEventExSuccessful login to vSphere ESX Agent Managerinfocom.vmware.vim.eam.login.succeeded|Successful login by {user} into vSphere ESX Agent ManagerEventExUser logged out of vSphere ESX Agent Managerinfocom.vmware.vim.eam.logout|User {user} logged out of vSphere ESX Agent Manager by logging out of the vCenter serverEventExUnauthorized access in vSphere ESX Agent 
Managerwarningcom.vmware.vim.eam.unauthorized.access|Unauthorized access by {user} in vSphere ESX Agent ManagerEventExChecked in virtual machine into a virtual machine template iteminfocom.vmware.vmtx.LibraryItemCheckInEvent|Checked in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to check in virtual machine into a virtual machine template itemerrorcom.vmware.vmtx.LibraryItemCheckInFailEvent|Failed to check in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'EventExDeleted the virtual machine checked out from the VM template iteminfocom.vmware.vmtx.LibraryItemCheckOutDeleteEvent|Deleted the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExFailed to delete the virtual machine checked out from the VM template itemerrorcom.vmware.vmtx.LibraryItemCheckOutDeleteFailEvent|Failed to delete the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExChecked out virtual machine template item as a virtual machineinfocom.vmware.vmtx.LibraryItemCheckOutEvent|Checked out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExFailed to check out virtual machine template item as a virtual machineerrorcom.vmware.vmtx.LibraryItemCheckOutFailEvent|Failed to check out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExA virtual machine checked out from the VM template item was orphaned after restorewarningcom.vmware.vmtx.LibraryItemCheckoutOrphanedOnRestoreEvent|A virtual machine (ID: {vmId}) checked out from the VM template item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was orphaned after restoreEventExCloned virtual machine to Content Library as VM templateinfocom.vmware.vmtx.LibraryItemCreateEvent|Cloned virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExFailed to clone virtual machine to Content Library as VM templateerrorcom.vmware.vmtx.LibraryItemCreateFailEvent|Failed to clone virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExDeleted a version of the virtual machine template iteminfocom.vmware.vmtx.LibraryItemDeleteVersionEvent|Deleted VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to delete a version of the virtual machine template itemerrorcom.vmware.vmtx.LibraryItemDeleteVersionFailEvent|Failed to delete VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'EventExDeployed virtual machine from Content Libraryinfocom.vmware.vmtx.LibraryItemDeployEvent|Deployed virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExFailed to deploy virtual machine from Content Libraryerrorcom.vmware.vmtx.LibraryItemDeployFailEvent|Failed to deploy virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExRolled back virtual machine template item to a previous versioninfocom.vmware.vmtx.LibraryItemRollbackEvent|Rolled back library item '{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'ExtendedEventFailed to roll back virtual machine template item to a previous versionerrorcom.vmware.vmtx.LibraryItemRollbackFailEvent|Failed to roll back library item 
'{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'EventExA virtual machine template managed by Content Library was converted to a virtual machineerrorcom.vmware.vmtx.LibraryItemTemplateConvertedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was converted to a virtual machineEventExA virtual machine template managed by Content Library was converted to a virtual machine after restorewarningcom.vmware.vmtx.LibraryItemTemplateConvertedOnRestoreEvent|The virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was found converted to a virtual machine after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was deletedEventExCould not locate a virtual machine template managed by Content Library after restorewarningcom.vmware.vmtx.LibraryItemTemplateDeletedOnRestoreEvent|Could not locate the virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateLatestVersionDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) is rolled back to the previous version because the latest VM template (ID: {vmId}) was deletedEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplatePreviousVersionDeletedEvent|Previous VM template (ID: {vmId}) of the library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was deletedEventExA virtual machine template managed by Content Library was renamedwarningcom.vmware.vmtx.LibraryItemTemplateRenamedEvent|The name of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will change to '{newItemName}' because the virtual machine template (ID: {vmId}) that the item manages was renamedExtendedEventAdded witness host to the cluster.infoAdded witness host to the cluster.com.vmware.vsan.clusterconfig.events.witnessadditiondone|Added witness host to the cluster.ExtendedEventRemoved witness host from the cluster.infoRemoved witness host from the cluster.com.vmware.vsan.clusterconfig.events.witnessremovaldone|Removed witness host from the cluster.ExtendedEventAdd disk group back to the vSAN cluster.infoAdd disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.adddisks|Add disk group back to the vSAN cluster on host {host.name}.ExtendedEventFailed to add disk group back to the vSAN cluster.errorFailed to add disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.addfail|Failed to add disk group back to the vSAN cluster on host {host.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is done.com.vmware.vsan.diskconversion.events.formatdone|Disk format conversion is done on cluster {computeResource.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is 
done.com.vmware.vsan.diskconversion.events.formathostdone|Disk format conversion is done on host {host.name}.ExtendedEventFailed to migrate vsanSparse objects.errorFailed to migrate vsanSparse objects.com.vmware.vsan.diskconversion.events.migrationfail|Failed to migrate vsanSparse objects on cluster {computeResource.name}.ExtendedEventNo disk conversion performed, all mounted disk groups on host are compliantinfoNo disk conversion performed, all mounted disk groups on host are compliant.com.vmware.vsan.diskconversion.events.noneed|No disk conversion performed, all mounted disk groups on host {host.name} are already compliant.ExtendedEventCheck existing objects on the vSAN cluster.infoCheck existing objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objectcheck|Check existing objects on the vSAN cluster.ExtendedEventObject conversion is done.infoObject conversion is done.com.vmware.vsan.diskconversion.events.objectdone|Object conversion is done.ExtendedEventFailed to convert objects on the vSAN cluster.errorFailed to convert objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objecterror|Failed to convert objects on the vSAN cluster.ExtendedEventRemove disk group from the vSAN cluster.infoRemove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removedisks|Remove disk group from the vSAN cluster on host {host.name}.ExtendedEventFailed to remove disk group from the vSAN cluster.errorFailed to remove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removefail|Failed to remove disk group on host {host.name} from the vSAN cluster.ExtendedEventRestore disk group from last break point.infoRestore disk group from last break point.com.vmware.vsan.diskconversion.events.restore|Restore disk group from last break point.ExtendedEventNo disk conversion performed, host has no mounted disk groups.infoNo disk conversion performed, host has no mounted disk groups.com.vmware.vsan.diskconversion.events.skiphost|No disk conversion performed, host {host.name} has no mounted disk groups.ExtendedEventCheck cluster status for disk format conversion.infoCheck cluster status for disk format conversion.com.vmware.vsan.diskconversion.events.statuscheck|Check status of cluster {computeResource.name} for disk format conversion.ExtendedEventcom.vmware.vsan.diskconversion.events.syncingtimeout|ExtendedEventUpdate the vSAN cluster system settings.infoUpdate the vSAN cluster system settings.com.vmware.vsan.diskconversion.events.updatesetting|Update the vSAN cluster system settings on host {host.name}.ExtendedEventDisk format conversion failed in what if upgrade.infoDisk format conversion failed in what if upgrade check.com.vmware.vsan.diskconversion.events.whatifupgradefailed|Disk format conversion failed in what if upgrade check.EventExMark ssd(s) as capacity flash.infoMark {disks} as capacity flash.com.vmware.vsan.diskmgmt.events.tagcapacityflash|Mark {disks} as capacity flash.EventExMark ssd as hdd.infoMark ssd {disk} as hdd.com.vmware.vsan.diskmgmt.events.taghdd|Mark ssd {disk} as hdd.EventExMark remote disk as local disk.infoMark remote disk {disk} as local disk.com.vmware.vsan.diskmgmt.events.taglocal|Mark remote disk {disk} as local disk.EventExMark hdd as ssd.infoMark hdd {disk} as ssd.com.vmware.vsan.diskmgmt.events.tagssd|Mark hdd {disk} as ssd.EventExRemove capacity flash mark from ssd(s).infoRemove capacity flash mark from {disks}.com.vmware.vsan.diskmgmt.events.untagcapacityflash|Remove capacity flash mark from 
{disks}.EventExAdvisorvSAN Health Test 'Advisor' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.advisor.event|vSAN Health Test 'Advisor' changed from '{prestatus}' to '{curstatus}'EventExAudit CEIP Collected DatavSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.auditceip.event|vSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'EventExCNS Critical Alert - Patch available with important fixesvSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.cnspatchalert.event|vSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.controllercacheconfig.event|vSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExCoredump partition size checkvSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.coredumpartitionsize.event|vSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'EventExUpgrade vSphere CSI driver with cautionvSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.csidriver.event|vSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.diskusage.event|vSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dualencryption.event|vSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExProper vSAN network traffic shaping policy is configuredvSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dvsportspeedlimit.event|vSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'EventExEnd of general support for lower vSphere versionvSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.eoscheck.event|vSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'EventExImportant patch available for vSAN issuevSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.fsvlcmpatchalert.event|vSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based 
controllervSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.h730.event|vSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExHPE SAS Solid State DrivevSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.hpesasssd.event|vSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration check for large scale clustervSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.largescalecluster.event|vSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lavenderalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExvSAN critical alert regarding a potential data inconsistencyvSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilacdeltacomponenttest.event|vSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Critical Alert - Patch available for critical vSAN issuevSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilypatchalert.event|vSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.marigoldalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmode.event|vSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmodeh730.event|vSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvSAN storage policy compliance up-to-datevSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.objspbm.event|vSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN online health test 'vSAN Hosts with new patch 
available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.patchalert.event|vSAN online health test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.pnicconsistent.event|vSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExVM storage policy is not-recommendedvSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.policyupdate.event|vSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'EventExMaximum host number in vSAN over RDMAvSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.rdmanodesalert.event|vSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExESXi system logs stored outside vSAN datastorevSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.scratchconfig.event|vSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.smalldiskstest.event|vSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExThick-provisioned VMs on vSANvSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.thickprovision.event|vSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExFix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabledvSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.unmaptest.event|vSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'EventExvSAN v1 disk in usevSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.v1diskcheck.event|vSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'EventExvCenter Server up to datevSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vcuptodate.event|vSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'EventExMultiple VMs share the same vSAN home namespacevSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vmns.event|vSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Support InsightvSAN Support Insight's 
status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanenablesupportinsight.event|vSAN Support Insight's status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpefwtest.event|vSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExCustomer advisory for HPE Smart ArrayvSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpesmartarraytest.event|vSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'EventExvSAN management service resource checkvSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanmgmtresource.event|vSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.witnesshw.event|vSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Advanced Configuration Check for Urgent vSAN ESA PatchvSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.zdomadvcfgenabled.event|vSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all of the hosts in a vSAN cluster have consistent advanced configuration options.vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.advcfgsync.event|vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN host daemon liveness.vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clomdliveness.event|vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSphere cluster members match vSAN cluster members.vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clustermembership.event|vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster configuration consistencyvSAN Health Test 'vSAN cluster configuration consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.consistentconfig.event|vSAN Health Test 'vSAN configuration consistency' status changed from '{prestatus}' to '{curstatus}'EventExESA prescriptive disk claimvSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.ddsconfig.event|vSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to 
'{curstatus}'EventExvSAN disk group layoutvSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.dglayout.event|vSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN disk balance statusvSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.diskbalance.event|vSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Conversion HealthvSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.esaconversionhealth.event|vSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN extended configuration in syncvSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.extendedconfig.event|vSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Managed disk claimvSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.hcldiskclaimcheck.event|vSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'EventExCheck host maintenance mode is in sync with vSAN node decommission state.vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.mmdecominsync.event|vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'EventExvSAN optimal datastore default policy configurationvSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.optimaldsdefaultpolicy.event|vSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN with RDMA supports up to 32 hosts.vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.rdmanodes.event|vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExResync operations throttlingvSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.resynclimit.event|vSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN Cluster time sync status among hosts and VCvSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.timedrift.event|vSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'EventExvSAN disk format statusvSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradelowerhosts.event|vSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'EventExSoftware version compatibilityvSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradesoftware.event|vSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to 
'{curstatus}'EventExVMware vCenter state is authoritativevSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vcauthoritative.event|vSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Direct homogeneous disk claimingvSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsandconfigconsistency.event|vSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'EventExvSphere Lifecycle Manager (vLCM) configurationvSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsanesavlcmcheck.event|vSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks the object format status of all vSAN objects.vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objectformat.event|vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health status of all vSAN objects.vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objecthealth.event|vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'EventExpNic RX/TX PauseRX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.pausecount.event|RX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX CRC ErrorRX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxcrcerr.event|RX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Generic ErrorRX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxerr.event|RX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX FIFO ErrorRX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxfifoerr.event|RX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Missed ErrorRX missed error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxmisserr.event|RX missed error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Buffer Overflow ErrorRX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxoverr.event|RX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Carrier ErrorTX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txcarerr.event|TX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Generic ErrorTX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txerr.event|TX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExRDT Checksum Mismatch ErrorRDT Checksum Mismatch count reaches {value}. (warning threshold: {yellowThreshold}, critical threshold: {redThreshold})vsan.health.test.diagnostics.rdt.checksummismatchcount.event|RDT Checksum Mismatch count reaches {value}. 
(warning threshold: {yellowThreshold}, critical threshold: {redThreshold})EventExData-in-transit encryption configuration checkvSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.ditencryption.ditconfig.event|vSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.dualencryption.event|vSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExChecks if CPU AES-NI is disabled on hostsvSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.hostcpuaesni.event|vSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VMware vCenter or any hosts are not connected to Key Management ServersvSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.kmsconnection.event|vSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Prescriptive Disk Claim ConfigurationsHost {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. Please add host with relevant disks or update disk claim specsvsan.health.test.esaprescriptivediskclaim.noeligibledisk|Host {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. 
Please add host with relevant disks or update disk claim specsEventExCheck vSAN File Service host file server agent vm state.vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.fileserver.event|vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'EventExInfrastructure HealthvSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.host.event|vSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'EventExFile Share HealthvSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.sharehealth.event|vSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'EventExVDS compliance check for hyperconverged cluster configurationvSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.dvshciconfig.event|vSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExHost compliance check for hyperconverged cluster configurationvSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.hosthciconfig.event|vSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN health alarm enablement statusvSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hciskip.event|vSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'EventExvSAN HCL DB Auto UpdatevSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.autohclupdate.event|vSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllercacheconfig.event|vSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN disk group type (All-Flash or Hybrid) is VMware certified for the used SCSI controllervSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdiskmode.event|vSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller driver is VMware certified.vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdriver.event|vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller firmware is VMware certified.vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerfirmware.event|vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller is compatible with the VMWARE Compatibility GuidevSAN Health Test 'SCSI 
controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controlleronhcl.event|vSAN Health Test 'SCSI controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExDisplays information about whether there is any driver supported for a given controller in the release of ESXi installed.vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerreleasesupport.event|vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based controllervSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.h730.event|vSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks the age of the VMware Hardware Compatibility Guid database.vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hcldbuptodate.event|vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any host failed to return its hardware information.vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hclhostbadstate.event|vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'EventExHost physical memory compliance checkvSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hostmemcheck.event|vSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmode.event|vSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmodeh730.event|vSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvsan.health.test.hcl.nvmeonhcl.event|EventExNetwork Interface Cards (NICs) used in vSAN hosts must meet certain requirements. These NIC requirements assume that the packet loss is not more than 0.0001% in the hyper-converged environments. It's recommended to use NIC which link speed can meet the minimum requirement. 
Otherwise, there can be a drastic impact on the vSAN performance.vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.pniclinkspeed.event|vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NICs used in this RDMA enabled vSAN cluster are certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmaniciscertified.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NIC's driver and firmware combination is certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportdriverfirmware.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the current ESXi release is certified for the RDMA NIC by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportesxrelease.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.vsanhpefwtest.event|vSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExHome objectvSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsihomeobjectstatustest.event|vSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExLUN runtime healthvSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsilunruntimetest.event|vSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExNetwork configurationvSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicenetworktest.event|vSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExService runtime statusvSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicerunningtest.event|vSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster claimed capacity is more than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.error.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. 
Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExvSAN cluster claimed capacity is less than the entitled capacity.vSAN cluster claimed capacity is less than the entitled capacity.vsan.health.test.licensecapacityusage.green.event|vSAN cluster claimed capacity is less than the entitled capacity.EventExvSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.warn.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExChecks the vSAN cluster storage space utilizationvSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.diskspace.event|vSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN component limits, disk space and RC reservations assuming one host failure.vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.limit1hf.event|vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'EventExChecks the component utilization for the vSAN cluster and each host in the cluster.vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.nodecomponentlimit.event|vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN cluster read cache utilizationvSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.rcreservation.event|vSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN cluster is partitioned due to a network issue.vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.clusterpartition.event|vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExCheck if there are duplicate IP addresses configured for vmknic interfaces.vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.duplicateip.event|vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a connectivity check for vSAN Max Client Network by checking the heartbeats from each host to all other hosts in server clustervSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.externalconnectivity.event|vSAN Health Test 'vSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if API calls from VC to a host are failing while the host is in vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostconnectivity.event|vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VC has an active 
connection to all hosts in the cluster.vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostdisconnected.event|vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a network latency check via ping small packet size ping test from all hosts to all other hostsvSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostlatencycheck.event|vSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN API calls from each host can reach to other peer hosts in the clustervSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.interhostconnectivity.event|vSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExCheck if LACP is working properly.vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.lacpstatus.event|vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hostsvSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.largeping.event|vSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster receive the multicast heartbeat of the vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastdeepdive.event|vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have matching IP multicast configuration.vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsettings.event|vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any of the hosts in the vSAN cluster have IP multicast connectivity issue.vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsuspected.event|vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'EventExCheck if any host in remote vSAN client or server cluster has more than one vSAN vmknic configured.vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multiplevsanvmknic.event|vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.pnicconsistent.event|vSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExCheck if TSO is enabled for pNIC.vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.network.pnictso.event|vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to '{curstatus}'EventExCheck if the vSAN RDMA enabled physical NIC is configured for lossless traffic.vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.rdmaconfig.event|vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all hosts in client cluster have been in a single partition with all hosts in server vSAN cluster.vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.serverpartition.event|vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hostsvSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.smallping.event|vSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpinglarge.event|vSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpingsmall.event|vSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExCheck if all hosts in server cluster have a dedicated vSAN external vmknic configured.vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanexternalvmknic.event|vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have a configured vmknic with vSAN traffic enabled.vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanvmknic.event|vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'EventExCheck all remote VMware vCenter network connectivity.vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.xvcconnectivity.event|vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'EventExvSAN overall health statusvSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.overallsummary.event|vSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service data collectionvSAN Health Test 'Checks the statistics collection of the vSAN performance service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.collection.event|vSAN 
Health Test 'Checks statistics collection of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service network diagnostic mode statusvSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.diagmode.event|vSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'EventExNot all hosts are contributing stats to vSAN Performance ServicevSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.hostsmissing.event|vSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats primary electionvSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.masterexist.event|vSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service statusvSAN Health Test 'Checks status of vSAN Performance Service changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.perfsvcstatus.event|vSAN Health Test 'Checks status of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats DB object conflictsvSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.renameddirs.event|vSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the vSAN performance service statistics database objectvSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.statsdb.event|vSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service verbose mode statusvSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.verbosemode.event|vSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN has encountered an integrity issue of the metadata of a component on this disk.vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.componentmetadata.event|vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.diskusage.event|vSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on vital memory pools, needed for the correct operation of physical disks.vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.lsomheap.event|vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on the vital memory pool, needed for the operation of physical disks.vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.physicaldisks.lsomslab.event|vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to '{curstatus}'EventExStorage Vendor Reported Drive HealthvSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.phmhealth.event|vSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the free space on physical disks in the vSAN cluster.vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcapacity.event|vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the number of components on the physical disk reaches the maximum limitationvSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcomplimithealth.event|vSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN is using the disk with reduced performance.vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcongestion.event|vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'EventExChecks if there is an issue retrieving the physical disk information from hosts in the vSAN cluster.vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskhostissues.event|vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the physical disks for all hosts in the vSAN cluster.vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskoverall.event|vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.smalldiskstest.event|vSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExCluster Name is not found in ssd endurance alarmClusters - {clustername} is/are not found in alarm - vSAN Health Alarm for disk endurance check.vsan.health.test.ssdendurance.clusternotfound.event|Clusters - {clustername} is/are not found. Please edit alarm - 'vSAN Health Alarm for disk endurance check' and correct the cluster name.EventExThe stretched cluster contains multiple unicast agents. 
This means multiple unicast agents were set on non-witness hostsvSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithmultipleunicastagents.event|vSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain a valid witness hostvSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithoutonewitnesshost.event|vSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain two valid fault domainsvSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithouttwodatafaultdomains.event|vSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'EventExHost should setup unicast agent so that they are able to communicate with the witness nodevSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostunicastagentunset.event|vSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'EventExHost with an invalid unicast agentvsan.health.test.stretchedcluster.hostwithinvalidunicastagent.event|vSAN Health Test 'Invalid unicast agent' status changed from '{prestatus}' to '{curstatus}'EventExCluster contains hosts that do not support stretched clustervSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostwithnostretchedclustersupport.event|vSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'EventExUnexpected number of data hosts in shared witness cluster. 
This means more than 2 data hosts in one shared witness cluster.vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnessclusterdatahostnumexceed.event|vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'EventExPer cluster component limit scaled down for shared witness host because of insufficient memoryvSAN Health Test 'Shared witness per cluster component limit scaled down' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnesscomponentlimitscaleddown.event|vSAN Health Test 'Shared witness per-cluster component limit inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExChecks the network latency between the two fault domains and the witness hostvSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.siteconnectivity.event|vSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'EventExWitness node is managed by vSphere Lifecycle ManagervSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.vlcmwitnessconfig.event|vSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'EventExThe following witness node resides in one of the data fault domainsvSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessfaultdomaininvalid.event|vSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster incorporates a witness host inside VMware vCenter clustervSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessinsidevccluster.event|vSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'EventExThe following (witness) hosts have invalid preferred fault domainsvSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomaininvalid.event|vSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'EventExThe preferred fault domain does not exist in the cluster for the following witness hostvSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomainnotexist.event|vSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevsan.health.test.stretchedcluster.witnessupgissue.event|vSAN Health Test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExWitness appliance upgrade to vSphere 7.0 or higher with cautionvsan.health.test.stretchedcluster.witnessupgrade.event|vSAN Health Test 'Witness appliance upgrade to vSphere 7.0 or higher with caution' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster contains witness hosts with no disk claimedvSAN Health Test 'No disk 
claimed on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesswithnodiskmapping.event|vSAN Health Test 'No disk claimed on witness host' status changed from '{prestatus}' to '{curstatus}'EventExVMware Certified vSAN HardwarevSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vsanhardwarecert.event|vSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.patchalert.event|vSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExvSAN release catalog up-to-datevSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.releasecataloguptodate.event|vSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'EventExCheck configuration issues for vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumconfig.event|vSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'EventExESXi build recommended by vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumrecommendation.event|vSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'EventExThis object has the risk of PSOD issue due to improper DOM object flag leakThis object has the risk of PSOD issue due to improper DOM object flag leakvsan.health.test.zdom.leak|Objects {1} have the risk of PSOD issue due to improper DOM object flag leak. Please refer KB https://kb.vmware.com/s/article/89564VirtualMachineFaultToleranceStateFault Tolerance has not been configured for this virtual machinenotConfiguredFault Tolerance is disableddisabledFault Tolerance is enabledenabledFault Tolerant Secondary VM is not runningneedSecondaryFault Tolerance is startingstartingFault Tolerance is runningrunning
12857:20241101:185444.193 End of vmware_service_get_evt_severity() evt_severities:1989
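The block above is the tail of the EventEx catalog that vCenter returned for vmware_service_get_evt_severity(); the XML tags were lost when this log was captured, so the entries run together, but each entry still ends with its event key and message format separated by '|' (for example vsan.health.test.cloudhealth.vmns.event|...). A minimal sketch, assuming the flattened text is available as a Python string (catalog_text is a hypothetical name), of pulling those keys back out:

    import re

    # In this excerpt every visible entry is a vSAN health event whose key precedes the
    # '|' separator. The pattern is an assumption based on the entries shown here; the
    # full catalog of 1989 event types also uses other key prefixes.
    VSAN_KEY_RE = re.compile(r"(vsan\.[\w.]+)\|")

    def extract_vsan_event_keys(catalog_text: str) -> list[str]:
        """Return the vSAN event keys found in a tag-stripped EventEx catalog dump."""
        return sorted(set(VSAN_KEY_RE.findall(catalog_text)))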
12857:20241101:185444.194 In vmware_service_get_hv_ds_dc_dvs_list()
12857:20241101:185444.198 vmware_service_get_hv_ds_dc_dvs_list() SOAP response:
group-d1triggeredAlarmState365.1group-d1alarm-365yellowfalse39701datacenter-3nameNTK-corptriggeredAlarmStategroup-n7triggeredAlarmStategroup-h5triggeredAlarmStatedatastore-4041datastore-4050datastore-4046datastore-2007datastore-2006datastore-2005group-v4triggeredAlarmStategroup-n4029triggeredAlarmStategroup-v11triggeredAlarmStategroup-v4027triggeredAlarmStatedvs-21nameNTK-DSwitchuuid50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbgroup-v4056triggeredAlarmStatehost-4047host-4043host-4038
12857:20241101:185444.198 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_datacenters_list'
12857:20241101:185444.198 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_datacenters_list' found:0 total:0
12857:20241101:185444.198 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_hv_ds_dc_dvs_list'
12857:20241101:185444.198 In vmware_service_alarm_details_update() alarm:alarm-365
12857:20241101:185444.200 vmware_service_alarm_details_update() SOAP response:
alarm-365info.descriptionThis alarm is fired when vSphere Health detects new issues in your environment. This alarm will be retriggered even if acknowledged when new issues are detected. Go to Monitor -> Health for a detailed description of the issues.info.enabledtrueinfo.nameSkyline Health has detected issues in your vSphere environmentinfo.systemNameSkyline Health has detected issues in your vSphere environment
12857:20241101:185444.201 End of vmware_service_alarm_details_update() index:0
12857:20241101:185444.201 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_hv_ds_dc_dvs_list' found:1 total:1
12857:20241101:185444.201 End of vmware_service_get_hv_ds_dc_dvs_list():SUCCEED found hv:3 ds:6 dc:1
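The counts reported here (hv:3 ds:6 dc:1) line up with the managed object references visible in the tag-stripped response at 185444.198: hosts host-4047, host-4043 and host-4038, six datastore-* IDs, datacenter-3, plus dvs-21 and the triggered alarm alarm-365. A rough sketch, with soap_text as a hypothetical variable holding that flattened dump, of tallying the references:

    import re
    from collections import Counter

    # Managed object reference patterns as they appear in the dump (host-4047,
    # datastore-4041, datacenter-3, dvs-21, alarm-365). Illustration only; the
    # collector itself parses the real XML property sets, not flattened text.
    MOREF_RE = re.compile(r"((host|datastore|datacenter|dvs|alarm)-\d+)")

    def tally_morefs(soap_text: str) -> Counter:
        """Count distinct managed object references by type in a flattened SOAP dump."""
        refs = {full: kind for full, kind in MOREF_RE.findall(soap_text)}
        return Counter(refs.values())

    # For the response logged at 185444.198 this yields 6 datastores, 3 hosts,
    # 1 datacenter, 1 dvs and 1 alarm, in line with "found hv:3 ds:6 dc:1" above.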
12857:20241101:185444.201 In vmware_service_create_datastore() datastore:'datastore-4041'
12857:20241101:185444.203 vmware_service_create_datastore() SOAP response:
datastore-4041infoLocal_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/34100425523270368744177664703687441776642024-10-24T08:57:27.792Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0334252364185616396313666.8267155e10-d4545cb2-5b01-3cecef012e78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682111______8falsetruesummarydatastore-4041Local_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185444.203 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185444.203 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185444.203 End of vmware_service_create_datastore()
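The datastore responses above carry the usual DatastoreInfo/DatastoreSummary fields (name, url, capacity, freeSpace, type, accessible), but with the tags stripped the numeric values run together and cannot be read back reliably from this excerpt. Purely as an illustration, assuming capacity and free_space had already been parsed as byte counts (hypothetical names, not the collector's own variables), the derived figure a monitoring item typically wants is:

    def free_space_percent(capacity: int, free_space: int) -> float:
        """Free space as a percentage of total capacity; both arguments in bytes.
        Hypothetical helper for illustration, not part of the Zabbix collector code."""
        if capacity <= 0:
            return 0.0
        return 100.0 * free_space / capacity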
12857:20241101:185444.203 In vmware_service_create_datastore() datastore:'datastore-4050'
12857:20241101:185444.206 vmware_service_create_datastore() SOAP response:
datastore-4050infoLocal_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/3410042552327036874417766468169720922112703687441776642024-11-01T13:06:44.907432Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0134252364185616396313666.8267155cc9-bea5e318-19fd-ac1f6bb14c78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______8falsetruetruesummarydatastore-4050Local_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185444.206 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185444.206 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185444.206 End of vmware_service_create_datastore()
12857:20241101:185444.206 In vmware_service_create_datastore() datastore:'datastore-4046'
12857:20241101:185444.210 vmware_service_create_datastore() SOAP response:
datastore-4046infoLocal_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/34100425523270368744177664703687441776642024-11-01T11:53:36.643999Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0234252364185616396313666.8267155ba7-5e9d16d6-0733-3cecef02b6e0t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682100______8falsetruesummarydatastore-4046Local_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185444.210 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185444.210 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185444.210 End of vmware_service_create_datastore()
12857:20241101:185444.210 In vmware_service_create_datastore() datastore:'datastore-2007'
12857:20241101:185444.212 vmware_service_create_datastore() SOAP response:
datastore-2007info3PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/5031560478727036874417766468169720922112703687441776642024-11-01T13:06:44.904493Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID53153660247654416396313666.826704dec9-75e6c68a-c19e-9440c9831520naa.60002ac00000000000000054000228a31falsefalsefalsesummarydatastore-20073PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/53660247654450315604787242237661184truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185444.212 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185444.212 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185444.212 End of vmware_service_create_datastore()
12857:20241101:185444.212 In vmware_service_create_datastore() datastore:'datastore-2006'
12857:20241101:185444.214 vmware_service_create_datastore() SOAP response:
datastore-2006info3PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/1592765972487036874417766468169720922112703687441776642024-11-01T13:06:44.898963Z7036874417766468169720922112VMFS3PAR_KARTOHA_SAS_NTK_ID53516079283814416396313666.826703d63f-3516ce66-4bee-9440c9831520naa.60002ac0000000000000042f000219831falsefalsefalsesummarydatastore-20063PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/160792838144159276597248truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185444.214 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185444.214 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185444.214 End of vmware_service_create_datastore()
12857:20241101:185444.214 In vmware_service_create_datastore() datastore:'datastore-2005'
12857:20241101:185444.217 vmware_service_create_datastore() SOAP response:
datastore-2005info3PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/8543356846087036874417766468169720922112703687441776642024-11-01T18:34:30.288888Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID530_mgmt107347338854416396313666.826703d517-82086a06-cec0-9440c9831520naa.60002ac0000000000000004a000228a31falsefalsefalsesummarydatastore-20053PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/10734733885448543356846080truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185444.217 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185444.217 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185444.217 End of vmware_service_create_datastore()
12857:20241101:185444.217 In vmware_service_get_clusters_and_resourcepools()
12857:20241101:185444.220 vmware_service_get_clusters_and_resourcepools() SOAP response:
domain-c1002nameNTK-corptriggeredAlarmStateresgroup-1003nameResourcesparentdomain-c1002resourcePoolresgroup-4001resgroup-4026resgroup-4026nameNTKparentresgroup-1003resourcePoolresgroup-4001namemgmtparentresgroup-1003resourcePool
12857:20241101:185444.220 In vmware_service_process_cluster_data()
12857:20241101:185444.220 In vmware_service_get_alarms_data(), func_parent:'vmware_service_process_cluster_data'
12857:20241101:185444.221 End of vmware_service_get_alarms_data() func_parent:'vmware_service_process_cluster_data' found:0 total:1
12857:20241101:185444.221 End of vmware_service_process_cluster_data():SUCCEED cl:1 rp:3
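The response at 185444.220 shows one cluster (domain-c1002, named NTK-corp) and three resource pools: resgroup-1003 'Resources' parented to the cluster, with resgroup-4026 'NTK' and resgroup-4001 'mgmt' under it, which matches the cl:1 rp:3 reported here (the later 'found cl:1 rp:2' presumably excludes the root 'Resources' pool). A small sketch of rebuilding that parent/child tree from the pairs visible above, hard-coded here for illustration:

    from collections import defaultdict

    # Parent links as read from the response logged at 185444.220 (names in comments).
    parents = {
        "resgroup-1003": "domain-c1002",   # "Resources" (root pool) -> cluster NTK-corp
        "resgroup-4026": "resgroup-1003",  # "NTK"
        "resgroup-4001": "resgroup-1003",  # "mgmt"
    }

    children = defaultdict(list)
    for pool, parent in parents.items():
        children[parent].append(pool)

    def walk(node, depth=0):
        """Print the resource pool hierarchy below a cluster or pool."""
        print("  " * depth + node)
        for child in sorted(children[node]):
            walk(child, depth + 1)

    walk("domain-c1002")
    # domain-c1002
    #   resgroup-1003
    #     resgroup-4001
    #     resgroup-4026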
12857:20241101:185444.221 In vmware_service_get_cluster_state() clusterid:'domain-c1002'
12857:20241101:185444.222 vmware_service_get_cluster_state() SOAP response:
domain-c1002datastoredatastore-2005datastore-2006datastore-2007datastore-4041datastore-4046datastore-4050summary.overallStatusgreen
12857:20241101:185444.223 End of vmware_service_get_cluster_state():SUCCEED
12857:20241101:185444.223 End of vmware_service_get_clusters_and_resourcepools():SUCCEED found cl:1 rp:2
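vmware_service_get_cluster_state() returned summary.overallStatus 'green' for domain-c1002 along with its six datastores. vSphere reports overall status as one of gray, green, yellow or red; the numeric mapping below is an illustrative choice for turning that string into something comparable, not the value map used by the Zabbix VMware template:

    # vSphere ManagedEntityStatus values as seen in summary.overallStatus above ("green").
    OVERALL_STATUS = {"gray": 0, "green": 1, "yellow": 2, "red": 3}

    def status_to_code(status: str) -> int:
        """Map a vSphere overallStatus string to a comparable integer (-1 for unknown)."""
        return OVERALL_STATUS.get(status.lower(), -1)

    # status_to_code("green") -> 1, matching the cluster state returned for domain-c1002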
12857:20241101:185444.223 In vmware_service_init_hv() hvid:'host-4047'
12857:20241101:185444.223 In vmware_service_get_hv_data() guesthvid:'host-4047'
12857:20241101:185444.223 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4047false
12857:20241101:185444.242 vmware_service_get_hv_data() SOAP response:
[SOAP response body for host-4047; XML markup was lost in this capture. Recoverable content:
config.network: hostname ntk-esxi-01, domain m1.ntk-corp.ru, DNS 10.50.242.78, default gateway 10.50.242.1; pNICs vmnic0/vmnic1 (i40en, 1000 Mb/s, MAC ac:1f:6b:b1:4c:78 / ac:1f:6b:b1:4c:79) and vmnic2/vmnic3 (icen, 25000 Mb/s, MAC 50:7c:6f:20:55:a8 / 50:7c:6f:20:55:a9)
config.storageDevice.scsiTopology: local SAMSUNG disk on vmhba1 plus 3PAR LUNs 256/530/531/535 reachable through FC HBAs vmhba2 and vmhba3
config.virtualNicManagerInfo.netConfig: vmk0 10.50.242.11/255.255.255.192 on dvportgroup-2301, listed across the management, vMotion, FT logging, provisioning, replication, vSAN and related service configs
datastore: datastore-2005, datastore-2006, datastore-2007, datastore-4050; overallStatus green; parent domain-c1002; runtime.connectionState connected; runtime.inMaintenanceMode false; hardwareStatusInfo and numericSensorInfo entries all Green
summary: name ntk-esxi-01.m1.ntk-corp.ru, VMware ESXi 8.0.3 build-24280767, Supermicro 'Super Server', Intel(R) Xeon(R) Gold 6242 @ 2800 MHz, 32 cores / 64 threads, memorySize 686832898048, uuid 00000000-0000-0000-0000-ac1f6bb14c78, managementServerIp 10.50.242.10, overallCpuUsage 164, overallMemoryUsage 16596, uptime 691010
triggeredAlarmState; vm vm-4060]
12857:20241101:185444.243 End of vmware_service_get_hv_data():SUCCEED
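For reference, the property set requested above can be cross-checked interactively against the same vCenter. The snippet below is a minimal, assumed sketch using pyVmomi (it is not part of Zabbix); the address and credentials vcenter.example.com / monitor / secret are placeholders, not values taken from this log. It prints a few of the same summary.quickStats fields the collector asks for on each HostSystem.

import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

# Lab-only: skip certificate verification; replace the placeholders below.
ctx = ssl._create_unverified_context()
si = SmartConnect(host="vcenter.example.com", user="monitor", pwd="secret", sslContext=ctx)
try:
    content = si.RetrieveContent()
    # Walk all HostSystem objects, as the collector does for host-4047, host-4043, ...
    view = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True)
    for hv in view.view:
        qs = hv.summary.quickStats
        print(hv.name, hv.overallStatus,
              "cpu:", qs.overallCpuUsage, "MHz",
              "mem:", qs.overallMemoryUsage, "MB",
              "uptime:", qs.uptime, "s")
    view.Destroy()
finally:
    Disconnect(si)

A slow or hanging response to this kind of query from the vCenter side shows up in the trace exactly as the timeouts logged further below.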
12857:20241101:185444.244 In vmware_service_get_hv_pnics_data()
12857:20241101:185444.244 End of vmware_service_get_hv_pnics_data() found:4
12857:20241101:185444.244 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12857:20241101:185444.244 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12857:20241101:185444.245 In vmware_hv_ip_search()
12857:20241101:185444.245 End of vmware_hv_ip_search() ip:10.50.242.11
12857:20241101:185444.245 In vmware_hv_get_parent_data() id:'host-4047'
12857:20241101:185444.247 vmware_hv_get_parent_data() SOAP response:
[SOAP response body; XML markup was lost in this capture. Recoverable content: parent chain for host-4047: cluster domain-c1002 'NTK-corp', datacenter datacenter-3 'NTK-corp', triggeredAlarmState]
12857:20241101:185444.248 End of vmware_hv_get_parent_data():SUCCEED
12857:20241101:185444.248 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4047"
12857:20241101:185444.248 In vmware_service_hv_disks_get_info() hvid:'host-4047'
12857:20241101:185444.248 vmware_service_hv_disks_get_info() count of scsiLun:21
12857:20241101:185444.266 vmware_service_hv_disks_get_info() SOAP response:
[SOAP response body for host-4047 config.storageDevice.scsiLun; XML markup was lost in this capture. Recoverable content: a local SATA disk t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005 (model SAMSUNG MZ7LH480, vendor ATA, serial S45PNC0R681954, revision 904Q, queueDepth 31, lunType disk, operationalState ok) plus 3PARdata 'VV' disks naa.2ff70002ac021983, naa.2ff70002ac0228a3, naa.60002ac0000000000000004a000228a3, naa.60002ac00000000000000054000228a3 and naa.60002ac0000000000000042f00021983 (all lunType disk, operationalState ok, queueDepth 64, revision 3315, serialNumber unavailable)]
12857:20241101:185444.266 In vmware_service_hv_disks_parse_info()
12857:20241101:185444.267 End of vmware_service_hv_disks_parse_info() created:6
12857:20241101:185444.267 End of vmware_service_hv_disks_get_info():SUCCEED for 6(vsan:0) / 21
12857:20241101:185444.267 In vmware_service_hv_get_multipath_data() hvid:'host-4047'
12857:20241101:185444.277 vmware_service_hv_get_multipath_data() SOAP response:
[SOAP response body for host-4047 config.storageDevice.multipathInfo; XML markup was lost in this capture. Recoverable content: the local SAMSUNG disk has a single active path vmhba1:C0:T0:L0 with FIXED policy; each 3PAR LUN (L256, L530, L531, L535) has four active paths spread over FC HBAs vmhba2 and vmhba3, with path policy VMW_PSP_RR and SATP VMW_SATP_ALUA]
12857:20241101:185444.277 End of vmware_service_hv_get_multipath_data():SUCCEED
12857:20241101:185444.277 In vmware_hv_ds_access_update() hv id:host-4047 hv dss:4 dss:6
12857:20241101:185444.279 vmware_hv_ds_access_update() SOAP response:
[SOAP response body; XML markup was lost in this capture. Recoverable content: for datastore-2005, datastore-2006, datastore-2007 and datastore-4050, host["host-4047"].mountInfo reports accessMode readWrite, accessible true, mounted true]
12857:20241101:185444.280 In vmware_hv_ds_access_parse()
12857:20241101:185444.280 In vmware_hv_get_ds_access() for DS:datastore-2005
12857:20241101:185444.280 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185444.280 In vmware_hv_get_ds_access() for DS:datastore-2006
12857:20241101:185444.280 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185444.280 In vmware_hv_get_ds_access() for DS:datastore-2007
12857:20241101:185444.280 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185444.280 In vmware_hv_get_ds_access() for DS:datastore-4050
12857:20241101:185444.280 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185444.280 End of vmware_hv_ds_access_parse() parsed:4
12857:20241101:185444.280 End of vmware_hv_ds_access_update():SUCCEED for 4 / 4
12857:20241101:185444.280 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID530_mgmt"
12857:20241101:185444.280 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_KARTOHA_SAS_NTK_ID535"
12857:20241101:185444.280 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID531"
12857:20241101:185444.280 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"Local_ntk-m1-esxi-01"
12857:20241101:185444.280 In vmware_service_create_vm() vmid:'vm-4060'
12857:20241101:185444.280 In vmware_service_get_vm_data() vmid:'vm-4060'
12857:20241101:185444.284 vmware_service_get_vm_data() SOAP response:
[SOAP response body for vm-4060; XML markup was lost in this capture. Recoverable content:
config.hardware: 2 vCPU, 8192 MB RAM; IDE/PS2/PCI/SIO controllers, keyboard, pointing device, video card, VMCI device, LSI Logic SCSI, AHCI; CD-ROM backed by [3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.iso on datastore-2005; 104,857,600 KB disk [3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdk; NIC with MAC 00:50:56:b0:80:48 on dvportgroup-4006
config: instanceUuid 50304101-157a-f442-58f4-550f05de33fe, uuid 42306756-2f64-b85a-a4fe-276cbfa19cb5
guest: Ubuntu Linux (64-bit), guestState running, hostName zabb-ntk-proxy, ipAddress 10.50.242.76, tools running (version 12389), / and /boot filesystems
layoutEx: .vmx, .vmsd, .vmdk descriptor and -flat extent, .nvram and swap files under [3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/
summary: name zabbix-proxy-ntk, memorySizeMB 8192, numCpu 2, powerState poweredOn, quickStats (overallCpuUsage 28, guestMemoryUsage 81, hostMemoryUsage 8222, privateMemory 8165, sharedMemory 3, uptimeSeconds 75787, ballooned/compressed/swapped 0), storage committed 116050111748, uncommitted 0, unshared 107374182858
parent folder group-v11 'Discovered virtual machine' under group-v4 'vm' in datacenter-3; resourcePool resgroup-4001; triggeredAlarmState]
12857:20241101:185444.284 End of vmware_service_get_vm_data():SUCCEED
12857:20241101:185444.285 In vmware_service_get_vm_folder() folder id:'group-v11'
12857:20241101:185444.285 End of vmware_service_get_vm_folder(): vm folder:Discovered virtual machine
12857:20241101:185444.285 In vmware_vm_get_nic_devices()
12857:20241101:185444.286 End of vmware_vm_get_nic_devices() found:1
12857:20241101:185444.286 In vmware_vm_get_disk_devices()
12857:20241101:185444.286 End of vmware_vm_get_disk_devices() found:1
12857:20241101:185444.286 In vmware_vm_get_file_systems()
12857:20241101:185444.286 End of vmware_vm_get_file_systems() found:2
12857:20241101:185444.286 In vmware_vm_get_custom_attrs()
12857:20241101:185444.286 End of vmware_vm_get_custom_attrs() attributes:0
12857:20241101:185444.286 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_vm'
12857:20241101:185444.286 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_vm' found:0 total:1
12857:20241101:185444.286 End of vmware_service_create_vm():SUCCEED
12857:20241101:185444.286 End of vmware_service_init_hv():SUCCEED
12857:20241101:185444.286 In vmware_service_init_hv() hvid:'host-4043'
12857:20241101:185444.286 In vmware_service_get_hv_data() guesthvid:'host-4043'
12857:20241101:185444.286 vmware_service_get_hv_data() SOAP request: [XML markup lost in capture; PropertyCollector request for HostSystem 'host-4043' with the same property set as the host-4047 request above]
12857:20241101:185444.300 vmware_service_get_hv_data() SOAP response:
[SOAP response body for host-4043; XML markup was lost in this capture. Recoverable content:
config.network: hostname ntk-esxi-02, domain m1.ntk-corp.ru, DNS 10.50.242.78, default gateway 10.50.242.1; pNICs vmnic0/vmnic1 (i40en, 1000 Mb/s, MAC 3c:ec:ef:02:b6:e0 / 3c:ec:ef:02:b6:e1) and vmnic2/vmnic3 (icen, 25000 Mb/s, MAC 50:7c:6f:3b:d8:c6 / 50:7c:6f:3b:d8:c7)
config.storageDevice.scsiTopology: local SAMSUNG disk on vmhba1 plus 3PAR LUNs 256/530/531/535 reachable through FC HBAs vmhba2 and vmhba3
config.virtualNicManagerInfo.netConfig: vmk0 10.50.242.12/255.255.255.192 on dvportgroup-2311, listed across the management, vMotion, FT logging, provisioning, replication, vSAN and related service configs
datastore: datastore-2005, datastore-2006, datastore-2007, datastore-4046; overallStatus green; parent domain-c1002; runtime.connectionState connected; runtime.inMaintenanceMode false; hardwareStatusInfo and numericSensorInfo entries all Green
summary: name ntk-esxi-02.m1.ntk-corp.ru, VMware ESXi 8.0.3 build-24280767, Supermicro SYS-6019P-WTR, Intel(R) Xeon(R) Gold 6242 @ 2800 MHz, 32 cores / 64 threads, memorySize 686831919104, uuid 00000000-0000-0000-0000-3cecef02b6e0, managementServerIp 10.50.242.10, overallCpuUsage 441, overallMemoryUsage 8871, uptime 691133
triggeredAlarmState; vm vm-4057]
12857:20241101:185444.300 End of vmware_service_get_hv_data():SUCCEED
12857:20241101:185444.301 In vmware_service_get_hv_pnics_data()
12857:20241101:185444.301 End of vmware_service_get_hv_pnics_data() found:4
12857:20241101:185444.301 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12857:20241101:185444.302 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12857:20241101:185444.302 In vmware_hv_ip_search()
12857:20241101:185444.302 End of vmware_hv_ip_search() ip:10.50.242.12
12857:20241101:185444.302 In vmware_hv_get_parent_data() id:'host-4043'
12857:20241101:185444.305 vmware_hv_get_parent_data() SOAP response:
[SOAP response body; XML markup was lost in this capture. Recoverable content: parent chain for host-4043: cluster domain-c1002 'NTK-corp', datacenter datacenter-3 'NTK-corp', triggeredAlarmState]
12857:20241101:185444.305 End of vmware_hv_get_parent_data():SUCCEED
12857:20241101:185444.305 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4043"
12857:20241101:185444.305 In vmware_service_hv_disks_get_info() hvid:'host-4043'
12857:20241101:185444.305 vmware_service_hv_disks_get_info() count of scsiLun:21
12861:20241101:185444.951 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002595 sec]'
12861:20241101:185444.951 In vmware_job_get() queue:2
12861:20241101:185444.951 End of vmware_job_get() queue:2 type:none
12855:20241101:185444.951 In vmware_job_get() queue:2
12855:20241101:185444.951 End of vmware_job_get() queue:2 type:none
12859:20241101:185444.952 In vmware_job_get() queue:2
12859:20241101:185444.952 End of vmware_job_get() queue:2 type:none
12861:20241101:185445.951 In vmware_job_get() queue:2
12861:20241101:185445.951 End of vmware_job_get() queue:2 type:none
12855:20241101:185445.952 In vmware_job_get() queue:2
12855:20241101:185445.952 End of vmware_job_get() queue:2 type:none
12859:20241101:185445.952 In vmware_job_get() queue:2
12859:20241101:185445.952 End of vmware_job_get() queue:2 type:none
12861:20241101:185446.952 In vmware_job_get() queue:2
12861:20241101:185446.952 End of vmware_job_get() queue:2 type:none
12855:20241101:185446.953 In vmware_job_get() queue:2
12855:20241101:185446.953 End of vmware_job_get() queue:2 type:none
12859:20241101:185446.953 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002687 sec]'
12859:20241101:185446.953 In vmware_job_get() queue:2
12859:20241101:185446.953 End of vmware_job_get() queue:2 type:none
12861:20241101:185447.953 In vmware_job_get() queue:2
12861:20241101:185447.953 End of vmware_job_get() queue:2 type:none
12859:20241101:185447.953 In vmware_job_get() queue:2
12859:20241101:185447.953 End of vmware_job_get() queue:2 type:none
12855:20241101:185447.953 In vmware_job_get() queue:2
12855:20241101:185447.953 End of vmware_job_get() queue:2 type:none
12837:20241101:185448.227 received configuration data from server at "10.50.242.78", datalen 437
12861:20241101:185448.953 In vmware_job_get() queue:2
12861:20241101:185448.953 End of vmware_job_get() queue:2 type:none
12859:20241101:185448.953 In vmware_job_get() queue:2
12859:20241101:185448.954 End of vmware_job_get() queue:2 type:none
12855:20241101:185448.954 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002980 sec]'
12855:20241101:185448.954 In vmware_job_get() queue:2
12855:20241101:185448.954 End of vmware_job_get() queue:2 type:none
12861:20241101:185449.953 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002253 sec]'
12861:20241101:185449.953 In vmware_job_get() queue:2
12861:20241101:185449.953 End of vmware_job_get() queue:2 type:none
12859:20241101:185449.954 In vmware_job_get() queue:2
12859:20241101:185449.954 End of vmware_job_get() queue:2 type:none
12855:20241101:185449.954 In vmware_job_get() queue:2
12855:20241101:185449.954 End of vmware_job_get() queue:2 type:none
12861:20241101:185450.954 In vmware_job_get() queue:2
12861:20241101:185450.954 End of vmware_job_get() queue:2 type:none
12859:20241101:185450.954 In vmware_job_get() queue:2
12859:20241101:185450.954 End of vmware_job_get() queue:2 type:none
12855:20241101:185450.954 In vmware_job_get() queue:2
12855:20241101:185450.954 End of vmware_job_get() queue:2 type:none
12861:20241101:185451.954 In vmware_job_get() queue:2
12861:20241101:185451.955 End of vmware_job_get() queue:2 type:none
12859:20241101:185451.955 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001957 sec]'
12859:20241101:185451.955 In vmware_job_get() queue:2
12859:20241101:185451.955 End of vmware_job_get() queue:2 type:none
12855:20241101:185451.955 In vmware_job_get() queue:2
12855:20241101:185451.955 End of vmware_job_get() queue:2 type:none
12861:20241101:185452.955 In vmware_job_get() queue:2
12861:20241101:185452.955 End of vmware_job_get() queue:2 type:none
12859:20241101:185452.955 In vmware_job_get() queue:2
12859:20241101:185452.955 End of vmware_job_get() queue:2 type:none
12855:20241101:185452.955 In vmware_job_get() queue:2
12855:20241101:185452.955 End of vmware_job_get() queue:2 type:none
12861:20241101:185453.955 In vmware_job_get() queue:2
12861:20241101:185453.955 End of vmware_job_get() queue:2 type:none
12859:20241101:185453.955 In vmware_job_get() queue:2
12859:20241101:185453.955 End of vmware_job_get() queue:2 type:none
12855:20241101:185453.956 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001602 sec]'
12855:20241101:185453.956 In vmware_job_get() queue:2
12855:20241101:185453.956 End of vmware_job_get() queue:2 type:none
12857:20241101:185454.307 End of vmware_service_hv_disks_get_info():FAIL for 0(vsan:0) / 21
12857:20241101:185454.307 End of vmware_service_init_hv():FAIL
12857:20241101:185454.307 Unable to initialize hv host-4043: Timeout was reached.
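Note the timing: vmware_service_hv_disks_get_info() for host-4043 started at 18:54:44.305 and the FAIL was logged at 18:54:54.307, almost exactly 10 seconds later, which is consistent with a SOAP call hitting the default 10-second VMwareTimeout of a Zabbix server/proxy rather than with an immediate error from vCenter. A rough way to pull such call durations out of a trace like this one is sketched below; the script is an assumed helper (not part of Zabbix) and relies only on the pid:date:time prefix format visible throughout this log.

import re
from datetime import datetime

# Matches the trace prefix used in this log: "<pid>:<yyyymmdd>:<HHMMSS.mmm> <message>"
LINE = re.compile(r"^(?P<pid>\d+):(?P<date>\d{8}):(?P<time>\d{6}\.\d{3}) (?P<msg>.*)$")

def ts(date, time):
    return datetime.strptime(date + time, "%Y%m%d%H%M%S.%f")

def report_slow_calls(path, threshold=5.0):
    """Pair each 'In <func>()' line with the next 'End of <func>()' line from the same
    PID and print calls longer than threshold seconds.
    Simplification: nested calls of the same function within one PID are not handled."""
    pending = {}
    with open(path) as fh:
        for raw in fh:
            m = LINE.match(raw)
            if not m:
                continue  # continuation line of a multi-line SOAP dump
            when, msg = ts(m["date"], m["time"]), m["msg"]
            if msg.startswith("In "):
                func = msg[3:].split("(")[0]
                pending[(m["pid"], func)] = when
            elif msg.startswith("End of "):
                func = msg[7:].split("(")[0]
                start = pending.pop((m["pid"], func), None)
                if start is not None and (when - start).total_seconds() > threshold:
                    dur = (when - start).total_seconds()
                    print(f"{func} took {dur:.3f}s (pid {m['pid']})")

# Example: report_slow_calls("zabbix_proxy.log") would flag the ~10 s
# vmware_service_hv_disks_get_info() call for host-4043 shown above.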
12857:20241101:185454.307 In vmware_service_init_hv() hvid:'host-4038'
12857:20241101:185454.307 In vmware_service_get_hv_data() guesthvid:'host-4038'
12857:20241101:185454.307 vmware_service_get_hv_data() SOAP request: [XML markup lost in capture; PropertyCollector request for HostSystem 'host-4038' with the same property set as the host-4047 request above]
12861:20241101:185454.955 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001844 sec]'
12861:20241101:185454.955 In vmware_job_get() queue:2
12861:20241101:185454.955 End of vmware_job_get() queue:2 type:none
12859:20241101:185454.956 In vmware_job_get() queue:2
12859:20241101:185454.956 End of vmware_job_get() queue:2 type:none
12855:20241101:185454.956 In vmware_job_get() queue:2
12855:20241101:185454.956 End of vmware_job_get() queue:2 type:none
12861:20241101:185455.955 In vmware_job_get() queue:2
12861:20241101:185455.955 End of vmware_job_get() queue:2 type:none
12859:20241101:185455.956 In vmware_job_get() queue:2
12859:20241101:185455.956 End of vmware_job_get() queue:2 type:none
12855:20241101:185455.956 In vmware_job_get() queue:2
12855:20241101:185455.956 End of vmware_job_get() queue:2 type:none
12861:20241101:185456.956 In vmware_job_get() queue:2
12861:20241101:185456.957 End of vmware_job_get() queue:2 type:none
12859:20241101:185456.957 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001843 sec]'
12859:20241101:185456.957 In vmware_job_get() queue:2
12859:20241101:185456.957 End of vmware_job_get() queue:2 type:none
12855:20241101:185456.957 In vmware_job_get() queue:2
12855:20241101:185456.957 End of vmware_job_get() queue:2 type:none
12861:20241101:185457.957 In vmware_job_get() queue:2
12861:20241101:185457.957 End of vmware_job_get() queue:2 type:none
12859:20241101:185457.957 In vmware_job_get() queue:2
12859:20241101:185457.957 End of vmware_job_get() queue:2 type:none
12855:20241101:185457.957 In vmware_job_get() queue:2
12855:20241101:185457.957 End of vmware_job_get() queue:2 type:none
12837:20241101:185458.253 received configuration data from server at "10.50.242.78", datalen 10211
12861:20241101:185458.957 In vmware_job_get() queue:2
12861:20241101:185458.957 End of vmware_job_get() queue:2 type:none
12859:20241101:185458.957 In vmware_job_get() queue:2
12859:20241101:185458.957 End of vmware_job_get() queue:2 type:none
12855:20241101:185458.957 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001836 sec]'
12855:20241101:185458.957 In vmware_job_get() queue:2
12855:20241101:185458.957 End of vmware_job_get() queue:2 type:none
12861:20241101:185459.957 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001754 sec]'
12861:20241101:185459.957 In vmware_job_get() queue:2
12861:20241101:185459.957 End of vmware_job_get() queue:2 type:none
12859:20241101:185459.957 In vmware_job_get() queue:2
12859:20241101:185459.957 End of vmware_job_get() queue:2 type:none
12855:20241101:185459.957 In vmware_job_get() queue:2
12855:20241101:185459.957 End of vmware_job_get() queue:2 type:none
12861:20241101:185500.957 In vmware_job_get() queue:2
12861:20241101:185500.957 End of vmware_job_get() queue:2 type:none
12855:20241101:185500.958 In vmware_job_get() queue:2
12855:20241101:185500.958 End of vmware_job_get() queue:2 type:none
12859:20241101:185500.958 In vmware_job_get() queue:2
12859:20241101:185500.958 End of vmware_job_get() queue:2 type:none
12861:20241101:185501.957 In vmware_job_get() queue:2
12861:20241101:185501.957 End of vmware_job_get() queue:2 type:none
12855:20241101:185501.958 In vmware_job_get() queue:2
12855:20241101:185501.958 End of vmware_job_get() queue:2 type:none
12859:20241101:185501.958 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002127 sec]'
12859:20241101:185501.958 In vmware_job_get() queue:2
12859:20241101:185501.958 End of vmware_job_get() queue:2 type:none
12861:20241101:185502.958 In vmware_job_get() queue:2
12861:20241101:185502.958 End of vmware_job_get() queue:2 type:none
12855:20241101:185502.958 In vmware_job_get() queue:2
12855:20241101:185502.958 End of vmware_job_get() queue:2 type:none
12859:20241101:185502.958 In vmware_job_get() queue:2
12859:20241101:185502.958 End of vmware_job_get() queue:2 type:none
12861:20241101:185503.958 In vmware_job_get() queue:2
12861:20241101:185503.958 End of vmware_job_get() queue:2 type:none
12855:20241101:185503.958 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000917 sec]'
12855:20241101:185503.958 In vmware_job_get() queue:2
12855:20241101:185503.958 End of vmware_job_get() queue:2 type:none
12859:20241101:185503.958 In vmware_job_get() queue:2
12859:20241101:185503.958 End of vmware_job_get() queue:2 type:none
12857:20241101:185504.309 End of vmware_service_get_hv_data():FAIL
12857:20241101:185504.309 End of vmware_service_init_hv():FAIL
12857:20241101:185504.309 Unable initialize hv host-4038: Timeout was reached.
12857:20241101:185504.309 In vmware_service_dvswitch_load() dvs count:0
12857:20241101:185504.309 End of vmware_service_dvswitch_load() count: 0 / 0
12857:20241101:185504.309 In vmware_service_props_load() props total:0
12857:20241101:185504.309 End of vmware_service_props_load() count: 0 / 0
12857:20241101:185504.309 In vmware_service_get_maxquerymetrics()
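The bursts from process 12857 above (vmware_service_get_hv_data()/vmware_service_init_hv() ending in FAIL, then "Unable initialize hv ...: Timeout was reached.") are the interesting events buried in the idle-poll noise. A minimal sketch for pulling them out of a trace log like this one, assuming only the "PID:YYYYMMDD:HHMMSS.mmm message" line layout shown above; the log file name is a placeholder:

import re
from collections import Counter

# matches e.g. "12857:20241101:185504.309 Unable initialize hv host-4038: Timeout was reached."
FAIL_RE = re.compile(r'^\d+:\d{8}:\d{6}\.\d{3} Unable initialize hv (\S+): (.+)$')

def summarize_hv_failures(path):
    """Count per-host initialization failures and their reported reasons."""
    reasons = Counter()
    with open(path, encoding='utf-8', errors='replace') as fp:
        for line in fp:
            m = FAIL_RE.match(line.rstrip('\n'))
            if m:
                reasons[(m.group(1), m.group(2))] += 1
    for (hv, reason), count in sorted(reasons.items()):
        print(f'{hv}: {count} failure(s): {reason}')

summarize_hv_failures('zabbix_proxy_trace.log')  # placeholder file name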
12861:20241101:185504.958 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000985 sec]'
12861:20241101:185504.958 In vmware_job_get() queue:2
12861:20241101:185504.958 End of vmware_job_get() queue:2 type:none
12855:20241101:185504.958 In vmware_job_get() queue:2
12855:20241101:185504.958 End of vmware_job_get() queue:2 type:none
12859:20241101:185504.958 In vmware_job_get() queue:2
12859:20241101:185504.958 End of vmware_job_get() queue:2 type:none
12861:20241101:185505.958 In vmware_job_get() queue:2
12861:20241101:185505.958 End of vmware_job_get() queue:2 type:none
12855:20241101:185505.958 In vmware_job_get() queue:2
12855:20241101:185505.958 End of vmware_job_get() queue:2 type:none
12859:20241101:185505.958 In vmware_job_get() queue:2
12859:20241101:185505.958 End of vmware_job_get() queue:2 type:none
12861:20241101:185506.958 In vmware_job_get() queue:2
12861:20241101:185506.958 End of vmware_job_get() queue:2 type:none
12855:20241101:185506.958 In vmware_job_get() queue:2
12855:20241101:185506.958 End of vmware_job_get() queue:2 type:none
12859:20241101:185506.959 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000655 sec]'
12859:20241101:185506.959 In vmware_job_get() queue:2
12859:20241101:185506.959 End of vmware_job_get() queue:2 type:none
12861:20241101:185507.958 In vmware_job_get() queue:2
12861:20241101:185507.959 End of vmware_job_get() queue:2 type:none
12855:20241101:185507.959 In vmware_job_get() queue:2
12855:20241101:185507.959 End of vmware_job_get() queue:2 type:none
12859:20241101:185507.959 In vmware_job_get() queue:2
12859:20241101:185507.959 End of vmware_job_get() queue:2 type:none
12837:20241101:185508.273 received configuration data from server at "10.50.242.78", datalen 437
12861:20241101:185508.959 In vmware_job_get() queue:2
12861:20241101:185508.959 End of vmware_job_get() queue:2 type:none
12855:20241101:185508.959 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000770 sec]'
12855:20241101:185508.959 In vmware_job_get() queue:2
12855:20241101:185508.959 End of vmware_job_get() queue:2 type:none
12859:20241101:185508.959 In vmware_job_get() queue:2
12859:20241101:185508.959 End of vmware_job_get() queue:2 type:none
12861:20241101:185509.959 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000876 sec]'
12861:20241101:185509.959 In vmware_job_get() queue:2
12861:20241101:185509.959 End of vmware_job_get() queue:2 type:none
12855:20241101:185509.959 In vmware_job_get() queue:2
12855:20241101:185509.959 End of vmware_job_get() queue:2 type:none
12859:20241101:185509.959 In vmware_job_get() queue:2
12859:20241101:185509.959 End of vmware_job_get() queue:2 type:none
12861:20241101:185510.959 In vmware_job_get() queue:2
12861:20241101:185510.959 End of vmware_job_get() queue:2 type:none
12855:20241101:185510.959 In vmware_job_get() queue:2
12855:20241101:185510.959 End of vmware_job_get() queue:2 type:none
12859:20241101:185510.959 In vmware_job_get() queue:2
12859:20241101:185510.959 End of vmware_job_get() queue:2 type:none
12861:20241101:185511.959 In vmware_job_get() queue:2
12861:20241101:185511.960 End of vmware_job_get() queue:2 type:none
12855:20241101:185511.960 In vmware_job_get() queue:2
12855:20241101:185511.960 End of vmware_job_get() queue:2 type:none
12859:20241101:185511.960 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000901 sec]'
12859:20241101:185511.960 In vmware_job_get() queue:2
12859:20241101:185511.960 End of vmware_job_get() queue:2 type:none
12861:20241101:185512.960 In vmware_job_get() queue:2
12861:20241101:185512.960 End of vmware_job_get() queue:2 type:none
12859:20241101:185512.960 In vmware_job_get() queue:2
12859:20241101:185512.961 End of vmware_job_get() queue:2 type:none
12855:20241101:185512.961 In vmware_job_get() queue:2
12855:20241101:185512.961 End of vmware_job_get() queue:2 type:none
12861:20241101:185513.961 In vmware_job_get() queue:2
12861:20241101:185513.961 End of vmware_job_get() queue:2 type:none
12859:20241101:185513.961 In vmware_job_get() queue:2
12859:20241101:185513.961 End of vmware_job_get() queue:2 type:none
12855:20241101:185513.961 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001961 sec]'
12855:20241101:185513.961 In vmware_job_get() queue:2
12855:20241101:185513.961 End of vmware_job_get() queue:2 type:none
12857:20241101:185514.311 End of vmware_service_get_maxquerymetrics():FAIL
12857:20241101:185514.311 In vmware_service_update_perf_entities()
12857:20241101:185514.311 In vmware_service_add_perf_entity() type:HostSystem id:host-4047
12857:20241101:185514.311 In zbx_vmware_service_get_perf_entity() type:HostSystem id:host-4047
12857:20241101:185514.311 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185514.311 In zbx_vmware_service_get_counterid() path:net/packetsRx[summation]
12857:20241101:185514.311 zbx_vmware_service_get_counterid() counterid:153
12857:20241101:185514.311 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.311 In zbx_vmware_service_get_counterid() path:net/packetsTx[summation]
12857:20241101:185514.311 zbx_vmware_service_get_counterid() counterid:154
12857:20241101:185514.311 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:net/received[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:155
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:net/transmitted[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:156
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:datastore/totalReadLatency[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:189
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:datastore/totalWriteLatency[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:190
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:datastore/numberReadAveraged[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:185
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:datastore/numberWriteAveraged[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:186
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:cpu/usage[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:2
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:cpu/utilization[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:398
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:power/power[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:164
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.312 In zbx_vmware_service_get_counterid() path:power/powerCap[average]
12857:20241101:185514.312 zbx_vmware_service_get_counterid() counterid:165
12857:20241101:185514.312 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:net/droppedRx[summation]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:605
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:net/droppedTx[summation]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:606
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:net/errorsRx[summation]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:613
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:net/errorsTx[summation]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:614
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:net/broadcastRx[summation]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:609
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:net/broadcastTx[summation]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:610
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 End of vmware_service_add_perf_entity() perfcounters:18
12857:20241101:185514.313 In vmware_service_add_perf_entity() type:VirtualMachine id:vm-4060
12857:20241101:185514.313 In zbx_vmware_service_get_perf_entity() type:VirtualMachine id:vm-4060
12857:20241101:185514.313 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:virtualDisk/read[average]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:180
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:virtualDisk/write[average]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:181
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:virtualDisk/numberReadAveraged[average]
12857:20241101:185514.313 zbx_vmware_service_get_counterid() counterid:178
12857:20241101:185514.313 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.313 In zbx_vmware_service_get_counterid() path:virtualDisk/numberWriteAveraged[average]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:179
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:net/packetsRx[summation]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:153
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:net/packetsTx[summation]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:154
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:net/received[average]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:155
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:net/transmitted[average]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:156
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:cpu/ready[summation]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:12
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:net/usage[average]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:150
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:cpu/usage[average]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:2
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:cpu/latency[average]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:540
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:cpu/readiness[average]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:548
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:cpu/swapwait[summation]
12857:20241101:185514.314 zbx_vmware_service_get_counterid() counterid:531
12857:20241101:185514.314 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.314 In zbx_vmware_service_get_counterid() path:sys/osUptime[latest]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:643
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:mem/consumed[average]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:98
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:mem/usage[average]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:24
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:mem/swapped[average]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:70
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:net/usage[average]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:150
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:virtualDisk/readOIO[latest]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:349
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:virtualDisk/writeOIO[latest]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:350
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:virtualDisk/totalWriteLatency[average]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:183
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:virtualDisk/totalReadLatency[average]
12857:20241101:185514.315 zbx_vmware_service_get_counterid() counterid:182
12857:20241101:185514.315 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.315 End of vmware_service_add_perf_entity() perfcounters:23
12857:20241101:185514.315 vmware_service_update_perf_entities() for type: VirtualMachine hv id: host-4047 hv uuid: 00000000-0000-0000-0000-ac1f6bb14c78 linked vm id: vm-4060 vm uuid: 50304101-157a-f442-58f4-550f05de33fe
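For reference, the path -> counterid pairs resolved above for the VirtualMachine entity can be collected into a lookup table. The values below are copied from this log's zbx_vmware_service_get_counterid() lines; counter ids are specific to this vCenter instance and should not be treated as universal constants:

# Counter ids as resolved by this particular vCenter (copied from the log above).
VM_PERF_COUNTERS = {
    'virtualDisk/read[average]': 180,
    'virtualDisk/write[average]': 181,
    'virtualDisk/numberReadAveraged[average]': 178,
    'virtualDisk/numberWriteAveraged[average]': 179,
    'net/packetsRx[summation]': 153,
    'net/packetsTx[summation]': 154,
    'net/received[average]': 155,
    'net/transmitted[average]': 156,
    'cpu/ready[summation]': 12,
    'net/usage[average]': 150,
    'cpu/usage[average]': 2,
    'cpu/latency[average]': 540,
    'cpu/readiness[average]': 548,
    'cpu/swapwait[summation]': 531,
    'sys/osUptime[latest]': 643,
    'mem/consumed[average]': 98,
    'mem/usage[average]': 24,
    'mem/swapped[average]': 70,
    'virtualDisk/readOIO[latest]': 349,
    'virtualDisk/writeOIO[latest]': 350,
    'virtualDisk/totalWriteLatency[average]': 183,
    'virtualDisk/totalReadLatency[average]': 182,
}

print(VM_PERF_COUNTERS['cpu/usage[average]'])  # -> 2, as resolved in the lines above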
12857:20241101:185514.315 In vmware_service_add_perf_entity() type:Datastore id:datastore-2005
12857:20241101:185514.315 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2005
12857:20241101:185514.315 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185514.315 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185514.316 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185514.316 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.316 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185514.316 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185514.316 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.316 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185514.316 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185514.316 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.316 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185514.316 vmware_service_update_perf_entities() for type: Datastore id: datastore-2005 name: 3PAR_GOROH_SSD_NTK_ID530_mgmt uuid: 6703d517-82086a06-cec0-9440c9831520
12857:20241101:185514.316 In vmware_service_add_perf_entity() type:Datastore id:datastore-2006
12857:20241101:185514.316 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2006
12857:20241101:185514.316 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185514.316 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185514.316 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185514.316 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.316 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185514.316 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185514.316 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.316 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185514.316 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185514.316 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.316 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185514.316 vmware_service_update_perf_entities() for type: Datastore id: datastore-2006 name: 3PAR_KARTOHA_SAS_NTK_ID535 uuid: 6703d63f-3516ce66-4bee-9440c9831520
12857:20241101:185514.316 In vmware_service_add_perf_entity() type:Datastore id:datastore-2007
12857:20241101:185514.316 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2007
12857:20241101:185514.316 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185514.316 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185514.316 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185514.316 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.316 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185514.316 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185514.317 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.317 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185514.317 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185514.317 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.317 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185514.317 vmware_service_update_perf_entities() for type: Datastore id: datastore-2007 name: 3PAR_GOROH_SSD_NTK_ID531 uuid: 6704dec9-75e6c68a-c19e-9440c9831520
12857:20241101:185514.317 In vmware_service_add_perf_entity() type:Datastore id:datastore-4046
12857:20241101:185514.317 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4046
12857:20241101:185514.317 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185514.317 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185514.317 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185514.317 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.317 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185514.317 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185514.317 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.317 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185514.317 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185514.317 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.317 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185514.317 vmware_service_update_perf_entities() for type: Datastore id: datastore-4046 name: Local_ntk-m1-esxi-02 uuid: 67155ba7-5e9d16d6-0733-3cecef02b6e0
12857:20241101:185514.317 In vmware_service_add_perf_entity() type:Datastore id:datastore-4050
12857:20241101:185514.317 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4050
12857:20241101:185514.317 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185514.317 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185514.317 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185514.317 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.317 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185514.317 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185514.317 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.317 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185514.317 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185514.317 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.318 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185514.318 vmware_service_update_perf_entities() for type: Datastore id: datastore-4050 name: Local_ntk-m1-esxi-01 uuid: 67155cc9-bea5e318-19fd-ac1f6bb14c78
12857:20241101:185514.318 In vmware_service_add_perf_entity() type:Datastore id:datastore-4041
12857:20241101:185514.318 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4041
12857:20241101:185514.318 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185514.318 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185514.318 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185514.318 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.318 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185514.318 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185514.318 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.318 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185514.318 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185514.318 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185514.318 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185514.318 vmware_service_update_perf_entities() for type: Datastore id: datastore-4041 name: Local_ntk-m1-esxi-03 uuid: 67155e10-d4545cb2-5b01-3cecef012e78
12857:20241101:185514.318 End of vmware_service_update_perf_entities() entities:8
12857:20241101:185514.318 === memory statistics for vmware cache size ===
12857:20241101:185514.318 free chunks of size >= 256 bytes: 4
12857:20241101:185514.318 min chunk size: 760 bytes
12857:20241101:185514.318 max chunk size: 1073164312 bytes
12857:20241101:185514.318 memory of total size 1073625776 bytes fragmented into 7203 chunks
12857:20241101:185514.318 of those, 1073166912 bytes are in 4 free chunks
12857:20241101:185514.318 of those, 458864 bytes are in 7199 used chunks
12857:20241101:185514.318 of those, 115232 bytes are used by allocation overhead
12857:20241101:185514.318 ================================
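A quick check on the statistics block just printed (numbers copied verbatim from the lines above): the VMware cache is almost entirely free, which is consistent with the timeouts above rather than a shared-memory shortage.

# Numbers copied verbatim from the statistics block above.
total_bytes    = 1073625776
free_bytes     = 1073166912
used_bytes     = 458864
overhead_bytes = 115232

assert free_bytes + used_bytes == total_bytes
print(f'free:           {100.0 * free_bytes / total_bytes:.2f}%')  # ~99.96 %
print(f'payload in use: {used_bytes - overhead_bytes} bytes')      # 343632 bytes net of allocation overhead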
12857:20241101:185514.318 End of zbx_vmware_service_update():FAIL processed:1638400 bytes of data. Events:0 DC:1 DS:6 CL:1 HV:1 VM:1 DVS:1 Alarms:1 VMwareCache memory usage (free/strpool/total): 1073166912 / 2908784 / 1073741008
12857:20241101:185514.318 End of vmware_job_exec() type:update_conf ret:FAIL
12857:20241101:185514.318 In vmware_job_schedule() queue:2 type:update_conf
12857:20241101:185514.318 End of vmware_job_schedule() type:update_conf nextcheck:18:55:43
12857:20241101:185514.318 In vmware_job_get() queue:3
12857:20241101:185514.319 End of vmware_job_get() queue:3 type:none
12857:20241101:185514.319 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 0.000000 sec during 30.368409 sec]'
12857:20241101:185514.319 In vmware_job_get() queue:3
12857:20241101:185514.319 End of vmware_job_get() queue:3 type:none
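The zbx_setproctitle() lines are the quickest way to see how loaded each collector is: collectors #1, #3 and #4 report roughly 5 s idle out of 5 s elapsed (essentially idle), while #2 reports 0 s idle during 30.37 s after its failed update. A sketch that turns those lines into per-collector busy ratios, assuming the title format shown in this log; the file name is a placeholder:

import re

# matches e.g. "... 'vmware collector #2 [updated 0, removed 0 VMware services, idle 0.000000 sec during 30.368409 sec]'"
TITLE_RE = re.compile(r"vmware collector #(\d+) \[.*?idle ([0-9.]+) sec during ([0-9.]+) sec\]")

def busy_ratios(lines):
    """Yield (collector_number, busy_fraction) for every zbx_setproctitle() line."""
    for line in lines:
        m = TITLE_RE.search(line)
        if m:
            num, idle, elapsed = int(m.group(1)), float(m.group(2)), float(m.group(3))
            yield num, ((elapsed - idle) / elapsed if elapsed > 0 else 0.0)

with open('zabbix_proxy_trace.log', encoding='utf-8', errors='replace') as fp:  # placeholder name
    for collector, busy in busy_ratios(fp):
        print(f'collector #{collector}: {busy:.1%} busy')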
12861:20241101:185514.961 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001869 sec]'
12861:20241101:185514.961 In vmware_job_get() queue:3
12861:20241101:185514.961 End of vmware_job_get() queue:3 type:none
12859:20241101:185514.961 In vmware_job_get() queue:3
12859:20241101:185514.961 End of vmware_job_get() queue:3 type:none
12855:20241101:185514.961 In vmware_job_get() queue:3
12855:20241101:185514.961 End of vmware_job_get() queue:3 type:none
12857:20241101:185515.319 In vmware_job_get() queue:3
12857:20241101:185515.319 End of vmware_job_get() queue:3 type:none
12861:20241101:185515.961 In vmware_job_get() queue:3
12861:20241101:185515.961 End of vmware_job_get() queue:3 type:none
12859:20241101:185515.961 In vmware_job_get() queue:3
12859:20241101:185515.961 End of vmware_job_get() queue:3 type:none
12855:20241101:185515.962 In vmware_job_get() queue:3
12855:20241101:185515.962 End of vmware_job_get() queue:3 type:none
12857:20241101:185516.319 In vmware_job_get() queue:3
12857:20241101:185516.319 End of vmware_job_get() queue:3 type:none
12861:20241101:185516.962 In vmware_job_get() queue:3
12861:20241101:185516.963 End of vmware_job_get() queue:3 type:none
12859:20241101:185516.963 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002170 sec]'
12859:20241101:185516.963 In vmware_job_get() queue:3
12859:20241101:185516.963 End of vmware_job_get() queue:3 type:none
12855:20241101:185516.963 In vmware_job_get() queue:3
12855:20241101:185516.963 End of vmware_job_get() queue:3 type:none
12857:20241101:185517.319 In vmware_job_get() queue:3
12857:20241101:185517.319 End of vmware_job_get() queue:3 type:none
12861:20241101:185517.963 In vmware_job_get() queue:3
12861:20241101:185517.963 End of vmware_job_get() queue:3 type:none
12859:20241101:185517.963 In vmware_job_get() queue:3
12859:20241101:185517.963 End of vmware_job_get() queue:3 type:none
12855:20241101:185517.963 In vmware_job_get() queue:3
12855:20241101:185517.963 End of vmware_job_get() queue:3 type:none
12837:20241101:185518.289 received configuration data from server at "10.50.242.78", datalen 437
12857:20241101:185518.319 In vmware_job_get() queue:3
12857:20241101:185518.319 End of vmware_job_get() queue:3 type:none
12861:20241101:185518.963 In vmware_job_get() queue:3
12861:20241101:185518.963 End of vmware_job_get() queue:3 type:none
12859:20241101:185518.963 In vmware_job_get() queue:3
12859:20241101:185518.963 End of vmware_job_get() queue:3 type:none
12855:20241101:185518.963 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002599 sec]'
12855:20241101:185518.963 In vmware_job_get() queue:3
12855:20241101:185518.963 End of vmware_job_get() queue:3 type:none
12857:20241101:185519.319 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000907 sec]'
12857:20241101:185519.320 In vmware_job_get() queue:3
12857:20241101:185519.320 End of vmware_job_get() queue:3 type:none
12861:20241101:185519.963 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002369 sec]'
12861:20241101:185519.963 In vmware_job_get() queue:3
12861:20241101:185519.963 End of vmware_job_get() queue:3 type:none
12859:20241101:185519.963 In vmware_job_get() queue:3
12859:20241101:185519.963 End of vmware_job_get() queue:3 type:none
12855:20241101:185519.963 In vmware_job_get() queue:3
12855:20241101:185519.963 End of vmware_job_get() queue:3 type:none
12857:20241101:185520.320 In vmware_job_get() queue:3
12857:20241101:185520.320 End of vmware_job_get() queue:3 type:none
12861:20241101:185520.963 In vmware_job_get() queue:3
12861:20241101:185520.963 End of vmware_job_get() queue:3 type:none
12859:20241101:185520.964 In vmware_job_get() queue:3
12859:20241101:185520.964 End of vmware_job_get() queue:3 type:none
12855:20241101:185520.964 In vmware_job_get() queue:3
12855:20241101:185520.964 End of vmware_job_get() queue:3 type:none
12857:20241101:185521.320 In vmware_job_get() queue:3
12857:20241101:185521.320 End of vmware_job_get() queue:3 type:none
12861:20241101:185521.963 In vmware_job_get() queue:3
12861:20241101:185521.965 End of vmware_job_get() queue:3 type:none
12859:20241101:185521.965 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002195 sec]'
12859:20241101:185521.965 In vmware_job_get() queue:3
12859:20241101:185521.965 End of vmware_job_get() queue:3 type:none
12855:20241101:185521.965 In vmware_job_get() queue:3
12855:20241101:185521.965 End of vmware_job_get() queue:3 type:none
12857:20241101:185522.320 In vmware_job_get() queue:3
12857:20241101:185522.320 End of vmware_job_get() queue:3 type:none
12855:20241101:185522.965 In vmware_job_get() queue:3
12855:20241101:185522.965 End of vmware_job_get() queue:3 type:none
12861:20241101:185522.965 In vmware_job_get() queue:3
12861:20241101:185522.965 End of vmware_job_get() queue:3 type:none
12859:20241101:185522.965 In vmware_job_get() queue:3
12859:20241101:185522.965 End of vmware_job_get() queue:3 type:none
12857:20241101:185523.320 In vmware_job_get() queue:3
12857:20241101:185523.320 End of vmware_job_get() queue:3 type:none
12855:20241101:185523.965 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002079 sec]'
12855:20241101:185523.965 In vmware_job_get() queue:3
12855:20241101:185523.965 End of vmware_job_get() queue:3 type:none
12861:20241101:185523.965 In vmware_job_get() queue:3
12861:20241101:185523.966 End of vmware_job_get() queue:3 type:none
12859:20241101:185523.966 In vmware_job_get() queue:3
12859:20241101:185523.966 End of vmware_job_get() queue:3 type:none
12857:20241101:185524.320 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000929 sec]'
12857:20241101:185524.321 In vmware_job_get() queue:3
12857:20241101:185524.321 End of vmware_job_get() queue:3 type:none
12855:20241101:185524.966 In vmware_job_get() queue:3
12855:20241101:185524.966 End of vmware_job_get() queue:3 type:none
12861:20241101:185524.966 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002622 sec]'
12861:20241101:185524.966 In vmware_job_get() queue:3
12861:20241101:185524.966 End of vmware_job_get() queue:3 type:none
12859:20241101:185524.966 In vmware_job_get() queue:3
12859:20241101:185524.966 End of vmware_job_get() queue:3 type:none
12857:20241101:185525.321 In vmware_job_get() queue:3
12857:20241101:185525.321 End of vmware_job_get() queue:3 type:none
12855:20241101:185525.966 In vmware_job_get() queue:3
12855:20241101:185525.966 End of vmware_job_get() queue:3 type:none
12861:20241101:185525.966 In vmware_job_get() queue:3
12861:20241101:185525.966 End of vmware_job_get() queue:3 type:none
12859:20241101:185525.966 In vmware_job_get() queue:3
12859:20241101:185525.966 End of vmware_job_get() queue:3 type:none
12857:20241101:185526.321 In vmware_job_get() queue:3
12857:20241101:185526.321 End of vmware_job_get() queue:3 type:none
12861:20241101:185526.966 In vmware_job_get() queue:3
12861:20241101:185526.966 End of vmware_job_get() queue:3 type:none
12855:20241101:185526.966 In vmware_job_get() queue:3
12855:20241101:185526.966 End of vmware_job_get() queue:3 type:none
12859:20241101:185526.966 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002399 sec]'
12859:20241101:185526.966 In vmware_job_get() queue:3
12859:20241101:185526.966 End of vmware_job_get() queue:3 type:none
12857:20241101:185527.321 In vmware_job_get() queue:3
12857:20241101:185527.321 End of vmware_job_get() queue:3 type:none
12855:20241101:185527.966 In vmware_job_get() queue:3
12855:20241101:185527.966 End of vmware_job_get() queue:3 type:none
12859:20241101:185527.967 In vmware_job_get() queue:3
12859:20241101:185527.967 End of vmware_job_get() queue:3 type:none
12861:20241101:185527.967 In vmware_job_get() queue:3
12861:20241101:185527.967 End of vmware_job_get() queue:3 type:none
12837:20241101:185528.305 received configuration data from server at "10.50.242.78", datalen 437
12857:20241101:185528.321 In vmware_job_get() queue:3
12857:20241101:185528.321 End of vmware_job_get() queue:3 type:none
12855:20241101:185528.967 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001271 sec]'
12855:20241101:185528.967 In vmware_job_get() queue:3
12855:20241101:185528.967 End of vmware_job_get() queue:3 type:none
12859:20241101:185528.967 In vmware_job_get() queue:3
12859:20241101:185528.967 End of vmware_job_get() queue:3 type:none
12861:20241101:185528.967 In vmware_job_get() queue:3
12861:20241101:185528.967 End of vmware_job_get() queue:3 type:none
12857:20241101:185529.321 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000900 sec]'
12857:20241101:185529.321 In vmware_job_get() queue:3
12857:20241101:185529.321 End of vmware_job_get() queue:3 type:none
12855:20241101:185529.967 In vmware_job_get() queue:3
12855:20241101:185529.967 End of vmware_job_get() queue:3 type:none
12859:20241101:185529.967 In vmware_job_get() queue:3
12859:20241101:185529.967 End of vmware_job_get() queue:3 type:none
12861:20241101:185529.967 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001337 sec]'
12861:20241101:185529.967 In vmware_job_get() queue:3
12861:20241101:185529.967 End of vmware_job_get() queue:3 type:none
12857:20241101:185530.321 In vmware_job_get() queue:3
12857:20241101:185530.321 End of vmware_job_get() queue:3 type:none
12859:20241101:185530.967 In vmware_job_get() queue:3
12859:20241101:185530.967 End of vmware_job_get() queue:3 type:none
12855:20241101:185530.967 In vmware_job_get() queue:3
12855:20241101:185530.967 End of vmware_job_get() queue:3 type:none
12861:20241101:185530.967 In vmware_job_get() queue:3
12861:20241101:185530.967 End of vmware_job_get() queue:3 type:none
12857:20241101:185531.322 In vmware_job_get() queue:3
12857:20241101:185531.322 End of vmware_job_get() queue:3 type:none
12859:20241101:185531.967 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001013 sec]'
12859:20241101:185531.967 In vmware_job_get() queue:3
12859:20241101:185531.967 End of vmware_job_get() queue:3 type:none
12855:20241101:185531.967 In vmware_job_get() queue:3
12855:20241101:185531.967 End of vmware_job_get() queue:3 type:none
12861:20241101:185531.967 In vmware_job_get() queue:3
12861:20241101:185531.967 End of vmware_job_get() queue:3 type:none
12857:20241101:185532.322 In vmware_job_get() queue:3
12857:20241101:185532.322 End of vmware_job_get() queue:3 type:none
12859:20241101:185532.967 In vmware_job_get() queue:3
12859:20241101:185532.967 End of vmware_job_get() queue:3 type:none
12861:20241101:185532.967 In vmware_job_get() queue:3
12861:20241101:185532.967 End of vmware_job_get() queue:3 type:none
12855:20241101:185532.967 In vmware_job_get() queue:3
12855:20241101:185532.967 End of vmware_job_get() queue:3 type:none
12857:20241101:185533.322 In vmware_job_get() queue:3
12857:20241101:185533.322 End of vmware_job_get() queue:3 type:none
12859:20241101:185533.968 In vmware_job_get() queue:3
12859:20241101:185533.968 End of vmware_job_get() queue:3 type:none
12861:20241101:185533.968 In vmware_job_get() queue:3
12861:20241101:185533.968 End of vmware_job_get() queue:3 type:none
12855:20241101:185533.968 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001038 sec]'
12855:20241101:185533.968 In vmware_job_get() queue:3
12855:20241101:185533.968 End of vmware_job_get() queue:3 type:none
12857:20241101:185534.322 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000751 sec]'
12857:20241101:185534.322 In vmware_job_get() queue:3
12857:20241101:185534.322 End of vmware_job_get() queue:3 type:none
12861:20241101:185534.968 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000707 sec]'
12861:20241101:185534.968 In vmware_job_get() queue:3
12861:20241101:185534.968 End of vmware_job_get() queue:3 type:none
12859:20241101:185534.968 In vmware_job_get() queue:3
12859:20241101:185534.968 End of vmware_job_get() queue:3 type:none
12855:20241101:185534.968 In vmware_job_get() queue:3
12855:20241101:185534.968 End of vmware_job_get() queue:3 type:none
12857:20241101:185535.322 In vmware_job_get() queue:3
12857:20241101:185535.322 End of vmware_job_get() queue:3 type:none
12859:20241101:185535.968 In vmware_job_get() queue:3
12859:20241101:185535.968 End of vmware_job_get() queue:3 type:none
12861:20241101:185535.968 In vmware_job_get() queue:3
12861:20241101:185535.968 End of vmware_job_get() queue:3 type:none
12855:20241101:185535.968 In vmware_job_get() queue:3
12855:20241101:185535.968 End of vmware_job_get() queue:3 type:none
12857:20241101:185536.322 In vmware_job_get() queue:3
12857:20241101:185536.322 End of vmware_job_get() queue:3 type:none
12859:20241101:185536.968 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000899 sec]'
12859:20241101:185536.968 In vmware_job_get() queue:3
12859:20241101:185536.968 End of vmware_job_get() queue:3 type:none
12861:20241101:185536.968 In vmware_job_get() queue:3
12861:20241101:185536.968 End of vmware_job_get() queue:3 type:none
12855:20241101:185536.968 In vmware_job_get() queue:3
12855:20241101:185536.968 End of vmware_job_get() queue:3 type:none
12857:20241101:185537.323 In vmware_job_get() queue:3
12857:20241101:185537.323 End of vmware_job_get() queue:3 type:none
12861:20241101:185537.968 In vmware_job_get() queue:3
12861:20241101:185537.968 End of vmware_job_get() queue:3 type:none
12855:20241101:185537.968 In vmware_job_get() queue:3
12855:20241101:185537.968 End of vmware_job_get() queue:3 type:none
12859:20241101:185537.968 In vmware_job_get() queue:3
12859:20241101:185537.969 End of vmware_job_get() queue:3 type:none
12837:20241101:185538.319 received configuration data from server at "10.50.242.78", datalen 437
12857:20241101:185538.323 In vmware_job_get() queue:3
12857:20241101:185538.323 End of vmware_job_get() queue:3 type:none
12855:20241101:185538.969 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000835 sec]'
12855:20241101:185538.969 In vmware_job_get() queue:3
12855:20241101:185538.969 End of vmware_job_get() queue:3 type:none
12861:20241101:185538.969 In vmware_job_get() queue:3
12861:20241101:185538.969 End of vmware_job_get() queue:3 type:none
12859:20241101:185538.969 In vmware_job_get() queue:3
12859:20241101:185538.969 End of vmware_job_get() queue:3 type:none
12857:20241101:185539.323 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000825 sec]'
12857:20241101:185539.323 In vmware_job_get() queue:3
12857:20241101:185539.323 End of vmware_job_get() queue:3 type:none
12855:20241101:185539.969 In vmware_job_get() queue:3
12855:20241101:185539.969 End of vmware_job_get() queue:3 type:none
12861:20241101:185539.969 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000993 sec]'
12861:20241101:185539.969 In vmware_job_get() queue:3
12861:20241101:185539.969 End of vmware_job_get() queue:3 type:none
12859:20241101:185539.969 In vmware_job_get() queue:3
12859:20241101:185539.969 End of vmware_job_get() queue:3 type:none
12857:20241101:185540.323 In vmware_job_get() queue:3
12857:20241101:185540.323 End of vmware_job_get() queue:3 type:none
12861:20241101:185540.969 In vmware_job_get() queue:3
12861:20241101:185540.969 End of vmware_job_get() queue:3 type:none
12855:20241101:185540.969 In vmware_job_get() queue:3
12855:20241101:185540.969 End of vmware_job_get() queue:3 type:none
12859:20241101:185540.969 In vmware_job_get() queue:3
12859:20241101:185540.969 End of vmware_job_get() queue:3 type:none
12857:20241101:185541.323 In vmware_job_get() queue:3
12857:20241101:185541.323 End of vmware_job_get() queue:3 type:none
12855:20241101:185541.969 In vmware_job_get() queue:3
12855:20241101:185541.969 End of vmware_job_get() queue:3 type:none
12859:20241101:185541.969 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000952 sec]'
12859:20241101:185541.969 In vmware_job_get() queue:3
12859:20241101:185541.969 End of vmware_job_get() queue:3 type:none
12861:20241101:185541.969 In vmware_job_get() queue:3
12861:20241101:185541.969 End of vmware_job_get() queue:3 type:none
12857:20241101:185542.323 In vmware_job_get() queue:3
12857:20241101:185542.323 End of vmware_job_get() queue:3 type:none
12861:20241101:185542.969 In vmware_job_get() queue:3
12861:20241101:185542.970 End of vmware_job_get() queue:3 type:none
12859:20241101:185542.970 In vmware_job_get() queue:3
12859:20241101:185542.970 End of vmware_job_get() queue:3 type:none
12855:20241101:185542.970 In vmware_job_get() queue:3
12855:20241101:185542.970 End of vmware_job_get() queue:3 type:none
12857:20241101:185543.324 In vmware_job_get() queue:3
12857:20241101:185543.324 End of vmware_job_get() queue:2 type:update_tags
12857:20241101:185543.324 In vmware_job_exec() type:update_tags
12857:20241101:185543.324 End of vmware_job_exec() type:update_tags ret:FAIL
12857:20241101:185543.324 In vmware_job_schedule() queue:2 type:update_tags
12857:20241101:185543.324 End of vmware_job_schedule() type:update_tags nextcheck:18:56:43
12857:20241101:185543.324 In vmware_job_get() queue:3
12857:20241101:185543.324 End of vmware_job_get() queue:2 type:update_conf
12857:20241101:185543.324 In vmware_job_exec() type:update_conf
12857:20241101:185543.324 In zbx_vmware_service_update() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12857:20241101:185543.324 In vmware_service_cust_query_prep() cust_queries:0
12857:20241101:185543.324 End of vmware_service_cust_query_prep() cq_values:0
12857:20241101:185543.324 In vmware_service_cust_query_prep() cust_queries:0
12857:20241101:185543.324 End of vmware_service_cust_query_prep() cq_values:0
12857:20241101:185543.324 In vmware_service_authenticate() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12857:20241101:185543.372 vmware_service_authenticate() SOAP response (XML tags lost in capture):
5214194c-2dbd-4026-fd58-7aca1972c969VSPHERE.LOCAL\zabbix2024-11-01T18:55:43.378634Z2024-11-01T18:55:43.378634Zenenfalse10.50.242.760
12857:20241101:185543.372 End of vmware_service_authenticate():SUCCEED
12857:20241101:185543.375 vmware_service_get_contents() SOAP response (XML tags lost in capture):
group-d1propertyCollectorViewManagerVMware vCenter ServerVMware vCenter Server 8.0.3 build-24322831VMware, Inc.8.0.324322831INTL000linux-x64vpxVirtualCenter8.0.3.09a31b4b0-64a6-48e1-919a-e9f7ca1668b6VMware VirtualCenter Server8.0VpxSettingsUserDirectorySessionManagerAuthorizationManagerServiceMgrPerfMgrScheduledTaskManagerAlarmManagerEventManagerTaskManagerExtensionManagerCustomizationSpecManagerCustomFieldsManagerDiagMgrLicenseManagerSearchIndexFileManagerDatastoreNamespaceManagervirtualDiskManagerSnmpSystemProvCheckerCompatCheckerOvfManagerIpPoolManagerDVSManagerHostProfileManagerClusterProfileManagerMoComplianceManagerLocalizationManagerStorageResourceManagerguestOperationsManagerOverheadMemoryManagercertificateManagerIoFilterManager
12857:20241101:185543.375 In vmware_service_get_perf_counters()
12857:20241101:185543.407 vmware_service_get_perf_counters() SOAP response (XML tags lost in capture):
PerfMgrperfCounter1CPU usage as a percentage during the intervalusageCPUcpuPercentagepercentnonerate442CPU usage as a percentage during the intervalusageCPUcpuPercentagepercentaveragerate133CPU usage as a percentage during the intervalusageCPUcpuPercentagepercentminimumrate444CPU usage as a percentage during the intervalusageCPUcpuPercentagepercentmaximumrate445CPU usage in megahertz during the intervalusagemhzCPUcpuMegahertzmegaHertznonerate446CPU usage in megahertz during the intervalusagemhzCPUcpuMegahertzmegaHertzaveragerate137CPU usage in megahertz during the intervalusagemhzCPUcpuMegahertzmegaHertzminimumrate448CPU usage in megahertz during the intervalusagemhzCPUcpuMegahertzmegaHertzmaximumrate449Total CPU capacity reserved by virtual machinesreservedCapacityCPUcpuMegahertzmegaHertzaverageabsolute2310Amount of time spent on system processes on each virtual CPU in the virtual machinesystemCPUcpuMillisecondmillisecondsummationdelta3311Total CPU time spent in wait statewaitCPUcpuMillisecondmillisecondsummationdelta3312Time that the virtual machine was ready, but could not get scheduled to run on the physical CPU during last measurement intervalreadyCPUcpuMillisecondmillisecondsummationdelta1313Total time that the CPU spent in an idle stateidleCPUcpuMillisecondmillisecondsummationdelta2314Total CPU usageusedCPUcpuMillisecondmillisecondsummationdelta3315Capacity in MHz of the physical CPU corescapacity.provisionedCPUcpuMegahertzmegaHertzaverageabsolute4416CPU resources devoted by the ESXi scheduler to the virtual machines and resource poolscapacity.entitlementCPUcpuMegahertzmegaHertzaverageabsolute4417CPU usage as a percent during the intervalcapacity.usageCPUcpuMegahertzmegaHertzaveragerate4418The amount of CPU resources a VM would use if there were no CPU contentioncapacity.demandCPUcpuMegahertzmegaHertzaverageabsolute4419Percent of time the VM is unable to run because it is contending for access to the physical CPU(s)capacity.contentionCPUcpuPercentagepercentaveragerate4420The number of virtual processors provisioned to the entitycorecount.provisionedCPUcpuNumbernumberaverageabsolute4421The number of virtual processors running on the hostcorecount.usageCPUcpuNumbernumberaverageabsolute4422Time the VM vCPU is ready to run, but is unable to run due to co-scheduling constraintscorecount.contentionCPUcpuPercentagepercentaveragerate4423Percentage of host physical memory that has been consumedusageMemorymemPercentagepercentnoneabsolute4424Percentage of host physical memory that has been consumedusageMemorymemPercentagepercentaverageabsolute1325Percentage of host physical memory that has been consumedusageMemorymemPercentagepercentminimumabsolute4426Percentage of host physical memory that has been consumedusageMemorymemPercentagepercentmaximumabsolute4427Memory reservation consumed by powered-on virtual machinesreservedCapacityMemorymemMegabytemegaBytesaverageabsolute2328Amount of host physical memory or physical memory that is mapped for a virtual machine or a hostgrantedMemorymemKilobytekiloBytesnoneabsolute4429Amount of host physical memory or physical memory that is mapped for a virtual machine or a hostgrantedMemorymemKilobytekiloBytesaverageabsolute2330Amount of host physical memory or physical memory that is mapped for a virtual machine or a hostgrantedMemorymemKilobytekiloBytesminimumabsolute4431Amount of host physical memory or physical memory that is mapped for a virtual machine or a hostgrantedMemorymemKilobytekiloBytesmaximumabsolute4432Amount of guest physical memory that is 
VMware performance counter catalogue (vCenter PerformanceManager PerfCounterInfo list, counter keys ~32 through 588 in this excerpt). Each record carries, in order: numeric counter key, description, counter name, group label/key, unit label/key, rollup type (none | average | minimum | maximum | latest | summation), stats type (absolute | rate | delta), collection level, and per-device collection level.

Example of one record decomposed (key 33):
  key=33  counter=mem.active  unit=kiloBytes (Kilobyte)  rollup=average  stats=absolute  level=2  perDeviceLevel=3
  description="Amount of guest physical memory that is being actively read or written by guest. Activeness is estimated by ESXi"

Counter groups present in the catalogue (label / key):
  CPU / cpu; Memory / mem; Disk / disk; Network / net; System / sys; Power / power;
  Storage adapter / storageAdapter; Storage path / storagePath; Virtual disk / virtualDisk;
  Datastore / datastore; Cluster services / clusterServices; Virtual machine operations / vmop;
  GPU / gpu; Management agent / managementAgent; Resource group CPU / rescpu;
  Migration of powered on VM / vmotion; vSphere Replication / hbr; vSphere Data Protection (LWD) / lwd;
  NFS / nfs; vVol object related stats / vvol; vCenter debugging information / vcDebugInfo;
  vCenter resource usage information / vcResources.

Units present (label / key):
  Kilobyte / kiloBytes; Megabyte / megaBytes; Terabyte / teraBytes;
  Kilobytes per second / kiloBytesPerSecond; Megabytes per second / megaBytesPerSecond;
  Percentage / percent; Number / number; Millisecond / millisecond; Microsecond / microsecond;
  Nanosecond / nanosecond; Second / second; Megahertz / megaHertz; Watt / watt; Joule / joule;
  Temperature in degrees Celsius / celsius.
addresses are cached in the VMFS PB Cachevmfs.pbc.workingSetMaxMemorymemTerabyteteraByteslatestabsolute44589Amount of VMFS heap used by the VMFS PB Cachevmfs.pbc.overheadMemorymemKilobytekiloByteslatestabsolute44590Trailing average of the ratio of capacity misses to compulsory misses for the VMFS PB Cachevmfs.pbc.capMissRatioMemorymemPercentagepercentlatestabsolute44591Number of Storage commands issued during the collection intervalcommandsDiskdiskNumbernumbersummationdelta23592Average amount of time, in milliseconds, to read from the physical devicedeviceReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23593Average amount of time, in milliseconds, spent by VMkernel to process each Storage read commandkernelReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23594Average amount of time taken during the collection interval to process a Storage read command issued from the guest OS to the virtual machinetotalReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23595Average amount of time spent in the VMkernel queue, per Storage read command, during the collection intervalqueueReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23596Average amount of time, in milliseconds, to write to the physical devicedeviceWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23597Average amount of time, in milliseconds, spent by VMkernel to process each Storage write commandkernelWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23598Average amount of time taken during the collection interval to process a Storage write command issued by the guest OS to the virtual machinetotalWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23599Average amount of time spent in the VMkernel queue, per Storage write command, during the collection intervalqueueWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23600Average amount of time, in milliseconds, to complete a Storage command from the physical devicedeviceLatencyDiskdiskMillisecondmillisecondaverageabsolute13601Average amount of time, in milliseconds, spent by VMkernel to process each Storage commandkernelLatencyDiskdiskMillisecondmillisecondaverageabsolute23602Average amount of time spent in the VMkernel queue, per Storage command, during the collection intervalqueueLatencyDiskdiskMillisecondmillisecondaverageabsolute23603Maximum queue depthmaxQueueDepthDiskdiskNumbernumberaverageabsolute13604Average number of Storage commands issued per second during the collection intervalcommandsAveragedDiskdiskNumbernumberaveragerate23605Number of receives droppeddroppedRxNetworknetNumbernumbersummationdelta23606Number of transmits droppeddroppedTxNetworknetNumbernumbersummationdelta23607Average amount of data received per secondbytesRxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate23608Average amount of data transmitted per secondbytesTxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate23609Number of broadcast packets received during the sampling intervalbroadcastRxNetworknetNumbernumbersummationdelta23610Number of broadcast packets transmitted during the sampling intervalbroadcastTxNetworknetNumbernumbersummationdelta23611Number of multicast packets received during the sampling intervalmulticastRxNetworknetNumbernumbersummationdelta23612Number of multicast packets transmitted during the sampling intervalmulticastTxNetworknetNumbernumbersummationdelta23613Number of packets with errors received during the sampling intervalerrorsRxNetworknetNumbernumbersummationdelta23614Number of packets with errors transmitted 
during the sampling intervalerrorsTxNetworknetNumbernumbersummationdelta23615Number of frames with unknown protocol received during the sampling intervalunknownProtosNetworknetNumbernumbersummationdelta23616Average amount of data received per second by a pNicpnicBytesRxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44617Average amount of data transmitted per second through a pNicpnicBytesTxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44618Number of heartbeats issued per virtual machine during the intervalheartbeatSystemsysNumbernumberlatestabsolute44619Amount of disk space usage for each mount pointdiskUsageSystemsysPercentagepercentlatestabsolute33620Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertznonerate44621Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzaveragerate33622Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzmaximumrate44623Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzminimumrate44624Memory touched by the system resource groupresourceMemTouchedSystemsysKilobytekiloByteslatestabsolute33625Memory mapped by the system resource groupresourceMemMappedSystemsysKilobytekiloByteslatestabsolute33626Memory saved due to sharing by the system resource groupresourceMemSharedSystemsysKilobytekiloByteslatestabsolute33627Memory swapped out by the system resource groupresourceMemSwappedSystemsysKilobytekiloByteslatestabsolute33628Overhead memory consumed by the system resource groupresourceMemOverheadSystemsysKilobytekiloByteslatestabsolute33629Memory shared by the system resource groupresourceMemCowSystemsysKilobytekiloByteslatestabsolute33630Zero filled memory used by the system resource groupresourceMemZeroSystemsysKilobytekiloByteslatestabsolute33631CPU running average over 1 minute of the system resource groupresourceCpuRun1SystemsysPercentagepercentlatestabsolute33632CPU active average over 1 minute of the system resource groupresourceCpuAct1SystemsysPercentagepercentlatestabsolute33633CPU maximum limited over 1 minute of the system resource groupresourceCpuMaxLimited1SystemsysPercentagepercentlatestabsolute33634CPU running average over 5 minutes of the system resource groupresourceCpuRun5SystemsysPercentagepercentlatestabsolute33635CPU active average over 5 minutes of the system resource groupresourceCpuAct5SystemsysPercentagepercentlatestabsolute33636CPU maximum limited over 5 minutes of the system resource groupresourceCpuMaxLimited5SystemsysPercentagepercentlatestabsolute33637CPU allocation reservation (in MHz) of the system resource groupresourceCpuAllocMinSystemsysMegahertzmegaHertzlatestabsolute33638CPU allocation limit (in MHz) of the system resource groupresourceCpuAllocMaxSystemsysMegahertzmegaHertzlatestabsolute33639CPU allocation shares of the system resource groupresourceCpuAllocSharesSystemsysNumbernumberlatestabsolute33640Memory allocation reservation (in KB) of the system resource groupresourceMemAllocMinSystemsysKilobytekiloByteslatestabsolute33641Memory allocation limit (in KB) of the system resource groupresourceMemAllocMaxSystemsysKilobytekiloByteslatestabsolute33642Memory allocation shares of the system resource groupresourceMemAllocSharesSystemsysNumbernumberlatestabsolute33643Total time elapsed, in seconds, since last 
operating system boot-uposUptimeSystemsysSecondsecondlatestabsolute44644Memory consumed by the system resource groupresourceMemConsumedSystemsysKilobytekiloByteslatestabsolute44645Number of file descriptors used by the system resource groupresourceFdUsageSystemsysNumbernumberlatestabsolute44646CPU active peak over 1 minuteactpk1Resource group CPUrescpuPercentagepercentlatestabsolute33647CPU running average over 1 minuterunav1Resource group CPUrescpuPercentagepercentlatestabsolute33648CPU active average over 5 minutesactav5Resource group CPUrescpuPercentagepercentlatestabsolute33649CPU active peak over 5 minutesactpk5Resource group CPUrescpuPercentagepercentlatestabsolute33650CPU running average over 5 minutesrunav5Resource group CPUrescpuPercentagepercentlatestabsolute33651CPU active average over 15 minutesactav15Resource group CPUrescpuPercentagepercentlatestabsolute33652CPU active peak over 15 minutesactpk15Resource group CPUrescpuPercentagepercentlatestabsolute33653CPU running average over 15 minutesrunav15Resource group CPUrescpuPercentagepercentlatestabsolute33654CPU running peak over 1 minuterunpk1Resource group CPUrescpuPercentagepercentlatestabsolute33655Amount of CPU resources over the limit that were refused, average over 1 minutemaxLimited1Resource group CPUrescpuPercentagepercentlatestabsolute33656CPU running peak over 5 minutesrunpk5Resource group CPUrescpuPercentagepercentlatestabsolute33657Amount of CPU resources over the limit that were refused, average over 5 minutesmaxLimited5Resource group CPUrescpuPercentagepercentlatestabsolute33658CPU running peak over 15 minutesrunpk15Resource group CPUrescpuPercentagepercentlatestabsolute33659Amount of CPU resources over the limit that were refused, average over 15 minutesmaxLimited15Resource group CPUrescpuPercentagepercentlatestabsolute33660Group CPU sample countsampleCountResource group CPUrescpuNumbernumberlatestabsolute33661Group CPU sample periodsamplePeriodResource group CPUrescpuMillisecondmillisecondlatestabsolute33662Amount of total configured memory that is available for usememUsedManagement agentmanagementAgentKilobytekiloBytesaverageabsolute33663Sum of the memory swapped by all powered-on virtual machines on the hostswapUsedManagement agentmanagementAgentKilobytekiloBytesaverageabsolute33664Amount of Service Console CPU usagecpuUsageManagement agentmanagementAgentMegahertzmegaHertzaveragerate33665Average number of commands issued per second on the storage path during the collection intervalcommandsAveragedStorage pathstoragePathNumbernumberaveragerate33666Average number of read commands issued per second on the storage path during the collection intervalnumberReadAveragedStorage pathstoragePathNumbernumberaveragerate33667Average number of write commands issued per second on the storage path during the collection intervalnumberWriteAveragedStorage pathstoragePathNumbernumberaveragerate33668Rate of reading data on the storage pathreadStorage pathstoragePathKilobytes per secondkiloBytesPerSecondaveragerate33669Rate of writing data on the storage pathwriteStorage pathstoragePathKilobytes per secondkiloBytesPerSecondaveragerate33670The average time a read issued on the storage path takestotalReadLatencyStorage pathstoragePathMillisecondmillisecondaverageabsolute33671The average time a write issued on the storage path takestotalWriteLatencyStorage pathstoragePathMillisecondmillisecondaverageabsolute33672Average read request size in bytesreadIOSizeVirtual diskvirtualDiskNumbernumberlatestabsolute44673Average write request size 
in byteswriteIOSizeVirtual diskvirtualDiskNumbernumberlatestabsolute44674Number of seeks during the interval that were less than 64 LBNs apartsmallSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44675Number of seeks during the interval that were between 64 and 8192 LBNs apartmediumSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44676Number of seeks during the interval that were greater than 8192 LBNs apartlargeSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44677Read latency in microsecondsreadLatencyUSVirtual diskvirtualDiskMicrosecondmicrosecondlatestabsolute44678Write latency in microsecondswriteLatencyUSVirtual diskvirtualDiskMicrosecondmicrosecondlatestabsolute44679Storage I/O Control datastore maximum queue depthdatastoreMaxQueueDepthDatastoredatastoreNumbernumberlatestabsolute13680Unmapped size in MBunmapSizeDatastoredatastoreMegabytemegaBytessummationdelta44681Number of unmap IOs issuedunmapIOsDatastoredatastoreNumbernumbersummationdelta44682Current number of replicated virtual machineshbrNumVmsvSphere ReplicationhbrNumbernumberaverageabsolute44683Average amount of data received per secondhbrNetRxvSphere ReplicationhbrKilobytes per secondkiloBytesPerSecondaveragerate44684Average amount of data transmitted per secondhbrNetTxvSphere ReplicationhbrKilobytes per secondkiloBytesPerSecondaveragerate44685Average network latency seen by vSphere ReplicationhbrNetLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44686Average disk read latency seen by vSphere ReplicationhbrDiskReadLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44687Average guest I/O stall introduced by vSphere ReplicationhbrDiskStallLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44688Average amount of successful transfer time per diskhbrDiskTransferSuccessvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44689Average amount of idle time per diskhbrDiskTransferIdlevSphere ReplicationhbrMillisecondmillisecondaverageabsolute44690Average amount of data in KB successfully transferred per diskhbrDiskTransferBytesvSphere ReplicationhbrKilobytekiloBytesaverageabsolute44691Number of caches controlled by the virtual flash modulenumActiveVMDKsVirtual flash module related statistical valuesvflashModuleNumbernumberlatestabsolute44692Read IOPSreadIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44693Read throughput in kBpsreadThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44694Average read latency in msreadAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44695Max read latency in msreadMaxLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondlatestabsolute44696Cache hit rate percentagereadCacheHitRatevSAN DOM object related statistical valuesvsanDomObjPercentagepercentlatestabsolute44697Read congestionreadCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44698Write IOPSwriteIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44699Write throughput in kBpswriteThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44700Average write latency in mswriteAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44701Max write latency in mswriteMaxLatencyvSAN DOM object related statistical 
valuesvsanDomObjMillisecondmillisecondlatestabsolute44702Write congestionwriteCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44703Recovery write IOPSrecoveryWriteIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44704Recovery write through-put in kBpsrecoveryWriteThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44705Average recovery write latency in msrecoveryWriteAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44706Max recovery write latency in msrecoveryWriteMaxLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondlatestabsolute44707Recovery write congestionrecoveryWriteCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44708The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentnoneabsolute44709The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentmaximumabsolute44710The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentminimumabsolute44711The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesnoneabsolute44712The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesmaximumabsolute44713The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesminimumabsolute44714The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentnoneabsolute44715The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentaverageabsolute44716The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentmaximumabsolute44717The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentminimumabsolute44718The amount of GPU memory used in gigabytesmem.used.gbGPUgpuGigabytegigaByteslatestabsolute33719The amount of GPU memory reserved in gigabytesmem.reserved.gbGPUgpuGigabytegigaByteslatestabsolute33720The total amount of GPU memory in gigabytesmem.total.gbGPUgpuGigabytegigaByteslatestabsolute33721Persistent memory available reservation on a host.available.reservationPMEMpmemMegabytemegaByteslatestabsolute44722Persistent memory reservation managed by DRS on a host.drsmanaged.reservationPMEMpmemMegabytemegaByteslatestabsolute44723Total count of virtual CPUs in VMnumVCPUsVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44724Minimum clock speed of the vCPUs during last stats intervalvcpusMhzMinVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44725Maximum clock speed of the vCPUs during last stats intervalvcpusMhzMaxVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44726Average clock speed of the vCPUs during last stats intervalvcpusMhzMeanVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44727Actual clock speed of host CPUcpuSpeedVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44728Minimum overhead heap memory usage since the VM started runningoverheadMemSizeMinVMX Stats for VMX componentsvmxMegabytemegaByteslatestabsolute44729Maximum overhead heap memory usage since the VM started runningoverheadMemSizeMaxVMX Stats for VMX componentsvmxMegabytemegaByteslatestabsolute44730vigor.opsTotalVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44731poll.itersPerSVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44732userRpc.opsPerSVMX Stats 
for VMX componentsvmxNumbernumberlatestabsolute44
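The "adding performance counter" lines that follow show the collector turning each counter-definition record from the dump above into a lookup path of the form group/name[rollup], plus a second variant group/name[rollup,statsType], both mapped to the vCenter counter key. The sketch below illustrates that path construction only; it is not Zabbix source code, and the struct and function names (perf_counter_info, print_counter_paths) are hypothetical.

#include <stdio.h>

/* One counter-definition record, reduced to the fields used in the paths. */
struct perf_counter_info
{
    int        key;        /* vCenter counter id, e.g. 6    */
    const char *group;     /* counter group, e.g. "cpu"     */
    const char *name;      /* counter name, e.g. "usagemhz" */
    const char *rollup;    /* rollup type, e.g. "average"   */
    const char *statstype; /* statistics type, e.g. "rate"  */
};

/* Print both path variants exactly as they appear in the log lines below. */
static void print_counter_paths(const struct perf_counter_info *c)
{
    printf("adding performance counter %s/%s[%s]:%d\n",
            c->group, c->name, c->rollup, c->key);
    printf("adding performance counter %s/%s[%s,%s]:%d\n",
            c->group, c->name, c->rollup, c->statstype, c->key);
}

int main(void)
{
    /* Matches counter id 6 registered a few lines further down. */
    struct perf_counter_info usagemhz = { 6, "cpu", "usagemhz", "average", "rate" };

    print_counter_paths(&usagemhz);
    return 0;
}

Paths of this form (for example "cpu/usagemhz[average]") are also what one would pass as the counter-path parameter of items such as vmware.hv.perfcounter or vmware.vm.perfcounter, assuming the documented item-key syntax.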
12857:20241101:185543.411 adding performance counter cpu/usage[none]:1
12857:20241101:185543.411 adding performance counter cpu/usage[none,rate]:1
12857:20241101:185543.411 adding performance counter cpu/usage[average]:2
12857:20241101:185543.411 adding performance counter cpu/usage[average,rate]:2
12857:20241101:185543.411 adding performance counter cpu/usage[minimum]:3
12857:20241101:185543.411 adding performance counter cpu/usage[minimum,rate]:3
12857:20241101:185543.411 adding performance counter cpu/usage[maximum]:4
12857:20241101:185543.411 adding performance counter cpu/usage[maximum,rate]:4
12857:20241101:185543.411 adding performance counter cpu/usagemhz[none]:5
12857:20241101:185543.411 adding performance counter cpu/usagemhz[none,rate]:5
12857:20241101:185543.411 adding performance counter cpu/usagemhz[average]:6
12857:20241101:185543.411 adding performance counter cpu/usagemhz[average,rate]:6
12857:20241101:185543.411 adding performance counter cpu/usagemhz[minimum]:7
12857:20241101:185543.411 adding performance counter cpu/usagemhz[minimum,rate]:7
12857:20241101:185543.411 adding performance counter cpu/usagemhz[maximum]:8
12857:20241101:185543.411 adding performance counter cpu/usagemhz[maximum,rate]:8
12857:20241101:185543.411 adding performance counter cpu/reservedCapacity[average]:9
12857:20241101:185543.411 adding performance counter cpu/reservedCapacity[average,absolute]:9
12857:20241101:185543.411 adding performance counter cpu/system[summation]:10
12857:20241101:185543.411 adding performance counter cpu/system[summation,delta]:10
12857:20241101:185543.411 adding performance counter cpu/wait[summation]:11
12857:20241101:185543.411 adding performance counter cpu/wait[summation,delta]:11
12857:20241101:185543.411 adding performance counter cpu/ready[summation]:12
12857:20241101:185543.411 adding performance counter cpu/ready[summation,delta]:12
12857:20241101:185543.411 adding performance counter cpu/idle[summation]:13
12857:20241101:185543.411 adding performance counter cpu/idle[summation,delta]:13
12857:20241101:185543.411 adding performance counter cpu/used[summation]:14
12857:20241101:185543.411 adding performance counter cpu/used[summation,delta]:14
12857:20241101:185543.411 adding performance counter cpu/capacity.provisioned[average]:15
12857:20241101:185543.411 adding performance counter cpu/capacity.provisioned[average,absolute]:15
12857:20241101:185543.411 adding performance counter cpu/capacity.entitlement[average]:16
12857:20241101:185543.411 adding performance counter cpu/capacity.entitlement[average,absolute]:16
12857:20241101:185543.412 adding performance counter cpu/capacity.usage[average]:17
12857:20241101:185543.412 adding performance counter cpu/capacity.usage[average,rate]:17
12857:20241101:185543.412 adding performance counter cpu/capacity.demand[average]:18
12857:20241101:185543.412 adding performance counter cpu/capacity.demand[average,absolute]:18
12857:20241101:185543.412 adding performance counter cpu/capacity.contention[average]:19
12857:20241101:185543.412 adding performance counter cpu/capacity.contention[average,rate]:19
12857:20241101:185543.412 adding performance counter cpu/corecount.provisioned[average]:20
12857:20241101:185543.412 adding performance counter cpu/corecount.provisioned[average,absolute]:20
12857:20241101:185543.412 adding performance counter cpu/corecount.usage[average]:21
12857:20241101:185543.412 adding performance counter cpu/corecount.usage[average,absolute]:21
12857:20241101:185543.412 adding performance counter cpu/corecount.contention[average]:22
12857:20241101:185543.412 adding performance counter cpu/corecount.contention[average,rate]:22
12857:20241101:185543.412 adding performance counter mem/usage[none]:23
12857:20241101:185543.412 adding performance counter mem/usage[none,absolute]:23
12857:20241101:185543.412 adding performance counter mem/usage[average]:24
12857:20241101:185543.412 adding performance counter mem/usage[average,absolute]:24
12857:20241101:185543.412 adding performance counter mem/usage[minimum]:25
12857:20241101:185543.412 adding performance counter mem/usage[minimum,absolute]:25
12857:20241101:185543.412 adding performance counter mem/usage[maximum]:26
12857:20241101:185543.412 adding performance counter mem/usage[maximum,absolute]:26
12857:20241101:185543.412 adding performance counter mem/reservedCapacity[average]:27
12857:20241101:185543.412 adding performance counter mem/reservedCapacity[average,absolute]:27
12857:20241101:185543.412 adding performance counter mem/granted[none]:28
12857:20241101:185543.412 adding performance counter mem/granted[none,absolute]:28
12857:20241101:185543.412 adding performance counter mem/granted[average]:29
12857:20241101:185543.412 adding performance counter mem/granted[average,absolute]:29
12857:20241101:185543.412 adding performance counter mem/granted[minimum]:30
12857:20241101:185543.412 adding performance counter mem/granted[minimum,absolute]:30
12857:20241101:185543.412 adding performance counter mem/granted[maximum]:31
12857:20241101:185543.412 adding performance counter mem/granted[maximum,absolute]:31
12857:20241101:185543.412 adding performance counter mem/active[none]:32
12857:20241101:185543.412 adding performance counter mem/active[none,absolute]:32
12857:20241101:185543.412 adding performance counter mem/active[average]:33
12857:20241101:185543.412 adding performance counter mem/active[average,absolute]:33
12857:20241101:185543.412 adding performance counter mem/active[minimum]:34
12857:20241101:185543.412 adding performance counter mem/active[minimum,absolute]:34
12857:20241101:185543.412 adding performance counter mem/active[maximum]:35
12857:20241101:185543.412 adding performance counter mem/active[maximum,absolute]:35
12857:20241101:185543.413 adding performance counter mem/shared[none]:36
12857:20241101:185543.413 adding performance counter mem/shared[none,absolute]:36
12857:20241101:185543.413 adding performance counter mem/shared[average]:37
12857:20241101:185543.413 adding performance counter mem/shared[average,absolute]:37
12857:20241101:185543.413 adding performance counter mem/shared[minimum]:38
12857:20241101:185543.413 adding performance counter mem/shared[minimum,absolute]:38
12857:20241101:185543.413 adding performance counter mem/shared[maximum]:39
12857:20241101:185543.413 adding performance counter mem/shared[maximum,absolute]:39
12857:20241101:185543.413 adding performance counter mem/zero[none]:40
12857:20241101:185543.413 adding performance counter mem/zero[none,absolute]:40
12857:20241101:185543.413 adding performance counter mem/zero[average]:41
12857:20241101:185543.413 adding performance counter mem/zero[average,absolute]:41
12857:20241101:185543.413 adding performance counter mem/zero[minimum]:42
12857:20241101:185543.413 adding performance counter mem/zero[minimum,absolute]:42
12857:20241101:185543.413 adding performance counter mem/zero[maximum]:43
12857:20241101:185543.413 adding performance counter mem/zero[maximum,absolute]:43
12857:20241101:185543.413 adding performance counter mem/unreserved[none]:44
12857:20241101:185543.413 adding performance counter mem/unreserved[none,absolute]:44
12857:20241101:185543.413 adding performance counter mem/unreserved[average]:45
12857:20241101:185543.413 adding performance counter mem/unreserved[average,absolute]:45
12857:20241101:185543.413 adding performance counter mem/unreserved[minimum]:46
12857:20241101:185543.413 adding performance counter mem/unreserved[minimum,absolute]:46
12857:20241101:185543.413 adding performance counter mem/unreserved[maximum]:47
12857:20241101:185543.413 adding performance counter mem/unreserved[maximum,absolute]:47
12857:20241101:185543.413 adding performance counter mem/swapused[none]:48
12857:20241101:185543.413 adding performance counter mem/swapused[none,absolute]:48
12857:20241101:185543.413 adding performance counter mem/swapused[average]:49
12857:20241101:185543.413 adding performance counter mem/swapused[average,absolute]:49
12857:20241101:185543.413 adding performance counter mem/swapused[minimum]:50
12857:20241101:185543.413 adding performance counter mem/swapused[minimum,absolute]:50
12857:20241101:185543.413 adding performance counter mem/swapused[maximum]:51
12857:20241101:185543.413 adding performance counter mem/swapused[maximum,absolute]:51
12857:20241101:185543.413 adding performance counter mem/swapunreserved[none]:52
12857:20241101:185543.413 adding performance counter mem/swapunreserved[none,absolute]:52
12857:20241101:185543.413 adding performance counter mem/swapunreserved[average]:53
12857:20241101:185543.413 adding performance counter mem/swapunreserved[average,absolute]:53
12857:20241101:185543.413 adding performance counter mem/swapunreserved[minimum]:54
12857:20241101:185543.413 adding performance counter mem/swapunreserved[minimum,absolute]:54
12857:20241101:185543.413 adding performance counter mem/swapunreserved[maximum]:55
12857:20241101:185543.413 adding performance counter mem/swapunreserved[maximum,absolute]:55
12857:20241101:185543.414 adding performance counter mem/sharedcommon[none]:56
12857:20241101:185543.414 adding performance counter mem/sharedcommon[none,absolute]:56
12857:20241101:185543.414 adding performance counter mem/sharedcommon[average]:57
12857:20241101:185543.414 adding performance counter mem/sharedcommon[average,absolute]:57
12857:20241101:185543.414 adding performance counter mem/sharedcommon[minimum]:58
12857:20241101:185543.414 adding performance counter mem/sharedcommon[minimum,absolute]:58
12857:20241101:185543.414 adding performance counter mem/sharedcommon[maximum]:59
12857:20241101:185543.414 adding performance counter mem/sharedcommon[maximum,absolute]:59
12857:20241101:185543.414 adding performance counter mem/heap[none]:60
12857:20241101:185543.414 adding performance counter mem/heap[none,absolute]:60
12857:20241101:185543.414 adding performance counter mem/heap[average]:61
12857:20241101:185543.414 adding performance counter mem/heap[average,absolute]:61
12857:20241101:185543.414 adding performance counter mem/heap[minimum]:62
12857:20241101:185543.414 adding performance counter mem/heap[minimum,absolute]:62
12857:20241101:185543.414 adding performance counter mem/heap[maximum]:63
12857:20241101:185543.414 adding performance counter mem/heap[maximum,absolute]:63
12857:20241101:185543.414 adding performance counter mem/heapfree[none]:64
12857:20241101:185543.414 adding performance counter mem/heapfree[none,absolute]:64
12857:20241101:185543.414 adding performance counter mem/heapfree[average]:65
12857:20241101:185543.414 adding performance counter mem/heapfree[average,absolute]:65
12857:20241101:185543.414 adding performance counter mem/heapfree[minimum]:66
12857:20241101:185543.414 adding performance counter mem/heapfree[minimum,absolute]:66
12857:20241101:185543.414 adding performance counter mem/heapfree[maximum]:67
12857:20241101:185543.414 adding performance counter mem/heapfree[maximum,absolute]:67
12857:20241101:185543.414 adding performance counter mem/state[latest]:68
12857:20241101:185543.414 adding performance counter mem/state[latest,absolute]:68
12857:20241101:185543.414 adding performance counter mem/swapped[none]:69
12857:20241101:185543.414 adding performance counter mem/swapped[none,absolute]:69
12857:20241101:185543.414 adding performance counter mem/swapped[average]:70
12857:20241101:185543.414 adding performance counter mem/swapped[average,absolute]:70
12857:20241101:185543.414 adding performance counter mem/swapped[minimum]:71
12857:20241101:185543.414 adding performance counter mem/swapped[minimum,absolute]:71
12857:20241101:185543.414 adding performance counter mem/swapped[maximum]:72
12857:20241101:185543.414 adding performance counter mem/swapped[maximum,absolute]:72
12857:20241101:185543.414 adding performance counter mem/swaptarget[none]:73
12857:20241101:185543.414 adding performance counter mem/swaptarget[none,absolute]:73
12857:20241101:185543.414 adding performance counter mem/swaptarget[average]:74
12857:20241101:185543.414 adding performance counter mem/swaptarget[average,absolute]:74
12857:20241101:185543.415 adding performance counter mem/swaptarget[minimum]:75
12857:20241101:185543.415 adding performance counter mem/swaptarget[minimum,absolute]:75
12857:20241101:185543.415 adding performance counter mem/swaptarget[maximum]:76
12857:20241101:185543.415 adding performance counter mem/swaptarget[maximum,absolute]:76
12857:20241101:185543.415 adding performance counter mem/swapIn[none]:77
12857:20241101:185543.415 adding performance counter mem/swapIn[none,absolute]:77
12857:20241101:185543.415 adding performance counter mem/swapIn[average]:78
12857:20241101:185543.415 adding performance counter mem/swapIn[average,absolute]:78
12857:20241101:185543.415 adding performance counter mem/swapIn[minimum]:79
12857:20241101:185543.415 adding performance counter mem/swapIn[minimum,absolute]:79
12857:20241101:185543.415 adding performance counter mem/swapIn[maximum]:80
12857:20241101:185543.415 adding performance counter mem/swapIn[maximum,absolute]:80
12857:20241101:185543.415 adding performance counter mem/swapOut[none]:81
12857:20241101:185543.415 adding performance counter mem/swapOut[none,absolute]:81
12857:20241101:185543.415 adding performance counter mem/swapOut[average]:82
12857:20241101:185543.415 adding performance counter mem/swapOut[average,absolute]:82
12857:20241101:185543.415 adding performance counter mem/swapOut[minimum]:83
12857:20241101:185543.415 adding performance counter mem/swapOut[minimum,absolute]:83
12857:20241101:185543.415 adding performance counter mem/swapOut[maximum]:84
12857:20241101:185543.415 adding performance counter mem/swapOut[maximum,absolute]:84
12857:20241101:185543.415 adding performance counter mem/swapinRate[average]:85
12857:20241101:185543.415 adding performance counter mem/swapinRate[average,rate]:85
12857:20241101:185543.415 adding performance counter mem/swapoutRate[average]:86
12857:20241101:185543.415 adding performance counter mem/swapoutRate[average,rate]:86
12857:20241101:185543.415 adding performance counter managementAgent/swapOut[average]:87
12857:20241101:185543.415 adding performance counter managementAgent/swapOut[average,rate]:87
12857:20241101:185543.415 adding performance counter managementAgent/swapIn[average]:88
12857:20241101:185543.415 adding performance counter managementAgent/swapIn[average,rate]:88
12857:20241101:185543.415 adding performance counter mem/vmmemctl[none]:89
12857:20241101:185543.415 adding performance counter mem/vmmemctl[none,absolute]:89
12857:20241101:185543.415 adding performance counter mem/vmmemctl[average]:90
12857:20241101:185543.415 adding performance counter mem/vmmemctl[average,absolute]:90
12857:20241101:185543.415 adding performance counter mem/vmmemctl[minimum]:91
12857:20241101:185543.415 adding performance counter mem/vmmemctl[minimum,absolute]:91
12857:20241101:185543.415 adding performance counter mem/vmmemctl[maximum]:92
12857:20241101:185543.415 adding performance counter mem/vmmemctl[maximum,absolute]:92
12857:20241101:185543.415 adding performance counter mem/vmmemctltarget[none]:93
12857:20241101:185543.415 adding performance counter mem/vmmemctltarget[none,absolute]:93
12857:20241101:185543.415 adding performance counter mem/vmmemctltarget[average]:94
12857:20241101:185543.415 adding performance counter mem/vmmemctltarget[average,absolute]:94
12857:20241101:185543.416 adding performance counter mem/vmmemctltarget[minimum]:95
12857:20241101:185543.416 adding performance counter mem/vmmemctltarget[minimum,absolute]:95
12857:20241101:185543.416 adding performance counter mem/vmmemctltarget[maximum]:96
12857:20241101:185543.416 adding performance counter mem/vmmemctltarget[maximum,absolute]:96
12857:20241101:185543.416 adding performance counter mem/consumed[none]:97
12857:20241101:185543.416 adding performance counter mem/consumed[none,absolute]:97
12857:20241101:185543.416 adding performance counter mem/consumed[average]:98
12857:20241101:185543.416 adding performance counter mem/consumed[average,absolute]:98
12857:20241101:185543.416 adding performance counter mem/consumed[minimum]:99
12857:20241101:185543.416 adding performance counter mem/consumed[minimum,absolute]:99
12857:20241101:185543.416 adding performance counter mem/consumed[maximum]:100
12857:20241101:185543.416 adding performance counter mem/consumed[maximum,absolute]:100
12857:20241101:185543.416 adding performance counter mem/overhead[none]:101
12857:20241101:185543.416 adding performance counter mem/overhead[none,absolute]:101
12857:20241101:185543.416 adding performance counter mem/overhead[average]:102
12857:20241101:185543.416 adding performance counter mem/overhead[average,absolute]:102
12857:20241101:185543.416 adding performance counter mem/overhead[minimum]:103
12857:20241101:185543.416 adding performance counter mem/overhead[minimum,absolute]:103
12857:20241101:185543.416 adding performance counter mem/overhead[maximum]:104
12857:20241101:185543.416 adding performance counter mem/overhead[maximum,absolute]:104
12857:20241101:185543.416 adding performance counter mem/compressed[average]:105
12857:20241101:185543.416 adding performance counter mem/compressed[average,absolute]:105
12857:20241101:185543.416 adding performance counter mem/compressionRate[average]:106
12857:20241101:185543.416 adding performance counter mem/compressionRate[average,rate]:106
12857:20241101:185543.416 adding performance counter mem/decompressionRate[average]:107
12857:20241101:185543.416 adding performance counter mem/decompressionRate[average,rate]:107
12857:20241101:185543.416 adding performance counter mem/capacity.provisioned[average]:108
12857:20241101:185543.416 adding performance counter mem/capacity.provisioned[average,absolute]:108
12857:20241101:185543.416 adding performance counter mem/capacity.entitlement[average]:109
12857:20241101:185543.416 adding performance counter mem/capacity.entitlement[average,absolute]:109
12857:20241101:185543.416 adding performance counter mem/capacity.usable[average]:110
12857:20241101:185543.416 adding performance counter mem/capacity.usable[average,absolute]:110
12857:20241101:185543.416 adding performance counter mem/capacity.usage[average]:111
12857:20241101:185543.416 adding performance counter mem/capacity.usage[average,absolute]:111
12857:20241101:185543.416 adding performance counter mem/capacity.contention[average]:112
12857:20241101:185543.416 adding performance counter mem/capacity.contention[average,rate]:112
12857:20241101:185543.416 adding performance counter mem/capacity.usage.vm[average]:113
12857:20241101:185543.416 adding performance counter mem/capacity.usage.vm[average,absolute]:113
12857:20241101:185543.416 adding performance counter mem/capacity.usage.vmOvrhd[average]:114
12857:20241101:185543.416 adding performance counter mem/capacity.usage.vmOvrhd[average,absolute]:114
12857:20241101:185543.417 adding performance counter mem/capacity.usage.vmkOvrhd[average]:115
12857:20241101:185543.417 adding performance counter mem/capacity.usage.vmkOvrhd[average,absolute]:115
12857:20241101:185543.417 adding performance counter mem/capacity.usage.userworld[average]:116
12857:20241101:185543.417 adding performance counter mem/capacity.usage.userworld[average,absolute]:116
12857:20241101:185543.417 adding performance counter mem/reservedCapacity.vm[average]:117
12857:20241101:185543.417 adding performance counter mem/reservedCapacity.vm[average,absolute]:117
12857:20241101:185543.417 adding performance counter mem/reservedCapacity.vmOvhd[average]:118
12857:20241101:185543.417 adding performance counter mem/reservedCapacity.vmOvhd[average,absolute]:118
12857:20241101:185543.417 adding performance counter mem/reservedCapacity.vmkOvrhd[average]:119
12857:20241101:185543.417 adding performance counter mem/reservedCapacity.vmkOvrhd[average,absolute]:119
12857:20241101:185543.417 adding performance counter mem/reservedCapacity.userworld[average]:120
12857:20241101:185543.417 adding performance counter mem/reservedCapacity.userworld[average,absolute]:120
12857:20241101:185543.417 adding performance counter mem/reservedCapacityPct[average]:121
12857:20241101:185543.417 adding performance counter mem/reservedCapacityPct[average,absolute]:121
12857:20241101:185543.417 adding performance counter mem/consumed.vms[average]:122
12857:20241101:185543.417 adding performance counter mem/consumed.vms[average,absolute]:122
12857:20241101:185543.417 adding performance counter mem/consumed.userworlds[average]:123
12857:20241101:185543.417 adding performance counter mem/consumed.userworlds[average,absolute]:123
12857:20241101:185543.417 adding performance counter mem/bandwidth.read[latest]:124
12857:20241101:185543.417 adding performance counter mem/bandwidth.read[latest,absolute]:124
12857:20241101:185543.417 adding performance counter mem/bandwidth.write[latest]:125
12857:20241101:185543.417 adding performance counter mem/bandwidth.write[latest,absolute]:125
12857:20241101:185543.417 adding performance counter mem/bandwidth.total[latest]:126
12857:20241101:185543.417 adding performance counter mem/bandwidth.total[latest,absolute]:126
12857:20241101:185543.417 adding performance counter mem/vm.bandwidth.read[latest]:127
12857:20241101:185543.417 adding performance counter mem/vm.bandwidth.read[latest,absolute]:127
12857:20241101:185543.417 adding performance counter mem/missrate[latest]:128
12857:20241101:185543.417 adding performance counter mem/missrate[latest,absolute]:128
12857:20241101:185543.417 adding performance counter mem/latency.read[latest]:129
12857:20241101:185543.417 adding performance counter mem/latency.read[latest,absolute]:129
12857:20241101:185543.417 adding performance counter mem/latency.write[latest]:130
12857:20241101:185543.417 adding performance counter mem/latency.write[latest,absolute]:130
12857:20241101:185543.417 adding performance counter disk/usage[none]:131
12857:20241101:185543.417 adding performance counter disk/usage[none,rate]:131
12857:20241101:185543.417 adding performance counter disk/usage[average]:132
12857:20241101:185543.417 adding performance counter disk/usage[average,rate]:132
12857:20241101:185543.417 adding performance counter disk/usage[minimum]:133
12857:20241101:185543.417 adding performance counter disk/usage[minimum,rate]:133
12857:20241101:185543.417 adding performance counter disk/usage[maximum]:134
12857:20241101:185543.417 adding performance counter disk/usage[maximum,rate]:134
12857:20241101:185543.417 adding performance counter disk/numberRead[summation]:135
12857:20241101:185543.418 adding performance counter disk/numberRead[summation,delta]:135
12857:20241101:185543.418 adding performance counter disk/numberWrite[summation]:136
12857:20241101:185543.418 adding performance counter disk/numberWrite[summation,delta]:136
12857:20241101:185543.418 adding performance counter disk/read[average]:137
12857:20241101:185543.418 adding performance counter disk/read[average,rate]:137
12857:20241101:185543.418 adding performance counter disk/write[average]:138
12857:20241101:185543.418 adding performance counter disk/write[average,rate]:138
12857:20241101:185543.418 adding performance counter disk/totalLatency[average]:139
12857:20241101:185543.418 adding performance counter disk/totalLatency[average,absolute]:139
12857:20241101:185543.418 adding performance counter disk/maxTotalLatency[latest]:140
12857:20241101:185543.418 adding performance counter disk/maxTotalLatency[latest,absolute]:140
12857:20241101:185543.418 adding performance counter disk/commandsAborted[summation]:141
12857:20241101:185543.418 adding performance counter disk/commandsAborted[summation,delta]:141
12857:20241101:185543.418 adding performance counter disk/busResets[summation]:142
12857:20241101:185543.418 adding performance counter disk/busResets[summation,delta]:142
12857:20241101:185543.418 adding performance counter disk/numberReadAveraged[average]:143
12857:20241101:185543.418 adding performance counter disk/numberReadAveraged[average,rate]:143
12857:20241101:185543.418 adding performance counter disk/numberWriteAveraged[average]:144
12857:20241101:185543.418 adding performance counter disk/numberWriteAveraged[average,rate]:144
12857:20241101:185543.418 adding performance counter disk/throughput.usage[average]:145
12857:20241101:185543.418 adding performance counter disk/throughput.usage[average,rate]:145
12857:20241101:185543.418 adding performance counter disk/throughput.contention[average]:146
12857:20241101:185543.418 adding performance counter disk/throughput.contention[average,absolute]:146
12857:20241101:185543.418 adding performance counter disk/scsiReservationConflicts[summation]:147
12857:20241101:185543.418 adding performance counter disk/scsiReservationConflicts[summation,delta]:147
12857:20241101:185543.418 adding performance counter disk/scsiReservationCnflctsPct[average]:148
12857:20241101:185543.418 adding performance counter disk/scsiReservationCnflctsPct[average,absolute]:148
12857:20241101:185543.418 adding performance counter net/usage[none]:149
12857:20241101:185543.418 adding performance counter net/usage[none,rate]:149
12857:20241101:185543.418 adding performance counter net/usage[average]:150
12857:20241101:185543.418 adding performance counter net/usage[average,rate]:150
12857:20241101:185543.418 adding performance counter net/usage[minimum]:151
12857:20241101:185543.418 adding performance counter net/usage[minimum,rate]:151
12857:20241101:185543.418 adding performance counter net/usage[maximum]:152
12857:20241101:185543.418 adding performance counter net/usage[maximum,rate]:152
12857:20241101:185543.418 adding performance counter net/packetsRx[summation]:153
12857:20241101:185543.418 adding performance counter net/packetsRx[summation,delta]:153
12857:20241101:185543.418 adding performance counter net/packetsTx[summation]:154
12857:20241101:185543.418 adding performance counter net/packetsTx[summation,delta]:154
12857:20241101:185543.418 adding performance counter net/received[average]:155
12857:20241101:185543.418 adding performance counter net/received[average,rate]:155
12857:20241101:185543.419 adding performance counter net/transmitted[average]:156
12857:20241101:185543.419 adding performance counter net/transmitted[average,rate]:156
12857:20241101:185543.419 adding performance counter net/throughput.provisioned[average]:157
12857:20241101:185543.419 adding performance counter net/throughput.provisioned[average,absolute]:157
12857:20241101:185543.419 adding performance counter net/throughput.usable[average]:158
12857:20241101:185543.419 adding performance counter net/throughput.usable[average,absolute]:158
12857:20241101:185543.419 adding performance counter net/throughput.usage[average]:159
12857:20241101:185543.419 adding performance counter net/throughput.usage[average,rate]:159
12857:20241101:185543.419 adding performance counter net/throughput.contention[summation]:160
12857:20241101:185543.419 adding performance counter net/throughput.contention[summation,delta]:160
12857:20241101:185543.419 adding performance counter net/throughput.packetsPerSec[average]:161
12857:20241101:185543.419 adding performance counter net/throughput.packetsPerSec[average,rate]:161
12857:20241101:185543.419 adding performance counter sys/uptime[latest]:162
12857:20241101:185543.419 adding performance counter sys/uptime[latest,absolute]:162
12857:20241101:185543.419 adding performance counter sys/heartbeat[summation]:163
12857:20241101:185543.419 adding performance counter sys/heartbeat[summation,delta]:163
12857:20241101:185543.419 adding performance counter power/power[average]:164
12857:20241101:185543.419 adding performance counter power/power[average,rate]:164
12857:20241101:185543.419 adding performance counter power/powerCap[average]:165
12857:20241101:185543.419 adding performance counter power/powerCap[average,absolute]:165
12857:20241101:185543.419 adding performance counter power/energy[summation]:166
12857:20241101:185543.419 adding performance counter power/energy[summation,delta]:166
12857:20241101:185543.419 adding performance counter power/capacity.usagePct[average]:167
12857:20241101:185543.419 adding performance counter power/capacity.usagePct[average,absolute]:167
12857:20241101:185543.419 adding performance counter storageAdapter/commandsAveraged[average]:168
12857:20241101:185543.419 adding performance counter storageAdapter/commandsAveraged[average,rate]:168
12857:20241101:185543.419 adding performance counter storageAdapter/numberReadAveraged[average]:169
12857:20241101:185543.419 adding performance counter storageAdapter/numberReadAveraged[average,rate]:169
12857:20241101:185543.419 adding performance counter storageAdapter/numberWriteAveraged[average]:170
12857:20241101:185543.419 adding performance counter storageAdapter/numberWriteAveraged[average,rate]:170
12857:20241101:185543.419 adding performance counter storageAdapter/read[average]:171
12857:20241101:185543.419 adding performance counter storageAdapter/read[average,rate]:171
12857:20241101:185543.419 adding performance counter storageAdapter/write[average]:172
12857:20241101:185543.419 adding performance counter storageAdapter/write[average,rate]:172
12857:20241101:185543.419 adding performance counter storageAdapter/totalReadLatency[average]:173
12857:20241101:185543.419 adding performance counter storageAdapter/totalReadLatency[average,absolute]:173
12857:20241101:185543.419 adding performance counter storageAdapter/totalWriteLatency[average]:174
12857:20241101:185543.419 adding performance counter storageAdapter/totalWriteLatency[average,absolute]:174
12857:20241101:185543.420 adding performance counter storageAdapter/maxTotalLatency[latest]:175
12857:20241101:185543.420 adding performance counter storageAdapter/maxTotalLatency[latest,absolute]:175
12857:20241101:185543.420 adding performance counter storageAdapter/throughput.cont[average]:176
12857:20241101:185543.420 adding performance counter storageAdapter/throughput.cont[average,absolute]:176
12857:20241101:185543.420 adding performance counter storageAdapter/OIOsPct[average]:177
12857:20241101:185543.420 adding performance counter storageAdapter/OIOsPct[average,absolute]:177
12857:20241101:185543.420 adding performance counter virtualDisk/numberReadAveraged[average]:178
12857:20241101:185543.420 adding performance counter virtualDisk/numberReadAveraged[average,rate]:178
12857:20241101:185543.420 adding performance counter virtualDisk/numberWriteAveraged[average]:179
12857:20241101:185543.420 adding performance counter virtualDisk/numberWriteAveraged[average,rate]:179
12857:20241101:185543.420 adding performance counter virtualDisk/read[average]:180
12857:20241101:185543.420 adding performance counter virtualDisk/read[average,rate]:180
12857:20241101:185543.420 adding performance counter virtualDisk/write[average]:181
12857:20241101:185543.420 adding performance counter virtualDisk/write[average,rate]:181
12857:20241101:185543.420 adding performance counter virtualDisk/totalReadLatency[average]:182
12857:20241101:185543.420 adding performance counter virtualDisk/totalReadLatency[average,absolute]:182
12857:20241101:185543.420 adding performance counter virtualDisk/totalWriteLatency[average]:183
12857:20241101:185543.420 adding performance counter virtualDisk/totalWriteLatency[average,absolute]:183
12857:20241101:185543.420 adding performance counter virtualDisk/throughput.cont[average]:184
12857:20241101:185543.420 adding performance counter virtualDisk/throughput.cont[average,absolute]:184
12857:20241101:185543.420 adding performance counter datastore/numberReadAveraged[average]:185
12857:20241101:185543.420 adding performance counter datastore/numberReadAveraged[average,rate]:185
12857:20241101:185543.420 adding performance counter datastore/numberWriteAveraged[average]:186
12857:20241101:185543.420 adding performance counter datastore/numberWriteAveraged[average,rate]:186
12857:20241101:185543.420 adding performance counter datastore/read[average]:187
12857:20241101:185543.420 adding performance counter datastore/read[average,rate]:187
12857:20241101:185543.420 adding performance counter datastore/write[average]:188
12857:20241101:185543.420 adding performance counter datastore/write[average,rate]:188
12857:20241101:185543.420 adding performance counter datastore/totalReadLatency[average]:189
12857:20241101:185543.420 adding performance counter datastore/totalReadLatency[average,absolute]:189
12857:20241101:185543.420 adding performance counter datastore/totalWriteLatency[average]:190
12857:20241101:185543.420 adding performance counter datastore/totalWriteLatency[average,absolute]:190
12857:20241101:185543.420 adding performance counter datastore/maxTotalLatency[latest]:191
12857:20241101:185543.420 adding performance counter datastore/maxTotalLatency[latest,absolute]:191
12857:20241101:185543.420 adding performance counter datastore/datastoreIops[average]:192
12857:20241101:185543.420 adding performance counter datastore/datastoreIops[average,absolute]:192
12857:20241101:185543.420 adding performance counter datastore/sizeNormalizedDatastoreLatency[average]:193
12857:20241101:185543.420 adding performance counter datastore/sizeNormalizedDatastoreLatency[average,absolute]:193
12857:20241101:185543.420 adding performance counter datastore/throughput.usage[average]:194
12857:20241101:185543.420 adding performance counter datastore/throughput.usage[average,absolute]:194
12857:20241101:185543.420 adding performance counter datastore/throughput.contention[average]:195
12857:20241101:185543.421 adding performance counter datastore/throughput.contention[average,absolute]:195
12857:20241101:185543.421 adding performance counter datastore/busResets[summation]:196
12857:20241101:185543.421 adding performance counter datastore/busResets[summation,delta]:196
12857:20241101:185543.421 adding performance counter datastore/commandsAborted[summation]:197
12857:20241101:185543.421 adding performance counter datastore/commandsAborted[summation,delta]:197
12857:20241101:185543.421 adding performance counter datastore/siocActiveTimePercentage[average]:198
12857:20241101:185543.421 adding performance counter datastore/siocActiveTimePercentage[average,absolute]:198
12857:20241101:185543.421 adding performance counter storagePath/throughput.cont[average]:199
12857:20241101:185543.421 adding performance counter storagePath/throughput.cont[average,absolute]:199
12857:20241101:185543.421 adding performance counter storagePath/maxTotalLatency[latest]:200
12857:20241101:185543.421 adding performance counter storagePath/maxTotalLatency[latest,absolute]:200
12857:20241101:185543.421 adding performance counter virtualDisk/throughput.usage[average]:201
12857:20241101:185543.421 adding performance counter virtualDisk/throughput.usage[average,rate]:201
12857:20241101:185543.421 adding performance counter virtualDisk/commandsAborted[summation]:202
12857:20241101:185543.421 adding performance counter virtualDisk/commandsAborted[summation,delta]:202
12857:20241101:185543.421 adding performance counter virtualDisk/busResets[summation]:203
12857:20241101:185543.421 adding performance counter virtualDisk/busResets[summation,delta]:203
12857:20241101:185543.421 adding performance counter storageAdapter/outstandingIOs[average]:204
12857:20241101:185543.421 adding performance counter storageAdapter/outstandingIOs[average,absolute]:204
12857:20241101:185543.421 adding performance counter storageAdapter/queued[average]:205
12857:20241101:185543.421 adding performance counter storageAdapter/queued[average,absolute]:205
12857:20241101:185543.421 adding performance counter storageAdapter/queueDepth[average]:206
12857:20241101:185543.421 adding performance counter storageAdapter/queueDepth[average,absolute]:206
12857:20241101:185543.421 adding performance counter storageAdapter/queueLatency[average]:207
12857:20241101:185543.421 adding performance counter storageAdapter/queueLatency[average,absolute]:207
12857:20241101:185543.421 adding performance counter storageAdapter/throughput.usag[average]:208
12857:20241101:185543.421 adding performance counter storageAdapter/throughput.usag[average,rate]:208
12857:20241101:185543.421 adding performance counter storagePath/busResets[summation]:209
12857:20241101:185543.421 adding performance counter storagePath/busResets[summation,delta]:209
12857:20241101:185543.421 adding performance counter storagePath/commandsAborted[summation]:210
12857:20241101:185543.421 adding performance counter storagePath/commandsAborted[summation,delta]:210
12857:20241101:185543.421 adding performance counter storagePath/throughput.usage[average]:211
12857:20241101:185543.421 adding performance counter storagePath/throughput.usage[average,rate]:211
12857:20241101:185543.421 adding performance counter net/throughput.usage.vm[average]:212
12857:20241101:185543.421 adding performance counter net/throughput.usage.vm[average,rate]:212
12857:20241101:185543.421 adding performance counter net/throughput.usage.nfs[average]:213
12857:20241101:185543.421 adding performance counter net/throughput.usage.nfs[average,rate]:213
12857:20241101:185543.421 adding performance counter net/throughput.usage.vmotion[average]:214
12857:20241101:185543.421 adding performance counter net/throughput.usage.vmotion[average,rate]:214
12857:20241101:185543.422 adding performance counter net/throughput.usage.ft[average]:215
12857:20241101:185543.422 adding performance counter net/throughput.usage.ft[average,rate]:215
12857:20241101:185543.422 adding performance counter net/throughput.usage.iscsi[average]:216
12857:20241101:185543.422 adding performance counter net/throughput.usage.iscsi[average,rate]:216
12857:20241101:185543.422 adding performance counter net/throughput.usage.hbr[average]:217
12857:20241101:185543.422 adding performance counter net/throughput.usage.hbr[average,rate]:217
12857:20241101:185543.422 adding performance counter power/capacity.usable[average]:218
12857:20241101:185543.422 adding performance counter power/capacity.usable[average,absolute]:218
12857:20241101:185543.422 adding performance counter power/capacity.usage[average]:219
12857:20241101:185543.422 adding performance counter power/capacity.usage[average,absolute]:219
12857:20241101:185543.422 adding performance counter power/capacity.usageIdle[average]:220
12857:20241101:185543.422 adding performance counter power/capacity.usageIdle[average,absolute]:220
12857:20241101:185543.422 adding performance counter power/capacity.usageSystem[average]:221
12857:20241101:185543.422 adding performance counter power/capacity.usageSystem[average,absolute]:221
12857:20241101:185543.422 adding performance counter power/capacity.usageVm[average]:222
12857:20241101:185543.422 adding performance counter power/capacity.usageVm[average,absolute]:222
12857:20241101:185543.422 adding performance counter power/capacity.usageStatic[average]:223
12857:20241101:185543.422 adding performance counter power/capacity.usageStatic[average,absolute]:223
12857:20241101:185543.422 adding performance counter cpu/cpuentitlement[latest]:224
12857:20241101:185543.422 adding performance counter cpu/cpuentitlement[latest,absolute]:224
12857:20241101:185543.422 adding performance counter mem/mementitlement[latest]:225
12857:20241101:185543.422 adding performance counter mem/mementitlement[latest,absolute]:225
12857:20241101:185543.422 adding performance counter clusterServices/vmDrsScore[latest]:226
12857:20241101:185543.422 adding performance counter clusterServices/vmDrsScore[latest,absolute]:226
12857:20241101:185543.422 adding performance counter clusterServices/cpufairness[latest]:227
12857:20241101:185543.422 adding performance counter clusterServices/cpufairness[latest,absolute]:227
12857:20241101:185543.422 adding performance counter clusterServices/memfairness[latest]:228
12857:20241101:185543.422 adding performance counter clusterServices/memfairness[latest,absolute]:228
12857:20241101:185543.422 adding performance counter net/throughput.pktsTx[average]:229
12857:20241101:185543.422 adding performance counter net/throughput.pktsTx[average,absolute]:229
12857:20241101:185543.422 adding performance counter net/throughput.pktsTxMulticast[average]:230
12857:20241101:185543.422 adding performance counter net/throughput.pktsTxMulticast[average,absolute]:230
12857:20241101:185543.422 adding performance counter net/throughput.pktsTxBroadcast[average]:231
12857:20241101:185543.422 adding performance counter net/throughput.pktsTxBroadcast[average,absolute]:231
12857:20241101:185543.422 adding performance counter net/throughput.pktsRx[average]:232
12857:20241101:185543.422 adding performance counter net/throughput.pktsRx[average,absolute]:232
12857:20241101:185543.422 adding performance counter net/throughput.pktsRxMulticast[average]:233
12857:20241101:185543.422 adding performance counter net/throughput.pktsRxMulticast[average,absolute]:233
12857:20241101:185543.422 adding performance counter net/throughput.pktsRxBroadcast[average]:234
12857:20241101:185543.422 adding performance counter net/throughput.pktsRxBroadcast[average,absolute]:234
12857:20241101:185543.423 adding performance counter net/throughput.droppedTx[average]:235
12857:20241101:185543.423 adding performance counter net/throughput.droppedTx[average,absolute]:235
12857:20241101:185543.423 adding performance counter net/throughput.droppedRx[average]:236
12857:20241101:185543.423 adding performance counter net/throughput.droppedRx[average,absolute]:236
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsTx[average]:237
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsTx[average,absolute]:237
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsTxMcast[average]:238
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsTxMcast[average,absolute]:238
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsTxBcast[average]:239
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsTxBcast[average,absolute]:239
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsRx[average]:240
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsRx[average,absolute]:240
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsRxMcast[average]:241
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsRxMcast[average,absolute]:241
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsRxBcast[average]:242
12857:20241101:185543.423 adding performance counter net/throughput.vds.pktsRxBcast[average,absolute]:242
12857:20241101:185543.423 adding performance counter net/throughput.vds.droppedTx[average]:243
12857:20241101:185543.423 adding performance counter net/throughput.vds.droppedTx[average,absolute]:243
12857:20241101:185543.423 adding performance counter net/throughput.vds.droppedRx[average]:244
12857:20241101:185543.423 adding performance counter net/throughput.vds.droppedRx[average,absolute]:244
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagTx[average]:245
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagTx[average,absolute]:245
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagTxMcast[average]:246
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagTxMcast[average,absolute]:246
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagTxBcast[average]:247
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagTxBcast[average,absolute]:247
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagRx[average]:248
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagRx[average,absolute]:248
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagRxMcast[average]:249
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagRxMcast[average,absolute]:249
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagRxBcast[average]:250
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagRxBcast[average,absolute]:250
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagDropTx[average]:251
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagDropTx[average,absolute]:251
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagDropRx[average]:252
12857:20241101:185543.423 adding performance counter net/throughput.vds.lagDropRx[average,absolute]:252
12857:20241101:185543.423 adding performance counter vmop/numPoweron[latest]:253
12857:20241101:185543.423 adding performance counter vmop/numPoweron[latest,absolute]:253
12857:20241101:185543.423 adding performance counter vmop/numPoweroff[latest]:254
12857:20241101:185543.423 adding performance counter vmop/numPoweroff[latest,absolute]:254
12857:20241101:185543.423 adding performance counter vmop/numSuspend[latest]:255
12857:20241101:185543.424 adding performance counter vmop/numSuspend[latest,absolute]:255
12857:20241101:185543.424 adding performance counter vmop/numReset[latest]:256
12857:20241101:185543.424 adding performance counter vmop/numReset[latest,absolute]:256
12857:20241101:185543.424 adding performance counter vmop/numRebootGuest[latest]:257
12857:20241101:185543.424 adding performance counter vmop/numRebootGuest[latest,absolute]:257
12857:20241101:185543.424 adding performance counter vmop/numStandbyGuest[latest]:258
12857:20241101:185543.424 adding performance counter vmop/numStandbyGuest[latest,absolute]:258
12857:20241101:185543.424 adding performance counter vmop/numShutdownGuest[latest]:259
12857:20241101:185543.424 adding performance counter vmop/numShutdownGuest[latest,absolute]:259
12857:20241101:185543.424 adding performance counter vmop/numCreate[latest]:260
12857:20241101:185543.424 adding performance counter vmop/numCreate[latest,absolute]:260
12857:20241101:185543.424 adding performance counter vmop/numDestroy[latest]:261
12857:20241101:185543.424 adding performance counter vmop/numDestroy[latest,absolute]:261
12857:20241101:185543.424 adding performance counter vmop/numRegister[latest]:262
12857:20241101:185543.424 adding performance counter vmop/numRegister[latest,absolute]:262
12857:20241101:185543.424 adding performance counter vmop/numUnregister[latest]:263
12857:20241101:185543.424 adding performance counter vmop/numUnregister[latest,absolute]:263
12857:20241101:185543.424 adding performance counter vmop/numReconfigure[latest]:264
12857:20241101:185543.424 adding performance counter vmop/numReconfigure[latest,absolute]:264
12857:20241101:185543.424 adding performance counter vmop/numClone[latest]:265
12857:20241101:185543.424 adding performance counter vmop/numClone[latest,absolute]:265
12857:20241101:185543.424 adding performance counter vmop/numDeploy[latest]:266
12857:20241101:185543.424 adding performance counter vmop/numDeploy[latest,absolute]:266
12857:20241101:185543.424 adding performance counter vmop/numChangeHost[latest]:267
12857:20241101:185543.424 adding performance counter vmop/numChangeHost[latest,absolute]:267
12857:20241101:185543.424 adding performance counter vmop/numChangeDS[latest]:268
12857:20241101:185543.424 adding performance counter vmop/numChangeDS[latest,absolute]:268
12857:20241101:185543.424 adding performance counter vmop/numChangeHostDS[latest]:269
12857:20241101:185543.424 adding performance counter vmop/numChangeHostDS[latest,absolute]:269
12857:20241101:185543.424 adding performance counter vmop/numVMotion[latest]:270
12857:20241101:185543.424 adding performance counter vmop/numVMotion[latest,absolute]:270
12857:20241101:185543.424 adding performance counter vmop/numSVMotion[latest]:271
12857:20241101:185543.424 adding performance counter vmop/numSVMotion[latest,absolute]:271
12857:20241101:185543.424 adding performance counter vmop/numXVMotion[latest]:272
12857:20241101:185543.424 adding performance counter vmop/numXVMotion[latest,absolute]:272
12857:20241101:185543.424 adding performance counter clusterServices/effectivecpu[average]:273
12857:20241101:185543.424 adding performance counter clusterServices/effectivecpu[average,rate]:273
12857:20241101:185543.424 adding performance counter clusterServices/effectivemem[average]:274
12857:20241101:185543.424 adding performance counter clusterServices/effectivemem[average,absolute]:274
12857:20241101:185543.425 adding performance counter cpu/totalmhz[average]:275
12857:20241101:185543.425 adding performance counter cpu/totalmhz[average,rate]:275
12857:20241101:185543.425 adding performance counter mem/totalmb[average]:276
12857:20241101:185543.425 adding performance counter mem/totalmb[average,absolute]:276
12857:20241101:185543.425 adding performance counter clusterServices/clusterDrsScore[latest]:277
12857:20241101:185543.425 adding performance counter clusterServices/clusterDrsScore[latest,absolute]:277
12857:20241101:185543.425 adding performance counter clusterServices/failover[latest]:278
12857:20241101:185543.425 adding performance counter clusterServices/failover[latest,absolute]:278
12857:20241101:185543.425 adding performance counter gpu/utilization[average]:279
12857:20241101:185543.425 adding performance counter gpu/utilization[average,absolute]:279
12857:20241101:185543.425 adding performance counter gpu/mem.used[average]:280
12857:20241101:185543.425 adding performance counter gpu/mem.used[average,absolute]:280
12857:20241101:185543.425 adding performance counter gpu/mem.reserved[latest]:281
12857:20241101:185543.425 adding performance counter gpu/mem.reserved[latest,absolute]:281
12857:20241101:185543.425 adding performance counter gpu/power.used[latest]:282
12857:20241101:185543.425 adding performance counter gpu/power.used[latest,absolute]:282
12857:20241101:185543.425 adding performance counter gpu/temperature[average]:283
12857:20241101:185543.425 adding performance counter gpu/temperature[average,absolute]:283
12857:20241101:185543.425 adding performance counter gpu/mem.total[latest]:284
12857:20241101:185543.425 adding performance counter gpu/mem.total[latest,absolute]:284
12857:20241101:185543.425 adding performance counter disk/used[latest]:285
12857:20241101:185543.425 adding performance counter disk/used[latest,absolute]:285
12857:20241101:185543.425 adding performance counter disk/provisioned[latest]:286
12857:20241101:185543.425 adding performance counter disk/provisioned[latest,absolute]:286
12857:20241101:185543.425 adding performance counter disk/capacity[latest]:287
12857:20241101:185543.425 adding performance counter disk/capacity[latest,absolute]:287
12857:20241101:185543.425 adding performance counter disk/unshared[latest]:288
12857:20241101:185543.425 adding performance counter disk/unshared[latest,absolute]:288
12857:20241101:185543.425 adding performance counter disk/actualused[latest]:289
12857:20241101:185543.425 adding performance counter disk/actualused[latest,absolute]:289
12857:20241101:185543.425 adding performance counter disk/deltaused[latest]:290
12857:20241101:185543.425 adding performance counter disk/deltaused[latest,absolute]:290
12857:20241101:185543.425 adding performance counter disk/capacity.provisioned[average]:291
12857:20241101:185543.425 adding performance counter disk/capacity.provisioned[average,absolute]:291
12857:20241101:185543.425 adding performance counter disk/capacity.usage[average]:292
12857:20241101:185543.425 adding performance counter disk/capacity.usage[average,absolute]:292
12857:20241101:185543.425 adding performance counter disk/capacity.contention[average]:293
12857:20241101:185543.425 adding performance counter disk/capacity.contention[average,absolute]:293
12857:20241101:185543.425 adding performance counter vcDebugInfo/activationlatencystats[maximum]:294
12857:20241101:185543.425 adding performance counter vcDebugInfo/activationlatencystats[maximum,absolute]:294
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationlatencystats[minimum]:295
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationlatencystats[minimum,absolute]:295
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationlatencystats[summation]:296
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationlatencystats[summation,absolute]:296
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationstats[maximum]:297
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationstats[maximum,absolute]:297
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationstats[minimum]:298
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationstats[minimum,absolute]:298
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationstats[summation]:299
12857:20241101:185543.426 adding performance counter vcDebugInfo/activationstats[summation,absolute]:299
12857:20241101:185543.426 adding performance counter vcResources/buffersz[average]:300
12857:20241101:185543.426 adding performance counter vcResources/buffersz[average,absolute]:300
12857:20241101:185543.426 adding performance counter vcResources/cachesz[average]:301
12857:20241101:185543.426 adding performance counter vcResources/cachesz[average,absolute]:301
12857:20241101:185543.426 adding performance counter vcResources/ctxswitchesrate[average]:302
12857:20241101:185543.426 adding performance counter vcResources/ctxswitchesrate[average,rate]:302
12857:20241101:185543.426 adding performance counter vcResources/diskreadsectorrate[average]:303
12857:20241101:185543.426 adding performance counter vcResources/diskreadsectorrate[average,rate]:303
12857:20241101:185543.426 adding performance counter vcResources/diskreadsrate[average]:304
12857:20241101:185543.426 adding performance counter vcResources/diskreadsrate[average,rate]:304
12857:20241101:185543.426 adding performance counter vcResources/diskwritesectorrate[average]:305
12857:20241101:185543.426 adding performance counter vcResources/diskwritesectorrate[average,rate]:305
12857:20241101:185543.426 adding performance counter vcResources/diskwritesrate[average]:306
12857:20241101:185543.426 adding performance counter vcResources/diskwritesrate[average,rate]:306
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsynclatencystats[maximum]:307
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsynclatencystats[maximum,absolute]:307
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsynclatencystats[minimum]:308
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsynclatencystats[minimum,absolute]:308
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsynclatencystats[summation]:309
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsynclatencystats[summation,absolute]:309
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsyncstats[maximum]:310
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsyncstats[maximum,absolute]:310
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsyncstats[minimum]:311
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsyncstats[minimum,absolute]:311
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsyncstats[summation]:312
12857:20241101:185543.426 adding performance counter vcDebugInfo/hostsyncstats[summation,absolute]:312
12857:20241101:185543.426 adding performance counter vcDebugInfo/inventorystats[maximum]:313
12857:20241101:185543.426 adding performance counter vcDebugInfo/inventorystats[maximum,absolute]:313
12857:20241101:185543.426 adding performance counter vcDebugInfo/inventorystats[minimum]:314
12857:20241101:185543.426 adding performance counter vcDebugInfo/inventorystats[minimum,absolute]:314
12857:20241101:185543.427 adding performance counter vcDebugInfo/inventorystats[summation]:315
12857:20241101:185543.427 adding performance counter vcDebugInfo/inventorystats[summation,absolute]:315
12857:20241101:185543.427 adding performance counter vcDebugInfo/lockstats[maximum]:316
12857:20241101:185543.427 adding performance counter vcDebugInfo/lockstats[maximum,absolute]:316
12857:20241101:185543.427 adding performance counter vcDebugInfo/lockstats[minimum]:317
12857:20241101:185543.427 adding performance counter vcDebugInfo/lockstats[minimum,absolute]:317
12857:20241101:185543.427 adding performance counter vcDebugInfo/lockstats[summation]:318
12857:20241101:185543.427 adding performance counter vcDebugInfo/lockstats[summation,absolute]:318
12857:20241101:185543.427 adding performance counter vcDebugInfo/lrostats[maximum]:319
12857:20241101:185543.427 adding performance counter vcDebugInfo/lrostats[maximum,absolute]:319
12857:20241101:185543.427 adding performance counter vcDebugInfo/lrostats[minimum]:320
12857:20241101:185543.427 adding performance counter vcDebugInfo/lrostats[minimum,absolute]:320
12857:20241101:185543.427 adding performance counter vcDebugInfo/lrostats[summation]:321
12857:20241101:185543.427 adding performance counter vcDebugInfo/lrostats[summation,absolute]:321
12857:20241101:185543.427 adding performance counter vcDebugInfo/miscstats[maximum]:322
12857:20241101:185543.427 adding performance counter vcDebugInfo/miscstats[maximum,absolute]:322
12857:20241101:185543.427 adding performance counter vcDebugInfo/miscstats[minimum]:323
12857:20241101:185543.427 adding performance counter vcDebugInfo/miscstats[minimum,absolute]:323
12857:20241101:185543.427 adding performance counter vcDebugInfo/miscstats[summation]:324
12857:20241101:185543.427 adding performance counter vcDebugInfo/miscstats[summation,absolute]:324
12857:20241101:185543.427 adding performance counter vcDebugInfo/morefregstats[maximum]:325
12857:20241101:185543.427 adding performance counter vcDebugInfo/morefregstats[maximum,absolute]:325
12857:20241101:185543.427 adding performance counter vcDebugInfo/morefregstats[minimum]:326
12857:20241101:185543.427 adding performance counter vcDebugInfo/morefregstats[minimum,absolute]:326
12857:20241101:185543.427 adding performance counter vcDebugInfo/morefregstats[summation]:327
12857:20241101:185543.427 adding performance counter vcDebugInfo/morefregstats[summation,absolute]:327
12857:20241101:185543.427 adding performance counter vcResources/packetrecvrate[average]:328
12857:20241101:185543.427 adding performance counter vcResources/packetrecvrate[average,rate]:328
12857:20241101:185543.427 adding performance counter vcResources/packetsentrate[average]:329
12857:20241101:185543.427 adding performance counter vcResources/packetsentrate[average,rate]:329
12857:20241101:185543.427 adding performance counter vcResources/systemcpuusage[average]:330
12857:20241101:185543.427 adding performance counter vcResources/systemcpuusage[average,rate]:330
12857:20241101:185543.427 adding performance counter vcResources/pagefaultrate[average]:331
12857:20241101:185543.427 adding performance counter vcResources/pagefaultrate[average,rate]:331
12857:20241101:185543.427 adding performance counter vcResources/physicalmemusage[average]:332
12857:20241101:185543.427 adding performance counter vcResources/physicalmemusage[average,absolute]:332
12857:20241101:185543.427 adding performance counter vcResources/priviledgedcpuusage[average]:333
12857:20241101:185543.427 adding performance counter vcResources/priviledgedcpuusage[average,rate]:333
12857:20241101:185543.427 adding performance counter vcDebugInfo/scoreboard[maximum]:334
12857:20241101:185543.427 adding performance counter vcDebugInfo/scoreboard[maximum,absolute]:334
12857:20241101:185543.428 adding performance counter vcDebugInfo/scoreboard[minimum]:335
12857:20241101:185543.428 adding performance counter vcDebugInfo/scoreboard[minimum,absolute]:335
12857:20241101:185543.428 adding performance counter vcDebugInfo/scoreboard[summation]:336
12857:20241101:185543.428 adding performance counter vcDebugInfo/scoreboard[summation,absolute]:336
12857:20241101:185543.428 adding performance counter vcDebugInfo/sessionstats[maximum]:337
12857:20241101:185543.428 adding performance counter vcDebugInfo/sessionstats[maximum,absolute]:337
12857:20241101:185543.428 adding performance counter vcDebugInfo/sessionstats[minimum]:338
12857:20241101:185543.428 adding performance counter vcDebugInfo/sessionstats[minimum,absolute]:338
12857:20241101:185543.428 adding performance counter vcDebugInfo/sessionstats[summation]:339
12857:20241101:185543.428 adding performance counter vcDebugInfo/sessionstats[summation,absolute]:339
12857:20241101:185543.428 adding performance counter vcResources/syscallsrate[average]:340
12857:20241101:185543.428 adding performance counter vcResources/syscallsrate[average,rate]:340
12857:20241101:185543.428 adding performance counter vcDebugInfo/systemstats[maximum]:341
12857:20241101:185543.428 adding performance counter vcDebugInfo/systemstats[maximum,absolute]:341
12857:20241101:185543.428 adding performance counter vcDebugInfo/systemstats[minimum]:342
12857:20241101:185543.428 adding performance counter vcDebugInfo/systemstats[minimum,absolute]:342
12857:20241101:185543.428 adding performance counter vcDebugInfo/systemstats[summation]:343
12857:20241101:185543.428 adding performance counter vcDebugInfo/systemstats[summation,absolute]:343
12857:20241101:185543.428 adding performance counter vcResources/usercpuusage[average]:344
12857:20241101:185543.428 adding performance counter vcResources/usercpuusage[average,rate]:344
12857:20241101:185543.428 adding performance counter vcDebugInfo/vcservicestats[maximum]:345
12857:20241101:185543.428 adding performance counter vcDebugInfo/vcservicestats[maximum,absolute]:345
12857:20241101:185543.428 adding performance counter vcDebugInfo/vcservicestats[minimum]:346
12857:20241101:185543.428 adding performance counter vcDebugInfo/vcservicestats[minimum,absolute]:346
12857:20241101:185543.428 adding performance counter vcDebugInfo/vcservicestats[summation]:347
12857:20241101:185543.428 adding performance counter vcDebugInfo/vcservicestats[summation,absolute]:347
12857:20241101:185543.428 adding performance counter vcResources/virtualmemusage[average]:348
12857:20241101:185543.428 adding performance counter vcResources/virtualmemusage[average,absolute]:348
12857:20241101:185543.428 adding performance counter virtualDisk/readOIO[latest]:349
12857:20241101:185543.428 adding performance counter virtualDisk/readOIO[latest,absolute]:349
12857:20241101:185543.428 adding performance counter virtualDisk/writeOIO[latest]:350
12857:20241101:185543.428 adding performance counter virtualDisk/writeOIO[latest,absolute]:350
12857:20241101:185543.428 adding performance counter virtualDisk/readLoadMetric[latest]:351
12857:20241101:185543.428 adding performance counter virtualDisk/readLoadMetric[latest,absolute]:351
12857:20241101:185543.428 adding performance counter virtualDisk/writeLoadMetric[latest]:352
12857:20241101:185543.428 adding performance counter virtualDisk/writeLoadMetric[latest,absolute]:352
12857:20241101:185543.428 adding performance counter rescpu/actav1[latest]:353
12857:20241101:185543.428 adding performance counter rescpu/actav1[latest,absolute]:353
12857:20241101:185543.428 adding performance counter datastore/datastoreReadBytes[latest]:354
12857:20241101:185543.428 adding performance counter datastore/datastoreReadBytes[latest,absolute]:354
12857:20241101:185543.429 adding performance counter datastore/datastoreWriteBytes[latest]:355
12857:20241101:185543.429 adding performance counter datastore/datastoreWriteBytes[latest,absolute]:355
12857:20241101:185543.429 adding performance counter datastore/datastoreReadIops[latest]:356
12857:20241101:185543.429 adding performance counter datastore/datastoreReadIops[latest,absolute]:356
12857:20241101:185543.429 adding performance counter datastore/datastoreWriteIops[latest]:357
12857:20241101:185543.429 adding performance counter datastore/datastoreWriteIops[latest,absolute]:357
12857:20241101:185543.429 adding performance counter datastore/datastoreReadOIO[latest]:358
12857:20241101:185543.429 adding performance counter datastore/datastoreReadOIO[latest,absolute]:358
12857:20241101:185543.429 adding performance counter datastore/datastoreWriteOIO[latest]:359
12857:20241101:185543.429 adding performance counter datastore/datastoreWriteOIO[latest,absolute]:359
12857:20241101:185543.429 adding performance counter datastore/datastoreNormalReadLatency[latest]:360
12857:20241101:185543.429 adding performance counter datastore/datastoreNormalReadLatency[latest,absolute]:360
12857:20241101:185543.429 adding performance counter datastore/datastoreNormalWriteLatency[latest]:361
12857:20241101:185543.429 adding performance counter datastore/datastoreNormalWriteLatency[latest,absolute]:361
12857:20241101:185543.429 adding performance counter datastore/datastoreReadLoadMetric[latest]:362
12857:20241101:185543.429 adding performance counter datastore/datastoreReadLoadMetric[latest,absolute]:362
12857:20241101:185543.429 adding performance counter datastore/datastoreWriteLoadMetric[latest]:363
12857:20241101:185543.429 adding performance counter datastore/datastoreWriteLoadMetric[latest,absolute]:363
12857:20241101:185543.429 adding performance counter datastore/datastoreVMObservedLatency[latest]:364
12857:20241101:185543.429 adding performance counter datastore/datastoreVMObservedLatency[latest,absolute]:364
12857:20241101:185543.429 adding performance counter disk/scsiReservationCnflctsPct[average]:365
12857:20241101:185543.429 adding performance counter disk/scsiReservationCnflctsPct[average,rate]:365
12857:20241101:185543.429 adding performance counter disk/read[latest]:366
12857:20241101:185543.429 adding performance counter disk/read[latest,absolute]:366
12857:20241101:185543.429 adding performance counter disk/readFailed[latest]:367
12857:20241101:185543.429 adding performance counter disk/readFailed[latest,absolute]:367
12857:20241101:185543.429 adding performance counter disk/write[latest]:368
12857:20241101:185543.429 adding performance counter disk/write[latest,absolute]:368
12857:20241101:185543.429 adding performance counter disk/writeFailed[latest]:369
12857:20241101:185543.429 adding performance counter disk/writeFailed[latest,absolute]:369
12857:20241101:185543.429 adding performance counter disk/commands.success[latest]:370
12857:20241101:185543.429 adding performance counter disk/commands.success[latest,absolute]:370
12857:20241101:185543.429 adding performance counter disk/commands.failed[latest]:371
12857:20241101:185543.429 adding performance counter disk/commands.failed[latest,absolute]:371
12857:20241101:185543.429 adding performance counter disk/commands.queued[latest]:372
12857:20241101:185543.429 adding performance counter disk/commands.queued[latest,absolute]:372
12857:20241101:185543.429 adding performance counter disk/commands.active[latest]:373
12857:20241101:185543.429 adding performance counter disk/commands.active[latest,absolute]:373
12857:20241101:185543.430 adding performance counter disk/state[latest]:374
12857:20241101:185543.430 adding performance counter disk/state[latest,absolute]:374
12857:20241101:185543.430 adding performance counter disk/TM.abort[latest]:375
12857:20241101:185543.430 adding performance counter disk/TM.abort[latest,absolute]:375
12857:20241101:185543.430 adding performance counter disk/TM.abortRetry[latest]:376
12857:20241101:185543.430 adding performance counter disk/TM.abortRetry[latest,absolute]:376
12857:20241101:185543.430 adding performance counter disk/TM.abortFailed[latest]:377
12857:20241101:185543.430 adding performance counter disk/TM.abortFailed[latest,absolute]:377
12857:20241101:185543.430 adding performance counter disk/TM.virtReset[latest]:378
12857:20241101:185543.430 adding performance counter disk/TM.virtReset[latest,absolute]:378
12857:20241101:185543.430 adding performance counter disk/TM.virtResetRetry[latest]:379
12857:20241101:185543.430 adding performance counter disk/TM.virtResetRetry[latest,absolute]:379
12857:20241101:185543.430 adding performance counter disk/TM.virtResetFailed[latest]:380
12857:20241101:185543.430 adding performance counter disk/TM.virtResetFailed[latest,absolute]:380
12857:20241101:185543.430 adding performance counter disk/TM.lunReset[latest]:381
12857:20241101:185543.430 adding performance counter disk/TM.lunReset[latest,absolute]:381
12857:20241101:185543.430 adding performance counter disk/TM.lunResetRetry[latest]:382
12857:20241101:185543.430 adding performance counter disk/TM.lunResetRetry[latest,absolute]:382
12857:20241101:185543.430 adding performance counter disk/TM.lunResetFailed[latest]:383
12857:20241101:185543.430 adding performance counter disk/TM.lunResetFailed[latest,absolute]:383
12857:20241101:185543.430 adding performance counter disk/TM.deviceReset[latest]:384
12857:20241101:185543.430 adding performance counter disk/TM.deviceReset[latest,absolute]:384
12857:20241101:185543.430 adding performance counter disk/TM.deviceResetRetry[latest]:385
12857:20241101:185543.430 adding performance counter disk/TM.deviceResetRetry[latest,absolute]:385
12857:20241101:185543.430 adding performance counter disk/TM.deviceResetFailed[latest]:386
12857:20241101:185543.430 adding performance counter disk/TM.deviceResetFailed[latest,absolute]:386
12857:20241101:185543.430 adding performance counter disk/TM.busReset[latest]:387
12857:20241101:185543.430 adding performance counter disk/TM.busReset[latest,absolute]:387
12857:20241101:185543.430 adding performance counter disk/TM.busResetRetry[latest]:388
12857:20241101:185543.430 adding performance counter disk/TM.busResetRetry[latest,absolute]:388
12857:20241101:185543.430 adding performance counter disk/TM.busResetFailed[latest]:389
12857:20241101:185543.430 adding performance counter disk/TM.busResetFailed[latest,absolute]:389
12857:20241101:185543.430 adding performance counter disk/latency.qavg[latest]:390
12857:20241101:185543.430 adding performance counter disk/latency.qavg[latest,absolute]:390
12857:20241101:185543.430 adding performance counter disk/latency.davg[latest]:391
12857:20241101:185543.430 adding performance counter disk/latency.davg[latest,absolute]:391
12857:20241101:185543.430 adding performance counter disk/latency.kavg[latest]:392
12857:20241101:185543.430 adding performance counter disk/latency.kavg[latest,absolute]:392
12857:20241101:185543.430 adding performance counter disk/latency.gavg[latest]:393
12857:20241101:185543.430 adding performance counter disk/latency.gavg[latest,absolute]:393
12857:20241101:185543.431 adding performance counter storageAdapter/outstandingIOs[latest]:394
12857:20241101:185543.431 adding performance counter storageAdapter/outstandingIOs[latest,absolute]:394
12857:20241101:185543.431 adding performance counter storageAdapter/queued[latest]:395
12857:20241101:185543.431 adding performance counter storageAdapter/queued[latest,absolute]:395
12857:20241101:185543.431 adding performance counter storageAdapter/queueDepth[latest]:396
12857:20241101:185543.431 adding performance counter storageAdapter/queueDepth[latest,absolute]:396
12857:20241101:185543.431 adding performance counter cpu/partnerBusyTime[average]:397
12857:20241101:185543.431 adding performance counter cpu/partnerBusyTime[average,rate]:397
12857:20241101:185543.431 adding performance counter cpu/utilization[average]:398
12857:20241101:185543.431 adding performance counter cpu/utilization[average,rate]:398
12857:20241101:185543.431 adding performance counter cpu/corecount.provisioned[latest]:399
12857:20241101:185543.431 adding performance counter cpu/corecount.provisioned[latest,absolute]:399
12857:20241101:185543.431 adding performance counter cpu/cache.l3.occupancy[average]:400
12857:20241101:185543.431 adding performance counter cpu/cache.l3.occupancy[average,absolute]:400
12857:20241101:185543.431 adding performance counter cpu/corecount.usage[latest]:401
12857:20241101:185543.431 adding performance counter cpu/corecount.usage[latest,absolute]:401
12857:20241101:185543.431 adding performance counter cpu/load.avg1min[latest]:402
12857:20241101:185543.431 adding performance counter cpu/load.avg1min[latest,absolute]:402
12857:20241101:185543.431 adding performance counter cpu/load.avg5min[latest]:403
12857:20241101:185543.431 adding performance counter cpu/load.avg5min[latest,absolute]:403
12857:20241101:185543.431 adding performance counter cpu/load.avg15min[latest]:404
12857:20241101:185543.431 adding performance counter cpu/load.avg15min[latest,absolute]:404
12857:20241101:185543.431 adding performance counter mem/capacity.provisioned[latest]:405
12857:20241101:185543.431 adding performance counter mem/capacity.provisioned[latest,absolute]:405
12857:20241101:185543.431 adding performance counter mem/reservedCapacityPct[latest]:406
12857:20241101:185543.431 adding performance counter mem/reservedCapacityPct[latest,absolute]:406
12857:20241101:185543.431 adding performance counter mem/overcommit.avg1min[latest]:407
12857:20241101:185543.431 adding performance counter mem/overcommit.avg1min[latest,absolute]:407
12857:20241101:185543.431 adding performance counter mem/overcommit.avg5min[latest]:408
12857:20241101:185543.431 adding performance counter mem/overcommit.avg5min[latest,absolute]:408
12857:20241101:185543.431 adding performance counter mem/overcommit.avg15min[latest]:409
12857:20241101:185543.431 adding performance counter mem/overcommit.avg15min[latest,absolute]:409
12857:20241101:185543.431 adding performance counter mem/physical.total[latest]:410
12857:20241101:185543.431 adding performance counter mem/physical.total[latest,absolute]:410
12857:20241101:185543.431 adding performance counter mem/physical.user[latest]:411
12857:20241101:185543.431 adding performance counter mem/physical.user[latest,absolute]:411
12857:20241101:185543.431 adding performance counter mem/physical.free[latest]:412
12857:20241101:185543.431 adding performance counter mem/physical.free[latest,absolute]:412
12857:20241101:185543.431 adding performance counter mem/kernel.managed[latest]:413
12857:20241101:185543.432 adding performance counter mem/kernel.managed[latest,absolute]:413
12857:20241101:185543.432 adding performance counter mem/kernel.minfree[latest]:414
12857:20241101:185543.432 adding performance counter mem/kernel.minfree[latest,absolute]:414
12857:20241101:185543.432 adding performance counter mem/kernel.unreserved[latest]:415
12857:20241101:185543.432 adding performance counter mem/kernel.unreserved[latest,absolute]:415
12857:20241101:185543.432 adding performance counter mem/pshare.shared[latest]:416
12857:20241101:185543.432 adding performance counter mem/pshare.shared[latest,absolute]:416
12857:20241101:185543.432 adding performance counter mem/pshare.common[latest]:417
12857:20241101:185543.432 adding performance counter mem/pshare.common[latest,absolute]:417
12857:20241101:185543.432 adding performance counter mem/pshare.sharedSave[latest]:418
12857:20241101:185543.432 adding performance counter mem/pshare.sharedSave[latest,absolute]:418
12857:20241101:185543.432 adding performance counter mem/swap.current[latest]:419
12857:20241101:185543.432 adding performance counter mem/swap.current[latest,absolute]:419
12857:20241101:185543.432 adding performance counter mem/swap.target[latest]:420
12857:20241101:185543.432 adding performance counter mem/swap.target[latest,absolute]:420
12857:20241101:185543.432 adding performance counter mem/swap.readrate[average]:421
12857:20241101:185543.432 adding performance counter mem/swap.readrate[average,rate]:421
12857:20241101:185543.432 adding performance counter mem/swap.writerate[average]:422
12857:20241101:185543.432 adding performance counter mem/swap.writerate[average,rate]:422
12857:20241101:185543.432 adding performance counter mem/zip.zipped[latest]:423
12857:20241101:185543.432 adding performance counter mem/zip.zipped[latest,absolute]:423
12857:20241101:185543.432 adding performance counter mem/zip.saved[latest]:424
12857:20241101:185543.432 adding performance counter mem/zip.saved[latest,absolute]:424
12857:20241101:185543.432 adding performance counter mem/memctl.current[latest]:425
12857:20241101:185543.432 adding performance counter mem/memctl.current[latest,absolute]:425
12857:20241101:185543.432 adding performance counter mem/memctl.target[latest]:426
12857:20241101:185543.432 adding performance counter mem/memctl.target[latest,absolute]:426
12857:20241101:185543.432 adding performance counter mem/memctl.max[latest]:427
12857:20241101:185543.432 adding performance counter mem/memctl.max[latest,absolute]:427
12857:20241101:185543.432 adding performance counter mem/health.reservationState[latest]:428
12857:20241101:185543.432 adding performance counter mem/health.reservationState[latest,absolute]:428
12857:20241101:185543.432 adding performance counter mem/capacity.overhead[average]:429
12857:20241101:185543.432 adding performance counter mem/capacity.overhead[average,absolute]:429
12857:20241101:185543.432 adding performance counter mem/capacity.overheadResv[average]:430
12857:20241101:185543.432 adding performance counter mem/capacity.overheadResv[average,absolute]:430
12857:20241101:185543.432 adding performance counter mem/capacity.consumed[latest]:431
12857:20241101:185543.432 adding performance counter mem/capacity.consumed[latest,absolute]:431
12857:20241101:185543.432 adding performance counter mem/capacity.active[latest]:432
12857:20241101:185543.432 adding performance counter mem/capacity.active[latest,absolute]:432
12857:20241101:185543.432 adding performance counter power/capacity.usageCpu[average]:433
12857:20241101:185543.433 adding performance counter power/capacity.usageCpu[average,absolute]:433
12857:20241101:185543.433 adding performance counter power/capacity.usageMem[average]:434
12857:20241101:185543.433 adding performance counter power/capacity.usageMem[average,absolute]:434
12857:20241101:185543.433 adding performance counter power/capacity.usageOther[average]:435
12857:20241101:185543.433 adding performance counter power/capacity.usageOther[average,absolute]:435
12857:20241101:185543.433 adding performance counter vmotion/vmkernel.downtime[latest]:436
12857:20241101:185543.433 adding performance counter vmotion/vmkernel.downtime[latest,absolute]:436
12857:20241101:185543.433 adding performance counter vmotion/downtime[latest]:437
12857:20241101:185543.433 adding performance counter vmotion/downtime[latest,absolute]:437
12857:20241101:185543.433 adding performance counter vmotion/precopy.time[latest]:438
12857:20241101:185543.433 adding performance counter vmotion/precopy.time[latest,absolute]:438
12857:20241101:185543.433 adding performance counter vmotion/rtt[latest]:439
12857:20241101:185543.433 adding performance counter vmotion/rtt[latest,absolute]:439
12857:20241101:185543.433 adding performance counter vmotion/dst.migration.time[latest]:440
12857:20241101:185543.433 adding performance counter vmotion/dst.migration.time[latest,absolute]:440
12857:20241101:185543.433 adding performance counter vmotion/mem.sizemb[latest]:441
12857:20241101:185543.433 adding performance counter vmotion/mem.sizemb[latest,absolute]:441
12857:20241101:185543.433 adding performance counter hbr/vms[latest]:442
12857:20241101:185543.433 adding performance counter hbr/vms[latest,absolute]:442
12857:20241101:185543.433 adding performance counter net/throughput.hbr.inbound[average]:443
12857:20241101:185543.433 adding performance counter net/throughput.hbr.inbound[average,rate]:443
12857:20241101:185543.433 adding performance counter net/throughput.hbr.outbound[average]:444
12857:20241101:185543.433 adding performance counter net/throughput.hbr.outbound[average,rate]:444
12857:20241101:185543.433 adding performance counter virtualDisk/hbr.readLatencyMS[latest]:445
12857:20241101:185543.433 adding performance counter virtualDisk/hbr.readLatencyMS[latest,absolute]:445
12857:20241101:185543.433 adding performance counter virtualDisk/hbr.stallLatencyMS[latest]:446
12857:20241101:185543.433 adding performance counter virtualDisk/hbr.stallLatencyMS[latest,absolute]:446
12857:20241101:185543.433 adding performance counter net/latency.hbr.outbound[latest]:447
12857:20241101:185543.433 adding performance counter net/latency.hbr.outbound[latest,absolute]:447
12857:20241101:185543.433 adding performance counter lwd/numSnapshots[latest]:448
12857:20241101:185543.433 adding performance counter lwd/numSnapshots[latest,absolute]:448
12857:20241101:185543.433 adding performance counter nfs/apdState[latest]:449
12857:20241101:185543.433 adding performance counter nfs/apdState[latest,absolute]:449
12857:20241101:185543.433 adding performance counter nfs/readIssueTime[latest]:450
12857:20241101:185543.433 adding performance counter nfs/readIssueTime[latest,absolute]:450
12857:20241101:185543.433 adding performance counter nfs/writeIssueTime[latest]:451
12857:20241101:185543.433 adding performance counter nfs/writeIssueTime[latest,absolute]:451
12857:20241101:185543.434 adding performance counter nfs/totalReads[latest]:452
12857:20241101:185543.434 adding performance counter nfs/totalReads[latest,absolute]:452
12857:20241101:185543.434 adding performance counter nfs/readsFailed[latest]:453
12857:20241101:185543.434 adding performance counter nfs/readsFailed[latest,absolute]:453
12857:20241101:185543.434 adding performance counter nfs/totalWrites[latest]:454
12857:20241101:185543.434 adding performance counter nfs/totalWrites[latest,absolute]:454
12857:20241101:185543.434 adding performance counter nfs/writesFailed[latest]:455
12857:20241101:185543.434 adding performance counter nfs/writesFailed[latest,absolute]:455
12857:20241101:185543.434 adding performance counter nfs/readTime[latest]:456
12857:20241101:185543.434 adding performance counter nfs/readTime[latest,absolute]:456
12857:20241101:185543.434 adding performance counter nfs/writeTime[latest]:457
12857:20241101:185543.434 adding performance counter nfs/writeTime[latest,absolute]:457
12857:20241101:185543.434 adding performance counter nfs/ioRequestsQueued[latest]:458
12857:20241101:185543.434 adding performance counter nfs/ioRequestsQueued[latest,absolute]:458
12857:20241101:185543.434 adding performance counter nfs/totalCreate[latest]:459
12857:20241101:185543.434 adding performance counter nfs/totalCreate[latest,absolute]:459
12857:20241101:185543.434 adding performance counter nfs/createFailed[latest]:460
12857:20241101:185543.434 adding performance counter nfs/createFailed[latest,absolute]:460
12857:20241101:185543.434 adding performance counter nfs/socketBufferFull[latest]:461
12857:20241101:185543.434 adding performance counter nfs/socketBufferFull[latest,absolute]:461
12857:20241101:185543.434 adding performance counter datastore/vmfs.totalTxn[latest]:462
12857:20241101:185543.434 adding performance counter datastore/vmfs.totalTxn[latest,absolute]:462
12857:20241101:185543.434 adding performance counter datastore/vmfs.cancelledTxn[latest]:463
12857:20241101:185543.434 adding performance counter datastore/vmfs.cancelledTxn[latest,absolute]:463
12857:20241101:185543.434 adding performance counter datastore/vmfs.apdState[latest]:464
12857:20241101:185543.434 adding performance counter datastore/vmfs.apdState[latest,absolute]:464
12857:20241101:185543.434 adding performance counter datastore/vmfs.apdCount[latest]:465
12857:20241101:185543.434 adding performance counter datastore/vmfs.apdCount[latest,absolute]:465
12857:20241101:185543.434 adding performance counter vvol/pe.isaccessible[latest]:466
12857:20241101:185543.434 adding performance counter vvol/pe.isaccessible[latest,absolute]:466
12857:20241101:185543.434 adding performance counter vvol/pe.reads.done[latest]:467
12857:20241101:185543.434 adding performance counter vvol/pe.reads.done[latest,absolute]:467
12857:20241101:185543.434 adding performance counter vvol/pe.writes.done[latest]:468
12857:20241101:185543.434 adding performance counter vvol/pe.writes.done[latest,absolute]:468
12857:20241101:185543.434 adding performance counter vvol/pe.total.done[latest]:469
12857:20241101:185543.434 adding performance counter vvol/pe.total.done[latest,absolute]:469
12857:20241101:185543.434 adding performance counter vvol/pe.reads.sent[latest]:470
12857:20241101:185543.434 adding performance counter vvol/pe.reads.sent[latest,absolute]:470
12857:20241101:185543.435 adding performance counter vvol/pe.writes.sent[latest]:471
12857:20241101:185543.435 adding performance counter vvol/pe.writes.sent[latest,absolute]:471
12857:20241101:185543.435 adding performance counter vvol/pe.total.sent[latest]:472
12857:20241101:185543.435 adding performance counter vvol/pe.total.sent[latest,absolute]:472
12857:20241101:185543.435 adding performance counter vvol/pe.readsissued.failed[latest]:473
12857:20241101:185543.435 adding performance counter vvol/pe.readsissued.failed[latest,absolute]:473
12857:20241101:185543.435 adding performance counter vvol/pe.writesissued.failed[latest]:474
12857:20241101:185543.435 adding performance counter vvol/pe.writesissued.failed[latest,absolute]:474
12857:20241101:185543.435 adding performance counter vvol/pe.totalissued.failed[latest]:475
12857:20241101:185543.435 adding performance counter vvol/pe.totalissued.failed[latest,absolute]:475
12857:20241101:185543.435 adding performance counter vvol/pe.reads.failed[latest]:476
12857:20241101:185543.435 adding performance counter vvol/pe.reads.failed[latest,absolute]:476
12857:20241101:185543.435 adding performance counter vvol/pe.writes.failed[latest]:477
12857:20241101:185543.435 adding performance counter vvol/pe.writes.failed[latest,absolute]:477
12857:20241101:185543.435 adding performance counter vvol/pe.total.failed[latest]:478
12857:20241101:185543.435 adding performance counter vvol/pe.total.failed[latest,absolute]:478
12857:20241101:185543.435 adding performance counter vvol/pe.read.latency[latest]:479
12857:20241101:185543.435 adding performance counter vvol/pe.read.latency[latest,absolute]:479
12857:20241101:185543.435 adding performance counter vvol/pe.write.latency[latest]:480
12857:20241101:185543.435 adding performance counter vvol/pe.write.latency[latest,absolute]:480
12857:20241101:185543.435 adding performance counter vvol/pe.issue.latency[latest]:481
12857:20241101:185543.435 adding performance counter vvol/pe.issue.latency[latest,absolute]:481
12857:20241101:185543.435 adding performance counter vvol/pe.total.latency[latest]:482
12857:20241101:185543.435 adding performance counter vvol/pe.total.latency[latest,absolute]:482
12857:20241101:185543.435 adding performance counter vvol/pe.cancel.sent[latest]:483
12857:20241101:185543.435 adding performance counter vvol/pe.cancel.sent[latest,absolute]:483
12857:20241101:185543.435 adding performance counter vvol/pe.cancel.failed[latest]:484
12857:20241101:185543.435 adding performance counter vvol/pe.cancel.failed[latest,absolute]:484
12857:20241101:185543.435 adding performance counter vvol/pe.deviceresets.sent[latest]:485
12857:20241101:185543.435 adding performance counter vvol/pe.deviceresets.sent[latest,absolute]:485
12857:20241101:185543.435 adding performance counter vvol/pe.deviceresets.failed[latest]:486
12857:20241101:185543.435 adding performance counter vvol/pe.deviceresets.failed[latest,absolute]:486
12857:20241101:185543.435 adding performance counter vvol/pe.resets.sent[latest]:487
12857:20241101:185543.435 adding performance counter vvol/pe.resets.sent[latest,absolute]:487
12857:20241101:185543.435 adding performance counter vvol/pe.resets.failed[latest]:488
12857:20241101:185543.435 adding performance counter vvol/pe.resets.failed[latest,absolute]:488
12857:20241101:185543.435 adding performance counter vvol/pe.unmaps.sent[latest]:489
12857:20241101:185543.435 adding performance counter vvol/pe.unmaps.sent[latest,absolute]:489
12857:20241101:185543.435 adding performance counter vvol/pe.unmaps.failed[latest]:490
12857:20241101:185543.435 adding performance counter vvol/pe.unmaps.failed[latest,absolute]:490
12857:20241101:185543.436 adding performance counter vvol/container.reads.done[latest]:491
12857:20241101:185543.436 adding performance counter vvol/container.reads.done[latest,absolute]:491
12857:20241101:185543.436 adding performance counter vvol/container.writes.done[latest]:492
12857:20241101:185543.436 adding performance counter vvol/container.writes.done[latest,absolute]:492
12857:20241101:185543.436 adding performance counter vvol/container.total.done[latest]:493
12857:20241101:185543.436 adding performance counter vvol/container.total.done[latest,absolute]:493
12857:20241101:185543.436 adding performance counter vvol/container.reads.sent[latest]:494
12857:20241101:185543.436 adding performance counter vvol/container.reads.sent[latest,absolute]:494
12857:20241101:185543.436 adding performance counter vvol/container.writes.sent[latest]:495
12857:20241101:185543.436 adding performance counter vvol/container.writes.sent[latest,absolute]:495
12857:20241101:185543.436 adding performance counter vvol/container.total.sent[latest]:496
12857:20241101:185543.436 adding performance counter vvol/container.total.sent[latest,absolute]:496
12857:20241101:185543.436 adding performance counter vvol/container.readsissued.failed[latest]:497
12857:20241101:185543.436 adding performance counter vvol/container.readsissued.failed[latest,absolute]:497
12857:20241101:185543.436 adding performance counter vvol/container.writesissued.failed[latest]:498
12857:20241101:185543.436 adding performance counter vvol/container.writesissued.failed[latest,absolute]:498
12857:20241101:185543.436 adding performance counter vvol/container.totalissued.failed[latest]:499
12857:20241101:185543.436 adding performance counter vvol/container.totalissued.failed[latest,absolute]:499
12857:20241101:185543.436 adding performance counter vvol/container.reads.failed[latest]:500
12857:20241101:185543.436 adding performance counter vvol/container.reads.failed[latest,absolute]:500
12857:20241101:185543.436 adding performance counter vvol/container.writes.failed[latest]:501
12857:20241101:185543.436 adding performance counter vvol/container.writes.failed[latest,absolute]:501
12857:20241101:185543.436 adding performance counter vvol/container.total.failed[latest]:502
12857:20241101:185543.436 adding performance counter vvol/container.total.failed[latest,absolute]:502
12857:20241101:185543.436 adding performance counter vvol/container.read.latency[latest]:503
12857:20241101:185543.436 adding performance counter vvol/container.read.latency[latest,absolute]:503
12857:20241101:185543.436 adding performance counter vvol/container.write.latency[latest]:504
12857:20241101:185543.436 adding performance counter vvol/container.write.latency[latest,absolute]:504
12857:20241101:185543.436 adding performance counter vvol/container.issue.latency[latest]:505
12857:20241101:185543.436 adding performance counter vvol/container.issue.latency[latest,absolute]:505
12857:20241101:185543.436 adding performance counter vvol/container.total.latency[latest]:506
12857:20241101:185543.436 adding performance counter vvol/container.total.latency[latest,absolute]:506
12857:20241101:185543.436 adding performance counter vvol/device.reads.done[latest]:507
12857:20241101:185543.436 adding performance counter vvol/device.reads.done[latest,absolute]:507
12857:20241101:185543.436 adding performance counter vvol/device.writes.done[latest]:508
12857:20241101:185543.436 adding performance counter vvol/device.writes.done[latest,absolute]:508
12857:20241101:185543.436 adding performance counter vvol/device.total.done[latest]:509
12857:20241101:185543.436 adding performance counter vvol/device.total.done[latest,absolute]:509
12857:20241101:185543.436 adding performance counter vvol/device.reads.sent[latest]:510
12857:20241101:185543.436 adding performance counter vvol/device.reads.sent[latest,absolute]:510
12857:20241101:185543.437 adding performance counter vvol/device.writes.sent[latest]:511
12857:20241101:185543.437 adding performance counter vvol/device.writes.sent[latest,absolute]:511
12857:20241101:185543.437 adding performance counter vvol/device.total.sent[latest]:512
12857:20241101:185543.437 adding performance counter vvol/device.total.sent[latest,absolute]:512
12857:20241101:185543.437 adding performance counter vvol/device.readsissued.failed[latest]:513
12857:20241101:185543.437 adding performance counter vvol/device.readsissued.failed[latest,absolute]:513
12857:20241101:185543.437 adding performance counter vvol/device.writesissued.failed[latest]:514
12857:20241101:185543.437 adding performance counter vvol/device.writesissued.failed[latest,absolute]:514
12857:20241101:185543.437 adding performance counter vvol/device.totalissued.failed[latest]:515
12857:20241101:185543.437 adding performance counter vvol/device.totalissued.failed[latest,absolute]:515
12857:20241101:185543.437 adding performance counter vvol/device.reads.failed[latest]:516
12857:20241101:185543.437 adding performance counter vvol/device.reads.failed[latest,absolute]:516
12857:20241101:185543.437 adding performance counter vvol/device.writes.failed[latest]:517
12857:20241101:185543.437 adding performance counter vvol/device.writes.failed[latest,absolute]:517
12857:20241101:185543.437 adding performance counter vvol/device.total.failed[latest]:518
12857:20241101:185543.437 adding performance counter vvol/device.total.failed[latest,absolute]:518
12857:20241101:185543.437 adding performance counter vvol/device.read.latency[latest]:519
12857:20241101:185543.437 adding performance counter vvol/device.read.latency[latest,absolute]:519
12857:20241101:185543.437 adding performance counter vvol/device.write.latency[latest]:520
12857:20241101:185543.437 adding performance counter vvol/device.write.latency[latest,absolute]:520
12857:20241101:185543.437 adding performance counter vvol/device.issue.latency[latest]:521
12857:20241101:185543.437 adding performance counter vvol/device.issue.latency[latest,absolute]:521
12857:20241101:185543.437 adding performance counter vvol/device.total.latency[latest]:522
12857:20241101:185543.437 adding performance counter vvol/device.total.latency[latest,absolute]:522
12857:20241101:185543.437 adding performance counter vvol/device.cancel.sent[latest]:523
12857:20241101:185543.437 adding performance counter vvol/device.cancel.sent[latest,absolute]:523
12857:20241101:185543.437 adding performance counter vvol/device.cancel.failed[latest]:524
12857:20241101:185543.437 adding performance counter vvol/device.cancel.failed[latest,absolute]:524
12857:20241101:185543.437 adding performance counter vvol/device.deviceresets.sent[latest]:525
12857:20241101:185543.437 adding performance counter vvol/device.deviceresets.sent[latest,absolute]:525
12857:20241101:185543.437 adding performance counter vvol/device.deviceresets.failed[latest]:526
12857:20241101:185543.437 adding performance counter vvol/device.deviceresets.failed[latest,absolute]:526
12857:20241101:185543.437 adding performance counter vvol/device.resets.sent[latest]:527
12857:20241101:185543.437 adding performance counter vvol/device.resets.sent[latest,absolute]:527
12857:20241101:185543.437 adding performance counter vvol/device.resets.failed[latest]:528
12857:20241101:185543.437 adding performance counter vvol/device.resets.failed[latest,absolute]:528
12857:20241101:185543.437 adding performance counter vvol/device.unmaps.sent[latest]:529
12857:20241101:185543.437 adding performance counter vvol/device.unmaps.sent[latest,absolute]:529
12857:20241101:185543.437 adding performance counter vvol/device.unmaps.failed[latest]:530
12857:20241101:185543.437 adding performance counter vvol/device.unmaps.failed[latest,absolute]:530
12857:20241101:185543.438 adding performance counter cpu/swapwait[summation]:531
12857:20241101:185543.438 adding performance counter cpu/swapwait[summation,delta]:531
12857:20241101:185543.438 adding performance counter cpu/utilization[none]:532
12857:20241101:185543.438 adding performance counter cpu/utilization[none,rate]:532
12857:20241101:185543.438 adding performance counter cpu/utilization[maximum]:533
12857:20241101:185543.438 adding performance counter cpu/utilization[maximum,rate]:533
12857:20241101:185543.438 adding performance counter cpu/utilization[minimum]:534
12857:20241101:185543.438 adding performance counter cpu/utilization[minimum,rate]:534
12857:20241101:185543.438 adding performance counter cpu/coreUtilization[none]:535
12857:20241101:185543.438 adding performance counter cpu/coreUtilization[none,rate]:535
12857:20241101:185543.438 adding performance counter cpu/coreUtilization[average]:536
12857:20241101:185543.438 adding performance counter cpu/coreUtilization[average,rate]:536
12857:20241101:185543.438 adding performance counter cpu/coreUtilization[maximum]:537
12857:20241101:185543.438 adding performance counter cpu/coreUtilization[maximum,rate]:537
12857:20241101:185543.438 adding performance counter cpu/coreUtilization[minimum]:538
12857:20241101:185543.438 adding performance counter cpu/coreUtilization[minimum,rate]:538
12857:20241101:185543.438 adding performance counter cpu/totalCapacity[average]:539
12857:20241101:185543.438 adding performance counter cpu/totalCapacity[average,absolute]:539
12857:20241101:185543.438 adding performance counter cpu/latency[average]:540
12857:20241101:185543.438 adding performance counter cpu/latency[average,rate]:540
12857:20241101:185543.438 adding performance counter cpu/entitlement[latest]:541
12857:20241101:185543.438 adding performance counter cpu/entitlement[latest,absolute]:541
12857:20241101:185543.438 adding performance counter cpu/demand[average]:542
12857:20241101:185543.438 adding performance counter cpu/demand[average,absolute]:542
12857:20241101:185543.438 adding performance counter cpu/costop[summation]:543
12857:20241101:185543.438 adding performance counter cpu/costop[summation,delta]:543
12857:20241101:185543.438 adding performance counter cpu/maxlimited[summation]:544
12857:20241101:185543.438 adding performance counter cpu/maxlimited[summation,delta]:544
12857:20241101:185543.438 adding performance counter cpu/overlap[summation]:545
12857:20241101:185543.438 adding performance counter cpu/overlap[summation,delta]:545
12857:20241101:185543.438 adding performance counter cpu/run[summation]:546
12857:20241101:185543.438 adding performance counter cpu/run[summation,delta]:546
12857:20241101:185543.438 adding performance counter cpu/demandEntitlementRatio[latest]:547
12857:20241101:185543.438 adding performance counter cpu/demandEntitlementRatio[latest,absolute]:547
12857:20241101:185543.438 adding performance counter cpu/readiness[average]:548
12857:20241101:185543.438 adding performance counter cpu/readiness[average,rate]:548
12857:20241101:185543.438 adding performance counter cpu/usage.vcpus[average]:549
12857:20241101:185543.438 adding performance counter cpu/usage.vcpus[average,rate]:549
12857:20241101:185543.438 adding performance counter mem/swapin[none]:550
12857:20241101:185543.438 adding performance counter mem/swapin[none,absolute]:550
12857:20241101:185543.439 adding performance counter mem/swapin[average]:551
12857:20241101:185543.439 adding performance counter mem/swapin[average,absolute]:551
12857:20241101:185543.439 adding performance counter mem/swapin[maximum]:552
12857:20241101:185543.439 adding performance counter mem/swapin[maximum,absolute]:552
12857:20241101:185543.439 adding performance counter mem/swapin[minimum]:553
12857:20241101:185543.439 adding performance counter mem/swapin[minimum,absolute]:553
12857:20241101:185543.439 adding performance counter mem/swapout[none]:554
12857:20241101:185543.439 adding performance counter mem/swapout[none,absolute]:554
12857:20241101:185543.439 adding performance counter mem/swapout[average]:555
12857:20241101:185543.439 adding performance counter mem/swapout[average,absolute]:555
12857:20241101:185543.439 adding performance counter mem/swapout[maximum]:556
12857:20241101:185543.439 adding performance counter mem/swapout[maximum,absolute]:556
12857:20241101:185543.439 adding performance counter mem/swapout[minimum]:557
12857:20241101:185543.439 adding performance counter mem/swapout[minimum,absolute]:557
12857:20241101:185543.439 adding performance counter mem/sysUsage[none]:558
12857:20241101:185543.439 adding performance counter mem/sysUsage[none,absolute]:558
12857:20241101:185543.439 adding performance counter mem/sysUsage[average]:559
12857:20241101:185543.439 adding performance counter mem/sysUsage[average,absolute]:559
12857:20241101:185543.439 adding performance counter mem/sysUsage[maximum]:560
12857:20241101:185543.439 adding performance counter mem/sysUsage[maximum,absolute]:560
12857:20241101:185543.439 adding performance counter mem/sysUsage[minimum]:561
12857:20241101:185543.439 adding performance counter mem/sysUsage[minimum,absolute]:561
12857:20241101:185543.439 adding performance counter mem/activewrite[average]:562
12857:20241101:185543.439 adding performance counter mem/activewrite[average,absolute]:562
12857:20241101:185543.439 adding performance counter mem/overheadMax[average]:563
12857:20241101:185543.439 adding performance counter mem/overheadMax[average,absolute]:563
12857:20241101:185543.439 adding performance counter mem/totalCapacity[average]:564
12857:20241101:185543.439 adding performance counter mem/totalCapacity[average,absolute]:564
12857:20241101:185543.439 adding performance counter mem/zipped[latest]:565
12857:20241101:185543.439 adding performance counter mem/zipped[latest,absolute]:565
12857:20241101:185543.439 adding performance counter mem/zipSaved[latest]:566
12857:20241101:185543.439 adding performance counter mem/zipSaved[latest,absolute]:566
12857:20241101:185543.439 adding performance counter mem/latency[average]:567
12857:20241101:185543.439 adding performance counter mem/latency[average,absolute]:567
12857:20241101:185543.439 adding performance counter mem/entitlement[average]:568
12857:20241101:185543.439 adding performance counter mem/entitlement[average,absolute]:568
12857:20241101:185543.439 adding performance counter mem/lowfreethreshold[average]:569
12857:20241101:185543.439 adding performance counter mem/lowfreethreshold[average,absolute]:569
12857:20241101:185543.439 adding performance counter mem/llSwapUsed[none]:570
12857:20241101:185543.439 adding performance counter mem/llSwapUsed[none,absolute]:570
12857:20241101:185543.440 adding performance counter mem/llSwapInRate[average]:571
12857:20241101:185543.440 adding performance counter mem/llSwapInRate[average,rate]:571
12857:20241101:185543.440 adding performance counter mem/llSwapOutRate[average]:572
12857:20241101:185543.440 adding performance counter mem/llSwapOutRate[average,rate]:572
12857:20241101:185543.440 adding performance counter mem/overheadTouched[average]:573
12857:20241101:185543.440 adding performance counter mem/overheadTouched[average,absolute]:573
12857:20241101:185543.440 adding performance counter mem/llSwapUsed[average]:574
12857:20241101:185543.440 adding performance counter mem/llSwapUsed[average,absolute]:574
12857:20241101:185543.440 adding performance counter mem/llSwapUsed[maximum]:575
12857:20241101:185543.440 adding performance counter mem/llSwapUsed[maximum,absolute]:575
12857:20241101:185543.440 adding performance counter mem/llSwapUsed[minimum]:576
12857:20241101:185543.440 adding performance counter mem/llSwapUsed[minimum,absolute]:576
12857:20241101:185543.440 adding performance counter mem/llSwapIn[none]:577
12857:20241101:185543.440 adding performance counter mem/llSwapIn[none,absolute]:577
12857:20241101:185543.440 adding performance counter mem/llSwapIn[average]:578
12857:20241101:185543.440 adding performance counter mem/llSwapIn[average,absolute]:578
12857:20241101:185543.440 adding performance counter mem/llSwapIn[maximum]:579
12857:20241101:185543.440 adding performance counter mem/llSwapIn[maximum,absolute]:579
12857:20241101:185543.440 adding performance counter mem/llSwapIn[minimum]:580
12857:20241101:185543.440 adding performance counter mem/llSwapIn[minimum,absolute]:580
12857:20241101:185543.440 adding performance counter mem/llSwapOut[none]:581
12857:20241101:185543.440 adding performance counter mem/llSwapOut[none,absolute]:581
12857:20241101:185543.440 adding performance counter mem/llSwapOut[average]:582
12857:20241101:185543.440 adding performance counter mem/llSwapOut[average,absolute]:582
12857:20241101:185543.440 adding performance counter mem/llSwapOut[maximum]:583
12857:20241101:185543.440 adding performance counter mem/llSwapOut[maximum,absolute]:583
12857:20241101:185543.440 adding performance counter mem/llSwapOut[minimum]:584
12857:20241101:185543.440 adding performance counter mem/llSwapOut[minimum,absolute]:584
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.size[latest]:585
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.size[latest,absolute]:585
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.sizeMax[latest]:586
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.sizeMax[latest,absolute]:586
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.workingSet[latest]:587
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.workingSet[latest,absolute]:587
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.workingSetMax[latest]:588
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.workingSetMax[latest,absolute]:588
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.overhead[latest]:589
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.overhead[latest,absolute]:589
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.capMissRatio[latest]:590
12857:20241101:185543.440 adding performance counter mem/vmfs.pbc.capMissRatio[latest,absolute]:590
12857:20241101:185543.441 adding performance counter disk/commands[summation]:591
12857:20241101:185543.441 adding performance counter disk/commands[summation,delta]:591
12857:20241101:185543.441 adding performance counter disk/deviceReadLatency[average]:592
12857:20241101:185543.441 adding performance counter disk/deviceReadLatency[average,absolute]:592
12857:20241101:185543.441 adding performance counter disk/kernelReadLatency[average]:593
12857:20241101:185543.441 adding performance counter disk/kernelReadLatency[average,absolute]:593
12857:20241101:185543.441 adding performance counter disk/totalReadLatency[average]:594
12857:20241101:185543.441 adding performance counter disk/totalReadLatency[average,absolute]:594
12857:20241101:185543.441 adding performance counter disk/queueReadLatency[average]:595
12857:20241101:185543.441 adding performance counter disk/queueReadLatency[average,absolute]:595
12857:20241101:185543.441 adding performance counter disk/deviceWriteLatency[average]:596
12857:20241101:185543.441 adding performance counter disk/deviceWriteLatency[average,absolute]:596
12857:20241101:185543.441 adding performance counter disk/kernelWriteLatency[average]:597
12857:20241101:185543.441 adding performance counter disk/kernelWriteLatency[average,absolute]:597
12857:20241101:185543.441 adding performance counter disk/totalWriteLatency[average]:598
12857:20241101:185543.441 adding performance counter disk/totalWriteLatency[average,absolute]:598
12857:20241101:185543.441 adding performance counter disk/queueWriteLatency[average]:599
12857:20241101:185543.441 adding performance counter disk/queueWriteLatency[average,absolute]:599
12857:20241101:185543.441 adding performance counter disk/deviceLatency[average]:600
12857:20241101:185543.441 adding performance counter disk/deviceLatency[average,absolute]:600
12857:20241101:185543.441 adding performance counter disk/kernelLatency[average]:601
12857:20241101:185543.441 adding performance counter disk/kernelLatency[average,absolute]:601
12857:20241101:185543.441 adding performance counter disk/queueLatency[average]:602
12857:20241101:185543.441 adding performance counter disk/queueLatency[average,absolute]:602
12857:20241101:185543.441 adding performance counter disk/maxQueueDepth[average]:603
12857:20241101:185543.441 adding performance counter disk/maxQueueDepth[average,absolute]:603
12857:20241101:185543.441 adding performance counter disk/commandsAveraged[average]:604
12857:20241101:185543.441 adding performance counter disk/commandsAveraged[average,rate]:604
12857:20241101:185543.441 adding performance counter net/droppedRx[summation]:605
12857:20241101:185543.441 adding performance counter net/droppedRx[summation,delta]:605
12857:20241101:185543.441 adding performance counter net/droppedTx[summation]:606
12857:20241101:185543.441 adding performance counter net/droppedTx[summation,delta]:606
12857:20241101:185543.441 adding performance counter net/bytesRx[average]:607
12857:20241101:185543.441 adding performance counter net/bytesRx[average,rate]:607
12857:20241101:185543.441 adding performance counter net/bytesTx[average]:608
12857:20241101:185543.441 adding performance counter net/bytesTx[average,rate]:608
12857:20241101:185543.442 adding performance counter net/broadcastRx[summation]:609
12857:20241101:185543.442 adding performance counter net/broadcastRx[summation,delta]:609
12857:20241101:185543.442 adding performance counter net/broadcastTx[summation]:610
12857:20241101:185543.442 adding performance counter net/broadcastTx[summation,delta]:610
12857:20241101:185543.442 adding performance counter net/multicastRx[summation]:611
12857:20241101:185543.442 adding performance counter net/multicastRx[summation,delta]:611
12857:20241101:185543.442 adding performance counter net/multicastTx[summation]:612
12857:20241101:185543.442 adding performance counter net/multicastTx[summation,delta]:612
12857:20241101:185543.442 adding performance counter net/errorsRx[summation]:613
12857:20241101:185543.442 adding performance counter net/errorsRx[summation,delta]:613
12857:20241101:185543.442 adding performance counter net/errorsTx[summation]:614
12857:20241101:185543.442 adding performance counter net/errorsTx[summation,delta]:614
12857:20241101:185543.442 adding performance counter net/unknownProtos[summation]:615
12857:20241101:185543.442 adding performance counter net/unknownProtos[summation,delta]:615
12857:20241101:185543.442 adding performance counter net/pnicBytesRx[average]:616
12857:20241101:185543.442 adding performance counter net/pnicBytesRx[average,rate]:616
12857:20241101:185543.442 adding performance counter net/pnicBytesTx[average]:617
12857:20241101:185543.442 adding performance counter net/pnicBytesTx[average,rate]:617
12857:20241101:185543.442 adding performance counter sys/heartbeat[latest]:618
12857:20241101:185543.442 adding performance counter sys/heartbeat[latest,absolute]:618
12857:20241101:185543.442 adding performance counter sys/diskUsage[latest]:619
12857:20241101:185543.442 adding performance counter sys/diskUsage[latest,absolute]:619
12857:20241101:185543.442 adding performance counter sys/resourceCpuUsage[none]:620
12857:20241101:185543.442 adding performance counter sys/resourceCpuUsage[none,rate]:620
12857:20241101:185543.442 adding performance counter sys/resourceCpuUsage[average]:621
12857:20241101:185543.442 adding performance counter sys/resourceCpuUsage[average,rate]:621
12857:20241101:185543.442 adding performance counter sys/resourceCpuUsage[maximum]:622
12857:20241101:185543.442 adding performance counter sys/resourceCpuUsage[maximum,rate]:622
12857:20241101:185543.442 adding performance counter sys/resourceCpuUsage[minimum]:623
12857:20241101:185543.442 adding performance counter sys/resourceCpuUsage[minimum,rate]:623
12857:20241101:185543.442 adding performance counter sys/resourceMemTouched[latest]:624
12857:20241101:185543.442 adding performance counter sys/resourceMemTouched[latest,absolute]:624
12857:20241101:185543.442 adding performance counter sys/resourceMemMapped[latest]:625
12857:20241101:185543.442 adding performance counter sys/resourceMemMapped[latest,absolute]:625
12857:20241101:185543.442 adding performance counter sys/resourceMemShared[latest]:626
12857:20241101:185543.442 adding performance counter sys/resourceMemShared[latest,absolute]:626
12857:20241101:185543.442 adding performance counter sys/resourceMemSwapped[latest]:627
12857:20241101:185543.442 adding performance counter sys/resourceMemSwapped[latest,absolute]:627
12857:20241101:185543.442 adding performance counter sys/resourceMemOverhead[latest]:628
12857:20241101:185543.442 adding performance counter sys/resourceMemOverhead[latest,absolute]:628
12857:20241101:185543.443 adding performance counter sys/resourceMemCow[latest]:629
12857:20241101:185543.443 adding performance counter sys/resourceMemCow[latest,absolute]:629
12857:20241101:185543.443 adding performance counter sys/resourceMemZero[latest]:630
12857:20241101:185543.443 adding performance counter sys/resourceMemZero[latest,absolute]:630
12857:20241101:185543.443 adding performance counter sys/resourceCpuRun1[latest]:631
12857:20241101:185543.443 adding performance counter sys/resourceCpuRun1[latest,absolute]:631
12857:20241101:185543.443 adding performance counter sys/resourceCpuAct1[latest]:632
12857:20241101:185543.443 adding performance counter sys/resourceCpuAct1[latest,absolute]:632
12857:20241101:185543.443 adding performance counter sys/resourceCpuMaxLimited1[latest]:633
12857:20241101:185543.443 adding performance counter sys/resourceCpuMaxLimited1[latest,absolute]:633
12857:20241101:185543.443 adding performance counter sys/resourceCpuRun5[latest]:634
12857:20241101:185543.443 adding performance counter sys/resourceCpuRun5[latest,absolute]:634
12857:20241101:185543.443 adding performance counter sys/resourceCpuAct5[latest]:635
12857:20241101:185543.443 adding performance counter sys/resourceCpuAct5[latest,absolute]:635
12857:20241101:185543.443 adding performance counter sys/resourceCpuMaxLimited5[latest]:636
12857:20241101:185543.443 adding performance counter sys/resourceCpuMaxLimited5[latest,absolute]:636
12857:20241101:185543.443 adding performance counter sys/resourceCpuAllocMin[latest]:637
12857:20241101:185543.443 adding performance counter sys/resourceCpuAllocMin[latest,absolute]:637
12857:20241101:185543.443 adding performance counter sys/resourceCpuAllocMax[latest]:638
12857:20241101:185543.443 adding performance counter sys/resourceCpuAllocMax[latest,absolute]:638
12857:20241101:185543.443 adding performance counter sys/resourceCpuAllocShares[latest]:639
12857:20241101:185543.443 adding performance counter sys/resourceCpuAllocShares[latest,absolute]:639
12857:20241101:185543.443 adding performance counter sys/resourceMemAllocMin[latest]:640
12857:20241101:185543.443 adding performance counter sys/resourceMemAllocMin[latest,absolute]:640
12857:20241101:185543.443 adding performance counter sys/resourceMemAllocMax[latest]:641
12857:20241101:185543.443 adding performance counter sys/resourceMemAllocMax[latest,absolute]:641
12857:20241101:185543.443 adding performance counter sys/resourceMemAllocShares[latest]:642
12857:20241101:185543.443 adding performance counter sys/resourceMemAllocShares[latest,absolute]:642
12857:20241101:185543.443 adding performance counter sys/osUptime[latest]:643
12857:20241101:185543.443 adding performance counter sys/osUptime[latest,absolute]:643
12857:20241101:185543.443 adding performance counter sys/resourceMemConsumed[latest]:644
12857:20241101:185543.443 adding performance counter sys/resourceMemConsumed[latest,absolute]:644
12857:20241101:185543.443 adding performance counter sys/resourceFdUsage[latest]:645
12857:20241101:185543.443 adding performance counter sys/resourceFdUsage[latest,absolute]:645
12857:20241101:185543.443 adding performance counter rescpu/actpk1[latest]:646
12857:20241101:185543.443 adding performance counter rescpu/actpk1[latest,absolute]:646
12857:20241101:185543.443 adding performance counter rescpu/runav1[latest]:647
12857:20241101:185543.443 adding performance counter rescpu/runav1[latest,absolute]:647
12857:20241101:185543.443 adding performance counter rescpu/actav5[latest]:648
12857:20241101:185543.443 adding performance counter rescpu/actav5[latest,absolute]:648
12857:20241101:185543.444 adding performance counter rescpu/actpk5[latest]:649
12857:20241101:185543.444 adding performance counter rescpu/actpk5[latest,absolute]:649
12857:20241101:185543.444 adding performance counter rescpu/runav5[latest]:650
12857:20241101:185543.444 adding performance counter rescpu/runav5[latest,absolute]:650
12857:20241101:185543.444 adding performance counter rescpu/actav15[latest]:651
12857:20241101:185543.444 adding performance counter rescpu/actav15[latest,absolute]:651
12857:20241101:185543.444 adding performance counter rescpu/actpk15[latest]:652
12857:20241101:185543.444 adding performance counter rescpu/actpk15[latest,absolute]:652
12857:20241101:185543.444 adding performance counter rescpu/runav15[latest]:653
12857:20241101:185543.444 adding performance counter rescpu/runav15[latest,absolute]:653
12857:20241101:185543.444 adding performance counter rescpu/runpk1[latest]:654
12857:20241101:185543.444 adding performance counter rescpu/runpk1[latest,absolute]:654
12857:20241101:185543.444 adding performance counter rescpu/maxLimited1[latest]:655
12857:20241101:185543.444 adding performance counter rescpu/maxLimited1[latest,absolute]:655
12857:20241101:185543.444 adding performance counter rescpu/runpk5[latest]:656
12857:20241101:185543.444 adding performance counter rescpu/runpk5[latest,absolute]:656
12857:20241101:185543.444 adding performance counter rescpu/maxLimited5[latest]:657
12857:20241101:185543.444 adding performance counter rescpu/maxLimited5[latest,absolute]:657
12857:20241101:185543.444 adding performance counter rescpu/runpk15[latest]:658
12857:20241101:185543.444 adding performance counter rescpu/runpk15[latest,absolute]:658
12857:20241101:185543.444 adding performance counter rescpu/maxLimited15[latest]:659
12857:20241101:185543.444 adding performance counter rescpu/maxLimited15[latest,absolute]:659
12857:20241101:185543.444 adding performance counter rescpu/sampleCount[latest]:660
12857:20241101:185543.444 adding performance counter rescpu/sampleCount[latest,absolute]:660
12857:20241101:185543.444 adding performance counter rescpu/samplePeriod[latest]:661
12857:20241101:185543.444 adding performance counter rescpu/samplePeriod[latest,absolute]:661
12857:20241101:185543.444 adding performance counter managementAgent/memUsed[average]:662
12857:20241101:185543.444 adding performance counter managementAgent/memUsed[average,absolute]:662
12857:20241101:185543.444 adding performance counter managementAgent/swapUsed[average]:663
12857:20241101:185543.444 adding performance counter managementAgent/swapUsed[average,absolute]:663
12857:20241101:185543.444 adding performance counter managementAgent/cpuUsage[average]:664
12857:20241101:185543.444 adding performance counter managementAgent/cpuUsage[average,rate]:664
12857:20241101:185543.444 adding performance counter storagePath/commandsAveraged[average]:665
12857:20241101:185543.444 adding performance counter storagePath/commandsAveraged[average,rate]:665
12857:20241101:185543.444 adding performance counter storagePath/numberReadAveraged[average]:666
12857:20241101:185543.444 adding performance counter storagePath/numberReadAveraged[average,rate]:666
12857:20241101:185543.444 adding performance counter storagePath/numberWriteAveraged[average]:667
12857:20241101:185543.444 adding performance counter storagePath/numberWriteAveraged[average,rate]:667
12857:20241101:185543.444 adding performance counter storagePath/read[average]:668
12857:20241101:185543.444 adding performance counter storagePath/read[average,rate]:668
12857:20241101:185543.445 adding performance counter storagePath/write[average]:669
12857:20241101:185543.445 adding performance counter storagePath/write[average,rate]:669
12857:20241101:185543.445 adding performance counter storagePath/totalReadLatency[average]:670
12857:20241101:185543.445 adding performance counter storagePath/totalReadLatency[average,absolute]:670
12857:20241101:185543.445 adding performance counter storagePath/totalWriteLatency[average]:671
12857:20241101:185543.445 adding performance counter storagePath/totalWriteLatency[average,absolute]:671
12857:20241101:185543.445 adding performance counter virtualDisk/readIOSize[latest]:672
12857:20241101:185543.445 adding performance counter virtualDisk/readIOSize[latest,absolute]:672
12857:20241101:185543.445 adding performance counter virtualDisk/writeIOSize[latest]:673
12857:20241101:185543.445 adding performance counter virtualDisk/writeIOSize[latest,absolute]:673
12857:20241101:185543.445 adding performance counter virtualDisk/smallSeeks[latest]:674
12857:20241101:185543.445 adding performance counter virtualDisk/smallSeeks[latest,absolute]:674
12857:20241101:185543.445 adding performance counter virtualDisk/mediumSeeks[latest]:675
12857:20241101:185543.445 adding performance counter virtualDisk/mediumSeeks[latest,absolute]:675
12857:20241101:185543.445 adding performance counter virtualDisk/largeSeeks[latest]:676
12857:20241101:185543.445 adding performance counter virtualDisk/largeSeeks[latest,absolute]:676
12857:20241101:185543.445 adding performance counter virtualDisk/readLatencyUS[latest]:677
12857:20241101:185543.445 adding performance counter virtualDisk/readLatencyUS[latest,absolute]:677
12857:20241101:185543.445 adding performance counter virtualDisk/writeLatencyUS[latest]:678
12857:20241101:185543.445 adding performance counter virtualDisk/writeLatencyUS[latest,absolute]:678
12857:20241101:185543.445 adding performance counter datastore/datastoreMaxQueueDepth[latest]:679
12857:20241101:185543.445 adding performance counter datastore/datastoreMaxQueueDepth[latest,absolute]:679
12857:20241101:185543.445 adding performance counter datastore/unmapSize[summation]:680
12857:20241101:185543.445 adding performance counter datastore/unmapSize[summation,delta]:680
12857:20241101:185543.445 adding performance counter datastore/unmapIOs[summation]:681
12857:20241101:185543.445 adding performance counter datastore/unmapIOs[summation,delta]:681
12857:20241101:185543.445 adding performance counter hbr/hbrNumVms[average]:682
12857:20241101:185543.445 adding performance counter hbr/hbrNumVms[average,absolute]:682
12857:20241101:185543.445 adding performance counter hbr/hbrNetRx[average]:683
12857:20241101:185543.445 adding performance counter hbr/hbrNetRx[average,rate]:683
12857:20241101:185543.445 adding performance counter hbr/hbrNetTx[average]:684
12857:20241101:185543.445 adding performance counter hbr/hbrNetTx[average,rate]:684
12857:20241101:185543.445 adding performance counter hbr/hbrNetLatency[average]:685
12857:20241101:185543.445 adding performance counter hbr/hbrNetLatency[average,absolute]:685
12857:20241101:185543.445 adding performance counter hbr/hbrDiskReadLatency[average]:686
12857:20241101:185543.445 adding performance counter hbr/hbrDiskReadLatency[average,absolute]:686
12857:20241101:185543.445 adding performance counter hbr/hbrDiskStallLatency[average]:687
12857:20241101:185543.445 adding performance counter hbr/hbrDiskStallLatency[average,absolute]:687
12857:20241101:185543.445 adding performance counter hbr/hbrDiskTransferSuccess[average]:688
12857:20241101:185543.445 adding performance counter hbr/hbrDiskTransferSuccess[average,absolute]:688
12857:20241101:185543.446 adding performance counter hbr/hbrDiskTransferIdle[average]:689
12857:20241101:185543.446 adding performance counter hbr/hbrDiskTransferIdle[average,absolute]:689
12857:20241101:185543.446 adding performance counter hbr/hbrDiskTransferBytes[average]:690
12857:20241101:185543.446 adding performance counter hbr/hbrDiskTransferBytes[average,absolute]:690
12857:20241101:185543.446 adding performance counter vflashModule/numActiveVMDKs[latest]:691
12857:20241101:185543.446 adding performance counter vflashModule/numActiveVMDKs[latest,absolute]:691
12857:20241101:185543.446 adding performance counter vsanDomObj/readIops[average]:692
12857:20241101:185543.446 adding performance counter vsanDomObj/readIops[average,rate]:692
12857:20241101:185543.446 adding performance counter vsanDomObj/readThroughput[average]:693
12857:20241101:185543.446 adding performance counter vsanDomObj/readThroughput[average,rate]:693
12857:20241101:185543.446 adding performance counter vsanDomObj/readAvgLatency[average]:694
12857:20241101:185543.446 adding performance counter vsanDomObj/readAvgLatency[average,absolute]:694
12857:20241101:185543.446 adding performance counter vsanDomObj/readMaxLatency[latest]:695
12857:20241101:185543.446 adding performance counter vsanDomObj/readMaxLatency[latest,absolute]:695
12857:20241101:185543.446 adding performance counter vsanDomObj/readCacheHitRate[latest]:696
12857:20241101:185543.446 adding performance counter vsanDomObj/readCacheHitRate[latest,absolute]:696
12857:20241101:185543.446 adding performance counter vsanDomObj/readCongestion[average]:697
12857:20241101:185543.446 adding performance counter vsanDomObj/readCongestion[average,rate]:697
12857:20241101:185543.446 adding performance counter vsanDomObj/writeIops[average]:698
12857:20241101:185543.446 adding performance counter vsanDomObj/writeIops[average,rate]:698
12857:20241101:185543.446 adding performance counter vsanDomObj/writeThroughput[average]:699
12857:20241101:185543.446 adding performance counter vsanDomObj/writeThroughput[average,rate]:699
12857:20241101:185543.446 adding performance counter vsanDomObj/writeAvgLatency[average]:700
12857:20241101:185543.446 adding performance counter vsanDomObj/writeAvgLatency[average,absolute]:700
12857:20241101:185543.446 adding performance counter vsanDomObj/writeMaxLatency[latest]:701
12857:20241101:185543.446 adding performance counter vsanDomObj/writeMaxLatency[latest,absolute]:701
12857:20241101:185543.446 adding performance counter vsanDomObj/writeCongestion[average]:702
12857:20241101:185543.446 adding performance counter vsanDomObj/writeCongestion[average,rate]:702
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteIops[average]:703
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteIops[average,rate]:703
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteThroughput[average]:704
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteThroughput[average,rate]:704
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average]:705
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average,absolute]:705
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest]:706
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest,absolute]:706
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteCongestion[average]:707
12857:20241101:185543.446 adding performance counter vsanDomObj/recoveryWriteCongestion[average,rate]:707
12857:20241101:185543.446 adding performance counter gpu/utilization[none]:708
12857:20241101:185543.446 adding performance counter gpu/utilization[none,absolute]:708
12857:20241101:185543.447 adding performance counter gpu/utilization[maximum]:709
12857:20241101:185543.447 adding performance counter gpu/utilization[maximum,absolute]:709
12857:20241101:185543.447 adding performance counter gpu/utilization[minimum]:710
12857:20241101:185543.447 adding performance counter gpu/utilization[minimum,absolute]:710
12857:20241101:185543.447 adding performance counter gpu/mem.used[none]:711
12857:20241101:185543.447 adding performance counter gpu/mem.used[none,absolute]:711
12857:20241101:185543.447 adding performance counter gpu/mem.used[maximum]:712
12857:20241101:185543.447 adding performance counter gpu/mem.used[maximum,absolute]:712
12857:20241101:185543.447 adding performance counter gpu/mem.used[minimum]:713
12857:20241101:185543.447 adding performance counter gpu/mem.used[minimum,absolute]:713
12857:20241101:185543.447 adding performance counter gpu/mem.usage[none]:714
12857:20241101:185543.447 adding performance counter gpu/mem.usage[none,absolute]:714
12857:20241101:185543.447 adding performance counter gpu/mem.usage[average]:715
12857:20241101:185543.447 adding performance counter gpu/mem.usage[average,absolute]:715
12857:20241101:185543.447 adding performance counter gpu/mem.usage[maximum]:716
12857:20241101:185543.447 adding performance counter gpu/mem.usage[maximum,absolute]:716
12857:20241101:185543.447 adding performance counter gpu/mem.usage[minimum]:717
12857:20241101:185543.447 adding performance counter gpu/mem.usage[minimum,absolute]:717
12857:20241101:185543.447 Unknown performance counter 718 type of unitInfo:gigaBytes
12857:20241101:185543.447 adding performance counter gpu/mem.used.gb[latest]:718
12857:20241101:185543.447 Unknown performance counter 718 type of unitInfo:gigaBytes
12857:20241101:185543.447 adding performance counter gpu/mem.used.gb[latest,absolute]:718
12857:20241101:185543.447 Unknown performance counter 719 type of unitInfo:gigaBytes
12857:20241101:185543.447 adding performance counter gpu/mem.reserved.gb[latest]:719
12857:20241101:185543.447 Unknown performance counter 719 type of unitInfo:gigaBytes
12857:20241101:185543.447 adding performance counter gpu/mem.reserved.gb[latest,absolute]:719
12857:20241101:185543.447 Unknown performance counter 720 type of unitInfo:gigaBytes
12857:20241101:185543.447 adding performance counter gpu/mem.total.gb[latest]:720
12857:20241101:185543.447 Unknown performance counter 720 type of unitInfo:gigaBytes
12857:20241101:185543.447 adding performance counter gpu/mem.total.gb[latest,absolute]:720
12857:20241101:185543.447 adding performance counter pmem/available.reservation[latest]:721
12857:20241101:185543.447 adding performance counter pmem/available.reservation[latest,absolute]:721
12857:20241101:185543.447 adding performance counter pmem/drsmanaged.reservation[latest]:722
12857:20241101:185543.447 adding performance counter pmem/drsmanaged.reservation[latest,absolute]:722
12857:20241101:185543.447 adding performance counter vmx/numVCPUs[latest]:723
12857:20241101:185543.447 adding performance counter vmx/numVCPUs[latest,absolute]:723
12857:20241101:185543.447 adding performance counter vmx/vcpusMhzMin[latest]:724
12857:20241101:185543.447 adding performance counter vmx/vcpusMhzMin[latest,absolute]:724
12857:20241101:185543.447 adding performance counter vmx/vcpusMhzMax[latest]:725
12857:20241101:185543.447 adding performance counter vmx/vcpusMhzMax[latest,absolute]:725
12857:20241101:185543.447 adding performance counter vmx/vcpusMhzMean[latest]:726
12857:20241101:185543.447 adding performance counter vmx/vcpusMhzMean[latest,absolute]:726
12857:20241101:185543.447 adding performance counter vmx/cpuSpeed[latest]:727
12857:20241101:185543.447 adding performance counter vmx/cpuSpeed[latest,absolute]:727
12857:20241101:185543.447 adding performance counter vmx/overheadMemSizeMin[latest]:728
12857:20241101:185543.447 adding performance counter vmx/overheadMemSizeMin[latest,absolute]:728
12857:20241101:185543.448 adding performance counter vmx/overheadMemSizeMax[latest]:729
12857:20241101:185543.448 adding performance counter vmx/overheadMemSizeMax[latest,absolute]:729
12857:20241101:185543.448 adding performance counter vmx/vigor.opsTotal[latest]:730
12857:20241101:185543.448 adding performance counter vmx/vigor.opsTotal[latest,absolute]:730
12857:20241101:185543.448 adding performance counter vmx/poll.itersPerS[latest]:731
12857:20241101:185543.448 adding performance counter vmx/poll.itersPerS[latest,absolute]:731
12857:20241101:185543.448 adding performance counter vmx/userRpc.opsPerS[latest]:732
12857:20241101:185543.448 adding performance counter vmx/userRpc.opsPerS[latest,absolute]:732
12857:20241101:185543.448 End of vmware_service_get_perf_counters():SUCCEED
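[editor's note - not part of the log] The block above ends with vmware_service_get_perf_counters():SUCCEED, i.e. the collector has registered every counter path in the form group/key[rollup,statstype]:counterId, and has also emitted "Unknown performance counter ... type of unitInfo:gigaBytes" warnings for counters 718-720. The following is a minimal sketch, under the assumption that the trace is stored in a file named zabbix_server.log and that the line formats are exactly as shown above (the file name, regular expressions, and variable names are illustrative, not part of Zabbix):

    import re
    from collections import defaultdict

    # Line formats assumed from the trace above:
    #   <pid>:<date>:<time> adding performance counter <group>/<key>[<rollup>,<stattype>]:<id>
    #   <pid>:<date>:<time> Unknown performance counter <id> type of unitInfo:<unit>
    ADD_RE = re.compile(
        r"adding performance counter (?P<path>\S+?)\[(?P<rollup>[^\]]+)\]:(?P<id>\d+)")
    UNKNOWN_RE = re.compile(
        r"Unknown performance counter (?P<id>\d+) type of unitInfo:(?P<unit>\S+)")

    counters = defaultdict(list)   # counter id -> registered path variants
    unknown_units = {}             # counter id -> unit name the collector did not recognize

    with open("zabbix_server.log") as log:        # hypothetical file name
        for line in log:
            m = ADD_RE.search(line)
            if m:
                counters[int(m.group("id"))].append(
                    f'{m.group("path")}[{m.group("rollup")}]')
                continue
            m = UNKNOWN_RE.search(line)
            if m:
                unknown_units[int(m.group("id"))] = m.group("unit")

    print(f"registered counter ids: {len(counters)}")
    for cid, unit in sorted(unknown_units.items()):
        print(f"counter {cid}: unrecognized unit '{unit}' -> {counters.get(cid)}")

Run against this trace, the sketch would report counters 718-720 (the gpu/mem.*.gb paths) as the ones whose unit the collector flagged, which is usually all that is needed when reviewing such a dump.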
12857:20241101:185543.449 In vmware_service_get_evt_severity()
12857:20241101:185543.508 vmware_service_get_evt_severity() SOAP response:
EventManagerdescriptionInformationinfoWarningwarningErrorerrorUseruserExtendedEventImport certificate successinfoImport certificate succeeded.Import certificate succeeded.Import certificate succeeded.Import certificate succeeded.ad.event.ImportCertEvent|Import certificate succeeded. <EventLongDescription id="ad.event.ImportCertEvent"> <description> Import certificate succeeded </description> </EventLongDescription> ExtendedEventImport certificate failureerrorImport certificate failed.Import certificate failed.Import certificate failed.Import certificate failed.ad.event.ImportCertFailedEvent|Import certificate failed. <EventLongDescription id="ad.event.ImportCertFailedEvent"> <description> Import certificate failed </description> </EventLongDescription> ExtendedEventJoin domain successinfoJoin domain succeeded.Join domain succeeded.Join domain succeeded.Join domain succeeded.ad.event.JoinDomainEvent|Join domain succeeded. <EventLongDescription id="ad.event.JoinDomainEvent"> <description> Join domain succeeded </description> </EventLongDescription> ExtendedEventJoin domain failureerrorJoin domain failed.Join domain failed.Join domain failed.Join domain failed.ad.event.JoinDomainFailedEvent|Join domain failed. <EventLongDescription id="ad.event.JoinDomainFailedEvent"> <description> Join domain failed </description> </EventLongDescription> ExtendedEventLeave domain successinfoLeave domain succeeded.Leave domain succeeded.Leave domain succeeded.Leave domain succeeded.ad.event.LeaveDomainEvent|Leave domain succeeded. <EventLongDescription id="ad.event.LeaveDomainEvent"> <description> Leave domain succeeded </description> </EventLongDescription> ExtendedEventLeave domain failureerrorLeave domain failed.Leave domain failed.Leave domain failed.Leave domain failed.ad.event.LeaveDomainFailedEvent|Leave domain failed. 
<EventLongDescription id="ad.event.LeaveDomainFailedEvent"> <description> Leave domain failed </description> </EventLongDescription> ExtendedEventBackup job failederrorcom.vmware.applmgmt.backup.job.failed.event|Backup job failed <EventLongDescription id="com.vmware.applmgmt.backup.job.failed.event"> <description> Backup job failed </description> <cause> <description> Backup job failed </description> <action> Check backup server connectivity and available space </action> </cause> </EventLongDescription> ExtendedEventBackup job finished successfullyinfocom.vmware.applmgmt.backup.job.finished.event|Backup job finished successfully <EventLongDescription id="com.vmware.applmgmt.backup.job.finished.event"> <description> Backup job finished successfully </description> <cause> <description> Backup job finished successfully </description> </cause> </EventLongDescription> ExtendedEventGlobal Permission created for user with role and propagation.infocom.vmware.cis.CreateGlobalPermission|Global Permission created for user {User} with role {Role} and propagation {Propagation}.EventExPermission created for user on item with role.infocom.vmware.cis.CreatePermission|Permission created for user {User} on item {DocType} with role {Role}.EventExGlobal Permission removed for user.infocom.vmware.cis.RemoveGlobalPermission|Global Permission removed for user {User}.EventExPermission removed for user on iteminfocom.vmware.cis.RemovePermission|Permission removed for user {User} on item {DocType}EventExUser attached tag(s) to object(s)com.vmware.cis.tagging.attach|User {User} attached tag(s) {Tag} to object(s) {Object}EventExUser detached tag(s) from object(s)com.vmware.cis.tagging.detach|User {User} detached tag(s) {Tag} from object(s) {Object}ExtendedEventHttpNfc service disabled - missing configurationerrorHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationcom.vmware.configuration.httpnfc.missing|HttpNfc service is disabled because of missing configuration. Please check vpxa configuration file and correct the error and reconnect host. <EventLongDescription id="com.vmware.configuration.httpnfc.missing"> <description> The HttpNfc service is disabled because of missing configuration section in vpxa.cfg. Please check vpxa configuration file and correct the error and reconnect host. </description> <cause> <description>The vpxa configuration file requires a configuration section for HttpNfc</description> <action>Please check vpxa configuration file and correct the error and reconnect host.</action> </cause> </EventLongDescription> EventExAdded Licenseinfocom.vmware.license.AddLicenseEvent|License {licenseKey} added to VirtualCenterEventExAssigned Licenseinfocom.vmware.license.AssignLicenseEvent|License {licenseKey} assigned to asset {entityName} with id {entityId}EventExDownload License Informationwarningcom.vmware.license.DLFDownloadFailedEvent|Failed to download license information from the host {hostname} due to {errorReason.@enum.com.vmware.license.DLFDownloadFailedEvent.DLFDownloadFailedReason}EventExDefault License Keys Updatedinfocom.vmware.license.DefaultLicenseKeysUpdatedEvent|Default License Keys for asset {entityName} have been updatedEventExHost License Edition Not Allowedwarningcom.vmware.license.HostLicenseEditionNotAllowedEvent|The host is licensed with {edition}. 
The license edition of vCenter Server does not support {edition}.ExtendedEventHost license or evaluation period has expiredwarningcom.vmware.license.HostLicenseExpiredEvent|Expired host license or evaluation period. <EventLongDescription id="com.vmware.license.HostLicenseExpiredEvent"> <description> Host license or evaluation period has expired. </description> <cause> <description>Expired host license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventHost time-limited license has expiredwarningcom.vmware.license.HostSubscriptionLicenseExpiredEvent|Expired host time-limited license. <EventLongDescription id="com.vmware.license.HostSubscriptionLicenseExpiredEvent"> <description> Host time-limited license has expired. </description> <cause> <description>Expired host time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> EventExLicense assignment faultsinfocom.vmware.license.LicenseAssignFailedEvent|License assignment on the host fails. Reasons: {errorMessage.@enum.com.vmware.license.LicenseAssignError}. <EventLongDescription id="com.vmware.license.LicenseAssignFailedEvent"> <description> The host license assignment succeeds on vCenter Server but can not be successfully pushed down to the host. Any license assignment to a host proceeds in two stages. In the first stage vCenter Server does preliminary checks on the license key, the license state of the host and determines if the requested assignment is valid. If so, it stores this assignment locally in its database. In the second stage, vCenter Server pushes the newly assigned license to the host. During the second stage the host might reject the assignment under certain circumstances. These circumstances usually result from a mismatch of the information available to vCenter Server and the host concerned. Any such discrepancies are notified to the user via this event. This event lists the reason because of which it was logged and also shows up as a configuration issue on the vSphere Client. </description> <cause> <description>License expiry information mismatch between vCenter Server and host</description> <action>If the system time on the machine running vCenter Server and host are not in sync then put them in sync</action> </cause> <cause> <description>The license key is a per Virtual Machine key and the number of powered on Virtual Machines is larger than the maximum limit of the key</description> <action>Use a different key with a larger capacity</action> </cause> </EventLongDescription> EventExLicense Capacity Exceededwarningcom.vmware.license.LicenseCapacityExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the license capacity ({capacity} {costUnitText})EventExLicense ExpirywarningYour host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires.com.vmware.license.LicenseExpiryEvent|Your host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires. <EventLongDescription id="com.vmware.license.LicenseExpiryEvent"> <description> If a host is assigned a temporary license (a license key with an expiry), this event is logged in order to provide users an advanced warning on the imminent expiry of the license key. The event logging starts 15 days prior to the expiry of the license key. 
This event also shows up on the host summary page as a configuration issue on the vSphere Client. </description> <cause> <description>License key is about to expire or has expired</description> <action>Assign a different license key</action> </cause> </EventLongDescription> EventExLicense User Threshold Exceededwarningcom.vmware.license.LicenseUserThresholdExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the user-defined threshold ({threshold} {costUnitText}) <EventLongDescription id="com.vmware.license.LicenseUserThresholdExceededEvent"> <description> Users can define thresholds to monitor overuse of the product license. This event is logged when the license usage threshold defined by the user for a product edition is exceeded. </description> <cause> <description> License usage of a product edition has exceeded the user-defined threshold </description> <action> Review license assignments and usage </action> </cause> </EventLongDescription> EventExRemoved Licenseinfocom.vmware.license.RemoveLicenseEvent|License {licenseKey} removed from VirtualCenterEventExUnassigned Licenseinfocom.vmware.license.UnassignLicenseEvent|License unassigned from asset {entityName} with id {entityId}ExtendedEventvCenter Server license or evaluation period has expiredwarningcom.vmware.license.VcLicenseExpiredEvent|Expired vCenter Server license or evaluation period. <EventLongDescription id="com.vmware.license.VcLicenseExpiredEvent"> <description> vCenter Server license or evaluation period has expired. </description> <cause> <description>Expired vCenter Server license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventvCenter Server time-limited license has expiredwarningcom.vmware.license.VcSubscriptionLicenseExpiredEvent|Expired vCenter Server time-limited license. <EventLongDescription id="com.vmware.license.VcSubscriptionLicenseExpiredEvent"> <description> vCenter Server time-limited license has expired. </description> <cause> <description>Expired vCenter Server time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventSome in-use features are not supported by current licensewarningcom.vmware.license.vsan.FeatureBeyondCapability|In-use vSAN features {feature} are not supported by current license.ExtendedEventHost flash capacity exceeds the licensed limit for vSANwarningcom.vmware.license.vsan.HostSsdOverUsageEvent|The capacity of the flash disks on the host exceeds the limit of the vSAN license. <EventLongDescription id="com.vmware.license.vsan.HostSsdOverUsageEvent"> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <cause> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> ExtendedEventvSAN license or evaluation period has expiredwarningcom.vmware.license.vsan.LicenseExpiryEvent|Expired vSAN license or evaluation period. <EventLongDescription id="com.vmware.license.vsan.LicenseExpiryEvent"> <description> Expired vSAN license or evaluation period. </description> <cause> <description> Expired vSAN license or evaluation period. </description> <action> Review cluster license assignments. 
</action> </cause> </EventLongDescription> ExtendedEventvSAN time-limited license has expiredwarningcom.vmware.license.vsan.SubscriptionLicenseExpiredEvent|Expired vSAN time-limited license. <EventLongDescription id="com.vmware.license.vsan.SubscriptionLicenseExpiredEvent"> <description> Expired vSAN time-limited license. </description> <cause> <description> Expired vSAN time-limited license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> EventExStorage policy associatedinfoAssociated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}com.vmware.pbm.profile.associate|Associated storage policy: {ProfileId} with entity: {EntityId}EventExStorage policy createdinfoStorage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.create|Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}EventExStorage policy deletedinfoDeleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}com.vmware.pbm.profile.delete|Deleted storage policy: {ProfileId}EventExStorage policy dissociatedinfoDissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}com.vmware.pbm.profile.dissociate|Dissociated storage policy: {ProfileId} from entity: {EntityId}EventExStorage policy updatedinfoStorage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.update|Storage policy updated for {ProfileId}. Policy name: {ProfileName}EventExStorage policy name updatedinfoStorage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}com.vmware.pbm.profile.updateName|Storage policy name updated for {ProfileId}. 
New name: {NewProfileName}EventExCertificate Manager event in SSOinfocom.vmware.sso.CertificateManager|Certificate Manager event by {userName} at {timestamp} : {description}EventExConfiguration Management event in SSOinfocom.vmware.sso.ConfigurationManagement|Configuration Management event by {userName} at {timestamp} : {description}EventExDomain Management event in SSOinfocom.vmware.sso.DomainManagement|Domain Management event by {userName} at {timestamp} : {description}EventExIdentity Source Management event in SSOinfocom.vmware.sso.IdentitySourceManagement|Identity Source Management event by {userName} at {timestamp} : {description}EventExIdentity Source LDAP Certificate is about to expireinfocom.vmware.sso.LDAPCertExpiry|Renew Identity Source LDAP Certificate: {description}EventExLockout Policy event in SSOinfocom.vmware.sso.LockoutPolicy|Lockout Policy event by {userName} at {timestamp} : {description}EventExFailed login attempt event in SSOerrorcom.vmware.sso.LoginFailure|Failed login {userName} from {userIp} at {timestamp} in SSOEventExSuccessful login attempt event in SSOinfocom.vmware.sso.LoginSuccess|Successful login {userName} from {userIp} at {timestamp} in SSOEventExLogout attempt event in SSOinfocom.vmware.sso.Logout|Logout event by {userName} from {userIp} at {timestamp} in SSOEventExPassword Policy event in SSOinfocom.vmware.sso.PasswordPolicy|Password Policy event by {userName} at {timestamp} : {description}EventExPrincipal Management event in SSOinfocom.vmware.sso.PrincipalManagement|Principal Management event by {userName} at {timestamp} : {description}EventExRole Management event in SSOinfocom.vmware.sso.RoleManagement|Role Management event by {userName} at {timestamp} : {description}EventExSTS Signing Certificates are about to expireinfocom.vmware.sso.STSCertExpiry|Renew STS Signing Certificates: {description}EventExSMTP Configuration event in SSOinfocom.vmware.sso.SmtpConfiguration|SMTP Configuration event by {userName} at {timestamp} : {description}EventExSystem Management event in SSOinfocom.vmware.sso.SystemManagement|System Management event by {userName} at {timestamp} : {description}EventExvCenter Identity event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentity|vCenter Identity event by {userName} at {timestamp} : {description}EventExvCenter Identity Providers event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentityProviders|vCenter Identity Providers event by {userName} at {timestamp} : {description}EventExvCenter Trusts event in Trustmanagementinfocom.vmware.trustmanagement.VcTrusts|vCenter Trusts event by {userName} at {timestamp} : {description}EventExIdentity Provider SSL Trust Certificate is about to expireinfocom.vmware.trustmanagement.WS1SSLCertExpiry|Renew Identity Provider SSL Trust Certificate: {description}EventExIdentity Provider Users and Groups token is about to expireinfocom.vmware.trustmanagement.WS1SyncTokenExpiry|Renew Identity Provider Users and Groups token: {description}EventExReports that a stage from autonomous cluster creation has failedwarningcom.vmware.vc.A8sCluster.CreateStageFailedEvent|Autonomous cluster creation stage: {stage} failed: {reason}EventExReports that a stage from autonomous cluster creation has completed successfullyinfocom.vmware.vc.A8sCluster.CreateStageSuccessEvent|Autonomous cluster creation stage: {stage} succeededEventExAutonomous cluster health is degraded.warningcom.vmware.vc.A8sCluster.HealthDegradedEvent|Autonomous cluster health is degraded. 
Reason: {reason}ExtendedEventAutonomous cluster is healthy.infocom.vmware.vc.A8sCluster.HealthHealthyEvent|Autonomous cluster is healthy.EventExAutonomous cluster is unhealthy.warningcom.vmware.vc.A8sCluster.HealthUnhealthyEvent|Autonomous cluster is unhealthy. Reason: {reason}ExtendedEventAuthz service is not running. Authorization data might not be synchronized.errorcom.vmware.vc.AuthzDataNotSynced|Authz service is not running. Authorization data might not be synchronized.ExtendedEventAuthz service is running. Authorization data is being synchronized.infocom.vmware.vc.AuthzDataSynced|Authz service is running. Authorization data is being synchronized.ExtendedEventEvent sequence ID reached its max value and was reset.infocom.vmware.vc.EventIdOverflow|Event sequence ID reached its max value and was reset.ExtendedEventcom.vmware.vc.FailedToApplyPermissionsEvent|ExtendedEventvSphere HA agent can reach all cluster management addressesinfoThe vSphere HA agent on the host {host.name} in cluster {computeResource.name} can reach all the cluster management addressesThe vSphere HA agent on the host {host.name} can reach all the cluster management addressesThe vSphere HA agent on this host can reach all the cluster management addressescom.vmware.vc.HA.AllHostAddrsPingable|The vSphere HA agent on the host {host.name} in cluster {computeResource.name} in {datacenter.name} can reach all the cluster management addresses <EventLongDescription id="com.vmware.vc.HA.AllHostAddrsPingable"> <description> The host is able to ping all of the vSphere HA management addresses of every other cluster host. </description> </EventLongDescription> ExtendedEventvSphere HA agent can reach all isolation addressesinfoAll vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name}All vSphere HA isolation addresses are reachable by this hostAll vSphere HA isolation addresses are reachable by hostcom.vmware.vc.HA.AllIsoAddrsPingable|All vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.AllIsoAddrsPingable"> <description> The host is able to ping all of the vSphere HA isolation addresses. </description> </EventLongDescription> ExtendedEventvSphere HA answered a lock-lost question on a virtual machinewarningvSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name}vSphere HA answered the lock-lost question on virtual machine {vm.name}vSphere HA answered the lock-lost question on this virtual machinecom.vmware.vc.HA.AnsweredVmLockLostQuestionEvent|vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmLockLostQuestionEvent"> <description> The virtual machine running on this host lost the exclusive lock of its files on disk. This will occur if another instance of this virtual machine is running on a different host. This situation can happen if a host loses access to both its storage and management networks but is not configured to shutdown its virtual machines on isolation. The virtual machines on this host will continue to run without access to their disks, while vSphere HA will start a new instance of the virtual machines on another host in the cluster. 
When the isolated host regains access to the storage network, it will try to reacquire the disk locks. This will fail since the disk locks are held by another host. The host will then issue a question on the virtual machine indicating that disk locks have been lost. vSphere HA will automatically answer this question to allow the virtual machine instance without the disk locks to power off. </description> </EventLongDescription> ExtendedEventvSphere HA answered a question from the host about terminating a virtual machinewarningvSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name}vSphere HA answered a question from host {host.name} about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating this virtual machinecom.vmware.vc.HA.AnsweredVmTerminatePDLEvent|vSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmTerminatePDLEvent"> <description> The virtual machine running on this host had a virtual disk which experienced permanent device loss. The host will issue a question if it is configured to terminate the VM automatically under such conditions. This event indicates that vSphere HA answered the question. After the VM is terminated, vSphere HA will make a best effort to restart it. </description> </EventLongDescription> ExtendedEventvSphere HA disabled the automatic VM Startup/Shutdown featureinfovSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on the host {host.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature. Automatic VM restarts will interfere with HA when reacting to a host failure.com.vmware.vc.HA.AutoStartDisabled|vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Automatic VM restarts will interfere with HA when reacting to a host failure. <EventLongDescription id="com.vmware.vc.HA.AutoStartDisabled"> <description> Virtual Machine Startup/Shutdown has been disabled by HA. A host which is contained in a vSphere HA cluster is not permitted to have automatic virtual machine startup and shutdown since it may conflict with HA's attempts to relocate the virtual machines if a host fails. 
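As a rough, hypothetical illustration of the behavior described above (none of the following comes from the vCenter catalog; pyVmomi, the placeholder vCenter address, credentials and cluster name are all assumptions), a read-only sketch that checks whether the automatic VM Startup/Shutdown feature is still enabled on the hosts of an HA cluster might look like this:

# Illustrative sketch only: report the automatic VM Startup/Shutdown setting per host.
# Assumes pyVmomi is installed; all names and credentials below are placeholders.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()  # lab use only; verify certificates in production
si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="password", sslContext=ctx)
try:
    content = si.RetrieveContent()
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.ClusterComputeResource], True)
    cluster = next(c for c in view.view if c.name == "Cluster01")
    view.Destroy()

    for host in cluster.host:
        defaults = host.configManager.autoStartManager.config.defaults
        enabled = bool(defaults and defaults.enabled)
        print(f"{host.name}: automatic VM Startup/Shutdown enabled = {enabled}")
finally:
    Disconnect(si)

On hosts in an HA-enabled cluster the setting is expected to report False, consistent with the AutoStartDisabled event above.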
</description> </EventLongDescription> ExtendedEventvSphere HA did not reset a VM which had files on inaccessible datastore(s)warningvSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on host {host.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on this host because the VM had files on inaccessible datastore(s)vSphere HA did not reset this VM because the VM had file(s) on inaccessible datastore(s)com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore|vSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the VM had files on inaccessible datastore(s) <EventLongDescription id=" com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore"> <description> This event is logged when vSphere HA did not reset a VM affected by an inaccessible datastore. It will attempt to reset the VM after storage failure is cleared. </description> <cause> <description> The VM is affected by an inaccessible datastore due to storage connectivity loss. Resetting such a VM might cause the VM to be powered off and not restarted by vSphere HA. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA cluster contains incompatible hosts.warningvSphere HA Cluster {computeResource.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.vSphere HA Cluster contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.com.vmware.vc.HA.ClusterContainsIncompatibleHosts|vSphere HA Cluster {computeResource.name} in {datacenter.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported. <EventLongDescription id="com.vmware.vc.HA.ClusterContainsIncompatibleHosts"> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions. </description> <cause> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions, which isn't fully supported. Failover of VMs from ESX/ESXi 3.5 hosts to newer hosts is not guaranteed. </description> <action> Place ESX/ESXi 3.5 hosts into a separate vSphere HA cluster from hosts with more recent ESX versions. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA completed a failover actioninfovSphere HA completed a virtual machine failover action in cluster {computeResource.name}vSphere HA completed a virtual machine failover actioncom.vmware.vc.HA.ClusterFailoverActionCompletedEvent|vSphere HA completed a virtual machine failover action in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA initiated a failover actionwarningvSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name}vSphere HA initiated a failover action on {pendingVms} virtual machinescom.vmware.vc.HA.ClusterFailoverActionInitiatedEvent|vSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA failover operation in progressWarningvSphere HA failover operation in progress in cluster {computeResource.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMsvSphere HA failover operation in progress: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMscom.vmware.vc.HA.ClusterFailoverInProgressEvent|vSphere HA failover operation in progress in cluster {computeResource.name} in datacenter {datacenter.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMs <EventLongDescription id="com.vmware.vc.HA.ClusterFailoverInProgressEvent"> <description> This event is logged when a vSphere HA failover operation is in progress for virtual machines in the cluster. It also reports the number of virtual machines that are being restarted. There are four different categories of such VMs. (1) VMs being placed: vSphere HA is in the process of trying to restart these VMs; (2) VMs awaiting retry: a previous restart attempt failed, and vSphere HA is waiting for a timeout to expire before trying again; (3) VMs requiring additional resources: insufficient resources are available to restart these VMs. vSphere HA will retry when more resources become available (such as when a host comes back online); (4) Inaccessible vSAN VMs: vSphere HA cannot restart these vSAN VMs because they are not accessible. It will retry when there is a change in accessibility. </description> <cause> <description> vSphere HA is attempting to restart failed virtual machines in the cluster. It might be that the virtual machine restart is pending and has not yet completed. </description> <action> vSphere HA will retry the failover on another host unless the maximum number of failover attempts has been reached. A subsequent retry may succeed in powering on the virtual machine so allow the vSphere HA failover operation to be declared a success or failure. </action> </cause> <cause> <description> This event might also be generated when a required resource in the cluster becomes temporarily unavailable due to network reconfiguration, hardware upgrade, software update, host overload, etc., which can cause vSphere HA to lose its network or storage heartbeats to certain hosts or virtual machines and mark them inaccessible. </description> <action> In many cases, this may be a temporary condition. 
If the cluster soon stabilizes to its normal condition vSphere HA will detect the host and virtual machines to be live and discard any failover attempts. In such cases, this event may be treated as a soft alarm caused by such changes. </action> </cause> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It can also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to fail over the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or trying powering on the virtual machine and investigate any returned errors. </action> <action> If the error reports that a file is locked, the VM might be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it might have been powered on by a user on a host. If any hosts have been declared dead, investigate whether a networking or storage issue is the cause. </action> <action> If the error reports that the virtual machine is in an invalid state, there might be an operation in progress that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation, that are taking a long time to complete. </action> </cause> </EventLongDescription> ExtendedEventHost connected to a vSphere HA masterinfovSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}com.vmware.vc.HA.ConnectedToMaster|vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.ConnectedToMaster"> <description> This event is logged whenever a host in a vSphere HA cluster transitions to a slave host state and establishes a connection with a master host. </description> </EventLongDescription> ExtendedEventvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}errorvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}com.vmware.vc.HA.CreateConfigVvolFailedEvent|vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. 
Error: {fault} <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolFailedEvent"> <description> vSphere HA failed to create a config vvol on the datastore </description> <cause> <description>A possible VP, host, network, or lack of resources prevented vSphere HA from creating a config vvol</description> <action>Look for errors in the environment, then re-enable vSphere HA</action> </cause> </EventLongDescription> ExtendedEventvSphere HA successfully created a configuration vVol after the previous failureinfovSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurecom.vmware.vc.HA.CreateConfigVvolSucceededEvent|vSphere HA successfully created a configuration vVol after the previous failure <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolSucceededEvent"> <description> vSphere HA successfully created a config vvol on the datastore. If there was a failed config vvol datastore configuration issue, it is being cleared </description> <cause> <description> There were no errors during creation of the config vvol on the datastore</description> </cause> </EventLongDescription> ExtendedEventvSphere HA agent is runninginfovSphere HA agent on host {host.name} in cluster {computeResource.name} is runningvSphere HA agent on host {host.name} is runningvSphere HA agent is runningcom.vmware.vc.HA.DasAgentRunningEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is running <EventLongDescription id=" com.vmware.vc.HA.DasAgentRunningEvent"> <description> This event is logged when the vSphere HA agent is running on a host. </description> <cause> <description> This event is reported after vSphere HA is configured on a host or after the vSphere HA agent on a host starts, such as after a host reboot. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected an HA cluster state version inconsistencywarningvSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name}vSphere HA detected an HA cluster state version inconsistencycom.vmware.vc.HA.DasClusterVersionInconsistentEvent|vSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasClusterVersionInconsistentEvent"> <description> This event is logged when vSphere HA cluster has a version inconsistency for cluster state(HostList, ClusterConfiguration, VM protection state). </description> <cause> <description> This situation could primarily occur if vCenter has been restored to an older backed up state causing vCenter to rollback to older version for the vSphere HA cluster state (HostList, ClusterConfiguration, VM protection state) while the hosts on the cluster have the latest version for the cluster state. As a result, protection state for VMs will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster, any new cluster configuration state will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster and if hosts were added or removed to/from this vSphere HA cluster after vCenter backup and before vCenter Restore, VMs could potentially failover to hosts not being managed by vCenter but which are still part of the HA cluster. </description> <action> Step 1. 
If hosts were added or removed to/from the vSphere HA cluster after vCenter backup and before vCenter Restore, please add or remove those respective hosts back to the vSphere HA cluster so that the list of hosts in the vSphere HA cluster is identical to the list of hosts in the cluster before vCenter was last restored. If you do not want to add hosts to the cluster, stop the vSphere HA process on the hosts that were added to vCenter after the backup. If this is not done, in case of a failure, VMs could potentially failover to hosts not being managed by vCenter but which are still part of the HA cluster. </action> <action> Step 2. Disable vSphere HA on the cluster and then re-enable vSphere HA on the cluster. This will make sure that vCenter's version for the vSphere HA cluster state(HostList, ClusterConfiguration, VM protection state) is reset with a new fault domain id for the HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a failed failover hosterrorvSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name}vSphere HA detected a possible failure of failover host {host.name}vSphere HA detected a possible failure of this failover hostcom.vmware.vc.HA.DasFailoverHostFailedEvent|vSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostFailedEvent"> <description> This event is logged when vSphere HA has detected the failure of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the failure of a failover host. A host is considered to have failed by a vSphere HA master agent if it looses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the failover host failure, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is not running and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-isolated failover hosterrorvSphere HA detected that failover host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that failover host {host.name} is network isolated from the clustervSphere HA detected that this failover host is network isolated from the clustercom.vmware.vc.HA.DasFailoverHostIsolatedEvent|Host {host.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the network isolation of a failover host. vSphere HA reports a host as isolated if there are no heartbeats received from the HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the the host's datastore heartbeats. 
</description> <action> Determine the cause of the failover host isolation, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is isolated and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-partitioned failover hostwarningvSphere HA detected that failover host {host.name} in {computeResource.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that failover host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this failover host is in a different network partition than the mastercom.vmware.vc.HA.DasFailoverHostPartitionedEvent|Failover Host {host.name} in {computeResource.name} in {datacenter.name} is in a different network partition than the master <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostPartitionedEvent"> <description> This event is logged when vSphere HA has detected a designated failover host is network partitioned. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if a vSphere HA master agent detects a failover host is network partitioned. vSphere HA reports a host as partitioned if it cannot communicate with a subset of hosts in the cluster, yet can determine that the host is alive via its datastore heartbeats. </description> <action> Determine the cause of the partitioned failover host, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if a failover host is partitioned and a host failure occurs. See the product documentation for troubleshooting tips. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA agent on a failover host is unreachableerrorThe vSphere HA agent on the failover host {host.name} in {computeResource.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on the failover host {host.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on this failover host is not reachable but host responds to ICMP pingscom.vmware.vc.HA.DasFailoverHostUnreachableEvent|The vSphere HA agent on the failover host {host.name} in cluster {computeResource.name} in {datacenter.name} is not reachable but host responds to ICMP pingsEventExHost complete datastore failureerrorAll shared datastores failed on the host {hostName} in cluster {computeResource.name}All shared datastores failed on the host {hostName}All shared datastores failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent|All shared datastores failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent"> <description> A host in a Component Protection-enabled cluster has lost connectivity to all shared datastores </description> <cause> <description>Connectivity to all shared datastores has been lost</description> <action>Reconnect at least one shared datastore</action> </cause> </EventLongDescription> EventExHost complete network failureerrorAll VM networks failed on the host {hostName} in cluster {computeResource.name}All VM networks failed on the host {hostName}All VM networks failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent|All VM networks failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent"> <description> A host in a Component Protection enabled cluster has lost connectivity to all virtual machine networks </description> <cause> <description>Connectivity to all virtual machine networks has been lost</description> <action>Reconnect at least one virtual machine network</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a host failureerrorvSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name}vSphere HA detected a possible host failure of host {host.name}vSphere HA detected a possible host failure of this hostcom.vmware.vc.HA.DasHostFailedEvent|vSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostFailedEvent"> <description> This event is logged when vSphere HA detects a possible host failure. </description> <cause> <description> A host is considered to have failed by a vSphere HA master agent if it looses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the host failure, and correct. See the product documentation for troubleshooting tips. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network isolated hosterrorvSphere HA detected that host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that host {host.name} is network isolated from the clustervSphere HA detected that this host is network isolated from the clustercom.vmware.vc.HA.DasHostIsolatedEvent|vSphere HA detected that host {host.name} is isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a host. </description> <cause> <description> This event will be generated if there are no heartbeats received from the vSphere HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the host's datastore heartbeats. </description> <action> Determine the cause of the host isolation, and correct. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA host monitoring is disabledwarningvSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name}vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabledcom.vmware.vc.HA.DasHostMonitoringDisabledEvent|vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostMonitoringDisabledEvent"> <description> This event is logged when host monitoring has been disabled in a vSphere HA cluster. </description> <cause> <description> Host monitoring is disabled, so vSphere HA will not perform any failover actions. This event is generated to inform the user that their cluster is temporarily not being protected against host or VM failures. If host or VM failures occur while host monitoring is disabled, HA will not attempt to restart the VMs that were running on the failed hosts. Other vSphere HA features are not impacted by whether host monitoring is disabled. </description> <action> Enable host monitoring to resume host monitoring. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA failed to restart a network isolated virtual machineerrorvSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart this virtual machine after it was powered off in response to a network isolation eventcom.vmware.vc.HA.FailedRestartAfterIsolationEvent|vSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} after it was powered off in response to a network isolation event. 
The virtual machine should be manually powered back on.EventExRunning VMs utilization cannot satisfy the configured failover resources on the cluster.warningRunning VMs utilization cannot satisfy the configured failover resources on cluster {computeResource.name}Running VMs utilization cannot satisfy the configured failover resources on the cluster.com.vmware.vc.HA.FailoverResourcesViolationEvent|Running VMs utilization cannot satisfy the configured failover resources on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.FailoverResourcesViolationEvent"> <description> This event is logged when the total utilization of the running VMs cannot satisfy the configured failover resources on a vSphere HA admission controlled cluster. </description> <cause> <description> The total utilization of the running VMs on this cluster is unable to satisfy the configured failover resources in the cluster. This event is generated to inform the user that their cluster will be running in a compromised state during failover and would not have sufficient failover resources to ensure the optimal functioning of the VMs and their workloads. The side-effect of this situation is that VMs won't be working optimally even though we ensure required failover capacity in case of failures. Other vSphere HA features are not impacted by this and this warning doesn't affect any VM related operations like power-on, vMotion, etc. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to ensure that there is sufficient failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA changed a host's heartbeat datastoresinfoDatastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on this hostcom.vmware.vc.HA.HeartbeatDatastoreChanged|Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreSelected"> <description> A datastore is selected or deselected for storage heartbeating monitored by the vSphere agent on this host. vSphere HA employs storage heartbeating to detect host failures when there is a network partition. 
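As a hypothetical illustration of the heartbeat-datastore selection described above (not taken from the catalog; pyVmomi, the placeholder connection details and the cluster name are assumptions, and the runtime property may differ between vSphere versions), a sketch that lists a cluster's heartbeat-datastore policy and the datastores HA reports as selected could look like:

# Illustrative sketch only: inspect heartbeat-datastore settings for an HA cluster.
# Assumes pyVmomi; all names and credentials below are placeholders.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()  # lab use only
si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="password", sslContext=ctx)
try:
    content = si.RetrieveContent()
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.ClusterComputeResource], True)
    cluster = next(c for c in view.view if c.name == "Cluster01")
    view.Destroy()

    das_cfg = cluster.configurationEx.dasConfig
    print("Heartbeat datastore policy:", das_cfg.hBDatastoreCandidatePolicy)
    print("User-preferred heartbeat datastores:",
          [ds.name for ds in (das_cfg.heartbeatDatastore or [])])

    # Runtime selection as reported for the HA (FDM) cluster; guarded because the
    # property is assumed and may be absent on some versions.
    runtime = cluster.RetrieveDasAdvancedRuntimeInfo()
    for info in (getattr(runtime, "heartbeatDatastoreInfo", None) or []):
        print(info.datastore.name, "->", [h.name for h in info.hosts])
finally:
    Disconnect(si)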
</description> </EventLongDescription> EventExvSphere HA heartbeat datastore number for a host is insufficientwarningThe number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for host {host.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for this host is {selectedNum}, which is less than required: {requiredNum}com.vmware.vc.HA.HeartbeatDatastoreNotSufficient|The number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} in {datacenter.name} is {selectedNum}, which is less than required: {requiredNum} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreNotSufficient"> <description> The number of heartbeat datastores used for this host is less than required. Multiple heartbeat datastores are needed to tolerate storage failures. The host summary page will report a configuration issue in this case. To ignore the configuration issue, use the vSphere HA cluster advanced option, das.ignoreInsufficientHbDatastore. </description> <cause> <description> The host does not have sufficient number of accessible datastores that are shared among other hosts in the cluster. </description> <action> Add more shared datastores to the host or check if any of its datastore is currently inaccessible. </action> </cause> </EventLongDescription> EventExvSphere HA agent on a host has an errorwarningvSphere HA agent for host {host.name} has an error in {computeResource.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for this host has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}com.vmware.vc.HA.HostAgentErrorEvent|vSphere HA agent for host {host.name} has an error in {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason} <EventLongDescription id="com.vmware.vc.HA.AgentErrorEvent"> <description> This event is logged when the vSphere HA agent for the host has an error. </description> <action> See product documentation for troubleshooting tips. </action> </EventLongDescription> ExtendedEventvSphere HA agent is healthyinfovSphere HA agent on host {host.name} in cluster {computeResource.name} is healthyvSphere HA agent on host {host.name} is healthyvSphere HA agent is healthycom.vmware.vc.HA.HostDasAgentHealthyEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is healthy <EventLongDescription id=" com.vmware.vc.HA.HostDasAgentHealthyEvent"> <description> This event is logged when the vSphere HA agent on a host transitions to a healthy state. </description> <cause> <description> vSphere HA reports this event when the vSphere HA agent on the host is either a master or a slave that is connected to the master over the management network. </description> </cause> </EventLongDescription> EventExvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error. 
{reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}com.vmware.vc.HA.HostDasErrorEvent|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason} <EventLongDescription id="com.vmware.vc.HA.HostDasErrorEvent"> <description> The vSphere HA agent on this host has an error. The event may provide details with extra information indicating the cause of the error. </description> <cause> <description>There was an error configuring the vSphere HA agent on the host</description> <action> Look at the task details for the configure vSphere HA task that failed. That will provide more details about why the failure occurred. Address the problem and reconfigure vSphere HA on the host. </action> </cause> <cause> <description> There was a timeout while communicating with the vSphere HA agent. This can occur if there is a high rate of operations being performed on virtual machines in the cluster resulting in the vSphere HA agents not being able to process the changes fast enough. </description> <action> Verify that this is a transient problem by stopping operations on virtual machines in the cluster for a few minutes to give time to the vSphere HA agents to process all their pending messages. If this resolves the problem, consider reducing the rate of operations performed on the cluster. </action> </cause> <cause> <description>The vSphere HA agent is in a shutdown or failed state</description> <action>Reconfigure vSphere HA on the host. If this fails, reconfigure vSphere HA on the cluster</action> </cause> </EventLongDescription> EventExvSphere HA detected a datastore failurewarningvSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of datastore {arg1} on host {host.name}vSphere HA detected a failure of datastore {arg1}com.vmware.vc.HA.HostDatastoreFailedEvent|vSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventUnsupported vSphere HA and vCloud Distributed Storage configurationerrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured on host {host.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured because vCloud Distributed Storage is enabled but the host does not support that featurecom.vmware.vc.HA.HostDoesNotSupportVsan|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in {datacenter.name} because vCloud Distributed Storage is enabled but the host does not support that featureExtendedEventHost has no vSphere HA isolation addresseserrorHost {host.name} in cluster {computeResource.name} has no isolation addresses defined as required by vSphere HAHost {host.name} has no isolation addresses defined as required by vSphere HAThis host has no isolation addresses defined as required by vSphere HAcom.vmware.vc.HA.HostHasNoIsolationAddrsDefined|Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no isolation addresses defined as required by vSphere HA. 
<EventLongDescription id="com.vmware.vc.HA.HostHasNoIsolationAddrsDefined"> <description> The host has an vSphere HA configuration issue because there were no IP addresses that vSphere HA could use for detecting network isolation. Without at least one, the host will not take any isolation response. HA, by default, will use the host's default gateway (defined in the host's networking configuration), or use the addresses that were specified in the cluster's advanced settings. </description> <action> Define a default gateway in the host's networking configuration. </action> <action> If the cluster advanced setting das.usedefaultisolationaddress is false, you must define at least one isolation address using the advanced options. </action> <action> Define one or more cluster advanced options, each containing an IP address to be pinged by vSphere HA to detect if it is network-isolated when it no longer receives communication with other hosts in the cluster. The advanced option is das.isolationAddress[n], where 'n' is a number from 1 to 9. You may specify multiple addresses. </action> </EventLongDescription> ExtendedEventvSphere HA cannot be configured on this host because there are no mounted datastores.errorvSphere HA cannot be configured on {host.name} in cluster {computeResource.name} because there are no mounted datastores.vSphere HA cannot be configured on {host.name} because there are no mounted datastores.vSphere HA cannot be configured on this host because there are no mounted datastores.com.vmware.vc.HA.HostHasNoMountedDatastores|vSphere HA cannot be configured on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because there are no mounted datastores.ExtendedEventvSphere HA requires a SSL Thumbprint for hosterrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on {host.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on this host because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for this host has been verified.com.vmware.vc.HA.HostHasNoSslThumbprint|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified. <EventLongDescription id="com.vmware.vc.HA.HostHasNoSslThumbprint"> <description> The host has an vSphere HA configuration issue because it does not have a verified ssl thumbprint. Hosts need verified SSL thumbprints for secure vSphere HA communications. </description> <action> If the host is using self-signed certificates, check that vCenter Server is configured to verify SSL certificates, and verify the thumbprints for the hosts in the vSphere HA cluster. 
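As a hypothetical illustration of the isolation-address guidance described above (das.usedefaultisolationaddress and das.isolationAddress[1..9]); none of the following comes from the catalog, and pyVmomi, the placeholder connection details, cluster name and IP address are assumptions), a sketch that sets those cluster advanced options could look like:

# Illustrative sketch only: set HA isolation-address advanced options on a cluster.
# Assumes pyVmomi; all names, credentials and the address value are placeholders.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()  # lab use only
si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="password", sslContext=ctx)
try:
    content = si.RetrieveContent()
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.ClusterComputeResource], True)
    cluster = next(c for c in view.view if c.name == "Cluster01")
    view.Destroy()

    # Advanced options are passed as key/value pairs in the HA (das) configuration.
    das_cfg = vim.cluster.DasConfigInfo(option=[
        vim.option.OptionValue(key="das.usedefaultisolationaddress", value="false"),
        vim.option.OptionValue(key="das.isolationAddress1", value="192.0.2.1"),
    ])
    spec = vim.cluster.ConfigSpecEx(dasConfig=das_cfg)
    task = cluster.ReconfigureComputeResource_Task(spec=spec, modify=True)
    print("Reconfigure task started:", task.info.key)
finally:
    Disconnect(si)

Note that reconfiguring the das options in this way replaces the advanced-option list supplied in the spec, so existing options should be re-specified if they are to be kept.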
</action> </EventLongDescription> ExtendedEventHost is incompatible with vSphere HAerrorThe product version of host {host.name} in cluster {computeResource.name} is incompatible with vSphere HA.The product version of host {host.name} is incompatible with vSphere HA.The product version of this host is incompatible with vSphere HA.com.vmware.vc.HA.HostIncompatibleWithHA|The product version of host {host.name} in cluster {computeResource.name} in {datacenter.name} is incompatible with vSphere HA. <EventLongDescription id="com.vmware.vc.HA.HostIncompatibleWithHA"> <description> The host is in a vSphere HA cluster but its product version is incompatible with HA. </description> <action> To fix the situation the host should either be moved out of the vSphere HA cluster or upgraded to a version supporting HA. </action> </EventLongDescription> EventExvSphere HA detected a network failurewarningvSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of network {network} on host {host.name}vSphere HA detected a failure of network {network}com.vmware.vc.HA.HostNetworkFailedEvent|vSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventvSphere HA detected a network-partitioned hostwarningvSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connected in {computeResource.name}vSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this host is in a different network partition than the master to which vCenter Server is connectedcom.vmware.vc.HA.HostPartitionedFromMasterEvent|vSphere HA detected that host {host.name} is in a different network partition than the master {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HostPartitionedFromMasterEvent"> <description> This event is logged when the host is in a different partition than the master. </description> </EventLongDescription> EventExThe vSphere HA host availability state changedinfoThe vSphere HA availability state of the host {host.name} in cluster {computeResource.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of the host {host.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of this host has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}com.vmware.vc.HA.HostStateChangedEvent|The vSphere HA availability state of the host {host.name} in cluster in {computeResource.name} in {datacenter.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState} <EventLongDescription id="com.vmware.vc.HA.HostStateChangedEvent"> <description> This event is logged when the availability state of a host has changed. </description> </EventLongDescription> ExtendedEventvSphere HA agent unconfigure failed on hostwarningThere was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on host {host.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on this host. 
To solve this problem, reconnect the host to vCenter Server.com.vmware.vc.HA.HostUnconfigureError|There was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name}. To solve this problem, reconnect the host to vCenter Server. <EventLongDescription id="com.vmware.vc.HA.HostUnconfigureError"> <description> There was an error unconfiguring the vSphere HA agent on this host. </description> <cause> <description> The vSphere HA unconfiguration task failed to send the updated hostList to the vSphere HA agent on the host. This condition may interfere with the vSphere HA cluster to which the host used to belong and should be corrected. </description> <action> Add the host back to a vCenter Server of version 5.0 or later. </action> </cause> </EventLongDescription> EventExA disconnected host has vSphere HA protected VMserrorHost {host.name} in cluster {computeResource.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)Host {host.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)This host is disconnected from vCenter Server, but contains {protectedVmCount} vSphere HA protected virtual machine(s)com.vmware.vc.HA.HostUnconfiguredWithProtectedVms|Host {host.name} in cluster {computeResource.name} in {datacenter.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s) <EventLongDescription id="com.vmware.vc.HA.HostUnconfiguredWithProtectedVms"> <description> This host is disconnected and contains one or more virtual machine(s) that are still protected by vSphere HA. Consequently, these virtual machines could be failed over to another host if this host should fail. </description> <cause> <description> If a vSphere HA-enabled host is disconnected and is unable to unprotect the virtual machines currently running on it (perhaps due to datastores being unavailable, or not being able to communicate with the vSphere HA master host) then these virtual machines would still be protected, but reside on the disconnected host. Also, if a virtual machine is migrated using vMotion to a vSphere HA-enabled host that is currently in the process of disconnecting, this can lead to the same result. </description> <action> To correct this situation, ensure that the host has access to the datastores used by these virtual machines, and then reconnect the host to a vSphere HA-enabled cluster. The virtual machines should become unprotected shortly after vSphere HA is configured on the host. </action> </cause> </EventLongDescription> EventExvSphere HA configured failover resources are insufficient to satisfy desired failover levelwarningInsufficient configured resources to satisfy the desired vSphere HA failover level on cluster {computeResource.name}Insufficient configured resources to satisfy the desired vSphere HA failover levelcom.vmware.vc.HA.InsufficientFailoverLevelEvent|Insufficient configured resources to satisfy the desired vSphere HA failover level on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.InsufficientFailoverLevelEvent"> <description> The cluster does not have enough failover capacity to satisfy the desired host failures to tolerate for vSphere HA. Failovers may still be performed by vSphere HA but will be on a best effort basis and configured resources may not be sufficient to respect the desired host failures to tolerate. 
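As a hypothetical illustration of the admission-control capacity discussed above (not from the catalog; pyVmomi, the placeholder connection details, cluster name and failover level are assumptions), a sketch that sets the "host failures to tolerate" admission-control policy could look like:

# Illustrative sketch only: configure HA admission control with a failover level of 1.
# Assumes pyVmomi; all names and credentials below are placeholders.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()  # lab use only
si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="password", sslContext=ctx)
try:
    content = si.RetrieveContent()
    view = content.viewManager.CreateContainerView(
        content.rootFolder, [vim.ClusterComputeResource], True)
    cluster = next(c for c in view.view if c.name == "Cluster01")
    view.Destroy()

    policy = vim.cluster.FailoverLevelAdmissionControlPolicy(failoverLevel=1)
    das_cfg = vim.cluster.DasConfigInfo(admissionControlEnabled=True,
                                        admissionControlPolicy=policy)
    spec = vim.cluster.ConfigSpecEx(dasConfig=das_cfg)
    cluster.ReconfigureComputeResource_Task(spec=spec, modify=True)
finally:
    Disconnect(si)

Raising the failover level reserves more capacity and makes the InsufficientFailoverLevelEvent more likely on a small cluster; adding hosts or lowering the level clears it.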
</description> <cause> <description> The desired host failures to tolerate setting might not be completely respected since the cluster does not have the required failover capacity to satisfy the failover of the largest desired number of hosts. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to reserve more failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA detected an invalid master agentwarningvSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.com.vmware.vc.HA.InvalidMaster|vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised. <EventLongDescription id="com.vmware.vc.HA.InvalidMaster"> <description> A host in a vSphere HA cluster that is claiming to be a master has been determined to be invalid be another master host. This occurs when an existing master gets a message from another master in the same cluster. The existing master verifies that the other master is actually a valid master before it considers abdicating to the other master. An invalid master is an indication that there may be a compromised host on the network that is attempting to disrupt the HA cluster. The offending host should be examined to determine if it has been compromised. Its also possible a compromised host is impersonating a valid host so the reported host may not be the actual host that is compromised. </description> </EventLongDescription> ExtendedEventvSphere HA could not identify lock owner host on VM with duplicatesinfovSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name}vSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on this VM with duplicatescom.vmware.vc.HA.LockOwnerUnKnownForDupVms|vSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.LockOwnerUnKnownForDupVms"> <description> The vSphere HA agent could not identify lock owner host on duplicate VMs. </description> <cause> <description> Instances when vSphere HA failovers the VM to another host, and unable to bring down the VM from the failed host. This results in multiple instances of a VM running in the cluster if the failed host joins back the cluster. </description> <action> Could not determine the lock owner host on duplicate VM. 
</action> </cause> </EventLongDescription> EventExvSphere HA agent cannot reach some cluster management addressesinfovSphere HA agent on {host.name} in cluster {computeResource.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on {host.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on host cannot reach some management network addresses of other hosts: {unpingableAddrs}com.vmware.vc.HA.NotAllHostAddrsPingable|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} cannot reach some management network addresses of other hosts: {unpingableAddrs} <EventLongDescription id="com.vmware.vc.HA.NotAllIsoAddrsPingable"> <description> The vSphere HA agent on host cannot reach some of the management network addresses of other hosts, and vSphere HA may not be able to restart VMs if a host failure occurs. </description> <cause> <description> There is a network issue preventing this host from communicating with some or all of the hosts in the cluster over their vSphere HA management networks. vSphere HA reliability ic currently compromised in the cluster and failover may not reliably occur if a host or hosts should fail during this condition. </description> <action> Determine and correct the source of the communication problem. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not terminate the VM that was selected for preemptionerrorvSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name}vSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate this VM that was selected for preemptioncom.vmware.vc.HA.PreemptionFailedWithMaxRetry|vSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.PreemptionFailedWithMaxRetry"> <description> vSphere HA could not terminate the VM that was selected for preemption. </description> <cause> <description> Instances when vSphere HA receives the InsufficientResourcesFault, for any VM with fault reason indicating presence of preemptible VM. vSphere HA terminates appropriate preemptibe VM to free up resources. </description> <action> Terminate the preemptibe VM manually to free up resources. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA remediated duplicates of VMinfovSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of this VMcom.vmware.vc.HA.RemediatedDupVMs|vSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediatedDupVMs"> <description> The vSphere HA agent on host remediate duplicate VM. </description> <cause> <description> Instances when vSphere HA failovers the VM to another host, and unable to bring down the VM from the failed host. This results in multiple instances of a VM running in the cluster if the failed host joins back the cluster. </description> <action> Kept the VM running on host which holds the lock on datastore, terminated VM on rest of the hosts where VM was running. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not remediate duplicates of VMwarningvSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of this VMcom.vmware.vc.HA.RemediationFailedForDupVMs|vSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediationFailedForDupVMs"> <description> The vSphere HA agent on host could not remediate duplicate VM. </description> <cause> <description> Instances when vSphere HA failovers the VM to another host, and unable to bring down the VM from the failed host. This results in multiple instances of a VM running in the cluster if the failed host joins back the cluster. </description> <action> Duplicates of VM running on multiple hosts could not be terminated. </action> </cause> </EventLongDescription> EventExvSphere HA failed to start a Fault Tolerance secondary VM.errorvSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.com.vmware.vc.HA.StartFTSecondaryFailedEvent|vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name} in {datacenter.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out. <EventLongDescription id="com.vmware.vc.HA.StartFTSecondaryFailedEvent"> <description> vSphere HA agent failed to start a Fault Tolerance secondary VM. vSphere HA will retry until either the operation succeeds or until the maximum number of restart attempts is reached. </description> </EventLongDescription> EventExvSphere HA successfully started a Fault Tolerance secondary VM.infovSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}.com.vmware.vc.HA.StartFTSecondarySucceededEvent|vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. 
<EventLongDescription id="com.vmware.vc.HA.StartFTSecondarySucceededEvent"> <description> vSphere HA agent successfully started a Fault Tolerance secondary virtual machine. </description> </EventLongDescription> EventExvSphere HA removed a datastore from preferred heartbeat datastoreswarningvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} because the datastore is removed from inventoryvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster because the datastore is removed from inventorycom.vmware.vc.HA.UserHeartbeatDatastoreRemoved|vSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} in {datacenter.name} because the datastore is removed from inventory <EventLongDescription id="com.vmware.vc.HA.UserHeartbeatDatastoreRemoved"> <description> The datastore is removed from the set of preferred heartbeat datastores selected for this cluster. </description> <cause> <description> The datastore does not exist in the inventory. This happens when the datastore is removed from a host in the cluster manually or via a rescan. </description> <action> Choose a different datastore by reconfiguring the vSphere HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA did not perform an isolation response for vm because its VM restart priority is DisabledinfovSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} because its VM restart priorirty is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is Disabled"vSphere HA did not perform an isolation response because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledIsolationEvent|vSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledIsolationEvent"> <description> This event is logged when a host in a vSphere HA cluster was isolated and no isolation response was taken. </description> <cause> <description> The VM restart priority setting is set to disabled, so vSphere HA did not perform any action on this VM when the host became isolated. If the restart priority is disabled, HA will not attempt to restart the VM on another host, so HA will take no action for this VM on the isolated host. This event is informational only. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA did not attempt to restart vm because its VM restart priority is DisabledinfovSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is Disabled"vSphere HA did not attempt to restart vm because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledRestartEvent|vSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledRestartEvent"> <description> This event is logged when a failed VM in a vSphere HA cluster will not be restarted because its VM restart priority setting is set to disabled. </description> <cause> <description> The restart priority for the cluster or VM is disabled, so vSphere HA did not perform any action on this VM failed. This event is informational only. </description> </cause> </EventLongDescription> EventExvCenter Server cannot communicate with the master vSphere HA agentwarningvCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name}vCenter Server cannot communicate with the master vSphere HA agent on {hostname}com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent|vCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent"> <description> This event is logged when vCenter Server cannot communicate with a vSphere HA master agent. </description> <cause> <description> This event is reported when vCenter Server is not able to communicate with a vSphere HA master agent on the host, but it can communicate with other vSphere HA agents in the cluster and these are reporting the host is a master. </description> <action> Correct the networking issue that is preventing vCenter Server from communicating with the host listed in the event. This problem can occur, for example, if the physical NIC in use by this network connection has failed. </action> </cause> </EventLongDescription> ExtendedEventvCenter Server is unable to find a master vSphere HA agentwarningvCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name}vCenter Server is unable to find a master vSphere HA agentcom.vmware.vc.HA.VcCannotFindMasterEvent|vCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotFindMasterEvent"> <description> This event is logged when vCenter Server is unable to find a master vSphere HA agent. 
</description> <cause> <description> </description> <action> </action> </cause> </EventLongDescription> EventExvCenter Server connected to a vSphere HA master agentinfovCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is connected to a master HA agent running on host {hostname}com.vmware.vc.HA.VcConnectedToMasterEvent|vCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcConnectedToMasterEvent"> <description> This event is logged when vCenter Server is connected with a master vSphere HA agent. </description> </EventLongDescription> EventExvCenter Server disconnected from a master vSphere HA agentwarningvCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is disconnected from a master HA agent running on host {hostname}com.vmware.vc.HA.VcDisconnectedFromMasterEvent|vCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcDisconnectedFromMasterEvent"> <description> This event is logged when vCenter Server is disconnected from a master vSphere HA agent. </description> </EventLongDescription> ExtendedEventvSphere HA was unable to reset a VM after it exhausted the retrieserrorvSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on host {host.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on this host after {retryTimes} retriesvSphere HA was unable to reset this VM after {retryTimes} retriescom.vmware.vc.HA.VmDasResetAbortedEvent|vSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmDasResetAbortedEvent"> <description> This event is logged when vSphere HA was unable to reset a VM. </description> <cause> <description> The operation to reset the VM continued to fail. vSphere HA stopped resetting the VM after it exhausted the retries. </description> <action>Ensure that the host system is manageable, for example host agent is not hung. Check if there are no other concurrent tasks running for the VM.</action> </cause> </EventLongDescription> ExtendedEventVirtual machine failed to become vSphere HA ProtectederrorVirtual machine {vm.name} in cluster {computeResource.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.This virtual machine failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.com.vmware.vc.HA.VmNotProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure. 
<EventLongDescription id="com.vmware.vc.HA.VmNotProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation but the VM has not transitioned to vSphere HA Protected in the time period expected. This condition exists because the master vSphere HA agent has not yet persisted that the VM successfully powered on or vCenter is unaware that it did. Consequently, vSphere HA may not restart the VM after a failure. </description> <action> There are a number of reasons why a VM may remain not protected for a period of time. First, the system may be heavily loaded, in which case the transition will just take longer. Second, vCenter may be unable to communicate with the vSphere HA master agent. Examine the inventory to see if any hosts in the cluster are not responding. Third, the the management network may be partitioned, which is preventing the master that owns the VM from protecting it or reporting this information to vCenter. The cluster summary page may report a config issue in this case or hosts in the VM inventory will be reported as not responding. Finally, the vSphere HA master election is taking too long to complete. The cluster summary page will report if this situation exists. See the product documentation for additional troubleshooting tips. </action> </EventLongDescription> ExtendedEventVirtual machine is vSphere HA protectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.This virtual machine is vSphere HA Protected and HA will attempt to restart it after a failure.com.vmware.vc.HA.VmProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is vSphere HA Protected and HA will attempt to restart it after a failure. <EventLongDescription id="com.vmware.vc.HA.VmProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation and vSphere HA has persisted this fact. Consequently, vSphere HA will attempt to restart the VM after a failure. </description> </EventLongDescription> ExtendedEventVirtual machine is not vSphere HA ProtectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.This virtual machine is not vSphere HA Protected.com.vmware.vc.HA.VmUnprotectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is not vSphere HA Protected. <EventLongDescription id="com.vmware.vc.HA.VmUnprotectedEvent"> <description> The virtual machine transitioned from the vSphere HA protected to unprotected state. This transition is a result of a user powering off the virtual machine, disabling vSphere HA, disconnecting the host on which the virtual machine is running, or destroying the cluster in which the virtual machine resides. 
</description> </EventLongDescription> ExtendedEventvSphere HA has unprotected out-of-disk-space VMinfovSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected this virtual machine because it ran out of disk spacecom.vmware.vc.HA.VmUnprotectedOnDiskSpaceFull|vSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} because it ran out of disk spaceExtendedEventvSphere HA did not terminate a VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}warningvSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate this VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore|vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore} <EventLongDescription id=" com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore"> <description> This event is logged when a VM affected by an inaccessible datastore in a vSphere HA cluster was not terminated. </description> <cause> <description> VM Component Protection is configured to not terminate the VM, or vSphere HA host monitoring is disabled, or VM restart priority is disabled, or the VM is an agent VM, or there are not sufficient resources to fail over the VM. In the case of insufficient resources, vSphere HA will attempt to terminate the VM when resources become available. </description> <action>Select VM Component Protection option to terminate VM</action> <action>Enable host monitoring</action> <action>Enable VM Restart priority</action> <action>Reduce resource reservations of other VMs in the cluster</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition or isolation if one exists</action> <action>If vSphere DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> </EventLongDescription> ExtendedEventDatastore {ds.name} mounted on this host was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleinfoDatastore {ds.name} mounted on host {host.name} in cluster {computeResource.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on host {host.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on this host was inaccessible. 
vSphere HA detected that the condition was cleared and the datastore is now accessiblecom.vmware.vc.HA.VmcpStorageFailureCleared|Datastore {ds.name} mounted on host {host.name} was inaccessible. The condition was cleared and the datastore is now accessible <EventLongDescription id=" com.vmware.vc.HA.VmcpStorageFailureCleared"> <description> This event is logged when datastore connectivity was restored. The host can have the following storage access failures: All Paths Down (APD) and Permanent Device Loss (PDL). The datastore was shown as unavailable/inaccessible in the storage view. </description> <cause> <description> A datastore on this host was inaccessible. The condition was cleared and the datastore is now accessible. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected that a datastore was inaccessible. This affected the VM with files on the datastorewarningvSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on host {host.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on this host was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected the VM with files on the datastorecom.vmware.vc.HA.VmcpStorageFailureDetectedForVm|vSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} in {datacenter.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastore <EventLongDescription id="com.vmware.vc.HA.VmcpStorageFailureDetectedForVm"> <description> This event is logged when a VM's files were not accessible due to a storage connectivity failure. vSphere HA will take action if VM Component Protection is enabled for the VM. </description> <cause> <description> A datastore was inaccessible due to a storage connectivity loss of All Paths Down or Permanent Device Loss. A VM was affected because it had files on the inaccessible datastore. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA was unable to terminate VM affected by an inaccessible datastore after it exhausted the retrieserrorvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on this host after {retryTimes} retriesvSphere HA was unable to terminate this VM affected by an inaccessible datastore after {retryTimes} retriescom.vmware.vc.HA.VmcpTerminateVmAborted|vSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminateVmAborted"> <description> This event is logged when vSphere HA was unable to terminate a VM affected by an inaccessible datastore. </description> <cause> <description> The operation to terminate the VM continued to fail. vSphere HA stopped terminating the VM after it exhausted the retries. </description> <action> Ensure that the host system is manageable, for example host agent is not hung. Check if there are other concurrent tasks running for the VM.</action> <action> Reset the VM if guest application is not operational after the datastore becomes accessible.</action> </cause> </EventLongDescription> ExtendedEventvSphere HA attempted to terminate a VM affected by an inaccessible datastorewarningvSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on host{host.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on this host because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate this VM because the VM was affected by an inaccessible datastorecom.vmware.vc.HA.VmcpTerminatingVm|vSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} in {datacenter.name} because the VM was affected by an inaccessible datastore <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminatingVm"> <description> This event is logged when vSphere HA attempted to terminate a VM affected by an inaccessible datastore. A VM is terminated by issuing a SIGKILL to the vmx process. </description> <cause> <description> The VM was affected by an inaccessible datastore. vSphere HA VM Component Protection attempted to terminate the VM. </description> </cause> </EventLongDescription> EventExHardware Health Status Changedinfocom.vmware.vc.HardwareSensorEvent|Sensor {sensorNumber} type {sensorType}, Description {sensorName} state {status} for {message}. 
Part Name/Number {partName} {partNumber} Manufacturer {manufacturer}EventExStatus of each Hardware Health Sensor Groupinfocom.vmware.vc.HardwareSensorGroupStatus|Hardware Sensor Status: Processor {processor}, Memory {memory}, Fan {fan}, Voltage {voltage}, Temperature {temperature}, Power {power}, System Board {systemBoard}, Battery {battery}, Storage {storage}, Other {other}ExtendedEventHost configuration is TPM encrypted.warningcom.vmware.vc.HostTpmConfigEncryptionEvent|Host configuration is TPM encrypted.EventExOperation cleanup encountered errorsinfoOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup with task {taskId} encountered errorscom.vmware.vc.OperationCleanupErrorsEvent|Operation cleanup for {vm.name} with task {taskId} encountered errorsExtendedEventThe user does not have permission to view the entity associated with this event.infocom.vmware.vc.RestrictedAccess|The user does not have permission to view the entity associated with this event.EventExFailed to register host with Intel® SGX Registration Service.errorFailed to register host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.FailedRegistration|Failed to register host {host.name} with Intel® SGX Registration Service {registrationUrl}. The service responded with {statusCode}, {errorCode}: {errorMessage}.EventExSending registration request to Intel® SGX Registration Service.infoSending registration request to Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.InitiatingRegistration|Sending registration request for host {host.name} to Intel® SGX Registration Service {registrationUrl}.EventExSuccessfully registered host with Intel® SGX Registration Service.infoSuccessfully registered host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.SuccessfulRegistration|Successfully registered host {host.name} with Intel® SGX Registration Service {registrationUrl}.EventExStateless Alarm TriggeredinfoAlarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'com.vmware.vc.StatelessAlarmTriggeredEvent|Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'ExtendedEventTrusted Host attestation failed.errorcom.vmware.vc.TaHostAttestFailEvent|Trusted Host attestation failed.ExtendedEventTrusted Host attestation passed.infocom.vmware.vc.TaHostAttestPassEvent|Trusted Host attestation passed.ExtendedEventTrusted Host attestation status unset.infocom.vmware.vc.TaHostAttestUnsetEvent|Trusted Host attestation status unset.EventExHost Time Synchronization establishedinfocom.vmware.vc.TimeSyncEvent|Time service {serviceName} has synchronized with remote time source, details: {message}.EventExHost Time Synchronization losterrorcom.vmware.vc.TimeSyncFailedEvent|Time service {serviceName} is not synchronized with the remote time source, details: {message}.ExtendedEventHost must be decommissioned when moved out of a Trusted Infrastructure cluster.errorHost {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host 
{host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.com.vmware.vc.TrustAuthority.DecommissionHost|Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.ExtendedEventHost is not configured for vSphere Trust Authority.errorHost {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.com.vmware.vc.TrustAuthority.HostNotConfigured|Host {host.name} is not configured for vSphere Trust Authority.EventExThe client certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSClientCertExpirationEvent|The client certificate for the Key Provider {keyProviderId} in the Trust Authority Host {hostName} will expire in {dayNum} day(s).EventExThe server certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSServerCertExpirationEvent|The server cetificate of key server {serverName} in the Trusted Key Provider {keyProviderId} will expire in {dayNum} day(s).ExtendedEventCertificates have changed. Trust authority cluster needs to be reconfigured.errorcom.vmware.vc.TrustAuthority.StsCertificatesChange|Certificates have changed. Trust authority cluster needs to be reconfigured.EventExvCenter Service Overall Health Changedinfocom.vmware.vc.VCHealthStateChangedEvent|vCenter Service overall health changed from '{oldState}' to '{newState}' <EventLongDescription id="com.vmware.vc.VCHealthStateChangedEvent"> <description> This event is logged when the overall health of vCenter Service has changed or become unavailable. </description> <cause> <description> The vCenter Service overall health state has changed or become unavailable </description> <action> Examine the vCenter Service health state and make sure the VimWebServices service is up and running on the vCenter Server </action> </cause> </EventLongDescription> EventExDatastore is in healthy state within the clusterinfoDatastore {dsName} is in healthy state within the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreHealthy|Datastore {dsName} is in healthy state within the cluster {computeResource.name}EventExDatastore is not accessible on the host(s)warningDatastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreInaccessible|Datastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}EventExDatastore unmount is failederrorUnmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreUnmountFailed|Unmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}EventExDatastore in desired configuration is missing on the host(s)warningDatastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}com.vmware.vc.VMCStorage.DesiredDatastoreMissing|Datastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}EventExHost(s) mounted with the datastore which is not present in desired configurationerrorHost(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired configuration on {computeResource.name}com.vmware.vc.VMCStorage.NotDesiredDatastorePresent|Host(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired 
configuration on {computeResource.name}EventExExecuting VM Instant CloneinfoExecuting Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} to {destVmName}Executing Instant Clone to {destVmName}com.vmware.vc.VmBeginInstantCloneEvent|Executing Instant Clone of {vm.name} on {host.name} to {destVmName}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneFailedInvalidDestinationEvent|Cannot clone {vm.name} as {destVmName} to invalid or non-existent destination with ID {invalidMoRef}: {fault}EventExRestarting VM CloneinfoRestarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} to {destVmName} with task {taskId}Restarting VM Clone to {destVmName} with task {taskId}com.vmware.vc.VmCloneRestartEvent|Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneToResourcePoolFailedEvent|Cannot clone {vm.name} as {destVmName} to resource pool {destResourcePool}: {fault}EventExFailed to create virtual machineerrorFailed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName}Failed to create virtual machine on {host.name}com.vmware.vc.VmCreateFailedEvent|Failed to create virtual machine {vmName} on {host.name}ExtendedEventVirtual machine disks consolidation succeeded.infoVirtual machine {vm.name} disks consolidation succeeded on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation succeeded on {host.name}.Virtual machine {vm.name} disks consolidation succeeded.Virtual machine disks consolidation succeeded.com.vmware.vc.VmDiskConsolidatedEvent|Virtual machine {vm.name} disks consolidated successfully on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation needed.warningVirtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is needed on {host.name}.Virtual machine {vm.name} disks consolidation is needed.Virtual machine disks consolidation is needed.com.vmware.vc.VmDiskConsolidationNeeded|Virtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation no longer needed.infoVirtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is no longer needed on {host.name}.Virtual machine {vm.name} disks consolidation is no longer needed.Virtual machine disks consolidation is no longer needed.com.vmware.vc.VmDiskConsolidationNoLongerNeeded|Virtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation failed.warningVirtual machine {vm.name} disks consolidation failed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation failed on {host.name}.Virtual machine {vm.name} disks consolidation failed.Virtual machine disks consolidation failed.com.vmware.vc.VmDiskFailedToConsolidateEvent|Virtual machine {vm.name} 
disks consolidation failed on {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExcom.vmware.vc.VmForkFailedInvalidDestinationEvent|EventExCannot complete Instant Clone of VMerrorCannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone to {destVmName}. Reason : {fault.msg}com.vmware.vc.VmInstantCloneFailedEvent|Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}EventExInstant Clone WarningwarningInstant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning - {warning}com.vmware.vc.VmInstantCloneWarningEvent|Instant Clone Warning for {vmName} - {warning}EventExInstant Clone of VM has completedinfoInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} has completedInstant Clone of {srcVmName} has completedcom.vmware.vc.VmInstantClonedEvent|Instant Clone of {srcVmName} on {host.name} has completedEventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.infocom.vmware.vc.VpxdMemoryUsageClearEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status}.EventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.errorcom.vmware.vc.VpxdMemoryUsageErrorEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status} (used: {usedMemory}%, soft limit: {limit}%).EventExOperation enabledinfocom.vmware.vc.authorization.MethodEnabled|The operation {MethodName} on the {EntityName} of type {EntityType} is enabled.EventExPrivilege check failedwarningPrivilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}com.vmware.vc.authorization.NoPermission|Privilege check failed for user {User} for missing permission {Permission}. 
Session user performing the check: {SessionUser}ExtendedEventErrors occurred during automatic CPVM certificate rotation.errorcom.vmware.vc.certificatemanagement.CPVMCertificateUpdateFailedEvent|Errors occurred during automatic CPVM certificate rotation.ExtendedEventCPVM successfully performed automatic certificate rotation.infocom.vmware.vc.certificatemanagement.CPVMCertificateUpdateHealthyEvent|CPVM successfully performed automatic certificate rotation.ExtendedEventErrors occurred during automatic Spherelet certificate rotation.errorcom.vmware.vc.certificatemanagement.SphereletCertificateUpdateFailedEvent|Errors occurred during automatic Spherelet certificate rotation.ExtendedEventNo errors found during automatic Spherelet certificate rotation.infocom.vmware.vc.certificatemanagement.SphereletCertificateUpdateHealthyEvent|No errors found during automatic Spherelet certificate rotation.ExtendedEventTRUSTED ROOT certificates imported successfully.infocom.vmware.vc.certificatemanagement.TrustedRootsImportedEvent|TRUSTED ROOT certificates imported successfully.ExtendedEventTRUSTED ROOT certificates imported successfully, but with warnings.warningcom.vmware.vc.certificatemanagement.TrustedRootsImportedWithWarningsEvent|TRUSTED ROOT certificates imported successfully, but with warnings.ExtendedEventvCenter Server TLS certificate replaced successfully.infocom.vmware.vc.certificatemanagement.VcCertificateReplacedEvent|vCenter Server TLS certificate replaced successfully.ExtendedEventvCenter Server TLS certificate replaced successfully, but there are warnings detected.warningcom.vmware.vc.certificatemanagement.VcCertificateReplacedWithWarningsEvent|vCenter Server TLS certificate replaced successfully, but there are warnings detected.EventExFailed to update the vCenter server certificate.warningcom.vmware.vc.certificatemanagement.VcServerCertificateUpdateFailureEvent|{cause} for the {serviceName}. Remediation suggested: {remediation}. For more details, please refer to {kbLink}.EventExCA Certificates were updated on hostinfoCA Certificates were updated on {hostname}com.vmware.vc.certmgr.HostCaCertsAndCrlsUpdatedEvent|CA Certificates were updated on {hostname}EventExHost Certificate expiration is imminentwarningHost Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpirationImminentEvent|Host Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}EventExHost Certificate is nearing expirationwarningHost Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringEvent|Host Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}EventExHost Certificate will expire soonwarningHost Certificate on {hostname} will expire soon. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringShortlyEvent|Host Certificate on {hostname} will expire soon. 
Expiration Date: {expiryDate}ExtendedEventHost Certificate Management Mode changedinfocom.vmware.vc.certmgr.HostCertManagementModeChangedEvent|Host Certificate Management Mode changed from {previousMode} to {presentMode}ExtendedEventHost Certificate Management Metadata changedinfocom.vmware.vc.certmgr.HostCertMetadataChangedEvent|Host Certificate Management Metadata changedEventExHost Certificate revokedwarningHost Certificate on {hostname} is revoked.com.vmware.vc.certmgr.HostCertRevokedEvent|Host Certificate on {hostname} is revoked.EventExHost Certificate was updatedinfoHost Certificate was updated on {hostname}, new thumbprint: {thumbprint}com.vmware.vc.certmgr.HostCertUpdatedEvent|Host Certificate was updated on {hostname}, new thumbprint: {thumbprint}EventExAdding host to cluster store failederrorAdding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.AddHostFailed|Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}EventExInitializing cluster store member cache failederrorInitializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.InitializeMemberCacheFailed|Initializing cluster store member cache failed. Fault Reason : {errorMessage}EventExRemoving host from cluster store failederrorRemoving host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.RemoveHostFailed|Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}EventExUpdating host encryption keyinfocom.vmware.vc.crypto.HostKeyUpdatedEvent|Host encryption key set to {newKey}. 
Old key: {oldKey}EventExcom.vmware.vc.crypto.IntegrityCheckFailed|EventExcom.vmware.vc.crypto.IntegrityCheckPassed|EventExCrypto operation audit eventinfocom.vmware.vc.crypto.Operation|Cryptographic operations during {description}{operation}{diskOperations}EventExFailed to update VM fileserrorFailed to update VM files on datastore {ds.name}com.vmware.vc.datastore.UpdateVmFilesFailedEvent|Failed to update VM files on datastore {ds.name} using host {hostName}EventExUpdated VM filesinfoUpdated VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatedVmFilesEvent|Updated VM files on datastore {ds.name} using host {hostName}EventExUpdating VM FilesinfoUpdating VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatingVmFilesEvent|Updating VM files on datastore {ds.name} using host {hostName}ExtendedEventLink Aggregation Control Protocol configuration is inconsistentinfoSingle Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.com.vmware.vc.dvs.LacpConfigInconsistentEvent|Single Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.ExtendedEventFault Tolerance VM restart disabledwarningvSphere HA has been disabled in cluster {computeResource.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart this VM or its Secondary VM after a failure.com.vmware.vc.ft.VmAffectedByDasDisabledEvent|vSphere HA has been disabled in cluster {computeResource.name} of datacenter {datacenter.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure. <EventLongDescription id="com.vmware.vc.ft.VmAffectedByDasDisabledEvent"> <description> When vSphere HA is disabled in a cluster, you cannot restart a Primary VM or its Secondary VM after a failure. This event is issued when vSphere HA is disabled and a Fault Tolerant virtual machine is powered on. The event alerts you of the risk to the Fault Tolerant virtual machine that results from disabling vSphere HA. 
</description> <cause> <description>vSphere HA was disabled when a Fault Tolerant virtual machine was powered on</description> <action>Re-enable vSphere HA</action> </cause> </EventLongDescription> EventExGuest operationinfoGuest operation {operationName.@enum.com.vmware.vc.guestOp} performed.com.vmware.vc.guestOperations.GuestOperation|Guest operation {operationName.@enum.com.vmware.vc.guestOp} performed on Virtual machine {vm.name}.EventExGuest operation authentication failurewarningGuest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp}.com.vmware.vc.guestOperations.GuestOperationAuthFailure|Guest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp} on Virtual machine {vm.name}.ExtendedEventvSphere HA restarted a virtual machinewarningvSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA restarted virtual machine {vm.name} on host {host.name}vSphere HA restarted virtual machine {vm.name}vSphere HA restarted this virtual machinecom.vmware.vc.ha.VmRestartedByHAEvent|vSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.ha.VmRestartedByHAEvent"> <description> The virtual machine was restarted automatically by vSphere HA on this host. This response may be triggered by a failure of the host the virtual machine was originally running on or by an unclean power-off of the virtual machine (eg. if the vmx process was killed). </description> </EventLongDescription> ExtendedEventAutostart power on failederrorPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on this host failedcom.vmware.vc.host.AutoStartPowerOnFailedEvent|Powering on virtual machines according to autostart rules on host {host.name} in datacenter {datacenter.name} failedExtendedEventAutostart rules reconfigure failederrorReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on this host failedcom.vmware.vc.host.AutoStartReconfigureFailedEvent|Reconfiguring autostart rules for virtual machines on {host.name} in datacenter {datacenter.name} failedEventExEncryption mode is enabled on host.infoEncryption mode is enabled on host.com.vmware.vc.host.Crypto.Enabled|Encryption mode is enabled on host {hostName}.EventExThe operation is not supported on hosts which have encryption disabled.errorcom.vmware.vc.host.Crypto.HostCryptoDisabled|The operation is not supported on host {hostName} because encryption is disabled.EventExHost key is being renewed because an error occurred on the key provider.warningHost key is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KMSClusterError|Host key of {hostName} is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.EventExHost key is being renewed because key was missing on the key provider.warningHost key is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. 
The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KeyMissingOnKMS|Host key of {hostName} is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. The new key is {newKey}.EventExHost requires encryption mode enabled and the key provider is not available.errorHost requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KMSClusterError|Host {hostName} requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExHost requires encryption mode enabled and the key is not available on the key provider.errorHost requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KeyMissingOnKMS|Host {hostName} requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExFailed to send keys to host because of host error.errorcom.vmware.vc.host.Crypto.SendKeyError.HostError|Failed to send keys {keys} to host {hostName}. Please check host connection.EventExHost profile {operation} failed with error: {error}.errorHost profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.com.vmware.vc.host.HPOperationFailed|Host profile {operation} failed with error: {error}.ExtendedEventHost booted from stateless cache.warningHost booted from stateless cache.Host booted from stateless cache.Host booted from stateless cache.com.vmware.vc.host.HostBootedFromStatelessCacheEvent|Host booted from stateless cache.EventExHost IP address conflict detectederrorHost IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}com.vmware.vc.host.HostIpConflictEvent|Host IP address conflict detected. 
{changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}ExtendedEventHost in Memory Mode and active DRAM usage is normalinfo{host.name} is in Memory Mode and its active DRAM usage is normal{host.name} is in Memory Mode and its active DRAM usage is normalThe host is in Memory Mode and its active DRAM usage is normalcom.vmware.vc.host.MemoryModeActiveDRAMGreen|Host {host.name} is in Memory Mode and its active DRAM usage is normalExtendedEventHost in Memory Mode and active DRAM usage is highwarningHost {host.name} is in Memory Mode and its active DRAM usage is highHost {host.name} is in Memory Mode and its active DRAM usage is highThe host is in Memory Mode and its active DRAM usage is highcom.vmware.vc.host.MemoryModeActiveDRAMYellow|Host {host.name} is in Memory Mode and its active DRAM usage is highExtendedEventNSX installation failed on host.errorNSX installation failed on host.NSX installation failed on host.NSX installation failed on host.com.vmware.vc.host.NsxInstallFailed|NSX installation failed on host.ExtendedEventNSX installation successful on host.infoNSX installation successful on host.NSX installation successful on host.NSX installation successful on host.com.vmware.vc.host.NsxInstallSuccess|NSX installation successful on host.ExtendedEventPartial maintenance mode status has changed.infoHost status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.com.vmware.vc.host.PartialMaintenanceModeStatusChanged|Host status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}com.vmware.vc.host.StatelessHPApplyEarlyBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}com.vmware.vc.host.StatelessHPApplyFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}com.vmware.vc.host.StatelessHPApplyPostBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. 
{error}EventExHost TPM attestation failederrorHost TPM attestation failed for host {host.name}: {1}Host TPM attestation failed for host {host.name}: {1}Host TPM attestation failed: {1}com.vmware.vc.host.TPMAttestationFailedEvent|Host TPM attestation failed for host {host.name} in datacenter {datacenter.name}: {1}ExtendedEventActive DRAM usage of the memory tiered host is normalinfoHost {host.name} is a memory tiered host and its active DRAM usage is normalHost {host.name} is a memory tiered host and its active DRAM usage is normalActive DRAM usage of the memory tiered host is normalcom.vmware.vc.host.TieringMemoryActiveDRAMGreen|Host {host.name} is a memory tiered host and its active DRAM usage is normalExtendedEventActive DRAM usage of the memory tiered host is highwarningHost {host.name} is a memory tiered host and its active DRAM usage is highHost {host.name} is a memory tiered host and its active DRAM usage is highActive DRAM usage of the memory tiered host is highcom.vmware.vc.host.TieringMemoryActiveDRAMYellow|Host {host.name} is a memory tiered host and its active DRAM usage is highExtendedEventNew TPM host endorsement key doesn't match the one in the DBerrorThe new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DBcom.vmware.vc.host.TpmEndorsementKeyMismatch|The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name} in datacenter {datacenter.name}ExtendedEventHost's virtual flash resource is accessible.infoHost's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.com.vmware.vc.host.clear.vFlashResource.inaccessible|Host's virtual flash resource is restored to be accessible.EventExHost's virtual flash resource usage dropped below the threshold.infoHost's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.com.vmware.vc.host.clear.vFlashResource.reachthreshold|Host's virtual flash resource usage dropped below {1}%.ExtendedEventDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.warningDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound|Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.ExtendedEventDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostswarningDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. 
Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostscom.vmware.vc.host.problem.DeprecatedVMFSVolumeFoundAfterVMFS3EOL|Deprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsExtendedEventImproved virtual disk infrastructure's catalog management turned unhealthywarningcom.vmware.vc.host.problem.VStorageObjectInfraCatalogUnhealthy|Improved virtual disk infrastructure's catalog management turned unhealthyExtendedEventImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.warningImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent|Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. <EventLongDescription id="com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent"> <description> Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. </description> <cause> <description> This is caused by creating improved virtual disk infrastructure namespaces with empty storage policy. </description> <action> Update infrastructure namespaces storage policy. </action> </cause> </EventLongDescription> ExtendedEventHost's virtual flash resource is inaccessible.warningHost's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.com.vmware.vc.host.problem.vFlashResource.inaccessible|Host's virtual flash resource is inaccessible. <EventLongDescription id="com.vmware.vc.host.problem.vFlashResource.inaccessible"> <description> Inaccessible host virtual flash resource indicates that its backing VFFS volume is inaccessible. Due to inaccessible host virtual flash resource, virtual machines with vSphere Flash Read Cache configured cannot be powered on or might experience unpredicted behavior if powered on. </description> <cause> <description> This might be caused by an unmounted VFFS volume or an APD/PDL on the VFFS volume. </description> <action> Check the backing VFFS volume connection status. For example, mount the unmounted volume or resolve the APD/PDL issues. The host virtual flash resource is accessible as long as the backing VFFS volume is accessible. 
</action> </cause> </EventLongDescription> EventExHost's virtual flash resource usage exceeds the threshold.warningHost's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.com.vmware.vc.host.problem.vFlashResource.reachthreshold|Host's virtual flash resource usage is more than {1}%.ExtendedEventVirtual flash resource is configured on the hostinfoVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostcom.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent|Virtual flash resource is configured on the hostExtendedEventVirtual flash resource is removed from the hostinfoVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostcom.vmware.vc.host.vFlash.VFlashResourceRemovedEvent|Virtual flash resource is removed from the hostEventExDefault virtual flash module is changed to {vFlashModule} on the hostinfoDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostcom.vmware.vc.host.vFlash.defaultModuleChangedEvent|Any new virtual Flash Read Cache configuration request will use {vFlashModule} as default virtual flash module. All existing virtual Flash Read Cache configurations remain unchanged. <EventLongDescription id="com.vmware.vc.host.vFlash.defaultModuleChangedEvent"> <description> The default virtual flash module has been changed. Any new virtual Flash Read Cache configuration uses the new default virtual flash module if undefined in configuration. All existing configurations will remain unchanged. </description> </EventLongDescription> ExtendedEventVirtual flash modules are loaded or reloaded on the hostinfoVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostcom.vmware.vc.host.vFlash.modulesLoadedEvent|Virtual flash modules are loaded or reloaded on the hostEventExEntity became healthyinfo{entityName} became healthycom.vmware.vc.infraUpdateHa.GreenHealthEvent|{entityName} became healthyEventExProvider has posted invalid health updateswarningProvider {providerName} has posted invalid health updatesProvider {providerName} has posted invalid health updatescom.vmware.vc.infraUpdateHa.InvalidUpdatesEvent|Provider {providerName} has posted invalid health updatesEventExProvider reported a healthy statusinfo{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}com.vmware.vc.infraUpdateHa.PostGreenHealthUpdateEvent|{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}EventExProvider reported a severely degraded statuswarning{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostRedHealthUpdateEvent|{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. 
Remediation suggested by {providerName}: {remediation}EventExProvider reported a moderately degraded statuswarning{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostYellowHealthUpdateEvent|{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}EventExEntity has entered quarantine modewarning{entityName} has entered quarantine modecom.vmware.vc.infraUpdateHa.QuarantineEvent|{entityName} has entered quarantine modeEventExEntity has exited quarantine modeinfo{entityName} has exited quarantine modecom.vmware.vc.infraUpdateHa.QuarantineRemovedEvent|{entityName} has exited quarantine modeEventExEntity became severely degradedwarning{entityName} became severely degradedcom.vmware.vc.infraUpdateHa.RedHealthEvent|{entityName} became severely degradedEventExProvider has stale updateswarningProvider {providerName} has not posted an update in {timeout} secondsProvider {providerName} has not posted an update in {timeout} secondscom.vmware.vc.infraUpdateHa.StaleUpdatesEvent|Provider {providerName} has not posted an update in {timeout} secondsEventExEntity has unknown health statewarning{entityName} has unknown health statecom.vmware.vc.infraUpdateHa.UnknownHealthEvent|{entityName} has unknown health stateEventExEntity became moderately degradedwarning{entityName} became moderately degradedcom.vmware.vc.infraUpdateHa.YellowHealthEvent|{entityName} became moderately degradedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters has failederrorvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterInstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterInstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters has failederrorvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUninstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster 
{computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterUninstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters has failederrorvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUpgradeFailedEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters is successfulinfovSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterUpgradeSuccessEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has succeededEventExvSphere APIs for I/O Filters (VAIO) host vendor provider registration has failed.errorvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderRegistrationFailedEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully registeredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredcom.vmware.vc.iofilter.HostVendorProviderRegistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredEventExFailed to unregister vSphere APIs for I/O Filters (VAIO) host vendor provider.errorFailed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderUnregistrationFailedEvent|Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. 
Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully unregisteredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredcom.vmware.vc.iofilter.HostVendorProviderUnregistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredExtendedEventIoFilterManager API invoked with untrusted certificate SSL trust policywarningIoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name}com.vmware.vc.iofilter.UntrustedCertificateEvent|IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventKey providers are backed up.infocom.vmware.vc.kms.crypto.AllBackedUp|All key providers are backed up.EventExKey creation failed on key provider.errorcom.vmware.vc.kms.crypto.KeyGenerateFail|Key creation failed on key provider {clusterName} with error code {errorCode}. Check log for details.EventExKey provider(s) are not backed up.errorcom.vmware.vc.kms.crypto.NotBackedUp|Key provider(s) {providerIds} are not backed up.EventExKey provider backup is suggested after it is updated.warningcom.vmware.vc.kms.crypto.NotBackedUpAfterUpdate|Key provider(s) {providerIds} are not backed up. Backup is suggested after updating a provider.EventExFailed to send keys because of key provider error.errorcom.vmware.vc.kms.crypto.SendKeyError.KMSClusterError|Failed to send keys {keys} because of KMS connection error.EventExFailed to send keys because keys are missing on key provider.errorcom.vmware.vc.kms.crypto.SendKeyError.KeyMissingOnKMS|Failed to send keys {keys} because of keys missing on key provider.EventExThe Trusted Key Provider is not available.warningcom.vmware.vc.kms.crypto.TrustAuthority.ClusterNotAvailable|The Trusted Key Provider {keyProviderId} is not available.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.ClusterUnhealthy|The Trusted Key Provider {keyProviderId} is unhealthy. Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.KmsUnhealthy|The key server {serverName} in the Trusted Key Provider {keyProviderId} is unhealthy. 
Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExKey Management Server is unreachableerrorcom.vmware.vc.kms.crypto.Unreachable|Key Management Server {serverName}({address}) is unreachableEventExRetrieved Key Management Server vendor information.infocom.vmware.vc.kms.crypto.Vendor|Key Management Server {serverName}({address}) vendor: {vendor}EventExVirtual NIC entered passthrough modeinfoNetwork passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is active on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent|Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExVirtual NIC exited passthrough modeinfoNetwork passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is inactive on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterExitedPassthroughEvent|Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExFailed to clone state for entity on extensionerrorFailed to clone state on extension {extensionName}com.vmware.vc.ovfconsumers.CloneOvfConsumerStateErrorEvent|Failed to clone state for the entity '{entityName}' on extension {extensionName}EventExFailed to retrieve OVF environment sections for VM on extensionerrorFailed to retrieve OVF environment sections from extension {extensionName}com.vmware.vc.ovfconsumers.GetOvfEnvironmentSectionsErrorEvent|Failed to retrieve OVF environment sections for VM '{vm.name}' from extension {extensionName}EventExUnable to power on VM after cloningerrorPowering on after cloning was blocked by an extension. Message: {description}com.vmware.vc.ovfconsumers.PowerOnAfterCloneErrorEvent|Powering on VM '{vm.name}' after cloning was blocked by an extension. 
Message: {description}EventExFailed to register entity on extensionerrorcom.vmware.vc.ovfconsumers.RegisterEntityErrorEvent|Failed to register entity '{entityName}' on extension {extensionName}EventExFailed to unregister entities on extensionerrorcom.vmware.vc.ovfconsumers.UnregisterEntitiesErrorEvent|Failed to unregister entities on extension {extensionName}EventExFailed to validate OVF descriptor on extensionerrorcom.vmware.vc.ovfconsumers.ValidateOstErrorEvent|Failed to validate OVF descriptor on extension {extensionName}ExtendedEventAnswer file exportedinfoAnswer file for host {host.name} has been exportedAnswer file for host {host.name} has been exportedAnswer file exportedcom.vmware.vc.profile.AnswerFileExportedEvent|Answer file for host {host.name} in datacenter {datacenter.name} has been exportedExtendedEventHost customization settings updatedinfoHost customization settings for host {host.name} have been updatedHost customization settings for host {host.name} have been updatedHost customization settings updatedcom.vmware.vc.profile.AnswerFileUpdatedEvent|Host customization settings for host {host.name} in datacenter {datacenter.name} have been updatedEventExResource pool renamedinfoResource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'com.vmware.vc.rp.ResourcePoolRenamedEvent|Resource pool '{oldName}' has been renamed to '{newName}'ExtendedEventDatastore maintenance mode operation canceledinfoThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledcom.vmware.vc.sdrs.CanceledDatastoreMaintenanceModeEvent|The datastore maintenance mode operation has been canceledExtendedEventDatastore cluster is healthyinfoDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthycom.vmware.vc.sdrs.ClearDatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} is healthyExtendedEventConfigured storage DRSinfoConfigured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.ConfiguredStorageDrsOnPodEvent|Configured storage DRS on datastore cluster {objectName}ExtendedEventDatastore cluster has datastores that belong to different SRM Consistency GroupswarningDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency Groupscom.vmware.vc.sdrs.ConsistencyGroupViolationEvent|Datastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsExtendedEventDatastore entered maintenance modeinfoDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modecom.vmware.vc.sdrs.DatastoreEnteredMaintenanceModeEvent|Datastore {ds.name} 
has entered maintenance modeExtendedEventDatastore is entering maintenance modeinfoDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modecom.vmware.vc.sdrs.DatastoreEnteringMaintenanceModeEvent|Datastore {ds.name} is entering maintenance modeExtendedEventDatastore exited maintenance modeinfoDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modecom.vmware.vc.sdrs.DatastoreExitedMaintenanceModeEvent|Datastore {ds.name} has exited maintenance modeEventExDatastore cluster has datastores shared across multiple datacenterswarningDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacenterscom.vmware.vc.sdrs.DatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersExtendedEventErrors encountered while datastore entering into maintenance modeerrorDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modecom.vmware.vc.sdrs.DatastoreMaintenanceModeErrorsEvent|Datastore {ds.name} encountered errors while entering maintenance modeExtendedEventStorage DRS disabledinfoDisabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsDisabledEvent|Disabled storage DRS on datastore cluster {objectName}EventExStorage DRS enabledinfoEnabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}com.vmware.vc.sdrs.StorageDrsEnabledEvent|Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}ExtendedEventStorage DRS invocation failederrorStorage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsInvocationFailedEvent|Storage DRS invocation failed on datastore cluster {objectName}ExtendedEventNew storage DRS recommendation generatedinfoA new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster 
{objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsNewRecommendationPendingEvent|A new storage DRS recommendation has been generated on datastore cluster {objectName}EventExDatastore cluster connected to host(s) that do not support storage DRSwarningDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRScom.vmware.vc.sdrs.StorageDrsNotSupportedHostConnectedToPodEvent|Datastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSExtendedEventPending storage recommendations were appliedinfoAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedcom.vmware.vc.sdrs.StorageDrsRecommendationApplied|All pending recommendations on datastore cluster {objectName} were appliedEventExStorage DRS migrated VM disksinfoStorage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStorageMigrationEvent|Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}EventExStorage DRS placed VM disksinfoStorage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStoragePlacementEvent|Storage DRS placed disks of VM {vm.name} on datastore {ds.name}EventExDatastore cluster createdinfoCreated datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodCreatedEvent|Created datastore cluster {objectName}EventExDatastore cluster deletedinfoRemoved datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodDestroyedEvent|Removed datastore cluster {objectName}EventExSIOC: pre-4.1 host connected to SIOC-enabled datastorewarningSIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. 
This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.com.vmware.vc.sioc.NotSupportedHostConnectedToDatastoreEvent|SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.ExtendedEventESXi VASA client certificate provision has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateProvisionFailure|ESXi VASA client certificate provision has failedExtendedEventESXi VASA client certificate provision has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateProvisionSuccess|ESXi VASA client certificate provision has succeededExtendedEventESXi VASA client certificate register to some/all VP(s) has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateRegisterFailure|ESXi VASA client certificate register to some/all VP(s) has failedExtendedEventESXi VASA client certificate register to VP(s) has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateRegisterSuccess|ESXi VASA client certificate register to VP(s) has succeededEventExSystem capability warning from storage providerwarningcom.vmware.vc.sms.LunCapabilityInitEvent|Storage provider [{providerName}] : system capability warning for {eventSubjectId} : {msgTxt}EventExSystem capability normal event from storage providerinfocom.vmware.vc.sms.LunCapabilityMetEvent|Storage provider [{providerName}] : system capability normal for {eventSubjectId}EventExSystem capability alert from storage providererrorcom.vmware.vc.sms.LunCapabilityNotMetEvent|Storage provider [{providerName}] : system capability alert for {eventSubjectId} : {msgTxt}EventExA Storage Alarm of type 'Object' cleared by the VASA providerinfocom.vmware.vc.sms.ObjectTypeAlarmClearedEvent|Storage provider [{providerName}] cleared a Storage Alarm of type 'Object' on {eventSubjectId} : {msgTxt}EventExAn alert on an object raised by the VASA providererrorcom.vmware.vc.sms.ObjectTypeAlarmErrorEvent|Storage provider [{providerName}] raised an alert type 'Object' on {eventSubjectId} : {msgTxt}EventExA warning on an object raised by the VASA providerwarningcom.vmware.vc.sms.ObjectTypeAlarmWarningEvent|Storage provider [{providerName}] raised a warning of type 'Object' on {eventSubjectId} : {msgTxt}EventExRegistering renewed VC Client Certificate failed for the VASA provider.errorcom.vmware.vc.sms.RegisterVcClientCertOnRenewalFailure|Registering renewed VC Client Certificate failed for VASA provider with url : {provider}.ExtendedEventRegistering renewed VC Client Certificate succeeded for all the VASA providers.infocom.vmware.vc.sms.RegisterVcClientCertOnRenewalSuccess|Registering renewed VC Client Certificate succeeded for all the VASA providers.EventExThin provisioning capacity threshold normal event from storage providerinfocom.vmware.vc.sms.ThinProvisionedLunThresholdClearedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold normal for {eventSubjectId}EventExThin provisioning capacity threshold alert from storage providererrorcom.vmware.vc.sms.ThinProvisionedLunThresholdCrossedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold alert for {eventSubjectId}EventExThin provisioning capacity threshold warning from storage providerwarningcom.vmware.vc.sms.ThinProvisionedLunThresholdInitEvent|Storage provider [{providerName}] : thin 
provisioning capacity threshold warning for {eventSubjectId}EventExStorage provider certificate will expire very shortlyerrorcom.vmware.vc.sms.VasaProviderCertificateHardLimitReachedEvent|Certificate for storage provider {providerName} will expire very shortly. Expiration date : {expiryDate}EventExVASA Provider certificate is renewedinfocom.vmware.vc.sms.VasaProviderCertificateRenewalEvent|VASA Provider certificate for {providerName} is renewedEventExStorage provider certificate will expire soonwarningcom.vmware.vc.sms.VasaProviderCertificateSoftLimitReachedEvent|Certificate for storage provider {providerName} will expire soon. Expiration date : {expiryDate}EventExStorage provider certificate is validinfocom.vmware.vc.sms.VasaProviderCertificateValidEvent|Certificate for storage provider {providerName} is validEventExStorage provider is connectedinfocom.vmware.vc.sms.VasaProviderConnectedEvent|Storage provider {providerName} is connectedEventExStorage provider is disconnectederrorcom.vmware.vc.sms.VasaProviderDisconnectedEvent|Storage provider {providerName} is disconnectedEventExRefreshing CA certificates and CRLs failed for some VASA providerserrorcom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsFailure|Refreshing CA certificates and CRLs failed for VASA providers with url : {providerUrls}ExtendedEventRefreshing CA certificates and CRLs succeeded for all registered VASA providers.infocom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsSuccess|Refreshing CA certificates and CRLs succeeded for all registered VASA providers.EventExOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate failed for the VASA 5.0 or greater VASA providers.errorcom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationFailure|On VMCA Root Certificate rotation, register and refresh certificates failed for VASA 5.0 or greater VASA provider : {provider}ExtendedEventOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate succeeded for all the VASA 5.0 or greater VASA providers.infocom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationSuccess|On VMCA Root Certificate rotation, register and refresh certificates succeeded for all the VASA 5.0 or greater VASA providers.EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vmName} connected to {datastore.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vmName} connected to datastore {datastore.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vmName} connected to {datastore.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vmName} connected to {datastore.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in 
{datacenter.name} compliance status is unknown from storage provider {providerName}.EventExHealth event from storage providerinfocom.vmware.vc.sms.provider.health.event|Storage provider [{providerName}] : health event for {eventSubjectId} : {msgTxt}EventExSystem event from storage providerinfocom.vmware.vc.sms.provider.system.event|Storage provider [{providerName}] : system event : {msgTxt}EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} compliance status is unknown from storage provider {providerName}.EventExProfile association/dissociation failederrorProfile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}com.vmware.vc.spbm.ProfileAssociationFailedEvent|Profile association/dissociation failed for {entityName}EventExConfiguring storage policy failed.errorConfiguring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}com.vmware.vc.spbm.ServiceErrorEvent|Configuring storage policy failed for VM {entityName}. 
Verify that SPBM service is healthy. Fault Reason : {errorMessage}ExtendedEventQuick stats is not up-to-dateinfoQuick stats on {host.name} in {computeResource.name} is not up-to-dateQuick stats on {host.name} is not up-to-dateQuick stats on {host.name} is not up-to-datecom.vmware.vc.stats.HostQuickStatesNotUpToDateEvent|Quick stats on {host.name} in {computeResource.name} in {datacenter.name} is not up-to-date <EventLongDescription id="com.vmware.vc.stats.HostQuickStatesNotUpToDateEvent"> <description> Quick stats on the host are not up-to-date. </description> <cause> <description> Quickstats on the host are not up-to-date. This is expected if the host was recently added or reconnected or VC just started up. </description> <action> No specific action needs to be taken. </action> </cause> </EventLongDescription> EventExODBC errorerrorcom.vmware.vc.stats.StatsInsertErrorEvent|Stats insertion failed for entity {entity} due to ODBC error. <EventLongDescription id="com.vmware.vc.stats.StatsInsertErrorEvent"> <description> If a set of performance statistics data insertion fails due to database related issues, this event is logged. </description> <cause> <description>Usually an attempt to insert duplicate entries causes this event.</description> <action>Usually it is transient and self-healing. If not, the database probably contains rogue entries. Manually deleting the data for the particular stat provider might fix the issue.</action> </cause> </EventLongDescription> EventExRoot user password expired.errorcom.vmware.vc.system.RootPasswordExpiredEvent|Root user password has expired. Log in to https://{pnid}:5480 to update the root password.EventExRoot user password is about to expire.warningcom.vmware.vc.system.RootPasswordExpiryEvent|Root user password expires in {days} days. 
Log in to https://{pnid}:5480 to update the root password.ExtendedEventFT Disabled VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection will protect this virtual machine as non-FT virtual machine because the FT state is disabledcom.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because the FT state is disabledExtendedEventFailover FT VM due to component failureinfoFT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} on host {host.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} is going to fail over to Secondary VM due to component failureFT Primary VM is going to fail over to Secondary VM due to component failurecom.vmware.vc.vcp.FtFailoverEvent|FT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is going to fail over to Secondary VM due to component failure ExtendedEventFT VM failover failederrorFT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} failed to failover to secondaryFT virtual machine {vm.name} on host {host.name} failed to failover to secondaryFT virtual machine {vm.name} failed to failover to secondaryFT virtual machine failed to failover to secondarycom.vmware.vc.vcp.FtFailoverFailedEvent|FT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to failover to secondaryExtendedEventRestarting FT secondary due to component failureinfoHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine due to component failurecom.vmware.vc.vcp.FtSecondaryRestartEvent|HA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to component failureExtendedEventFT secondary VM restart failederrorFT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} failed to restartFT Secondary VM {vm.name} on host {host.name} failed to restartFT Secondary VM {vm.name} failed to restartFT Secondary VM failed to restartcom.vmware.vc.vcp.FtSecondaryRestartFailedEvent|FT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartExtendedEventNeed secondary VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine since it has been in the needSecondary 
state too longHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects this virtual machine as non-FT virtual machine because it has been in the needSecondary state too longcom.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because it has been in the needSecondary state too longEventExVM Component Protection test endsinfoVM Component Protection test ends on host {host.name} in cluster {computeResource.name}VM Component Protection test ends on host {host.name}VM Component Protection test endscom.vmware.vc.vcp.TestEndEvent|VM Component Protection test ends on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}EventExVM Component Protection test startsinfoVM Component Protection test starts on host {host.name} in cluster {computeResource.name}VM Component Protection test starts on host {host.name}VM Component Protection test startscom.vmware.vc.vcp.TestStartEvent|VM Component Protection test starts on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventNo action on VMinfoHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} due to the feature configuration settingHA VM Component Protection did not take action due to the feature configuration settingcom.vmware.vc.vcp.VcpNoActionEvent|HA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to the feature configuration settingEventExVirtual machine lost datastore accesserrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {datastore}Virtual machine {vm.name} on host {host.name} lost access to {datastore}Virtual machine {vm.name} lost access to {datastore}Virtual machine lost access to {datastore}com.vmware.vc.vcp.VmDatastoreFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {datastore}EventExVirtual machine lost VM network accessibilityerrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {network}Virtual machine {vm.name} on host {host.name} lost access to {network}Virtual machine {vm.name} lost access to {network}Virtual machine lost access to {network}com.vmware.vc.vcp.VmNetworkFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {network}EventExVM power off hangerrorHA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} on 
host {host.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine successfully after trying {numTimes} times and will keep tryingcom.vmware.vc.vcp.VmPowerOffHangEvent|HA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} successfully after trying {numTimes} times and will keep tryingExtendedEventRestarting VM due to component failureinfoHA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failureHA VM Component Protection is restarting virtual machine due to component failurecom.vmware.vc.vcp.VmRestartEvent|HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventVirtual machine affected by component failure failed to restarterrorVirtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} failed to restartVirtual machine {vm.name} affected by component failure on host {host.name} failed to restartVirtual machine {vm.name} affected by component failure failed to restartVirtual machine affected by component failure failed to restartcom.vmware.vc.vcp.VmRestartFailedEvent|Virtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartEventExNo candidate host to restarterrorHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for this virtual machine after waiting {numSecWait} seconds and will keep tryingcom.vmware.vc.vcp.VmWaitForCandidateHostEvent|HA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} after waiting {numSecWait} seconds and will keep tryingEventExCertificate will expire soon.warningcom.vmware.vc.vecs.CertExpirationEvent|Certificate '{subject}' from '{store}' expires on {expiryDate}EventExKMS Client Certificate will expire soon.warningcom.vmware.vc.vecs.KMSClientCertExpirationEvent|KMS Client Certificate '{subject}' expires on {expiryDate}EventExKMS Server Certificate will expire soon.warningcom.vmware.vc.vecs.KMSServerCertExpirationEvent|KMS Server Certificate '{subject}' expires on {expiryDate}EventExOperation on the SSD device failederrorConfiguration on disk {disk.path} failed. 
Reason : {fault.msg}com.vmware.vc.vflash.SsdConfigurationFailedEvent|Configuration on disk {disk.path} failed. Reason : {fault.msg}EventExVirtual machine is locked because an error occurred on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KMSClusterError|Virtual machine {vmName} is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the host.errorVirtual machine is locked because keys were missing on the host {host}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnHost|Virtual machine {vmName} is locked because keys were missing on the host {host}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnKMS|Virtual machine {vmName} is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because the required Trusted Key Provider(s) is unavailable.errorVirtual machine is locked. Before unlocking, check the status of Trusted Key Provider(s) {kmsCluster} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).com.vmware.vc.vm.Crypto.VMLocked.TAKMSClusterUnavaliable|Virtual machine {vmName} is locked. 
Before unlocking, check the status of Trusted Key Provider(s) {keyProviderId} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).EventExVirtual machine is locked because Trust Authority managed key(s) are missing on the required host.errorVirtual machine is locked because Trust Authority managed key(s) are missing on host {host}.com.vmware.vc.vm.Crypto.VMLocked.TAKeyMissingOnHost|Virtual machine {vmName} is locked because Trust Authority managed key(s) {missedkeys} are missing on the required host {host}.EventExVirtual machine is unlocked.infoVirtual machine is unlocked.com.vmware.vc.vm.Crypto.VMUnlocked|Virtual machine {vmName} is unlocked.EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}com.vmware.vc.vm.DstVmClonedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was cloned from {oldMoRef}EventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}com.vmware.vc.vm.DstVmMigratedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was migrated from {oldMoRef}ExtendedEventVirtual machine PMem bandwidth usage is normalinfoVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalThe virtual machine's PMem bandwidth usage is normalcom.vmware.vc.vm.PMemBandwidthGreen|Virtual machine {vm.name}'s PMem bandwidth usage is normalExtendedEventVirtual machine PMem bandwidth usage is highwarningVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highThe virtual machine's PMem bandwidth usage is highcom.vmware.vc.vm.PMemBandwidthYellow|Virtual machine {vm.name}'s PMem bandwidth usage is highExtendedEventVirtual machine failed to power on after cloning.errorVirtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after performing cloning operation on this host.Virtual machine failed to power on after cloning.com.vmware.vc.vm.PowerOnAfterCloneErrorEvent|Virtual machine {vm.name} failed to power on after cloning on host {host.name} in datacenter {datacenter.name}EventExVirtual machine clone failederrorVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}"Virtual machine on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in 
{destDatacenter.name}com.vmware.vc.vm.SrcVmCloneFailedEvent|Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {oldMoRef} in {computeResource.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} on host {host.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}com.vmware.vc.vm.SrcVmClonedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} was cloned to {newMoRef}ExtendedEventVirtual machine failed to create instant clone childerrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childcom.vmware.vc.vm.SrcVmForkFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to create instant clone childEventExVirtual machine migration failederrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to migrateVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migratecom.vmware.vc.vm.SrcVmMigrateFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to migrateEventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} on {host.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}com.vmware.vc.vm.SrcVmMigratedEvent|Virtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} in {datacenter.name} was migrated to {newMoRef}ExtendedEventTemplate converted to VMinfoTemplate {vm.name} converted to VM on {host.name}Template {vm.name} converted to VM on {host.name}Template {vm.name} converted to VMConverted to VM on {host.name}com.vmware.vc.vm.TemplateConvertedToVmEvent|Template {vm.name} converted to VM on {host.name} in {datacenter.name}ExtendedEventVirtual machine tier 1 bandwidth usage is normalinfoVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalThe virtual machine's tier 1 bandwidth usage is normalcom.vmware.vc.vm.Tier1BandwidthGreen|Virtual machine {vm.name}'s tier 1 bandwidth usage is normalExtendedEventVirtual machine tier 1 bandwidth usage is highwarningVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highThe virtual machine's tier 1 bandwidth usage is highcom.vmware.vc.vm.Tier1BandwidthYellow|Virtual machine {vm.name}'s tier 1 bandwidth usage is highExtendedEventThe network adapter of VM successfully activate UPTinfoUPT on network adapter is activatedcom.vmware.vc.vm.Uptv2Active|The UPT is successfully activated on the network adapterEventExThe network adapter of VM fails to 
activate UPTwarningUPT on network adapter is not activatedcom.vmware.vc.vm.Uptv2Inactive|The UPT failed to activate on the network adapter.{details}EventExVirtual NIC reservation is not satisfiederrorReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is not satisfiedReservation of Virtual NIC {deviceLabel} is not satisfiedcom.vmware.vc.vm.VmAdapterResvNotSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is not satisfiedEventExVirtual NIC reservation is satisfiedinfoReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is satisfiedReservation of Virtual NIC {deviceLabel} is satisfiedcom.vmware.vc.vm.VmAdapterResvSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is satisfiedExtendedEventVM marked as templateinfoVM {vm.name} marked as template on {host.name}VM {vm.name} marked as template on {host.name}VM {vm.name} marked as templateMarked as template on {host.name}com.vmware.vc.vm.VmConvertedToTemplateEvent|VM {vm.name} marked as template on {host.name} in {datacenter.name}ExtendedEventPromoted disks of virtual machine successfullyinfoPromoted disks of virtual machine {vm.name} in {computeResource.name}Promoted disks of virtual machine {vm.name} on host {host.name}Promoted disks of virtual machine {vm.name}Promoted disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotedEvent|Promoted disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventPromoting disks of virtual machineinfoPromoting disks of virtual machine {vm.name} in {computeResource.name}Promoting disks of virtual machine {vm.name} on host {host.name}Promoting disks of virtual machine {vm.name}Promoting disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotingEvent|Promoting disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}EventExHot migrating virtual machine with encryptioninfoHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating from {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptioncom.vmware.vc.vm.VmHotMigratingWithEncryptionEvent|Hot migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost}, {destDatastore} in {destDatacenter} with encryptionEventExcom.vmware.vc.vm.VmMigratingWithEncryptionEvent|ExtendedEventFailed to promote disks of virtual machineinfoFailed to promote disks of virtual machine {vm.name} in {computeResource.name}Failed to promote disks of virtual machine {vm.name} on host {host.name}Failed to promote disks of virtual machine {vm.name}Failed to promote disks of virtual machine {vm.name}com.vmware.vc.vm.VmPromoteDisksFailedEvent|Failed to promote disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventReconfigure VM failed 
for {VM} on shared diskwarningReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskcom.vmware.vc.vm.VmReconfigureFailedonSharedDiskEvent|Reconfigure VM failed for {VM} on shared diskExtendedEventVirtual machine register failederrorVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on this host failedVirtual machine registration failedcom.vmware.vc.vm.VmRegisterFailedEvent|Virtual machine {vm.name} registration on {host.name} in datacenter {datacenter.name} failedEventExFailed to revert the virtual machine state to a snapshoterrorFailed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} on host {host.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine to snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateFailedToRevertToSnapshot|Failed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}EventExThe virtual machine state has been reverted to a snapshotinfoThe execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} on host {host.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateRevertedToSnapshot|The execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}EventExFault Tolerance virtual machine syncing to secondary with encryptioninfoFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM syncing to secondary on {dstHost} with encryptioncom.vmware.vc.vm.VmSyncingWithEncryptionEvent|Fault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionExtendedEventVirtual machine termination requestedinfoVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine termination requestedcom.vmware.vc.vm.VmTerminateEvent|Virtual machine {vm.name} termination requestedExtendedEventVirtual machine termination failederrorVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine termination failedcom.vmware.vc.vm.VmTerminateFailedEvent|Virtual 
machine {vm.name} termination failedEventExThe disk device is encrypted with mixed keys.warningThe disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.com.vmware.vc.vm.crypto.DiskchainUsingMixedKeys|The disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.EventExCryptographic operation failed due to insufficient disk space on datastoreerrorCryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.com.vmware.vc.vm.crypto.NoDiskSpace|Cryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.EventExcom.vmware.vc.vm.crypto.RekeyFail|ExtendedEventApplication Monitoring Is Not SupportedwarningApplication monitoring is not supported on {host.name} in cluster {computeResource.name}Application monitoring is not supported on {host.name}Application monitoring is not supportedcom.vmware.vc.vmam.AppMonitoringNotSupported|Application monitoring is not supported on {host.name} in cluster {computeResource.name} in {datacenter.name}EventExvSphere HA detected application heartbeat status changewarningvSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent|vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent"> <description> Application monitoring state changes indicate a change in the health of the application being monitored or in the application-monitoring process. A transition from gray to green occurs when application heartbeat is being enabled from within the guest. A transition to red occurs after vSphere HA didn't receive any heartbeats within 30 seconds. A transition from red to green occurs if heartbeats begin again before vSphere HA can react. A transition to gray occurs after application heartbeating is disabled from within the guest. </description> <cause> <description> Either the user initiated action from inside the guest or vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> If the state transitions to red, investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. Frequent state transitions to or from gray may indicate a problem with the application-monitoring agent. If they occur, investigate whether the enabling/disabling of monitoring is expected. 
</action> </cause> </EventLongDescription> EventExvSphere HA detected application state changewarningvSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthStateChangedEvent|vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthStateChangedEvent"> <description> Application state changes indicate that an in-guest application has posted one of the two allowed values - appStateOk or appStateNeedReset. The former indicates that the monitored application is fine, the latter causes an immediate reset if Application Monitoring is enabled for this virtual machine. </description> <cause> <description> This is an in-guest initiated action. </description> <action> If vSphere HA and Application Monitoring are enabled for this virtual machine, it is reset if the state is appStateNeedReset. If the virtual machine is being migrated using vMotion the reset will be delayed until the virtual machine has reached its destination. Also, the reset will be delayed until the datastore connectivity issues are resolved. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected application heartbeat failurewarningvSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected application heartbeat failure for {vm.name} on {host.name}vSphere HA detected application heartbeat failure for {vm.name}vSphere HA detected application heartbeat failure for this virtual machinecom.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent|vSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent"> <description> vSphere HA has detected a heartbeat failure from the application-monitoring agent inside the guest. If application monitoring is enabled in vSphere the virtual machine will be reset. </description> <cause> <description> vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> Investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. </action> </cause> </EventLongDescription> EventExvCenter server replication status has changed.infocom.vmware.vc.vmdir.ReplicationStatusChangeEvent|vCenter Server Replication Status : {replicationStatus} . 
{message}EventExvCenter server replication state has changedinfocom.vmware.vc.vmdir.StateChangeEvent|vCenter Server Replication State changed to '{newState}' from '{oldState}' cause: {reason}EventExvSAN datastore {datastoreName} does not have capacityerrorvSAN datastore {datastoreName} in cluster {computeResource.name} does not have capacityvSAN datastore {datastoreName} does not have capacitycom.vmware.vc.vsan.DatastoreNoCapacityEvent|vSAN datastore {datastoreName} in cluster {computeResource.name} in datacenter {datacenter.name} does not have capacity <EventLongDescription id="com.vmware.vc.vsan.DatastoreNoCapacityEvent"> <description> vSAN datastore does not have capacity. </description> <cause> <description> This might be because no disk is configured for vSAN, local disks configured for vSAN service become inaccessible or flash disks configured for vSAN service become inaccessible. </description> <action> Check if vSAN storage configuration is correct and if the local disks and flash disks configured for vSAN service are accessible. </action> </cause> </EventLongDescription> EventExHost cannot communicate with one or more other nodes in the vSAN enabled clustererrorHost {host.name} in cluster {computeResource.name} cannot communicate with all other nodes in the vSAN enabled clusterHost {host.name} cannot communicate with all other nodes in the vSAN enabled clusterHost cannot communicate with one or more other nodes in the vSAN enabled clustercom.vmware.vc.vsan.HostCommunicationErrorEvent|Host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} cannot communicate with all other nodes in the vSAN enabled cluster <EventLongDescription id="com.vmware.vc.vsan.HostCommunicationErrorEvent"> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. </description> <cause> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. This might be caused by network partition or misconfiguration. Each host needs at least one vmnic with vSAN enabled. Those vmnics need to be on the same physical network. The host should have the vSAN service enabled. </description> <action> Check the host for vSAN service configuration, vSAN network configuration and network connection. </action> </cause> </EventLongDescription> ExtendedEventHost with vSAN service enabled is not in the vCenter clustererror{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name}{host.name} with vSAN service enabled is not in the vCenter clusterHost with vSAN service enabled is not in the vCenter clustercom.vmware.vc.vsan.HostNotInClusterEvent|{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.HostNotInClusterEvent"> <description> Host with the vSAN service enabled is not in the vCenter cluster. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN service is not enabled with the recommended interface. </description> <action> Add the host into the cluster or disable vSAN on the host. </action> </cause> </EventLongDescription> ExtendedEventHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. 
Please check recent tasks for more detailserror{host.name} is in a vSAN cluster {computeResource.name} but does not have vSAN service enabled{host.name} is in a vSAN cluster but does not have vSAN service enabledHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. Please check recent tasks for more detailscom.vmware.vc.vsan.HostNotInVsanClusterEvent|{host.name} is in a vSAN enabled cluster {computeResource.name} in datacenter {datacenter.name} but does not have vSAN service enabled <EventLongDescription id="com.vmware.vc.vsan.HostNotInVsanClusterEvent"> <description> Host is in a vSAN enabled cluster but does not have vSAN service enabled. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN is not enabled with the recommended interface or the vSAN configuration is not set up appropriately. </description> <action> Re-enable vSAN or check the vSAN configuration. </action> </cause> </EventLongDescription> EventExvSAN host vendor provider registration has failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent"> <description> Cannot deregister host vendor provider in Storage management service </description> <cause> <description>Host vendor provider deregistration failed</description> <action>Check if Storage management service is running</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider has been successfully unregisteredinfovSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredcom.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully unregistered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent"> <description> Deregistered host vendor provider from Storage management service </description> </EventLongDescription> EventExvSAN host vendor provider registration failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. 
<EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent"> <description> Cannot register host vendor provider in Storage management service </description> <cause> <description>Host vendor provider registration failed</description> <action>Check if Storage management service is running</action> <action>Check if the vendor provider on host is running</action> <action>Check if there are network connectivity issues between host and VC</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider registration succeededinfovSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredcom.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully registered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent"> <description> Registered host vendor provider in Storage management service </description> </EventLongDescription> ExtendedEventvSAN network is not configurederrorvSAN network is not configured on {host.name} in cluster {computeResource.name}vSAN network is not configured on {host.name}vSAN network is not configuredcom.vmware.vc.vsan.NetworkMisConfiguredEvent|vSAN network is not configured on {host.name}, in cluster {computeResource.name}, and in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.NetworkMisConfiguredEvent"> <description> vSAN network is not configured. </description> <cause> <description> vSAN network is not set up appropriately. vSAN datastore will not be formed as expected. </description> <action> Create at least one vmnic with vSAN enabled on the host. </action> </cause> </EventLongDescription> EventExFound another host participating in the vSAN service which is not a member of this host's vCenter clustererrorFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter cluster {computeResource.name}Found host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clusterFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clustercom.vmware.vc.vsan.RogueHostFoundEvent|Found host(s) {hostString} participating in the vSAN service in cluster {computeResource.name} in datacenter {datacenter.name} is not a member of this host's vCenter cluster <EventLongDescription id="com.vmware.vc.vsan.RogueHostFoundEvent"> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. </description> <cause> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. This might be caused by misconfiguration. </description> <action> Add the rogue host into the cluster or disable vSAN on the rogue host. </action> </cause> </EventLongDescription> EventExFailed to turn off the disk locator LEDerrorFailed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOffFailedEvent|Failed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}EventExFailed to turn on the disk locator LEDerrorFailed to turn on the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOnFailedEvent|Failed to turn on the locator LED of disk {disk.path}. 
Reason : {fault.msg}EventExvSAN cluster needs disk format upgradewarningvSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationvSAN cluster has one or more hosts for which disk format upgrade is recommended: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationcom.vmware.vc.vsan.VsanHostNeedsUpgradeEvent|vSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationEventExUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}errorUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}com.vmware.vc.vtpm.FailedProcessingVTpmCertsEvent|Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}ExtendedEventA compute policy has been createdinfocom.vmware.vcenter.compute.policies.createEvent|Compute policy {policyName} has been createdExtendedEventA compute policy has been deletedinfocom.vmware.vcenter.compute.policies.deleteEvent|Compute policy {policyName} has been deletedEventExDatabase replication state changed: sync, async or no replicationinfocom.vmware.vcha.DB.replication.state.changed|Database replication mode changed to {newState}EventExThe management interface (NIC0) IP address you specified as for the Passive node is different than the original IP address used to configure vCenter HA. You must use the same IP address.errorcom.vmware.vcha.cluster.differentFailoverIp|The management interface (NIC0) IP address you specified as {given} for the Passive node is different than the original IP address {original} used to configure vCenter HA. You must use the same IP address.EventExvCenter HA cluster mode changedinfocom.vmware.vcha.cluster.mode.changed|vCenter HA cluster mode changed to {clusterMode}ExtendedEventUnable to enable mode.errorcom.vmware.vcha.cluster.modeEnableFailed|Unable to enable mode.EventExThe hostname for a node does not map to the vCenter Server PNID.errorcom.vmware.vcha.cluster.pnidHostnameMismatch|The hostname for {nodeIp} does not map to the vCenter Server PNID. 
Review the hostname you specified during the VM clone customization step.ExtendedEventVerify if the Passive and the Witness nodes are up and reachable.errorcom.vmware.vcha.cluster.quorumNotCloned|Verify if the Passive and the Witness nodes are up and reachable.EventExUnable to SSH to a node.errorcom.vmware.vcha.cluster.sshConnectFailed|Unable to SSH to {nodeIp}.ExtendedEventvCenter HA cluster state is currently degradedwarningcom.vmware.vcha.cluster.state.degraded|vCenter HA cluster state is currently degradedExtendedEventvCenter HA cluster is destroyedinfocom.vmware.vcha.cluster.state.destroyed|vCenter HA cluster is destroyedExtendedEventvCenter HA cluster state is currently healthyinfocom.vmware.vcha.cluster.state.healthy|vCenter HA cluster state is currently healthyExtendedEventvCenter HA cluster state is currently isolatederrorcom.vmware.vcha.cluster.state.isolated|vCenter HA cluster state is currently isolatedExtendedEventUnable to get vpxd hostname.errorcom.vmware.vcha.cluster.vcFqdnUnavailable|Unable to get vpxd hostname.ExtendedEventFailover cannot proceed when cluster is in disabled modewarningcom.vmware.vcha.failover.failed.disabled.mode|Failover cannot proceed when cluster is in disabled modeExtendedEventFailover cannot proceed when cluster does not have all three nodes connectedwarningcom.vmware.vcha.failover.failed.node.lost|Failover cannot proceed when cluster does not have all three nodes connectedExtendedEventFailover cannot proceed when Passive node is not ready to takeoverwarningcom.vmware.vcha.failover.failed.passive.not.ready|Failover cannot proceed when vPostgres on Passive node is not ready to takeoverExtendedEventFailover did not succeed. Failed to flush the data to the Passive nodewarningcom.vmware.vcha.failover.flush.failed.degraded|Failover did not succeed. 
Failed to flush the data to the Passive nodeExtendedEventFailover failure is acknowledgedinfocom.vmware.vcha.failover.flush.failed.healthy|Failover failure is acknowledgedExtendedEventFailover status is unknowninfocom.vmware.vcha.failover.flush.failed.unknown|Failover status is unknownExtendedEventFailover succeededinfocom.vmware.vcha.failover.succeeded|Failover succeededEventExAppliance File replication state changedinfocom.vmware.vcha.file.replication.state.changed|Appliance {fileProviderType} is {state}EventExThis node was forcefully converted to the Active nodeinfocom.vmware.vcha.force.reset.active|Node {nodename} was forcefully converted to the Active nodeEventExOne node joined back to the clusterinfocom.vmware.vcha.node.joined|Node {nodeName} joined back to the clusterEventExOne node left the clusterwarningcom.vmware.vcha.node.left|Node {nodeName} left the clusterExtendedEventPSC HA state is currently degradedinfocom.vmware.vcha.psc.ha.health.degraded|PSC HA state is currently degradedExtendedEventPSC HA state is currently healthyinfocom.vmware.vcha.psc.ha.health.healthy|PSC HA state is currently healthyExtendedEventPSC HA state is not being monitoredinfocom.vmware.vcha.psc.ha.health.unknown|PSC HA is not monitored after vCenter HA cluster is destroyedExtendedEventVMware Directory Service health is currently degradedwarningcom.vmware.vcha.vmdir.health.degraded|VMware Directory Service health is currently degradedExtendedEventVMware Directory Service is currently healthyinfocom.vmware.vcha.vmdir.health.healthy|VMware Directory Service is currently healthyExtendedEventVMware Directory Service health is not being monitoredinfocom.vmware.vcha.vmdir.health.unknown|VMware Directory Service health is not being monitoredExtendedEventvSphere Cluster Services mode is system managed on cluster.infocom.vmware.vcls.cluster.DeploymentModeSystemManagedEvent|vSphere Cluster Services mode is system managed on cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.infocom.vmware.vcls.cluster.DrsDisabledHaDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-enabled cluster.errorcom.vmware.vcls.cluster.DrsEnabledDeployModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-enabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.DrsEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.ExtendedEventvSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.warningcom.vmware.vcls.cluster.HaEnabledDrsDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.HaEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. 
HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.ExtendedEventVSAN VASA provider became available.infocom.vmware.vcls.cluster.VsanProviderAvailableEvent|VSAN VASA provider became available.ExtendedEventTimed out waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.VsanProviderTimedoutEvent|Timed out waiting for VSAN VASA provider availability.EventExA Data Processing Unit is down.infoA Data Processing Unit is down.com.vmware.vim.dpu.down|The Data Processing Unit with id '{dpuId}' is down.EventExA Data Processing Unit has been removed from the system.infoA Data Processing Unit has been removed from the system.com.vmware.vim.dpu.removed|The Data Processing Unit with id '{dpuId}' has been removed from the system.EventExThe management state for a Data Processing Unit has changed.infoThe management state for a Data Processing Unit has changed.com.vmware.vim.dpu.state.changed|The management state for the Data Processing Unit with id '{dpuId}' has changed to '{state}'.EventExThe dpu failover ended on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.com.vmware.vim.dpuFailover.end|DPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.EventExThe dpu failover started on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.com.vmware.vim.dpuFailover.start|DPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.ExtendedEventInvalid UTF-8 string encountered.warningInvalid UTF-8 string encountered.com.vmware.vim.utf8filter.badvalue|Invalid UTF-8 string encountered.ExtendedEventSome of the disks of the virtual machine failed to load. The information present for them in the virtual machine configuration may be incompletewarningSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} failed to load. The information present for them in the virtual machine configuration may be incompletecom.vmware.vim.vm.DisksNotLoaded|Some of the disks of the virtual machine {vm.name} on host {host.name} failed to load. 
The information present for them in the virtual machine configuration may be incompleteExtendedEventSnapshot operations are not allowed due to some of the snapshot related objects failed to load.warningSnapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.com.vmware.vim.vm.SnapshotNotAllowed|Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.ExtendedEventVirtual machine reboot converted to power off because the rebootPowerOff option is enabledinfoReboot converted to power off on virtual machine {vm.name}.Reboot converted to power off.com.vmware.vim.vm.reboot.powerOff|Reboot converted to power off on virtual machine {vm.name} on {host.name} because the rebootPowerOff option is enabled.EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' bound to provider '{providerName}'EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vm|vService dependency '{dependencyName}' on '{vm.name}' bound to provider '{providerName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vApp|Created vService dependency '{dependencyName}' with type '{dependencyType}' on vApp '{targetName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vm|Created vService dependency '{dependencyName}' with type '{dependencyType}' on '{vm.name}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vApp|Destroyed vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vm|Destroyed vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vApp|Reconfigured vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vm|Reconfigured vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' unbound from provider '{providerName}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vm|vService dependency '{dependencyName}' on '{vm.name}' unbound from provider '{providerName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vApp|Updated vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vm|Updated vService dependency '{dependencyName}' on '{vm.name}'EventExvService provider registeredinfocom.vmware.vim.vsm.provider.register|vService provider '{providerName}' with type '{providerType}' registered for extension '{extensionKey}'EventExvService provider unregisteredinfocom.vmware.vim.vsm.provider.unregister|vService provider '{providerName}' with type '{providerType}' unregistered for extension '{extensionKey}'EventExvService provider 
updatedinfocom.vmware.vim.vsm.provider.update|Updating vService provider '{providerName}' registered for extension '{extensionKey}'EventExDeleting stale vdisks generated by FCD migration failed.errorcom.vmware.vslm.DeleteStaleDiskFailureEvent|Deleting stale vdisk {diskPath} and related files generated as part of FCD migration failed. Try to delete them manually.EventExRegistering vdisk as FCD at source failed during rollback of unsuccessful migration.errorcom.vmware.vslm.RegisterDiskFailed|Registering {fcdPath} with name {fcdName} as FCD at source failed during rollback of unsuccessful migration. Try to register it manually using RegisterDisk API.EventExUnregistering of vdisk at destination failed during rollback of unsuccessful migration.errorcom.vmware.vslm.UnRegisterDiskFailed|Unregistering of FCD {fcdId} failed at destination during rollback of unsuccessful migration. Reconcile of datastore {datastore} should fix inconsistencies if any.EventExConnectivity check completedinfocom.vmware.vsphere.client.security.ConnectivityCheckEvent|Connectivity check completed. Operation: {Operation}. Subscription status: {SubscriptionCheckResult}. Connectivity status: {ConnectivityCheckResult}. Access type: {AccessType}. User: {Username}ExtendedEventDatastore is accessible to all hosts under the cluster.infocom.vmware.wcp.Datastore.accessible|Datastore is accessible to all hosts under the clusterExtendedEventDatastore not accessible to all hosts under the cluster.warningcom.vmware.wcp.Datastore.inaccessible|Datastore not accessible to all hosts under the cluster.EventExRemote access for an ESXi local user account has been locked temporarilly due to multiple failed login attempts.warningesx.audit.account.locked|Remote access for ESXi local user account '{1}' has been locked for {2} seconds after {3} failed login attempts.EventExMultiple remote login failures detected for an ESXi local user account.warningesx.audit.account.loginfailures|Multiple remote login failures detected for ESXi local user account '{1}'.ExtendedEventRestoring factory defaults through DCUI.warningesx.audit.dcui.defaults.factoryrestore|The host has been restored to default factory settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventThe DCUI has been disabled.infoesx.audit.dcui.disabled|The DCUI has been disabled.ExtendedEventThe DCUI has been enabled.infoesx.audit.dcui.enabled|The DCUI has been enabled.ExtendedEventRebooting host through DCUI.warningesx.audit.dcui.host.reboot|The host is being rebooted through the Direct Console User Interface (DCUI).ExtendedEventShutting down host through DCUI.warningesx.audit.dcui.host.shutdown|The host is being shut down through the Direct Console User Interface (DCUI).ExtendedEventRestarting host agents through DCUI.infoesx.audit.dcui.hostagents.restart|The management agents on the host are being restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExLogin authentication on DCUI failederroresx.audit.dcui.login.failed|Authentication of user {1} has failed. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExDCUI login password changed.infoesx.audit.dcui.login.passwd.changed|Login password for user {1} has been changed. 
Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventFactory network settings restored through DCUI.warningesx.audit.dcui.network.factoryrestore|The host has been restored to factory network settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExRestarting network through DCUI.infoesx.audit.dcui.network.restart|A management interface {1} has been restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.esx.audit.entropy.available.low|Host is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.esx.audit.entropy.external.source.disconnected|Host is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.EventExPowering off host through esxcliwarningesx.audit.esxcli.host.poweroff.reason|The host is being powered off through esxcli. Reason for powering off: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.reboot.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.restart.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExHost acceptance level changedinfoesx.audit.esximage.hostacceptance.changed|Host acceptance level changed from {1} to {2}ExtendedEventUEFI Secure Boot enabled: Cannot skip signature checks.warningesx.audit.esximage.install.nobypasssigcheck|UEFI Secure Boot enabled: Cannot skip signature checks. Installing unsigned VIBs will prevent the system from booting. So the vib signature check will be enforced.ExtendedEventAttempting to install an image profile bypassing signing and acceptance level verification.warningesx.audit.esximage.install.nosigcheck|Attempting to install an image profile bypassing signing and acceptance level verification. This may pose a large security risk.ExtendedEventAttempting to install an image profile with validation disabled.warningesx.audit.esximage.install.novalidation|Attempting to install an image profile with validation disabled. This may result in an image with unsatisfied dependencies, file or package conflicts, and potential security violations.EventExSECURITY ALERT: Installing image profile.warningesx.audit.esximage.install.securityalert|SECURITY ALERT: Installing image profile '{1}' with {2}.EventExSuccessfully installed image profile.infoesx.audit.esximage.profile.install.successful|Successfully installed image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). 
Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully updated host to new image profile.infoesx.audit.esximage.profile.update.successful|Successfully updated host to image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully changed software on host.infoesx.audit.esximage.software.apply.succeeded|Successfully installed {1} component(s) and removed {2} component(s) on host. To see more details about the transaction, run 'esxcli software profile get'.EventExSuccessfully installed VIBs.infoesx.audit.esximage.vib.install.successful|Successfully installed {1} VIB(s), removed {2} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully removed VIBsinfoesx.audit.esximage.vib.remove.successful|Successfully removed {1} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExDPU trust validation failederroresx.audit.esxtokend.dputrust.failed|DPU: {1} trust validation failedEventExDPU was removedwarningesx.audit.esxtokend.dputrust.removed|DPU:{1} was removed.EventExDPU trust validation succeededinfoesx.audit.esxtokend.dputrust.succeeded|DPU: {1} trust validation succeeded.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Warning tripped.EventExNVDIMM: Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.lifetime.warning|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Warning tripped.EventExNVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.spareblocks|NVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.temperature|NVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).warningesx.audit.hardware.nvd.health.life.pctused|NVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).EventExNVDIMM Count of DRAM correctable ECC errors above threshold.infoesx.audit.hardware.nvd.health.module.ce|NVDIMM (handle {1}, idString {2}): Count of DRAM correctable ECC errors above threshold.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Warning tripped.EventExNVDIMM: Module Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.lifetime.warning|NVDIMM (handle {1}, idString {2}): Module Lifetime Warning tripped.EventExNVDIMM: Module Temperature Warning 
tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.temperature.warning|NVDIMM (handle {1}, idString {2}): Module Temperature Warning tripped.EventExNVDIMM: Maintenance needed.warningesx.audit.hardware.nvd.health.vmw.statusflags.maintNeeded|NVDIMM (handle {1}, idString {2}): Maintenance needed.EventExA physical disk has been inserted.infoA physical disk has been insertedesx.audit.hcm.event.disk.insertion|A physical disk has been inserted ({1}).EventExA physical disk has been removed.infoA physical disk has been removed.esx.audit.hcm.event.disk.removal|A physical disk has been removed ({1}).ExtendedEventHost has booted.infoesx.audit.host.boot|Host has booted.EventExHost experienced a crashinfoesx.audit.host.crash.reason|The crash at {1} occurred due to: {2}. More details will be available in the generated vmkernel-zdump.EventExThe host experienced a crashinfoesx.audit.host.crash.reason.available|The host experienced a crash. Reason: {1}.ExtendedEventHost experienced a crashinfoesx.audit.host.crash.reason.unavailable|Host experienced a crash. More details will be available in the generated vmkernel-zdump.EventExThe number of virtual machines registered on the host exceeded limit.warningThe number of virtual machines registered on host {host.name} in cluster {computeResource.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered on host {host.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered exceeded limit: {current} registered, {limit} is the maximum supported.esx.audit.host.maxRegisteredVMsExceeded|The number of virtual machines registered on host {host.name} in cluster {computeResource.name} in {datacenter.name} exceeded limit: {current} registered, {limit} is the maximum supported.EventExThe host has been powered offinfoesx.audit.host.poweroff.reason.available|The host has been powered off. Reason for powering off: {1}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.management|The power off at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.subsystem|The power off at {1} was requested by {2} due to: {3}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.timestamp|The power off at {1} was requested due to: {2}.ExtendedEventHost had been powered offinfoesx.audit.host.poweroff.reason.unavailable|Host had been powered off. The poweroff was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.user|The power off at {1} was requested by user/entity {2} due to: {3}.EventExThe host experienced Quick Bootinfoesx.audit.host.quickboot.reason.available|The host experienced Quick Boot. 
Reason for reboot: {1}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.management|The Quick Boot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.subsystem|The Quick Boot at {1} was requested by {2} due to: {3}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.timestamp|The Quick Boot at {1} was requested due to: {2}.ExtendedEventHost experienced Quick Bootinfoesx.audit.host.quickboot.reason.unavailable|Host experienced Quick Boot. The Quick Boot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.user|The Quick Boot at {1} was requested by user/entity {2} due to: {3}.EventExThe host has been rebootedinfoesx.audit.host.reboot.reason.available|The host has been rebooted. Reason for reboot: {1}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.management|The reboot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.subsystem|The reboot at {1} was requested by {2} due to: {3}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.timestamp|The reboot at {1} was requested due to: {2}.ExtendedEventHost had been rebootedinfoesx.audit.host.reboot.reason.unavailable|Host had been rebooted. The reboot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.user|The reboot at {1} was requested by user/entity {2} due to: {3}.ExtendedEventHost is rebooting.infoesx.audit.host.stop.reboot|Host is rebooting.ExtendedEventHost is shutting down.infoesx.audit.host.stop.shutdown|Host is shutting down.EventExPowering off host through hostdwarningesx.audit.hostd.host.poweroff.reason|The host is being powered off through hostd. Reason for powering off: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.reboot.reason|The host is being rebooted through hostd. Reason for reboot: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.restart.reason|The host is being rebooted through hostd. 
Reason for reboot: {1}, User: {2}.ExtendedEventAdministrator access to the host has been enabled.infoesx.audit.lockdownmode.disabled|Administrator access to the host has been enabled.ExtendedEventAdministrator access to the host has been disabled.infoesx.audit.lockdownmode.enabled|Administrator access to the host has been disabled.ExtendedEventList of lockdown exception users has been changed.infoesx.audit.lockdownmode.exceptions.changed|List of lockdown exception users has been changed.ExtendedEventThe host has canceled entering maintenance mode.infoesx.audit.maintenancemode.canceled|The host has canceled entering maintenance mode.ExtendedEventThe host has entered maintenance mode.infoesx.audit.maintenancemode.entered|The host has entered maintenance mode.ExtendedEventThe host has begun entering maintenance mode.infoesx.audit.maintenancemode.entering|The host has begun entering maintenance mode.ExtendedEventThe host has exited maintenance mode.infoesx.audit.maintenancemode.exited|The host has exited maintenance mode.ExtendedEventThe host has failed entering maintenance mode.erroresx.audit.maintenancemode.failed|The host has failed entering maintenance mode.EventExFirewall configuration has changed.infoesx.audit.net.firewall.config.changed|Firewall configuration has changed. Operation '{1}' for rule set {2} succeeded.ExtendedEventFirewall has been disabled.warningesx.audit.net.firewall.disabled|Firewall has been disabled.EventExFirewall has been enabled for port.infoesx.audit.net.firewall.enabled|Firewall has been enabled for port {1}.EventExPort is now protected by Firewall.infoesx.audit.net.firewall.port.hooked|Port {1} is now protected by Firewall.EventExPort is no longer protected with Firewall.warningesx.audit.net.firewall.port.removed|Port {1} is no longer protected with Firewall.EventExLACP disabledinfoesx.audit.net.lacp.disable|LACP for VDS {1} is disabled.EventExLACP enabledinfoesx.audit.net.lacp.enable|LACP for VDS {1} is enabled.EventExuplink is connectedinfoesx.audit.net.lacp.uplink.connected|LACP info: uplink {1} on VDS {2} got connected.EventExThe host has canceled entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.canceled|The host has canceled entering '{1}'.EventExThe host has entered a partial maintenance mode.infoesx.audit.partialmaintenancemode.entered|The host has entered '{1}'.EventExThe host has begun entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.entering|The host has begun entering '{1}'.EventExThe host has exited a partial maintenance mode.infoesx.audit.partialmaintenancemode.exited|The host has exited '{1}'.EventExThe host has failed entering a partial maintenance mode.erroresx.audit.partialmaintenancemode.failed|The host has failed entering '{1}'.ExtendedEventThe ESXi command line shell has been disabled.infoesx.audit.shell.disabled|The ESXi command line shell has been disabled.ExtendedEventThe ESXi command line shell has been enabled.infoesx.audit.shell.enabled|The ESXi command line shell has been enabled.ExtendedEventSSH access has been disabled.infoesx.audit.ssh.disabled|SSH access has been disabled.ExtendedEventSSH access has been enabled.infoesx.audit.ssh.enabled|SSH access has been enabled.EventExSSH session was closed.infoesx.audit.ssh.session.closed|SSH session was closed for '{1}@{2}'.EventExSSH login has failed.infoesx.audit.ssh.session.failed|SSH login has failed for '{1}@{2}'.EventExSSH session was opened.infoesx.audit.ssh.session.opened|SSH session was opened for '{1}@{2}'.EventExPowering off 
hostwarningesx.audit.subsystem.host.poweroff.reason|The host is being powered off. Reason for powering off: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.reboot.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.restart.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.ExtendedEventSupershell session has been started by a user.warningSupershell session has been started by a user.esx.audit.supershell.access|Supershell session has been started by a user.EventExTest with an int argumenterroresx.audit.test.test1d|Test with {1}EventExTest with a string argumenterroresx.audit.test.test1s|Test with {1}ExtendedEventUSB configuration has changed.infoUSB configuration has changed on host {host.name} in cluster {computeResource.name}.USB configuration has changed on host {host.name}.USB configuration has changed.esx.audit.usb.config.changed|USB configuration has changed on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExEnforcement level changed for all security domains.warningesx.audit.uw.secpolicy.alldomains.level.changed|The enforcement level for all security domains has been changed to {1}. The enforcement level must always be set to enforcing.EventExEnforcement level changed for security domain.warningesx.audit.uw.secpolicy.domain.level.changed|The enforcement level for security domain {1} has been changed to {2}. The enforcement level must always be set to enforcing.ExtendedEventExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.warningesx.audit.uw.security.User.ExecInstalledOnly.disabled|ExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.infoesx.audit.uw.security.User.ExecInstalledOnly.enabled|ExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.EventExExecution of non-installed file prevented.warningesx.audit.uw.security.execInstalledOnly.violation|Execution of unknown (non VIB installed) binary '{1}' prevented. Unknown content can cause malware attacks similar to Ransomware.EventExExecution of non-installed file detected.warningesx.audit.uw.security.execInstalledOnly.warning|Execution of unknown (non VIB installed) binary '{1}'. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventLVM device discovered.infoesx.audit.vmfs.lvm.device.discovered|One or more LVM devices have been discovered on this host.EventExRead IO performance may be impacted for diskinfoRead IO performance may be impacted for disk {1}: {2}Read IO performance may be impacted for disk {1}: {2}esx.audit.vmfs.sesparse.bloomfilter.disabled|Read IO performance may be impacted for disk {1}: {2}EventExFile system mounted.infoesx.audit.vmfs.volume.mounted|File system {1} on volume {2} has been mounted in {3} mode on this host.EventExLVM volume un-mounted.infoesx.audit.vmfs.volume.umounted|The volume {1} has been safely un-mounted. The datastore is no longer accessible on this host.EventExvSAN device is added back successfully after MEDIUM error.infovSAN device {1} is added back successfully after MEDIUM error. 
Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.devicerebuild|vSAN device {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExvSAN diskgroup is rebuilt successfully after MEDIUM error.infovSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.diskgrouprebuild|vSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExFound components with invalid metadatawarning{1} components found with invalid metadata on disk {2} {3}esx.audit.vob.vsan.lsom.foundInvalidMetadataComp|{1} components found with invalid metadata on disk {2} {3}EventExvSAN storagepool is added back successfully after MEDIUM error.infovSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.storagepoolrebuild|vSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExTest with both int and string arguments.infoesx.audit.vobdtestcorrelator.test|Test with both string: {2} and int: {1}.ExtendedEventvSAN clustering services have been enabled.infovSAN clustering and directory services have been enabled.esx.audit.vsan.clustering.enabled|vSAN clustering and directory services have been enabled.ExtendedEventvSAN virtual NIC has been added.infovSAN virtual NIC has been added.esx.audit.vsan.net.vnic.added|vSAN virtual NIC has been added.ExtendedEventvSAN network configuration has been removed.errorvSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.esx.audit.vsan.net.vnic.deleted|vSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.EventExvSAN RDMA changed for vmknic.infovSAN RDMA changed for vmknic {1}.esx.audit.vsan.rdma.changed|vSAN RDMA changed for vmknic {1}.ExtendedEventHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445warningHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445esx.audit.weak.ssl.protocol|Weak SSL protocols found and disabled. Please refer to KB article: KB 2151445ExtendedEventA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.infoA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.esx.clear.coredump.configured|A vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.ExtendedEventAt least one coredump target has been configured. Host core dumps will be saved.infoAt least one coredump target has been configured. Host core dumps will be saved.esx.clear.coredump.configured2|At least one coredump target has been configured. Host core dumps will be saved.EventExNVDIMM Energy Source is sufficiently charged.infoesx.clear.hardware.nvd.health.module.es.charged|NVDIMM (handle {1}, idString {2}): Energy Source is sufficiently charged.EventExRestored network connectivity to portgroupsinfoesx.clear.net.connectivity.restored|Network connectivity restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExRestored Network Connectivity to DVPortsinfoesx.clear.net.dvport.connectivity.restored|Network connectivity restored on DVPorts: {1}. 
Physical NIC {2} is up.EventExRestored Network Redundancy to DVPortsinfoesx.clear.net.dvport.redundancy.restored|Uplink redundancy restored on DVPorts: {1}. Physical NIC {2} is up recently.EventExlag transition upinfoesx.clear.net.lacp.lag.transition.up|LACP info: LAG {1} on VDS {2} is up.EventExuplink transition upinfoesx.clear.net.lacp.uplink.transition.up|LACP info: uplink {1} on VDS {2} is moved into link aggregation group.EventExuplink is unblockedinfoesx.clear.net.lacp.uplink.unblocked|LACP info: uplink {1} on VDS {2} is unblocked.EventExRestored uplink redundancy to portgroupsinfoesx.clear.net.redundancy.restored|Uplink redundancy restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExLink state upinfoesx.clear.net.vmnic.linkstate.up|Physical NIC {1} linkstate is up.EventExStorage Device I/O Latency has improvedinfoesx.clear.psastor.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.psastor.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.psastor.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExScsi Device I/O Latency has improvedinfoesx.clear.scsi.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.scsi.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.scsi.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExExited the All Paths Down stateinfoesx.clear.storage.apd.exit|Device or filesystem with identifier {1} has exited the All Paths Down state.EventExRestored connectivity to storage deviceinfoesx.clear.storage.connectivity.restored|Connectivity to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored path redundancy to storage deviceinfoesx.clear.storage.redundancy.restored|Path redundancy to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored connection to NFS serverinfoesx.clear.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExNFS volume I/O Latency has improvedinfoesx.clear.vmfs.nfs.volume.io.latency.improved|NFS volume {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExvSAN device has come online.infovSAN device {1} has come online.esx.clear.vob.vsan.pdl.online|vSAN device {1} has come online.EventExTest with both int and string arguments.infoesx.clear.vobdtestcorrelator.test|Test with both string: {1} {3} and int: {2}.ExtendedEventvSAN clustering services have now been enabled.infovSAN clustering and directory services have now been enabled.esx.clear.vsan.clustering.enabled|vSAN clustering and directory services have now been enabled.ExtendedEventvSAN now has at least one active network configuration.infovSAN now has a usable network configuration. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.network.available|vSAN now has a usable network configuration. 
Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExA previously reported vmknic now has a valid IP.infovmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.vmknic.ready|vmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExVVol container has come online.infoesx.clear.vvol.container.online|VVol container {1} has come online.EventExA 3rd party component on ESXi has reported an error.erroresx.problem.3rdParty.error|A 3rd party component, {1}, running on ESXi has reported an error. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.info|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.information|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported a warning.warningesx.problem.3rdParty.warning|A 3rd party component, {1}, running on ESXi has reported a warning related to a problem. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA corrected memory error occurrederroresx.problem.apei.bert.memory.error.corrected|A corrected memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA fatal memory error occurrederroresx.problem.apei.bert.memory.error.fatal|A fatal memory error occurred in the last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA recoverable memory error occurrederroresx.problem.apei.bert.memory.error.recoverable|A recoverable memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA corrected PCIe error occurrederroresx.problem.apei.bert.pcie.error.corrected|A corrected PCIe error occurred in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA fatal PCIe error occurrederroresx.problem.apei.bert.pcie.error.fatal|Platform encountered a fatal PCIe error in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA recoverable PCIe error occurrederroresx.problem.apei.bert.pcie.error.recoverable|A recoverable PCIe error occurred in last boot. 
The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExAn application running on ESXi host has crashed and core file creation failed.warningesx.problem.application.core.dumpFailed|An application ({1}) running on ESXi host has crashed ({2} time(s) so far), but core dump creation failed.EventExAn application running on ESXi host has crashed and a core file was created.warningesx.problem.application.core.dumped|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). A core file might have been created at {3}.EventExAn application running on ESXi host has crashed and an encrypted core file was created.warningesx.problem.application.core.dumped.encrypted|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). An encrypted core file using keyId {3} might have been created at {4}.ExtendedEventCritical failure detected during boot, please refer to KB 93107.errorA critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.esx.problem.boot.failure.detected|A critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.ExtendedEventSystem clock no longer synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.lostsync|system clock no longer synchronized to upstream time serversExtendedEventSystem clock synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.sync|system clock synchronized to upstream time serversExtendedEventSystem clock lost synchronization to upstream time serverswarningesx.problem.clock.correction.adjtime.unsync|system clock lost synchronization to upstream time serversEventExApplication system changed clock, synchronization lostwarningesx.problem.clock.correction.changed|{1} stepped system clock to {2}.{3}, synchronization lostEventExAllowed system clock update with large time changewarningesx.problem.clock.correction.delta.allowed|Clock stepped to {1}.{2}, but delta {3} > {4} secondsEventExFailed system clock update with large time changeerroresx.problem.clock.correction.delta.failed|Clock step to {1}.{2} failed, delta {3} > {4} seconds, number of large corrections > {5}EventExAllowed system clock update with large time change, but number of future updates limitedwarningesx.problem.clock.correction.delta.warning|Clock stepped to {1}.{2}, but delta {3} > {4} seconds, {5}/{6} large correctionsEventExSystem clock stepped, lost synchronizationwarningesx.problem.clock.correction.step.unsync|system clock stepped to {1}.{2}, lost synchronizationEventExSystem clock maximum number of large corrections changedwarningesx.problem.clock.parameter.set.maxLargeCorrections|system clock max number of correction set to {1}EventExSystem clock maximum negative phase correction changedwarningesx.problem.clock.parameter.set.maxNegPhaseCorrection|system clock max negative phase correction set to {1}EventExSystem clock maximum positive phase correction changedwarningesx.problem.clock.parameter.set.maxPosPhaseCorrection|system clock max positive phase correction set to {1}EventExSystem clock count of number of large corrections changedwarningesx.problem.clock.parameter.set.numLargeCorrections|system clock number of large correction set to {1}EventExSystem clock VOB report interval 
changedwarningesx.problem.clock.parameter.set.vobReportInterval|system clock max number of correction set to {1}ExtendedEventSystem clock state has been resetwarningesx.problem.clock.state.reset|system clock state has been resetEventExThe storage capacity of the coredump targets is insufficient to capture a complete coredump.warningThe storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.esx.problem.coredump.capacity.insufficient|The storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.EventExThe free space available in default coredump copy location is insufficient to copy new coredumps.warningThe free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.esx.problem.coredump.copyspace|The free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.EventExThe given partition has insufficient amount of free space to extract the coredump.warningThe given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.esx.problem.coredump.extraction.failed.nospace|The given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.ExtendedEventNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.warningNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured|No vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.ExtendedEventNo coredump target has been configured. Host core dumps cannot be saved.warningNo coredump target has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured2|No coredump target has been configured. Host core dumps cannot be saved.ExtendedEventDRAM ECC not enabled. Please enable it in BIOS.erroresx.problem.cpu.amd.mce.dram.disabled|DRAM ECC not enabled. Please enable it in BIOS.ExtendedEventNot all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. erroresx.problem.cpu.intel.ioapic.listing.error|Not all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. ExtendedEventMCE monitoring will be disabled as an unsupported CPU was detected. Please consult the ESX HCL for information on supported hardware.erroresx.problem.cpu.mce.invalid|MCE monitoring will be disabled as an unsupported CPU was detected. 
Please consult the ESX HCL for information on supported hardware.EventExHigh number of corrected errors on a page.infoesx.problem.cpu.page.correctederrors.high|High number of corrected errors on host physical page number {1}EventExDisabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.erroresx.problem.cpu.smp.ht.invalid|Disabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.EventExFound {1} PCPUs, but only using {2} of them due to specified limit.erroresx.problem.cpu.smp.ht.numpcpus.max|Found {1} PCPUs, but only using {2} of them due to specified limit.EventExDisabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.erroresx.problem.cpu.smp.ht.partner.missing|Disabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.EventExError copying ConfigStore from backup.errorError copying ConfigStore from backup.esx.problem.cs.createstore.copy.backup.error|Error copying ConfigStore from backup {1}.ExtendedEventFailed an operation on the ConfigStore database.errorFailed an operation on the ConfigStore database.esx.problem.cs.db.operation.error|Failed an operation on the ConfigStore database.ExtendedEventFailed to setup desired configuration.errorFailed to setup desired configuration.esx.problem.cs.desired.config.error|Failed to setup desired configuration.ExtendedEventError cleaning up Datafile store.errorError cleaning up Datafile store.esx.problem.cs.dfs.cleanup.error|Error cleaning up Datafile store.ExtendedEventDataFile store cannot be restored.errorDataFile store cannot be restored.esx.problem.cs.dfs.restore.error|DataFile store cannot be restored.EventExError processing schema file.errorError processing schema file.esx.problem.cs.schema.file.error|Error processing schema file {1}.EventExInvalid metadata in schema file.errorInvalid metadata in schema file.esx.problem.cs.schema.metadata.error|Invalid metadata in schema file {1}.EventExVibId validation failed for schema file.errorVibId validation failed for schema file.esx.problem.cs.schema.validation.error|VibId validation failed for schema file {1}.EventExError in upgrading config.errorError in upgrading config.esx.problem.cs.upgrade.config.error|Error in upgrading config {1}.EventExUnable to obtain a DHCP lease.erroresx.problem.dhclient.lease.none|Unable to obtain a DHCP lease on interface {1}.EventExNo expiry time on offered DHCP lease.erroresx.problem.dhclient.lease.offered.noexpiry|No expiry time on offered DHCP lease from {1}.EventExThe maintenance mode state for some Data Processing Units may be out of sync with the host.warningThe maintenance mode state for some Data Processing Units may be out of sync with the host.esx.problem.dpu.maintenance.sync.failed|The maintenance mode state for Data Processing Units with ids '{dpus}' may be out of sync with the host.EventExSome drivers need special notice.warningDriver for device {1} is {2}. Please refer to KB article: {3}.esx.problem.driver.abnormal|Driver for device {1} is {2}. Please refer to KB article: {3}.EventExHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.esx.problem.entropy.config.error|Host is configured with external entropy source. 
Entropy daemon has become non functional because of an {1} change. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.empty|Host is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.inmemory.empty|Host is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.EventExCould not install image profile.erroresx.problem.esximage.install.error|Could not install image profile: {1}EventExHost doesn't meet image profile hardware requirements.erroresx.problem.esximage.install.invalidhardware|Host doesn't meet image profile '{1}' hardware requirements: {2}EventExCould not stage image profile.erroresx.problem.esximage.install.stage.error|Could not stage image profile '{1}': {2}ExtendedEventThe host can not support the applied EVC mode.warningesx.problem.evc.incompatible|The host can not support the applied EVC mode.EventExSkipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.device.invalid|Skipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.EventExSkipping interrupt routing entry with bad device pin: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.pin.invalid|Skipping interrupt routing entry with bad device pin: {1}. 
This is a BIOS bug.EventExFPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.clear|FPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.creditstall|FPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.devicespecific|FPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.lostcredit|FPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.oversubscription|FPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.devicespecific|FPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.timeout|FPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.unabletoroute|FPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.unknown|FPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.linkintegrity.devicespecific|FPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidCRC|FPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidtransmissionword|FPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link failure: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.linkfailure|FPIN FC link failure: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsignal|FPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsynchronization|FPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.primitivesequenceprotocolerror|FPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.uncorrectableFECerror|FPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN 
{2}.infoesx.problem.hardware.fpin.fc.linkintegrity.unknown|FPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.clear|FPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.creditstall|FPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.devicespecific|FPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.lostcredit|FPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.oversubscription|FPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.EventExIOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.erroresx.problem.hardware.ioapic.missing|IOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.ExtendedEventFailed to communicate with the BMC. IPMI functionality will be unavailable on this system.erroresx.problem.hardware.ipmi.bmc.bad|Failed to communicate with the BMC. IPMI functionality will be unavailable on this system.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Error tripped.EventExNVDIMM: Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.lifetime.error|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Error tripped.EventExNVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.erroresx.problem.hardware.nvd.health.lastshutdownstatus|NVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.EventExNVDIMM Configuration error detected.erroresx.problem.hardware.nvd.health.module.config.error|NVDIMM (handle {1}, idString {2}): Configuration error detected.EventExNVDIMM Controller failure detected.erroresx.problem.hardware.nvd.health.module.ctlr.fail|NVDIMM (handle {1}, idString {2}): Controller failure detected. Access to the device and its capabilities are lost.EventExNVDIMM Controller firmware error detected.erroresx.problem.hardware.nvd.health.module.ctlr.fw.error|NVDIMM (handle {1}, idString {2}): Controller firmware error detected.EventExNVDIMM Energy Source still charging.warningesx.problem.hardware.nvd.health.module.es.charging|NVDIMM (handle {1}, idString {2}): Energy Source still charging but does not have sufficient charge to support a backup. 
Persistency is temporarily lost for the device.EventExNVDIMM Energy Source failure detected.erroresx.problem.hardware.nvd.health.module.es.fail|NVDIMM (handle {1}, idString {2}): Energy Source failure detected. Persistency is lost for the device.EventExNVDIMM Previous ARM operation failed.warningesx.problem.hardware.nvd.health.module.ops.arm.fail|NVDIMM (handle {1}, idString {2}): Previous ARM operation failed.EventExNVDIMM Previous ERASE operation failed.warningesx.problem.hardware.nvd.health.module.ops.erase.fail|NVDIMM (handle {1}, idString {2}): Previous ERASE operation failed.EventExThe Platform flush failed. The restored data may be inconsistent.erroresx.problem.hardware.nvd.health.module.ops.flush.fail|NVDIMM (handle {1}, idString {2}): The Platform flush failed. The restored data may be inconsistent.EventExNVDIMM Last RESTORE operation failed.erroresx.problem.hardware.nvd.health.module.ops.restore.fail|NVDIMM (handle {1}, idString {2}): Last RESTORE operation failed.EventExNVDIMM Previous SAVE operation failed.erroresx.problem.hardware.nvd.health.module.ops.save.fail|NVDIMM (handle {1}, idString {2}): Previous SAVE operation failed.EventExNVDIMM Count of DRAM uncorrectable ECC errors above threshold.warningesx.problem.hardware.nvd.health.module.uce|NVDIMM (handle {1}, idString {2}): Count of DRAM uncorrectable ECC errors above threshold.EventExNVDIMM Vendor specific error.erroresx.problem.hardware.nvd.health.module.vendor.error|NVDIMM (handle {1}, idString {2}): Vendor specific error.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Error tripped.EventExNVDIMM: Module Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.lifetime.error|NVDIMM (handle {1}, idString {2}): Module Lifetime Error tripped.EventExNVDIMM: Module Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.temperature.error|NVDIMM (handle {1}, idString {2}): Module Temperature Error tripped.EventExNVDIMM: All data may be lost in the event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInPowerLoss|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of power loss.EventExNVDIMM: All data may be lost in the event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInShutdown|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of shutdown.EventExNVDIMM: Subsequent reads may fail or return invalid data and subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossNow|NVDIMM (handle {1}, idString {2}): Subsequent reads may fail or return invalid data and subsequent writes may not persist.EventExNVDIMM: Performance degraded.erroresx.problem.hardware.nvd.health.vmw.statusflags.perfDegraded|NVDIMM (handle {1}, idString {2}): Performance degraded.EventExNVDIMM: Write persistency loss may happen in event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInPowerLoss|NVDIMM (handle {1}, idString {2}): Write persistency loss may happen in event of power loss.EventExNVDIMM: Write persistency loss may happen in event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInShutdown|NVDIMM (handle 
{1}, idString {2}): Write persistency loss may happen in event of shutdown.EventExNVDIMM: Subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossNow|NVDIMM (handle {1}, idString {2}): Subsequent writes may not persist.ExtendedEventTPM 2.0 device detected but a connection cannot be established.warningesx.problem.hardware.tpm2.connection|TPM 2.0 device detected but a connection cannot be established.ExtendedEventTPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.nosha256|TPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.ExtendedEventTPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.notis|TPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.ExtendedEventUnable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.warningesx.problem.hardware.tpm2.ownership|Unable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.ExtendedEventesx.problem.hardware.tpm2.provisioning|EventExA physical disk has a predictive failure.warningA physical disk has a predictive failure.esx.problem.hcm.event.disk.predictive.failure|A physical disk has a predictive failure ({1}).ExtendedEventAn unread host kernel core dump has been found.warningesx.problem.host.coredump|An unread host kernel core dump has been found.EventExHostd crashed and a core file was created.warningesx.problem.hostd.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. This might have caused connections to the host to be dropped.EventExHostd crashed and an encrypted core file was created.warningesx.problem.hostd.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventThis host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.infoesx.problem.hyperthreading.unmitigated|This host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.ExtendedEventSome of the config entries in the VM inventory were skipped because they are invalid.warningesx.problem.inventory.invalidConfigEntries|Some of the config entries in the VM inventory were skipped because they are invalid.EventExAn iofilter installed on the host has stopped functioning.errorIOFilter {1} has stopped functioning due to an unrecoverable error. Reason: {2}esx.problem.iofilter.disabled|IOFilter {1} has stopped functioning due to an unrecoverable error. 
Reason: {2}EventExStorage I/O Control version mismatchinfoesx.problem.iorm.badversion|Host {1} cannot participate in Storage I/O Control(SIOC) on datastore {2} because the version number {3} of the SIOC agent on this host is incompatible with number {4} of its counterparts on other hosts connected to this datastore.EventExUnmanaged workload detected on SIOC-enabled datastoreinfoesx.problem.iorm.nonviworkload|An unmanaged I/O workload is detected on a SIOC-enabled datastore: {1}.EventExThe metadata store has degraded on one of the hosts in the cluster.errorThe metadata store has degraded on host {1}.esx.problem.metadatastore.degraded|The metadata store has degraded on host {1}.ExtendedEventThe metadata store is healthy.infoThe metadata store is healthy.esx.problem.metadatastore.healthy|The metadata store is healthy.ExtendedEventFailed to create default migration heapwarningesx.problem.migrate.vmotion.default.heap.create.failed|Failed to create default migration heap. This might be the result of severe host memory pressure or virtual address space exhaustion. Migration might still be possible, but will be unreliable in cases of extreme host memory pressure.EventExError with migration listen socketerroresx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown|The ESXi host's vMotion network server encountered an error while monitoring incoming network connections. Shutting down listener socket. vMotion might not be possible with this host until vMotion is manually re-enabled. Failure status: {1}EventExThe max_vfs module option has been set for at least one module.warningSetting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.esx.problem.module.maxvfs.set|Setting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.EventExLost Network Connectivityerroresx.problem.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Connectivity to DVPortserroresx.problem.net.dvport.connectivity.lost|Lost network connectivity on DVPorts: {1}. Physical NIC {2} is down.EventExNetwork Redundancy Degraded on DVPortswarningesx.problem.net.dvport.redundancy.degraded|Uplink redundancy degraded on DVPorts: {1}. Physical NIC {2} is down.EventExLost Network Redundancy on DVPortswarningesx.problem.net.dvport.redundancy.lost|Lost uplink redundancy on DVPorts: {1}. Physical NIC {2} is down.EventExNo IPv6 TSO supporterroresx.problem.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid fenceId configuration on dvPorterroresx.problem.net.fence.port.badfenceid|VMkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: invalid fenceId.EventExMaximum number of fence networks or portserroresx.problem.net.fence.resource.limited|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: maximum number of fence networks or ports have been reached.EventExSwitch fence property is not seterroresx.problem.net.fence.switch.unavailable|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: dvSwitch fence property is not set.EventExFirewall configuration operation failed. 
The changes were not applied.erroresx.problem.net.firewall.config.failed|Firewall configuration operation '{1}' failed. The changes were not applied to rule set {2}.EventExAdding port to Firewall failed.erroresx.problem.net.firewall.port.hookfailed|Adding port {1} to Firewall failed.EventExFailed to set gatewayerroresx.problem.net.gateway.set.failed|Cannot connect to the specified gateway {1}. Failed to set it.EventExNetwork memory pool thresholdwarningesx.problem.net.heap.belowthreshold|{1} free size dropped below {2} percent.EventExlag transition downwarningesx.problem.net.lacp.lag.transition.down|LACP warning: LAG {1} on VDS {2} is down.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse|LACP error: No peer response on uplink {1} for VDS {2}.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse.2|LACP error: No peer response on VDS {1}.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.incompatible|LACP error: Current teaming policy on VDS {1} is incompatible, supported is IP hash only.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.linkstatus|LACP error: Current teaming policy on VDS {1} is incompatible, supported link failover detection is link status only.EventExuplink is blockedwarningesx.problem.net.lacp.uplink.blocked|LACP warning: uplink {1} on VDS {2} is blocked.EventExuplink is disconnectedwarningesx.problem.net.lacp.uplink.disconnected|LACP warning: uplink {1} on VDS {2} got disconnected.EventExuplink duplex mode is differenterroresx.problem.net.lacp.uplink.fail.duplex|LACP error: Duplex mode across all uplink ports must be full, VDS {1} uplink {2} has different mode.EventExuplink speed is differenterroresx.problem.net.lacp.uplink.fail.speed|LACP error: Speed across all uplink ports must be same, VDS {1} uplink {2} has different speed.EventExAll uplinks must be activeerroresx.problem.net.lacp.uplink.inactive|LACP error: All uplinks on VDS {1} must be active.EventExuplink transition downwarningesx.problem.net.lacp.uplink.transition.down|LACP warning: uplink {1} on VDS {2} is moved out of link aggregation group.EventExInvalid vmknic specified in /Migrate/Vmknicwarningesx.problem.net.migrate.bindtovmk|The ESX advanced configuration option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Update the configuration option with a valid vmknic. Alternatively, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExUnsupported vMotion network latency detectedwarningesx.problem.net.migrate.unsupported.latency|ESXi has detected {1}ms round-trip vMotion network latency between host {2} and {3}. High latency vMotion networks are supported only if both ESXi hosts have been configured for vMotion latency tolerance.EventExFailed to apply for free portserroresx.problem.net.portset.port.full|Portset {1} has reached the maximum number of ports ({2}). Cannot apply for any more free ports.EventExVlan ID of the port is invaliderroresx.problem.net.portset.port.vlan.invalidid|{1} VLANID {2} is invalid. 
VLAN ID must be between 0 and 4095.EventExTry to register an unsupported portset classwarningesx.problem.net.portset.unsupported.psclass|{1} is not a VMware supported portset class, the relevant module must be unloaded.EventExVirtual NIC connection to switch failedwarningesx.problem.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningesx.problem.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Redundancywarningesx.problem.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExRSPAN src session conflict with teamingerroresx.problem.net.rspan.teaming.uplink.io.conflict|Failed to set RSPAN src session {1} on portset {2} because it disallows uplink I/O, which conflicts with {3} teaming policy {4}.EventExThe teaming policy has an invalid uplinkerroresx.problem.net.teaming.policy.invalid.uplink|Failed to update teaming policy {1} on portset {2} due to an invalid uplink {3} which disallows normal I/O.EventExFailed to set MTU on an uplinkwarningesx.problem.net.uplink.mtu.failed|VMkernel failed to set the MTU value {1} on the uplink {2}.EventExA duplicate IP address was detected on a vmknic interfacewarningesx.problem.net.vmknic.ip.duplicate|A duplicate IP address was detected for {1} on the interface {2}. The current owner is {3}.EventExLink state downwarningesx.problem.net.vmnic.linkstate.down|Physical NIC {1} linkstate is down.EventExLink state unstablewarningesx.problem.net.vmnic.linkstate.flapping|Taking down physical NIC {1} because the link is unstable.EventExNic Watchdog Resetwarningesx.problem.net.vmnic.watchdog.reset|Uplink {1} has recovered from a transient failure due to watchdog timeoutEventExNTP daemon stopped. Time correction out of bounds.erroresx.problem.ntpd.clock.correction.error|NTP daemon stopped. Time correction {1} > {2} seconds. Manually set the time and restart ntpd.EventExOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.osdata.partition.full|OSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212ExtendedEventConfigured OSData cannot be found. Please refer to KB article: KB 87212.warningConfigured OSData cannot be found. Please refer to KB article: KB 87212.esx.problem.osdata.path.notfound|Configured OSData cannot be found. 
Please refer to KB article: KB 87212.EventExVirtual machine killed as it kept using a corrupted memory page.erroresx.problem.pageretire.mce.injected|Killing virtual machine with config path {1} because at least {2} uncorrectable memory error machine check exceptions were injected for guest physical page {3} but the virtual machine's operating system kept using the page.EventExA virtual machine was killed as it kept using a corrupted memory page.errorThe virtual machine was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.esx.problem.pageretire.mce.injected.2|{1} was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.EventExMemory page retirement requested by platform firmware.infoesx.problem.pageretire.platform.retire.request|Memory page retirement requested by platform firmware. FRU ID: {1}. Refer to System Hardware Log: {2}EventExNumber of host physical memory pages that have been selected for retirement but could not yet be retired is high.warningesx.problem.pageretire.selectedbutnotretired.high|Number of host physical memory pages that have been selected for retirement but could not yet be retired is high: ({1})EventExNumber of host physical memory pages selected for retirement exceeds threshold.warningesx.problem.pageretire.selectedmpnthreshold.host.exceeded|Number of host physical memory pages that have been selected for retirement ({1}) exceeds threshold ({2}).ExtendedEventNo memory to allocate APD Eventwarningesx.problem.psastor.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExStorage Device close failed.warningesx.problem.psastor.device.close.failed|Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.psastor.device.detach.failed|Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.psastor.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExStorage Device I/O Latency going highwarningesx.problem.psastor.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExPlugin's isLocal entry point failedwarningesx.problem.psastor.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.psastor.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.psastor.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.psastor.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExDevice has been turned off administratively.infoesx.problem.psastor.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. 
Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.psastor.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.psastor.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExMaximum number of storage pathserroresx.problem.psastor.psastorpath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.psastor.unsupported.plugin.type|Storage Device Allocation not supported for plugin type {1}EventExFailed to delete resource group.warningFailed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.esx.problem.resourcegroup.delete.failed|Failed to delete resource groups with names '{rgnames}'.EventExFailed to Set the Virtual Machine's Latency Sensitivitywarningesx.problem.sched.latency.abort|Unable to apply latency-sensitivity setting to virtual machine {1}. No valid placement on the host.EventExNo Cache Allocation Resourcewarningesx.problem.sched.qos.cat.noresource|Unable to support cache allocation for virtual machine {1}. Out of resources.EventExNo Cache Allocation Supportwarningesx.problem.sched.qos.cat.notsupported|Unable to support L3 cache allocation for virtual machine {1}. No processor capabilities.EventExNo Cache Monitoring Resourcewarningesx.problem.sched.qos.cmt.noresource|Unable to support cache monitoring for virtual machine {1}. Out of resources.EventExNo Cache Monitoring Supportwarningesx.problem.sched.qos.cmt.notsupported|Unable to support L3 cache monitoring for virtual machine {1}. No processor capabilities.ExtendedEventScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.warningScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.esx.problem.scratch.on.usb|Scratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.EventExScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.scratch.partition.full|Scratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212EventExSize of scratch partition is too small.warningSize of scratch partition {1} is too small. 
Recommended scratch partition size is {2} MiB.esx.problem.scratch.partition.size.small|Size of scratch partition {1} is too small. Recommended scratch partition size is {2} MiB.EventExNo scratch partition has been configured.warningNo scratch partition has been configured. Recommended scratch partition size is {} MiB.esx.problem.scratch.partition.unconfigured|No scratch partition has been configured. Recommended scratch partition size is {} MiB.ExtendedEventNo memory to allocate APD Eventwarningesx.problem.scsi.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExScsi Device close failed.warningesx.problem.scsi.device.close.failed|"Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.scsi.device.detach.failed|Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExFailed to attach filter to device.warningesx.problem.scsi.device.filter.attach.failed|Failed to attach filters to device '%s' during registration. Plugin load failed or the filter rules are incorrect.EventExInvalid XCOPY request for devicewarningesx.problem.scsi.device.invalid.xcopy.request|Invalid XCOPY request for device {1}. Host {2}, Device {3}, Plugin {4}, {5} sense, sense.key = {6}, sense.asc = {7}, sense.ascq = {8}: {9}EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.scsi.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExFailed to obtain INQUIRY data from the devicewarningesx.problem.scsi.device.io.inquiry.failed|Failed to get standard inquiry for device {1} from Plugin {2}.ExtendedEventScsi device queue parameters incorrectly set.warningesx.problem.scsi.device.io.invalid.disk.qfull.value|QFullSampleSize should be bigger than QFullThreshold. LUN queue depth throttling algorithm will not function as expected. Please set the QFullSampleSize and QFullThreshold disk configuration values in ESX correctly.EventExScsi Device I/O Latency going highwarningesx.problem.scsi.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExQErr cannot be changed on device. Please change it manually on the device if possible.warningesx.problem.scsi.device.io.qerr.change.config|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. The system is not configured to change the QErr setting of device. The QErr value supported by system is 0x{3}. Please check the SCSI ChangeQErrSetting configuration value for ESX.EventExScsi Device QErr setting changedwarningesx.problem.scsi.device.io.qerr.changed|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. 
The device was originally configured to the supported QErr setting of 0x{3}, but this has been changed and could not be changed back.EventExPlugin's isLocal entry point failedwarningesx.problem.scsi.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.scsi.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.scsi.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.scsi.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExFailed to apply NMP SATP option during device discovery.warningesx.problem.scsi.device.nmp.satp.option.failed|Invalid config parameter: \"{1}\" provided in the nmp satp claimrule, this setting was not applied while claiming the path {2}EventExDevice has been turned off administratively.infoesx.problem.scsi.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.scsi.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExThin Provisioned Device Nearing Capacitywarningesx.problem.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.scsi.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExvVol PE path going out of vVol-incapable adaptererroresx.problem.scsi.scsipath.badpath.unreachpe|Sanity check failed for path {1}. The path is to a vVol PE, but it goes out of adapter {2} which is not PE capable. Path dropped.EventExCannot safely determine vVol PEerroresx.problem.scsi.scsipath.badpath.unsafepe|Sanity check failed for path {1}. Could not safely determine if the path is to a vVol PE. Path dropped.EventExMaximum number of storage pathserroresx.problem.scsi.scsipath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.scsi.unsupported.plugin.type|Scsi Device Allocation not supported for plugin type {1}ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. 
Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.infoesx.problem.sgx.addpackage|Support for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.infoesx.problem.sgx.htenabled|Support for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.esx.problem.slp.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.EventExAll paths are downwarningesx.problem.storage.apd.start|Device or filesystem with identifier {1} has entered the All Paths Down state.EventExAll Paths Down timed out, I/Os will be fast failedwarningesx.problem.storage.apd.timeout|Device or filesystem with identifier {1} has entered the All Paths Down Timeout state after being in the All Paths Down state for {2} seconds. I/Os will now be fast failed.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.devicepor|Frequent PowerOn Reset Unit Attentions are occurring on device {1}. This might indicate a storage problem. Affected datastores: {2}EventExLost Storage Connectivityerroresx.problem.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.pathpor|Frequent PowerOn Reset Unit Attentions are occurring on path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExFrequent State Changes of Storage Pathinfoesx.problem.storage.connectivity.pathstatechanges|Frequent path state changes are occurring for path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExiSCSI discovery target login connection problemerroresx.problem.storage.iscsi.discovery.connect.error|iSCSI discovery to {1} on {2} failed. The iSCSI Initiator could not establish a network connection to the discovery address.EventExiSCSI Discovery target login errorerroresx.problem.storage.iscsi.discovery.login.error|iSCSI discovery to {1} on {2} failed. The Discovery target returned a login error of: {3}.EventExiSCSI iSns Discovery errorerroresx.problem.storage.iscsi.isns.discovery.error|iSCSI iSns discovery to {1} on {2} failed. 
({3} : {4}).EventExiSCSI Target login connection problemerroresx.problem.storage.iscsi.target.connect.error|Login to iSCSI target {1} on {2} failed. The iSCSI initiator could not establish a network connection to the target.EventExiSCSI Target login errorerroresx.problem.storage.iscsi.target.login.error|Login to iSCSI target {1} on {2} failed. Target returned login error of: {3}.EventExiSCSI target permanently removederroresx.problem.storage.iscsi.target.permanently.lost|The iSCSI target {2} was permanently removed from {1}.EventExiSCSI target was permanently removederroresx.problem.storage.iscsi.target.permanently.removed|The iSCSI target {1} was permanently removed from {2}.EventExDegraded Storage Path Redundancywarningesx.problem.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. Affected datastores: {3}.EventExLost Storage Path Redundancywarningesx.problem.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.esx.problem.swap.systemSwap.isPDL.cannot.remove|System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.EventExSystem swap was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningesx.problem.swap.systemSwap.isPDL.cannot.remove.2|System swap was affected by the PDL of {1} and was removed. System swap has been reconfigured.EventExSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure|System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.EventExSystem swap was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningesx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2|System swap was affected by the PDL of {1}. It was removed but the subsequent reconfiguration failed.ExtendedEventSystem logging is not configured.warningSystem logging is not configured on host {host.name}.esx.problem.syslog.config|System logging is not configured on host {host.name}. Please check Syslog options for the host under Configuration -> Software -> Advanced Settings in vSphere client.ExtendedEventSystem logs are stored on non-persistent storage.warningSystem logs on host {host.name} are stored on non-persistent storage.esx.problem.syslog.nonpersistent|System logs on host {host.name} are stored on non-persistent storage. 
Consult product documentation to configure a syslog server or a scratch partition.ExtendedEventTest with no argumentserroresx.problem.test.test0|Test with no argumentsEventExTest with both int and string argumentserroresx.problem.test.test2|Test with both {1} and {2}ExtendedEventUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.warningUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.esx.problem.unsupported.tls.protocols|Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.EventExA VFAT filesystem is full.erroresx.problem.vfat.filesystem.full.other|The VFAT filesystem {1} (UUID {2}) is full.EventExA VFAT filesystem, being used as the host's scratch partition, is full.erroresx.problem.vfat.filesystem.full.scratch|The host's scratch partition, which is the VFAT filesystem {1} (UUID {2}), is full.EventExConfigstore is reaching its critical size limit. Please refer to the KB 93362 for more details.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.EventExA ramdisk has a very high usage. Please refer to the KB 93362 for more details.warningRamdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.ExtendedEventAn operation on the root filesystem has failed.erroresx.problem.visorfs.failure|An operation on the root filesystem has failed.EventExThe root filesystem's file table is full.erroresx.problem.visorfs.inodetable.full|The root filesystem's file table is full. As a result, the file {1} could not be created by the application '{2}'.EventExA ramdisk is full.erroresx.problem.visorfs.ramdisk.full|The ramdisk '{1}' is full. 
As a result, the file {2} could not be written.EventExA ramdisk's file table is full.erroresx.problem.visorfs.ramdisk.inodetable.full|The file table of the ramdisk '{1}' is full. As a result, the file {2} could not be created by the application '{3}'.EventExConfig store is reaching its critical size limit.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.EventExA ramdisk has a very high usage.warningRamdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left.EventExA VM could not fault in a page. The VM is terminated as further progress is impossible.erroresx.problem.vm.kill.unexpected.fault.failure|The VM using the config file {1} could not fault in a guest physical page from the hypervisor level swap file at {2}. The VM is terminated as further progress is impossible.EventExA virtual machine could not fault in a page. It is terminated as further progress is impossible.errorThe virtual machine could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleesx.problem.vm.kill.unexpected.fault.failure.2|{1} could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleEventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire.64|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA virtual machine contained a host physical page that was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.errorThe virtual machine contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2|{1} contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.noSwapResponse|The VM using the config file {1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA virtual machine did not respond to swap actions. 
It is terminated as further progress is impossible.errorThe virtual machine did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.esx.problem.vm.kill.unexpected.noSwapResponse.2|{1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA VM is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.erroresx.problem.vm.kill.unexpected.vmtrack|The VM using the config file {1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.errorThe virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.esx.problem.vm.kill.unexpected.vmtrack.2|{1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA user world daemon of a virtual machine could not fault in a page. The VM is terminated as further progress is impossible.errorThe user world daemon of this virtual machine could not fault in a page. The virtual machine is terminated as further progress is impossible.esx.problem.vm.kill.unexpected.vmx.fault.failure.2|The user world daemon of {1} could not fault in a page. The virtual machine is terminated as further progress is impossible.EventExMulti-extent ATS-only VMFS Volume unable to use ATSerroresx.problem.vmfs.ats.incompatibility.detected|Multi-extent ATS-only volume '{1}' ({2}) is unable to use ATS because HardwareAcceleratedLocking is disabled on this host: potential for introducing filesystem corruption. Volume should not be used from other hosts.EventExDevice Backing VMFS has lost ATS Supporterroresx.problem.vmfs.ats.support.lost|ATS-Only VMFS volume '{1}' not mounted. Host does not support ATS or ATS initialization has failed.EventExVMFS Locked By Remote Hosterroresx.problem.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.erroresx.problem.vmfs.extent.offline|An attached device {1} may be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system came onlineinfoesx.problem.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Heartbeat Corruption Detected.erroresx.problem.vmfs.heartbeat.corruptondisk|At least one corrupt on-disk heartbeat region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Volume Connectivity Restoredinfoesx.problem.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfoesx.problem.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. 
Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterroresx.problem.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerroresx.problem.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Volume will remain in read-only metadata mode with limited write support until journal can be created.EventExTrying to acquire lock on an already locked file. - File descriptionerror{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}esx.problem.vmfs.lock.busy.filedesc|{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}EventExTrying to acquire lock on an already locked file. FilenameerrorLock(s) held on file {1} by other host(s).esx.problem.vmfs.lock.busy.filename|Lock(s) held on file {1} by other host(s).EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk.v2|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected.errorInconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.lockmode.inconsistency.detected|Inconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExFailed to mount NFS volumeerroresx.problem.vmfs.nfs.mount.failed|NFS mount failed for {1}:{2} volume {3}. Status: {4}EventExLost connection to NFS servererroresx.problem.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExvmknic configured for NFS has been removedwarningesx.problem.vmfs.nfs.vmknic.removed|vmknic {1} removed, NFS{2} datastore {3} configured with the vmknic will be inaccessible.EventExNFS volume average I/O Latency has exceeded configured threshold for the current configured periodwarningesx.problem.vmfs.nfs.volume.io.latency.exceed.threshold.period|NFS volume {1} average I/O latency {2}(us) has exceeded threshold {3}(us) for last {4} minutesEventExNFS volume I/O Latency going highwarningesx.problem.vmfs.nfs.volume.io.latency.high|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExNFS volume I/O Latency exceeding thresholdwarningesx.problem.vmfs.nfs.volume.io.latency.high.exceed.threshold|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds. 
Exceeded threshold {4} microsecondsEventExNo space on NFS volume.warningesx.problem.vmfs.nfs.volume.no.space|{1}: No space on NFS volume.EventExVMFS Resource Corruption Detectederroresx.problem.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected on spanned volume.errorInconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanned.lockmode.inconsistency.detected|Inconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.EventExIncompatible VMFS span state detected.errorIncompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanstate.incompatibility.detected|Incompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExRemote logging host has become unreachable.erroresx.problem.vmsyslogd.remote.failure|The host "{1}" has become unreachable. Remote logging to this host has stopped.ExtendedEventLogging to storage has failed.erroresx.problem.vmsyslogd.storage.failure|Logging to storage has failed. Logs are no longer being stored locally on this host.EventExThe configured log directory cannot be used. The default directory will be used instead.erroresx.problem.vmsyslogd.storage.logdir.invalid|The configured log directory {1} cannot be used. The default directory {2} will be used instead.EventExLog daemon has failed for an unexpected reason.erroresx.problem.vmsyslogd.unexpected|Log daemon has failed for an unexpected reason: {1}EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.errorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.EventExvSAN detected LSN mismatch in mirrorswarningvSAN detected LSN mismatch in mirrors for object {1}.esx.problem.vob.vsan.dom.lsnmismatcherror|vSAN detected LSN mismatch in mirrors for object {1}.EventExResync encountered no space errorwarningResync encountered no space error for component {1} on disk {2}.esx.problem.vob.vsan.dom.nospaceduringresync|Resync encountered no space error for component {1} on disk {2}. Resync will resume once space is freed up on this disk. 
Need around {3}MB to resync the component on this diskEventExResync is delayed.warningResync is delayed for component {1} on disk {2} for object {3}.esx.problem.vob.vsan.dom.resyncdecisiondelayed|Resync is delayed for component {1} on disk {2} until data availability is regained for object {3} on the remote site.EventExResync timed outwarningResync timed out for component {2} on disk {3}.esx.problem.vob.vsan.dom.resynctimeout|Resync timed out as no progress was made in {1} minute(s) for component {2} on disk {3}. Resync will be tried again for this component. The remaining resync is around {4}MB.EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskerrorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskunrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.unrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.EventExNVMe critical health warning for disk. The disk's backup device has failed.errorNVMe critical health warning for disk {1}. The disk's backup device has failed.esx.problem.vob.vsan.lsom.backupfailednvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's backup device has failed.EventExOffline event on component.warningOffline event issued for component: {1}, flag: {2}, reason: {3}.esx.problem.vob.vsan.lsom.componentoffline|Offline event issued for component: {1}, flag: {2}, reason: {3}.EventExvSAN Node: Near node component count limit.warningvSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).esx.problem.vob.vsan.lsom.componentthreshold|vSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).EventExEvacuation has failed for device and it will be retried by DDH.errorEvacuation has failed for device {1} and it will be retried by DDH.esx.problem.vob.vsan.lsom.ddhEvacFailed|Evacuation has failed for device {1} and it will be retried by DDH.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.devicerepair|vSAN device {1} is being repaired due to I/O failures, and will be out of service until the repair is complete. If the device is part of a dedup disk group, the entire disk group will be out of service until the repair is complete.EventExvSAN device has high latency. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithhighlatency|vSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.EventExvSAN device smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} smart health status is impending failure. 
It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithsmartfailure|vSAN device {1} smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.EventExvSAN device is under permanent failure.errorvSAN device {1} is under permanent failure.esx.problem.vob.vsan.lsom.diskerror|vSAN device {1} is under permanent failure.EventExFailed to create a new disk group.errorFailed to create new disk group {1}. The system has reached the maximum amount of disks groups allowed {2} for the current amount of memory {3}. Add more memory.esx.problem.vob.vsan.lsom.diskgrouplimit|Failed to create new disk group {1}. The system has reached the maximum amount of disks groups allowed {2} for the current amount of memory {3}. Add more memory.EventExvSAN diskgroup log is congested.errorvSAN diskgroup {1} log is congestedesx.problem.vob.vsan.lsom.diskgrouplogcongested|vSAN diskgroup {1} log is congested.EventExvSAN disk group is under congestion. It will be remediated. No action is needed.warningvSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.esx.problem.vob.vsan.lsom.diskgroupundercongestion|vSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.EventExFailed to add disk to disk group.errorFailed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.esx.problem.vob.vsan.lsom.disklimit2|Failed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.EventExvSAN device is under propagated error.errorvSAN device {1} is under propagated erroresx.problem.vob.vsan.lsom.diskpropagatederror|vSAN device {1} is under propagated error.EventExvSAN device is under propagated permanent error.errorvSAN device {1} is under propagated permanent erroresx.problem.vob.vsan.lsom.diskpropagatedpermerror|vSAN device {1} is under propagated permanent error.EventExvSAN device is unhealthy.errorvSAN device {1} is unhealthyesx.problem.vob.vsan.lsom.diskunhealthy|vSAN device {1} is unhealthy.EventExEvacuation failed for device due to insufficient resources and it will be retried.errorEvacuation failed for device {1} due to insufficient resources and it will be retried.esx.problem.vob.vsan.lsom.evacFailedInsufficientResources|Evacuation failed for device {1} due to insufficient resources and it will be retried. Please make resources available for evacuation.EventExDeleted invalid metadata component.warningDeleted invalid metadata component: {1}.esx.problem.vob.vsan.lsom.invalidMetadataComponent|Deleted invalid metadata component: {1}.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.metadataURE|vSAN device {1} encountered an unrecoverable read error. This disk will be evacuated and rebuilt. If the device is part of a dedup disk group, the entire disk group will be evacuated and rebuilt.EventExNVMe disk critical health warning for disk. Disk is now read only.errorNVMe critical health warning for disk {1}. Disk is now read only.esx.problem.vob.vsan.lsom.readonlynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1} is: The NVMe disk has become read only.EventExNVMe critical health warning for disk. 
The disk has become unreliable.errorNVMe critical health warning for disk {1}. The disk has become unreliable.esx.problem.vob.vsan.lsom.reliabilitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk has become unreliable.EventExNVMe critical health warning for disk. The disk's spare capacity is below threshold.errorNVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.esx.problem.vob.vsan.lsom.sparecapacitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.storagepoolURE|vSAN device {1} encountered an unrecoverable read error. This disk will be rebuilt.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.storagepoolrepair|vSAN device {1} is being repaired due to I/O failures and will be out of service until the repair is complete.EventExNo response for I/O on vSAN device.errorNo response for I/O on vSAN device {1}.esx.problem.vob.vsan.lsom.storagepoolstuckio|No response for I/O on vSAN device {1}.EventExvSAN device detected suspended I/Os.errorvSAN device {1} detected suspended I/Os.esx.problem.vob.vsan.lsom.stuckio|vSAN device {1} detected suspended I/Os. Taking the host out of service to avoid affecting the vSAN cluster.EventExvSAN device detected stuck I/O error.errorvSAN device {1} detected stuck I/O error.esx.problem.vob.vsan.lsom.stuckiooffline|vSAN device {1} detected stuck I/O error. Marking the device as offline.EventExvSAN device is under propagated stuck I/O error.errorvSAN device {1} is under propagated stuck I/O error.esx.problem.vob.vsan.lsom.stuckiopropagated|vSAN device {1} is under propagated stuck I/O error. Marking the device as offline.EventExvSAN device detected I/O timeout error.errorvSAN device {1} detected I/O timeout error.esx.problem.vob.vsan.lsom.stuckiotimeout|vSAN device {1} detected I/O timeout error. This may lead to stuck I/O.EventExNVMe critical health warning for disk. The disk's temperature is beyond threshold.errorNVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.esx.problem.vob.vsan.lsom.temperaturenvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.EventExvSAN device has gone offline.errorvSAN device {1} has gone offline.esx.problem.vob.vsan.pdl.offline|vSAN device {1} has gone offline.EventExA ZDOM object is paused due to continuous fail-stops.warningZDOM object {1} is paused on host {2}, numFailStops={3}.esx.problem.vob.vsan.zdom.failstoppaused|ZDOM object {1} is paused on host {2}, numFailStops={3}.ExtendedEventTest with no arguments.infoesx.problem.vobdtestcorrelator.test.0|Test with no argumentsEventExTest with int argument.infoesx.problem.vobdtestcorrelator.test.1d|Test with int argument: {1}EventExTest with string argument.infoesx.problem.vobdtestcorrelator.test.1s|Test with string argument: {1}EventExTest with huge string argument.infoesx.problem.vobdtestcorrelator.test.hugestr|Test with huge string argument: {1}EventExVpxa crashed and a core file was created.warningesx.problem.vpxa.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. 
This might have caused connections to the host to be dropped.EventExVpxa crashed and an encrypted core file was created.warningesx.problem.vpxa.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventvSAN clustering services have been disabled.warningvSAN clustering and directory services have been disabled thus will be no longer available.esx.problem.vsan.clustering.disabled|vSAN clustering and directory services have been disabled thus will be no longer available.EventExData component found on witness host.warningData component {1} found on witness host is ignored.esx.problem.vsan.dom.component.datacomponent.on.witness.host|Data component {1} found on witness host is ignored.EventExvSAN Distributed Object Manager failed to initializewarningvSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.esx.problem.vsan.dom.init.failed.status|vSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.EventExOne or more disks exceed its/their warning usage of estimated endurance threshold.infoOne or more disks exceed its/their warning usage of estimated endurance threshold.esx.problem.vsan.health.ssd.endurance|Disks {Disk Name} in Cluster {Cluster Name} have exceeded warning usage of their estimated endurance threshold {Disk Percentage Threshold}, currently at {Disk Percentage Used} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds the estimated endurance threshold.errorOne of the disks exceeds the estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.error|Disks {1} have exceeded their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds 90% of its estimated endurance threshold.warningOne of the disks exceeds 90% of its estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.warning|Disks {1} have exceeded 90 percent usage of their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.errorOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.esx.problem.vsan.health.vsanesa.pdl|Disk {1} is detected with PDL in vSAN ESA Cluster. Please check the host for further details.EventExvSAN device Memory/SSD congestion has changed.infoLSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.esx.problem.vsan.lsom.congestionthreshold|LSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.EventExA vmknic added to vSAN network configuration doesn't have valid IP. 
Network is not ready.errorvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configurations and therefore the vSAN node doesn't have network connectivity.esx.problem.vsan.net.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configurations and therefore the vSAN node doesn't have network connectivity.ExtendedEventvSAN doesn't have any redundancy in its network configuration.warningvSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.lost|vSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.ExtendedEventvSAN is operating on reduced network redundancy.warningvSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.reduced|vSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.ExtendedEventvSAN doesn't have any network configuration for use.errorvSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.esx.problem.vsan.no.network.connectivity|vSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.EventExA vmknic added to vSAN network configuration doesn't have a valid IP. It will not be in use.warningvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed, that may cause problems.esx.problem.vsan.vmknic.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed, that may cause problems.EventEx Failed to add shared virtual disk. Maximum count reachederroresx.problem.vscsi.shared.vmdk.add.failure.max.count|Failed to add shared virtual disk. Maximum number of shared vmdks supported per ESX host is {1}EventExNo free slots availableerroresx.problem.vscsi.shared.vmdk.no.free.slot.available|No free slot available. Maximum number of virtual machines supported in MSCS cluster is {1}EventExFailed to power on virtual machines on shared VMDK with running virtual machineerroresx.problem.vscsi.shared.vmdk.virtual.machine.power.on.failed|Two or more virtual machines (\"{1}\" and \"{2}\") sharing same virtual disk are not allowed to be Powered-On on same host.EventExVVol container has gone offline.erroresx.problem.vvol.container.offline|VVol container {1} has gone offline: isPEAccessible {2}, isVPAccessible {3}.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. 
Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.EventExCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated.thirdPartyProv|CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. 
Please refer to KB 95798 for more details.EventExApplication consistent sync completed.infoApplication consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Application consistent sync completed for virtual machine {vm.name} on host {host.name}.Application consistent sync completed for virtual machine {vm.name}.Application consistent sync completed.hbr.primary.AppQuiescedDeltaCompletedEvent|Application consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)ExtendedEventConnection to VR Server restored.infoConnection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Connection to VR Server restored for virtual machine {vm.name} on host {host.name}.Connection to VR Server restored for virtual machine {vm.name}.Connection to VR Server restored.hbr.primary.ConnectionRestoredToHbrServerEvent|Connection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExSync stopped.warningSync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped: {reason.@enum.hbr.primary.ReasonForDeltaAbort}hbr.primary.DeltaAbortedEvent|Sync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}EventExSync completed.infoSync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync completed for virtual machine {vm.name} on host {host.name}.Sync completed for virtual machine {vm.name}.Sync completed.hbr.primary.DeltaCompletedEvent|Sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventSync started.infoSync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync started by {userName} for virtual machine {vm.name} on host {host.name}.Sync started by {userName} for virtual machine {vm.name}.Sync started by {userName}.hbr.primary.DeltaStartedEvent|Sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFile system consistent sync completed.infoFile system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.File system consistent sync completed for virtual machine {vm.name} on host {host.name}.File system consistent sync completed for virtual machine {vm.name}.File system consistent sync completed.hbr.primary.FSQuiescedDeltaCompletedEvent|File system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)EventExFailed to start sync.errorFailed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name} on host {host.name}: 
{reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartDeltaEvent|Failed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExFailed to start full sync.errorFailed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name} on host {host.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartSyncEvent|Failed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExDisk replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}hbr.primary.InvalidDiskReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}EventExVirtual machine replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}hbr.primary.InvalidVmReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}ExtendedEventVR Server does not support network compression.warningVR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support network compression for virtual machine {vm.name} on host {host.name}.VR Server does not support network compression for virtual machine {vm.name}.VR Server does not support network 
compression.hbr.primary.NetCompressionNotOkForServerEvent|VR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server supports network compression.infoVR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server supports network compression for virtual machine {vm.name} on host {host.name}.VR Server supports network compression for virtual machine {vm.name}.VR Server supports network compression.hbr.primary.NetCompressionOkForServerEvent|VR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExNo connection to VR Server.warningNo connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server: {reason.@enum.hbr.primary.ReasonForNoServerConnection}hbr.primary.NoConnectionToHbrServerEvent|No connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}EventExVR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}errorVR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}hbr.primary.NoProgressWithHbrServerEvent|VR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}ExtendedEventPrepare Delta Time exceeds configured RPO.warningPrepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name}.Prepare Delta Time exceeds configured RPO.hbr.primary.PrepareDeltaTimeExceedsRpoEvent|Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventQuiescing is not supported for this virtual machine.warningQuiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing is not supported for virtual machine {vm.name} on host {host.name}.Quiescing is not supported for virtual machine {vm.name}.Quiescing is not supported for this virtual machine.hbr.primary.QuiesceNotSupported|Quiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server is compatible with the configured RPO.infoVR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in 
cluster {computeResource.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name}.VR Server is compatible with the configured RPO.hbr.primary.RpoOkForServerEvent|VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server does not support the configured RPO.warningVR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server does not support the configured RPO for virtual machine {vm.name}.VR Server does not support the configured RPO.hbr.primary.RpoTooLowForServerEvent|VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFull sync completed.infoFull sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync completed for virtual machine {vm.name} on host {host.name}.Full sync completed for virtual machine {vm.name}.Full sync completed.hbr.primary.SyncCompletedEvent|Full sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventFull sync started.infoFull sync started for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync started for virtual machine {vm.name} on host {host.name}.Full sync started for virtual machine {vm.name}.Full sync started.hbr.primary.SyncStartedEvent|Full sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventReplication paused.infoReplication paused for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Replication paused for virtual machine {vm.name} on host {host.name}.Replication paused for virtual machine {vm.name}.Replication paused.hbr.primary.SystemPausedReplication|Replication paused by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.warningQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.hbr.primary.UnquiescedDeltaCompletedEvent|Quiescing failed or the virtual machine is powered off. 
Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).EventExReplication configuration changed.infoReplication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} on host {host.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).hbr.primary.VmReplicationConfigurationChangedEvent|Replication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).AccountCreatedEventAccount createdinfoAn account was createdAccount {spec.id} was created on host {host.name} <EventLongDescription id="vim.event.AccountCreatedEvent"> <description> An account has been created on the host </description> </EventLongDescription> AccountRemovedEventAccount removedinfoAccount {account} was removedAccount {account} was removed on host {host.name} <EventLongDescription id="vim.event.AccountRemovedEvent"> <description> An account has been removed from the host </description> </EventLongDescription> AccountUpdatedEventAccount updatedinfoAccount {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}' <EventLongDescription id="vim.event.AccountUpdatedEvent"> <description> An account has been updated on the host </description> </EventLongDescription> AdminPasswordNotChangedEventAdministrator password not changedinfoThe default password for the root user has not been changedThe default password for the root user on the host {host.name} has not been changed <EventLongDescription id="vim.event.AdminPasswordNotChangedEvent"> <description> The default password for the Administrator user on the host has not been changed </description> <cause> <description> You have not changed the password for the Administrator user on the host so the default password is still active </description> <action> Change the password for the Administrator user on the host </action> </cause> </EventLongDescription> AlarmAcknowledgedEventAlarm acknowledgedinfoAcknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}'Acknowledged alarm '{alarm.name}' on {entity.name}AlarmActionTriggeredEventAlarm action triggeredinfoAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarmClearedEventAlarm clearedinfoManually cleared alarm 
'{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}AlarmCreatedEventAlarm createdinfoCreated alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}'Created alarm '{alarm.name}' on {entity.name}AlarmEmailCompletedEventAlarm email sentinfoAlarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}AlarmEmailFailedEventCannot send alarm emailerrorAlarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to} <EventLongDescription id="vim.event.AlarmEmailFailedEvent"> <description> An error occurred while sending email notification of a triggered alarm </description> <cause> <description>Failed to send email for a triggered alarm</description> <action>Check the vCenter Server SMTP settings for sending email notifications</action> </cause> </EventLongDescription> AlarmEvent<Alarm Event>info<internal>AlarmReconfiguredEventAlarm reconfiguredinfoReconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}'Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}. <EventLongDescription id="vim.event.AlarmReconfiguredEvent"> <description> An alarm has been reconfigured </description> <cause> <description>A user has reconfigured an alarm</description> </cause> </EventLongDescription> AlarmRemovedEventAlarm removedinfoRemoved alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}'Removed alarm '{alarm.name}' on {entity.name}AlarmScriptCompleteEventAlarm script completedinfoAlarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}AlarmScriptFailedEventAlarm script not completederrorAlarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg} <EventLongDescription id="vim.event.AlarmScriptFailedEvent"> <description> The vCenter Server logs this event if an error occurs while running a script after an alarm triggers. </description> <cause> <description>There was an error running the script</description> <action>Fix the script or failure condition</action> </cause> </EventLongDescription> AlarmSnmpCompletedEventAlarm SNMP trap sentinfoAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarmSnmpFailedEventAlarm SNMP trap not senterrorAlarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg} <EventLongDescription id="vim.event.AlarmSnmpFailedEvent"> <description> The vCenter Server logs this event if an error occurs while sending an SNMP trap when an alarm triggers. </description> <cause> <description>An SNMP trap could not be sent for a triggered alarm</description> <action>Check the vCenter Server SNMP settings. 
Make sure that the vCenter Server network can handle SNMP packets.</action> </cause> </EventLongDescription> AlarmStatusChangedEventAlarm status changedinfoAlarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}AllVirtualMachinesLicensedEventAll virtual machines are licensedinfoAll running virtual machines are licensedAlreadyAuthenticatedSessionEventAlready authenticatedinfoUser cannot logon since the user is already logged onAuthorizationEvent<Authorization Event>info<internal>BadUsernameSessionEventInvalid user nameerrorCannot login {userName}@{ipAddress} <EventLongDescription id="vim.event.BadUsernameSessionEvent"> <description> A user attempted to log in with an unknown or invalid username </description> <cause> <description> The username is unknown to the system </description> <action> Use a username that is included in the system user directory </action> <action> On Linux, verify that the user directory is correctly configured </action> <action> If you are using Active Directory, check the health of the domain controller </action> </cause> <cause> <description> The user provided an invalid password </description> <action> Supply the correct password </action> </cause> </EventLongDescription> CanceledHostOperationEventCanceled host operationinfoThe operation performed on host {host.name} was canceledThe operation performed on host {host.name} was canceledThe operation was canceledThe operation performed on host {host.name} in {datacenter.name} was canceled <EventLongDescription id="vim.event.CanceledHostOperationEvent"> <description> An operation performed on the host was canceled </description> <cause> <description> A previous event in the sequence of events will provide more information about the cause of this cancellation </description> </cause> </EventLongDescription> ClusterComplianceCheckedEventChecked cluster for complianceinfoChecked cluster {computeResource.name} for complianceCluster was checked for compliance with profile {profile.name}Checked cluster for compliance <EventLongDescription id="vim.event.ClusterComplianceCheckedEvent"> <description> The cluster was checked for compliance with a cluster profile </description> <cause> <description> The user initiated a compliance check on the cluster against a cluster profile </description> </cause> <cause> <description> A scheduled has initiated a compliance check for the cluster against a cluster profile </description> </cause> </EventLongDescription> ClusterCreatedEventCluster createdinfoCreated cluster {computeResource.name}Created in folder {parent.name}Created cluster {computeResource.name} in {datacenter.name}ClusterDestroyedEventCluster deletedinfoRemoved cluster {computeResource.name}Removed clusterRemoved cluster {computeResource.name} in datacenter {datacenter.name}ClusterEvent<Cluster Event>info<internal>ClusterOvercommittedEventCluster overcommittederrorInsufficient capacity in cluster {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource 
configurationInsufficient capacity in cluster {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.ClusterOvercommittedEvent"> <description> The cumulative CPU and/or memory resources of all hosts in the cluster are not adequate to satisfy the resource reservations of all virtual machines in the cluster </description> <cause> <description>You attempted to power on a virtual machine bypassing vCenter Server. This condition occurs when you attempt the power on using the vSphere Client directly connected to the host.</description> <action>In a DRS cluster, do not power on virtual machines bypassing vCenter Server</action> </cause> <cause> <description>A host was placed in Maintenance, Standby, or Disconnected Mode</description> <action>Bring any host in Maintenance, Standby, or Disconnected mode out of these modes</action> </cause> </EventLongDescription> ClusterReconfiguredEventCluster reconfiguredinfoReconfigured cluster {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Cluster reconfiguredReconfigured cluster {computeResource.name} in datacenter {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ClusterReconfiguredEvent"> <description> The cluster configuration was changed. The cluster configuration includes information about the DRS, DPM, EVC and vSphere HA settings of the cluster. All DRS rules are also stored in the cluster configuration. Editing the cluster configuration may trigger an invocation of DRS and/or enabling/disabling of vSphere HA on each host in the cluster. </description> </EventLongDescription> ClusterStatusChangedEventCluster status changedinfoConfiguration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.ClusterStatusChangedEvent"> <description> The cluster status has changed. This status is the status of the root resource pool that encompasses the entire cluster. A cluster status change may be accompanied by the removal of a configuration issue if one was previously detected. A cluster status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The cluster status changed to yellow</description> <action>Add more resources (more hosts), or reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The cluster status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> CustomFieldDefAddedEventCustom field definition addedinfoCreated new custom field definition {name}CustomFieldDefEvent<Custom Field Definition Event>info<internal>CustomFieldDefRemovedEventCustom field definition removedinfoRemoved field definition {name}CustomFieldDefRenamedEventCustom field definition renamedinfoRenamed field definition from {name} to {newName}CustomFieldEvent<Custom Field Event>info<internal>CustomFieldValueChangedEventCustom field value changedinfoChanged custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} in {datacenter.name} from '{prevState}' to '{value}'CustomizationEvent<Customization Event>info<internal>CustomizationFailed<An error occurred during customization>infoAn error occurred during customization, Reason: {reason.@enum.CustomizationFailed.ReasonCode}An error occurred during customization on VM {vm.name}, Reason: {reason.@enum.CustomizationFailed.ReasonCode}. 
See customization log at {logLocation} on the guest OS for details.CustomizationLinuxIdentityFailedCustomization Linux Identity FailederrorAn error occurred while setting up Linux identity. See log file '{logLocation}' on guest OS for details. <EventLongDescription id="vim.event.CustomizationLinuxIdentityFailed"> <description> The guest operating system Linux distribution is not supported by the customization scripts. Please refer to the VMware vSphere Compatibility Matrix for the list of the supported Linux distributions. </description> <cause> <description> Customization of the target guest operating system Linux distribution is not supported. </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> CustomizationNetworkSetupFailedCannot complete customization network setuperrorAn error occurred while setting up network properties of the guest OS. See the log file {logLocation} in the guest OS for details. <EventLongDescription id="vim.event.CustomizationNetworkSetupFailed"> <description> The customization scripts failed to set the parameters in the corresponding configuration files for Linux or in the Windows registry </description> <cause> <description> The Customization Specification contains an invalid host name or domain name </description> <action> Review the guest operating system log files for this event for more details </action> <action> Provide a valid host name for the target guest operating system. The name must comply with the host name and domain name definitions in RFC 952, 1035, 1123, 2181. </action> </cause> <cause> <description> Could not find a NIC with the MAC address specified in the Customization Package </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that there was no change in the virtual NIC MAC address between the creation of the Customization Package and its deployment. Deployment occurs during the first boot of the virtual machine after customization has been scheduled. </action> </cause> <cause> <description> The customization code needs read/write permissions for certain configuration files. These permissions were not granted to the 'root' account on Linux or to the account used by the VMware Tools Service on the Windows guest operating system. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Grant read/write permissions to the 'root' account for Linux or to the account used by the VMware Tools Service on the Windows guest operating system and the registry keys that need to be modified by the customization code </action> </cause> </EventLongDescription> CustomizationStartedEventStarted customizationinfoStarted customization of VM {vm.name}. Customization log located at {logLocation} in the guest OS.CustomizationSucceededCustomization succeededinfoCustomization of VM {vm.name} succeeded. Customization log located at {logLocation} in the guest OS.CustomizationSysprepFailedCannot complete customization SyspreperrorThe version of Sysprep {sysprepVersion} provided for customizing VM {vm.name} does not match the version of guest OS {systemVersion}. See the log file {logLocation} in the guest OS for more information. 
<EventLongDescription id="vim.event.CustomizationSysprepFailed"> <description> The sysprep files in the folder corresponding to the selected target guest operating system are not compatible with the actual version of the guest operation system </description> <cause> <description> The sysprep files in the folder corresponding to the target guest operating system (for example Windows XP) are for a different guest operating system (for example Windows 2003) </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> <cause> <description> The sysprep files in the folder corresponding to the guest operating system are for a different Service Pack, for example the guest operating system is Windows XP SP2 and but the sysprep files are for Windows XP SP1. </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> </EventLongDescription> CustomizationUnknownFailureUnknown customization errorerrorAn error occurred while customizing VM {vm.name}. For details reference the log file {logLocation} in the guest OS. <EventLongDescription id="vim.event.CustomizationUnknownFailure"> <description> The customization component failed to set the required parameters inside the guest operating system </description> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the registry keys used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on registry keys used by the customization code </action> </cause> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the files and folders used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on the files and folders used by the customization code </action> </cause> <cause> <description> On Linux, an invalid or unsupported time zone is passed to the customization scripts and the time zone configuration failed as a result </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that a supported time zone is passed in Customization Specification. 
</action> </cause> <cause> <description> On Linux, the guest operating system 'root' account does not have read/write permissions for the configuration files that the customization scripts need to modify ('/etc/hosts') </description> <action> Grant read/write permissions for the configuration files to the guest operating system 'root' account </action> </cause> <cause> <description> To enable guest customization on Linux, in case open-vm-tools are used, you must also install the deployPkg plug-in. </description> <action> Follow kb.vmware.com/kb/2075048 to install the open-vm-tools deployPkg plug-in. </action> </cause> <cause> <description> Customization of the target guest operating system is not supported </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> DVPortgroupCreatedEventdvPort group createdinfodvPort group {net.name} was added to switch {dvs}.dvPort group {net.name} in {datacenter.name} was added to switch {dvs.name}.DVPortgroupDestroyedEventdvPort group deletedinfodvPort group {net.name} was deleted.dvPort group {net.name} in {datacenter.name} was deleted.DVPortgroupEventdvPort group eventinfodvPort group eventdvPort group eventDVPortgroupReconfiguredEventdvPort group reconfiguredinfodvPort group {net.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}dvPort group {net.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DVPortgroupRenamedEventdvPort group renamedinfodvPort group {oldName} was renamed to {newName}.dvPort group {oldName} in {datacenter.name} was renamed to {newName}DasAdmissionControlDisabledEventvSphere HA admission control disabledinfovSphere HA admission control disabled for cluster {computeResource.name}vSphere HA admission control disabledvSphere HA admission control disabled for cluster {computeResource.name} in {datacenter.name}DasAdmissionControlEnabledEventvSphere HA admission control enabledinfovSphere HA admission control enabled for cluster {computeResource.name}vSphere HA admission control enabledvSphere HA admission control enabled for cluster {computeResource.name} in {datacenter.name}DasAgentFoundEventvSphere HA agent foundinfoRe-established contact with a primary host in this vSphere HA clusterDasAgentUnavailableEventvSphere HA agent unavailableerrorUnable to contact a primary vSphere HA agent in cluster {computeResource.name}Unable to contact a primary vSphere HA agentUnable to contact a primary vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasAgentUnavailableEvent"> <description> vCenter Server is not able to contact any good primary hosts in the vSphere HA cluster. vSphere HA protection may not be available for virtual machines running in the cluster. In addition, you cannot enable or reconfigure vSphere HA on hosts in the cluster until contact between vCenter Server and a good primary host is restored. </description> <cause> <description> There was a network outage, and all hosts show up in the inventory as "not responding" </description> <action>Restore the network</action> </cause> <cause> <description>All the primary hosts in the cluster failed</description> <action> If the failed primary hosts cannot be restored, disable vSphere HA on the cluster, wait for the Unconfigure vSphere HA tasks to complete on all hosts, and re-enable vSphere HA on the cluster </action> </cause> </EventLongDescription> DasClusterIsolatedEventAll vSphere HA hosts isolatederrorAll hosts in the vSphere HA cluster {computeResource.name} were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster {computeResource.name} in {datacenter.name} were isolated from the network. Check the network configuration for proper network redundancy in the management network.DasDisabledEventvSphere HA disabled for clusterinfovSphere HA disabled for cluster {computeResource.name}vSphere HA disabled for this clustervSphere HA disabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasDisabledEvent"> <description> vSphere HA has been disabled on this host due to a user action. vSphere HA is disabled when a host is disconnected from vCenter Server or placed into maintenance or standby mode. Virtual machines on other hosts in the cluster will not be failed over to this host in the event of a host failure. In addition, if the host is disconnected, any virtual machines running on this host will not be failed if the host fails. 
Further, no attempt will be made by vSphere HA VM and Application Monitoring to reset VMs. </description> </EventLongDescription> DasEnabledEventvSphere HA enabled for clusterinfovSphere HA enabled for cluster {computeResource.name}vSphere HA enabled for this clustervSphere HA enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasEnabledEvent"> <description> vSphere HA has been enabled on this host due to a user action. vSphere HA is enabled when a host is added to or moved into a vSphere HA cluster or when vSphere HA is enabled on a cluster. If the host was already in a vSphere HA cluster, vSphere HA will be enabled when the host is reconnected to vCenter Server or brought out of maintenance or standby mode. vSphere HA will attempt to protect any VMs that are running on the host at the time that HA is enabled on it. </description> </EventLongDescription> DasHostFailedEventvSphere HA host failederrorA possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name} in cluster {computeResource.name} in {datacenter.name}DasHostIsolatedEventvSphere HA host isolatedwarningHost {isolatedHost.name} has been isolated from cluster {computeResource.name}Host {isolatedHost.name} has been isolatedHost has been isolated from clusterHost {isolatedHost.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasHostIsolatedEvent"> <description> vSphere HA detected that the host is network isolated. When a host is in this state, vSphere HA applies the power-off or shutdown host isolation response to virtual machines running on the host, and continues to monitor the virtual machines that are left powered on. While a host is in this state, vSphere HA's ability to restart virtual machines after a failure is impacted. vSphere HA only powers off or shuts down a virtual machine if the agent on the host determines that a master agent is responsible for the virtual machine. </description> <cause> <description> A host is network isolated if both of the following conditions are met: (1) isolation addresses have been configured and the host is unable to ping them; (2) the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <action> Resolve the networking problem that is preventing the host from pinging its isolation addresses and communicating with other hosts. Ensure that there is redundancy in the management networks used by vSphere HA. With redundancy, vSphere HA is able to communicate over more than one path thus reducing the chance of a host becoming isolated. 
</action> </cause> </EventLongDescription> DatacenterCreatedEventDatacenter createdinfoCreated in folder {parent.name}Created datacenter {datacenter.name}Created datacenter {datacenter.name} in folder {parent.name}DatacenterEvent<Datacenter Event>info<internal>DatacenterRenamedEventDatacenter renamedinfoRenamed datacenterRenamed datacenter from {oldName} to {newName}Renamed datacenter from {oldName} to {newName}DatastoreCapacityIncreasedEventDatastore capacity increasedinfoDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytesDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytes in {datacenter.name}DatastoreDestroyedEventDatastore deletedinfoRemoved unconfigured datastore {datastore.name}Removed unconfigured datastore {datastore.name}DatastoreDiscoveredEventDatastore discoveredinfoDiscovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name}Discovered datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreDiscoveredEvent"> <description> A datastore was discovered on a host </description> <cause> <description> A host that has access to this datastore was added to the datacenter </description> </cause> <cause> <description> The storage backing this datastore was unmasked to a host in the datacenter </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host and the datastore was visible on at least one other host in the datacenter prior to this operation. </description> </cause> </EventLongDescription> DatastoreDuplicatedEventDatastore duplicatederrorMultiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detectedMultiple datastores named {datastore} detected on host {host.name} in {datacenter.name}DatastoreEvent<Datastore Event>info<internal>DatastoreFileCopiedEventFile or directory copied to datastoreinfoCopy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Copy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileDeletedEventFile or directory deletedinfoDeletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Deletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileEvent<Datastore File Event>info<internal>DatastoreFileMovedEventFile or directory moved to datastoreinfoMove of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Move of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from 
'{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreIORMReconfiguredEventReconfigured Storage I/O Control on datastoreinfoReconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}DatastorePrincipalConfiguredDatastore principal configuredinfoConfigured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal}Configured datastore principal {datastorePrincipal} on host {host.name} in {datacenter.name}DatastoreRemovedOnHostEventDatastore removed from hostinfoRemoved datastore {datastore.name} from {host.name}Removed datastore {datastore.name}Removed datastore {datastore.name} from {host.name} in {datacenter.name}DatastoreRenamedEventDatastore renamedinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name}DatastoreRenamedOnHostEventDatastore renamed from hostinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreRenamedOnHostEvent"> <description> A datastore was renamed on a host managed by vCenter Server </description> <cause> <description> vCenter Server discovered datastore on a host and renamed the datastore because it already exists in the vCenter Server inventory under a different name. vCenter Server might also have renamed the datastore because the name conflicts with another datastore in the same datacenter. </description> </cause> </EventLongDescription> DrsDisabledEventDRS disabledinfoDisabled DRS on cluster {computeResource.name}Disabled DRSDisabled DRS on cluster {computeResource.name} in datacenter {datacenter.name}DrsEnabledEventDRS enabledinfoEnabled DRS on cluster {computeResource.name} with automation level {behavior}Enabled DRS with automation level {behavior}Enabled DRS on {computeResource.name} with automation level {behavior} in {datacenter.name}DrsEnteredStandbyModeEventDRS entered standby modeinfoDRS put {host.name} into standby modeDRS put {host.name} into standby modeDRS put the host into standby modeDRS put {host.name} into standby modeDrsEnteringStandbyModeEventDRS entering standby modeinfoDRS is putting {host.name} into standby modeDRS is putting {host.name} into standby modeDRS is putting the host into standby modeDRS is putting {host.name} into standby modeDrsExitStandbyModeFailedEventDRS cannot exit the host out of standby modeerrorDRS cannot move {host.name} out of standby modeDRS cannot move {host.name} out of standby modeDRS cannot move the host out of standby modeDRS cannot move {host.name} out of standby mode <EventLongDescription id="vim.event.DrsExitStandbyModeFailedEvent"> <description> DPM failed to power on a host in standby mode. DPM tried to power on a host using IPMI, iLO or Wake-on-LAN protocol, but the host did not power on. 
</description> <cause> <description>DPM could not communicate with the BMC on the host</description> <action>Verify the IPMI/iLO credentials entered in vCenter Server</action> <action>Verify that LAN access is enabled in the BMC</action> </cause> <cause> <description>The vMotion NIC on the host does not support Wake-on-LAN</description> <action>Select a vMotion NIC that supports Wake-on-LAN</action> </cause> </EventLongDescription> DrsExitedStandbyModeEventDRS exited standby modeinfoDRS moved {host.name} out of standby modeDRS moved {host.name} out of standby modeDRS moved the host out of standby modeDRS moved {host.name} out of standby modeDrsExitingStandbyModeEventDRS exiting standby modeinfoDRS is moving {host.name} out of standby modeDRS is moving {host.name} out of standby modeDRS is moving the host out of standby modeDRS is moving {host.name} out of standby modeDrsInvocationFailedEventDRS invocation not completederrorDRS invocation not completedDRS invocation not completedDRS invocation not completed <EventLongDescription id="vim.event.DrsInvocationFailedEvent"> <description> A DRS invocation failed to complete successfully. This condition can occur for a variety of reasons, some of which may be transient. </description> <cause> <description>An error was encountered during a DRS invocation</description> <action>Disable and re-enable DRS</action> </cause> </EventLongDescription> DrsRecoveredFromFailureEventDRS has recovered from the failureinfoDRS has recovered from the failureDRS has recovered from the failureDRS has recovered from the failureDrsResourceConfigureFailedEventCannot complete DRS resource configurationerrorUnable to apply DRS resource settings on host. {reason.msg}. This can significantly reduce the effectiveness of DRS.Unable to apply DRS resource settings on host {host.name} in {datacenter.name}. {reason.msg}. This can significantly reduce the effectiveness of DRS. <EventLongDescription id="vim.event.DrsResourceConfigureFailedEvent"> <description> The DRS resource settings could not be successfully applied to a host in the cluster. This condition is typically transient. </description> <cause> <description>DRS resource settings could not be applied to a host.</description> <action>DRS generates resource settings that map the cluster values to the host. However, in this case, the values could not be successfully applied to the host. This is typically a transient error caused by delayed synchronization from DRS to the host. If this condition persists, enable debug logging in vpxa and contact VMware Support. 
</action> </cause> </EventLongDescription> DrsResourceConfigureSyncedEventDRS resource configuration synchronizedinfoResource configuration specification returns to synchronization from previous failureResource configuration specification returns to synchronization from previous failure on host '{host.name}' in {datacenter.name}DrsRuleComplianceEventVM is now compliant with DRS VM-Host affinity rulesinfo{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} is now compliant with DRS VM-Host affinity rulesvirtual machine on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} in {datacenter.name} is now compliant with DRS VM-Host affinity rulesDrsRuleViolationEventVM is violating a DRS VM-Host affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} is violating a DRS VM-Host affinity rulevirtual machine on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host affinity ruleDrsSoftRuleViolationEventThe VM is violating a DRS VM-Host soft affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} is violating a DRS VM-Host soft affinity rulevirtual machine on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host soft affinity ruleDrsVmMigratedEventDRS VM migratedinfoDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name}DRS migrated {vm.name} from {sourceHost.name} to {host.name}DRS migrated {vm.name} from {sourceHost.name}Migrated from {sourceHost.name} to {host.name} by DRSDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmMigratedEvent"> <description> A virtual machine was migrated based on a DRS recommendation. The recommendation might have been made be to achieve better load balancing in the cluster or to evacuate a host in the cluster that is being put into Standby or Maintenance Mode. 
</description> <cause> <description>DRS recommended the migration of a virtual machine</description> </cause> </EventLongDescription> DrsVmPoweredOnEventDRS VM powered oninfoDRS powered on {vm.name} on {host.name}DRS powered on {vm.name} on {host.name}DRS powered on {vm.name}DRS powered on the virtual machine on {host.name}DRS powered on {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmPoweredOnEvent"> <description> A virtual machine was powered on by the user and DRS choose a host for the virtual machine based on the current cluster load distribution combined with the virtual machine's resource requirements </description> <cause> <description>DRS chose a host for a virtual machine that was being powered on</description> </cause> </EventLongDescription> DuplicateIpDetectedEventDuplicate IP detectedinfoVirtual machine {macAddress} has a duplicate IP {duplicateIP}Virtual machine {macAddress} on host {host.name} has a duplicate IP {duplicateIP}DvpgImportEventImport Operation eventinfoImport operation with type {importType} was performed on {net.name}Import operation with type {importType} was performed on {net.name}DvpgRestoreEventRestore Operation eventinfoRestore operation was performed on {net.name}Restore operation was performed on {net.name}DvsCreatedEventvSphere Distributed Switch createdinfoA vSphere Distributed Switch {dvs.name} was createdA vSphere Distributed Switch {dvs.name} was created in {datacenter.name}.DvsDestroyedEventvSphere Distributed Switch deletedinfovSphere Distributed Switch {dvs.name} was deleted.vSphere Distributed Switch {dvs.name} in {datacenter.name} was deleted.DvsEventvSphere Distributed Switch eventinfovSphere Distributed Switch eventvSphere Distributed Switch eventDvsHealthStatusChangeEventHealth check status of the switch changed.infoHealth check status changed in vSphere Distributed Switch {dvs.name} on host {host.name}Health check status changed in vSphere Distributed Switch {dvs.name}Health check status was changed in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}DvsHostBackInSyncEventThe vSphere Distributed Switch configuration on the host was synchronized with that of the vCenter Server.infoThe vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.DvsHostJoinedEventHost joined the vSphere Distributed SwitchinfoThe host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name}.The host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostLeftEvent Host left vSphere Distributed SwitchinfoThe host {hostLeft.name} left the vSphere Distributed Switch {dvs.name}.The host {hostLeft.name} left the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostStatusUpdatedHost status changed on the vSphere Distributed SwitchinfoThe host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name}.The host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostWentOutOfSyncEventThe vSphere Distributed Switch configuration on the host differed from that of the vCenter Server.warningThe vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server. 
<EventLongDescription id="vim.event.DvsHostWentOutOfSyncEvent"> <description> The vSphere Distributed Switch configuration on the host differed from that of the vCenter Server </description> <cause> <description> The host was not connected to the vCenter Server when updates were sent </description> </cause> <cause> <description> vCenter Server failed to push the vSphere Distributed Switch configuration to the host in the past</description> </cause> </EventLongDescription> DvsImportEventImport Operation eventinfoImport operation with type {importType} was performed on {dvs.name}Import operation with type {importType} was performed on {dvs.name}DvsMergedEventvSphere Distributed Switch mergedinfovSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name}.vSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name} in {datacenter.name}.DvsPortBlockedEventdvPort blockedinfoThe dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortConnectedEventdvPort connectedinfoThe dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortCreatedEventdvPort createdinfoNew ports were created in the vSphere Distributed Switch {dvs.name}.New ports were created in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDeletedEventdvPort deletedinfoPorts were deleted in the vSphere Distributed Switch {dvs.name}.Deleted ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDisconnectedEventdvPort disconnectedinfoThe dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortEnteredPassthruEventdvPort in passthrough modeinfoThe dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortExitedPassthruEventdvPort not in passthrough modeinfoThe dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortJoinPortgroupEventA dvPort was moved into the dvPort group.infoThe dvPort {portKey} was moved into the dvPort group {portgroupName}.The dvPort {portKey} was moved into the dvPort group {portgroupName} in {datacenter.name}.DvsPortLeavePortgroupEventA dvPort was moved out of the dvPort group.infoThe dvPort {portKey} was moved out of the dvPort group {portgroupName}.The dvPort {portKey} was moved out of the dvPort group {portgroupName} in {datacenter.name}.DvsPortLinkDownEventdvPort link was downinfoThe dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortLinkUpEventdvPort link was upinfoThe dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortReconfiguredEventdvPort reconfiguredinfoPorts were reconfigured in the vSphere Distributed Switch {dvs.name}.
Ports changed {portKey}.
Changes are {configChanges}Reconfigured ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.
Ports changed {portKey}.
Changes are {configChanges}DvsPortRuntimeChangeEventdvPort runtime information changed.infoThe dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortUnblockedEventdvPort unblockedinfoThe dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortVendorSpecificStateChangeEventdvPort vendor specific state changed.infoThe dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsReconfiguredEventvSphere Distributed Switch reconfiguredinfoThe vSphere Distributed Switch {dvs.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}The vSphere Distributed Switch {dvs.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DvsRenamedEventvSphere Distributed Switch renamedinfoThe vSphere Distributed Switch {oldName} was renamed to {newName}.The vSphere Distributed Switch {oldName} in {datacenter.name} was renamed to {newName}.DvsRestoreEventRestore Operation eventinfoRestore operation was performed on {dvs.name}Restore operation was performed on {dvs.name}DvsUpgradeAvailableEventAn upgrade for the vSphere Distributed Switch is available.infoAn upgrade for vSphere Distributed Switch {dvs.name} is available. An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is available.DvsUpgradeInProgressEventAn upgrade for the vSphere Distributed Switch is in progress.infoAn upgrade for vSphere Distributed Switch {dvs.name} is in progress.An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is in progress.DvsUpgradeRejectedEventCannot complete the upgrade for the vSphere Distributed SwitchinfoAn upgrade for vSphere Distributed Switch {dvs.name} was rejected.Cannot complete an upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name}DvsUpgradedEventThe vSphere Distributed Switch was upgraded.infovSphere Distributed Switch {dvs.name} was upgraded.vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} was upgraded.EnteredMaintenanceModeEventEntered maintenance modeinfoHost {host.name} in {datacenter.name} has entered maintenance modeHost {host.name} in {datacenter.name} has entered maintenance modeEnter maintenance mode completed. All virtual machine operations are disabledHost {host.name} in {datacenter.name} has entered maintenance modeEnteredStandbyModeEventEntered standby modeinfoEntered standby modeThe host {host.name} is in standby modeEnteringMaintenanceModeEventEntering maintenance modeinfoHost {host.name} has started to enter maintenance modeHost {host.name} has started to enter maintenance modeStarted to enter maintenance mode. 
Waiting for virtual machines to shut down, suspend, or migrateHost {host.name} in {datacenter.name} has started to enter maintenance modeEnteringStandbyModeEventEntering standby modeinfoEntering standby modeThe host {host.name} is entering standby modeErrorUpgradeEventUpgrade errorerror{message} <EventLongDescription id="vim.event.ErrorUpgradeEvent"> <description> An error occurred during agent upgrade </description> </EventLongDescription> Event<Event>info<internal>ExitMaintenanceModeEventExit maintenance modeinfoHost {host.name} has exited maintenance modeHost {host.name} has exited maintenance modeExited maintenance modeHost {host.name} in {datacenter.name} has exited maintenance modeExitStandbyModeFailedEventCannot exit standby modeerrorCould not exit standby modeThe host {host.name} could not exit standby modeExitedStandbyModeEventExited standby modeinfoExited standby modeThe host {host.name} is no longer in standby modeExitingStandbyModeEventExiting standby modeinfoExiting standby modeThe host {host.name} is exiting standby modeFailoverLevelRestoredvSphere HA failover resources are sufficientinfoSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name}Sufficient resources are available to satisfy vSphere HA failover levelSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name} in {datacenter.name}GeneralEventGeneral eventinfoGeneral event: {message}GeneralHostErrorEventHost errorerrorError detected on {host.name}: {message}Error detected on {host.name}: {message}{message}Error detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostErrorEvent"> <description> An error occurred on the host </description> <cause> <description> The agent cannot send heartbeats because of a networking related failure on host </description> </cause> <cause> <description> The agent failed to update the configuration file on host </description> </cause> <cause> <description> The agent failed to save the configuration file to disk on host </description> </cause> <cause> <description> The provisioning module failed to load. As a result, all provisioning operations will fail on host. 
</description> </cause> </EventLongDescription> GeneralHostInfoEventHost informationinfoIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostInfoEvent"> <description> A general information event occurred on the host </description> </EventLongDescription> GeneralHostWarningEventHost warningwarningIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostWarningEvent"> <description> A general warning event occurred on the host </description> <cause> <description> Virtual machine creation might fail because the agent was unable to retrieve virtual machine creation options from the host </description> </cause> </EventLongDescription> GeneralUserEventUser eventuserUser logged event: {message} <EventLongDescription id="vim.event.GeneralUserEvent"> <description> A general user event occurred on the host </description> <cause> <description> A user initiated an action on the host </description> </cause> </EventLongDescription> GeneralVmErrorEventVM errorerrorError detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name}: {message}{message} on {host.name}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmErrorEvent"> <description> An error occurred on the virtual machine </description> </EventLongDescription> GeneralVmInfoEventVM informationinfoIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmInfoEvent"> <description> A general information event occurred on the virtual machine </description> </EventLongDescription> GeneralVmWarningEventVM warningwarningIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmWarningEvent"> <description> A general warning event occurred on the virtual machine </description> </EventLongDescription> GhostDvsProxySwitchDetectedEventThe vSphere Distributed Switch corresponding to the proxy switches on the host does not exist in vCenter Server or does not contain this host.infoThe vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host does not exist in vCenter Server or does not contain this host.The vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host {host.name} does not exist in vCenter Server or does not contain this host. 
<EventLongDescription id="vim.event.GhostDvsProxySwitchDetectedEvent"> <description> vCenter Server found a vSphere Distributed Switch proxy switch on the host that does not match any vSphere Distributed Switch in vCenter Server </description> <cause> <description> The vSphere Distributed Switch corresponding to the vSphere Distributed Switch proxy switch on the host was deleted while host was disconnected from the vCenter Server </description> </cause> <cause> <description> The host is no longer a member of the vSphere Distributed Switch that the proxy switch in the host corresponds to </description> </cause> </EventLongDescription> GhostDvsProxySwitchRemovedEventA ghost proxy switch on the host was resolved.infoA ghost proxy switch {switchUuid} on the host was resolved.A ghost proxy switch {switchUuid} on the host {host.name} was resolved.GlobalMessageChangedEventMessage changedinfoThe message changed: from '{prevMessage}' to '{message}'HealthStatusChangedEventStatus changeinfo{componentName} status changed from {oldStatus} to {newStatus}HostAddFailedEventCannot add hosterrorCannot add host {hostname}Cannot add host {hostname}Cannot add host {hostname} to datacenter {datacenter.name} <EventLongDescription id="vim.event.HostAddFailedEvent"> <description> Adding a host failed </description> </EventLongDescription> HostAddedEventHost AddedinfoAdded host {host.name}Added host {host.name}Added host {host.name} to datacenter {datacenter.name}HostAdminDisableEventHost administrator access disabledwarningAdministrator access to the host is disabledAdministrator access to the host {host.name} is disabled <EventLongDescription id="vim.event.HostAdminDisableEvent"> <description> Host permissions have been changed so that only the account used for vCenter Server operations has Administrator permissions </description> <cause> <description> This condition occurs when vCenter Server removes all other Administrator access to the host because the host has been placed in Lockdown Mode. The host can be managed by vCenter Server only and Only vCenter Server can re-enable Administrator access for other accounts. 
</description> </cause> </EventLongDescription> HostAdminEnableEventHost administrator access enabledwarningAdministrator access to the host has been restoredAdministrator access to the host {host.name} has been restored <EventLongDescription id="vim.event.HostAdminEnableEvent"> <description> vCenter Server has restored Administrator permissions for host user accounts whose permissions were disabled by Lockdown Mode </description> <cause> <description> This condition occurs when vCenter Server restores Administrator access to host user accounts that lost their Administrator permissions when the host was placed in Lockdown Mode </description> </cause> </EventLongDescription> HostCnxFailedAccountFailedEventCannot connect host and configure management accounterrorCannot connect {host.name}: cannot configure management accountCannot connect {host.name}: cannot configure management accountCannot connect: cannot configure management accountCannot connect {host.name} in {datacenter.name}: cannot configure management account <EventLongDescription id="vim.event.HostCnxFailedAccountFailedEvent"> <description> Could not connect to the host because setting up a management account failed </description> <cause> <description> The account used by vCenter Server to manage the host could not be configured </description> </cause> </EventLongDescription> HostCnxFailedAlreadyManagedEventCannot connect host - already managederrorCannot connect {host.name}: already managed by {serverName}Cannot connect {host.name}: already managed by {serverName}Cannot connect: already managed by {serverName}Cannot connect {host.name} in {datacenter.name}: already managed by {serverName} <EventLongDescription id="vim.event.HostCnxFailedAlreadyManagedEvent"> <description> Could not connect to the host because it is already being managed by a different vCenter Server instance. 
</description> <cause> <description> The host is already being managed by a different vCenter Server instance </description> <action> Remove the host from the inventory for the other vCenter Server instance </action> <action> Force the addition of the host to the current vCenter Server instance </action> </cause> </EventLongDescription> HostCnxFailedBadCcagentEventCannot connect host - incorrect CcagenterrorCannot connect {host.name} : server agent is not respondingCannot connect {host.name} : server agent is not respondingCannot connect: server agent is not respondingCannot connect host {host.name} in {datacenter.name} : server agent is not responding <EventLongDescription id="vim.event.HostCnxFailedBadCcagentEvent"> <description> Could not connect to the host because the host agent did not respond </description> <cause> <description> No response was received from the host agent </description> <action> Restart the host agent on the ESX/ESXi host </action> </cause> </EventLongDescription> HostCnxFailedBadUsernameEventCannot connect host - incorrect user nameerrorCannot connect {host.name}: incorrect user name or passwordCannot connect {host.name}: incorrect user name or passwordCannot connect: incorrect user name or passwordCannot connect {host.name} in {datacenter.name}: incorrect user name or password <EventLongDescription id="vim.event.HostCnxFailedBadUsernameEvent"> <description> Could not connect to the host due to an invalid username and password combination </description> <cause> <description> Invalid username and password combination </description> <action> Use the correct username and password </action> </cause> </EventLongDescription> HostCnxFailedBadVersionEventCannot connect host - incompatible versionerrorCannot connect {host.name}: incompatible versionCannot connect {host.name}: incompatible versionCannot connect: incompatible versionCannot connect {host.name} in {datacenter.name}: incompatible version <EventLongDescription id="vim.event.HostCnxFailedBadVersionEvent"> <description> Could not connect to the host due to an incompatible vSphere Client version </description> <cause> <description> The version of the vSphere Client is incompatible with the ESX/ESXi host so the connection attempt failed </description> <action> Download and use a compatible vSphere Client version to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedCcagentUpgradeEventCannot connect host - Ccagent upgradeerrorCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect: did not install or upgrade vCenter agent serviceCannot connect host {host.name} in {datacenter.name}. Did not install or upgrade vCenter agent service. 
<EventLongDescription id="vim.event.HostCnxFailedCcagentUpgradeEvent"> <description> Could not connect to the host because a host agent upgrade or installation is in process </description> <cause> <description> The host agent is being upgraded or installed on the host </description> <action> Wait for the host agent upgrade or installation to complete </action> </cause> </EventLongDescription> HostCnxFailedEventCannot connect hosterrorCannot connect host {host.name}: error connecting to hostCannot connect host {host.name}: error connecting to hostCannot connect: error connecting to hostCannot connect {host.name} in {datacenter.name}: error connecting to host <EventLongDescription id="vim.event.HostCnxFailedEvent"> <description> Could not connect to the host due to an unspecified condition </description> <cause> <description> Unknown cause of failure </description> </cause> </EventLongDescription> HostCnxFailedNetworkErrorEventCannot connect host - network errorerrorCannot connect {host.name}: network errorCannot connect {host.name}: network errorCannot connect: network errorCannot connect {host.name} in {datacenter.name}: network error <EventLongDescription id="vim.event.HostCnxFailedNetworkErrorEvent"> <description> Could not connect to the host due to a network error </description> <cause> <description> A Network error occurred while connecting to the host </description> <action> Verify that host networking is configured correctly </action> </cause> </EventLongDescription> HostCnxFailedNoAccessEventCannot connect host - no accesserrorCannot connect {host.name}: account has insufficient privilegesCannot connect {host.name}: account has insufficient privilegesCannot connect: account has insufficient privilegesCannot connect host {host.name} in {datacenter.name}: account has insufficient privileges <EventLongDescription id="vim.event.HostCnxFailedNoAccessEvent"> <description> Could not connect to the host due to insufficient account privileges </description> <cause> <description> The account used to connect to host does not have host access privileges </description> <action> Use an account that has sufficient privileges to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedNoConnectionEventCannot connect host - no connectionerrorCannot connect {host.name}Cannot connect {host.name}Cannot connect to hostCannot connect host {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostCnxFailedNoConnectionEvent"> <description> Could not connect to the host because the host is not in the network </description> <cause> <description> The host that you are attempting to connect to is not present in the network </description> <action> Verify that host networking is configured correctly and the host is connected to the same network as vCenter Server </action> </cause> </EventLongDescription> HostCnxFailedNoLicenseEventCannot connect host - no licenseerrorCannot connect {host.name}: not enough CPU licensesCannot connect {host.name}: not enough CPU licensesCannot connect: not enough CPU licensesCannot connect {host.name} in {datacenter.name}: not enough CPU licenses <EventLongDescription id="vim.event.HostCnxFailedNoLicenseEvent"> <description> Could not connect to the host due to a licensing issue </description> <cause> <description> There are not enough licenses to add the host to the vCenter Server inventory. This event is accompanied by a fault that specifies the missing licenses required to add the host successfully. 
</description> <action> Add the necessary licenses to vCenter Server and try adding the host again </action> </cause> </EventLongDescription> HostCnxFailedNotFoundEventCannot connect host - host not founderrorCannot connect {host.name}: incorrect host nameCannot connect {host.name}: incorrect host nameCannot connect: incorrect host nameCannot connect {host.name} in {datacenter.name}: incorrect host name <EventLongDescription id="vim.event.HostCnxFailedNotFoundEvent"> <description> Could not connect to the host because vCenter Server could not resolve the host name </description> <cause> <description> Unable to resolve the host name of the host </description> <action> Verify that the correct host name has been supplied for the host </action> <action> Configure the host to use a known-good (resolvable) host name </action> <action> Add the host name to the DNS server </action> </cause> </EventLongDescription> HostCnxFailedTimeoutEventCannot connect host - time-outerrorCannot connect {host.name}: time-out waiting for host responseCannot connect {host.name}: time-out waiting for host responseCannot connect: time-out waiting for host responseCannot connect {host.name} in {datacenter.name}: time-out waiting for host response <EventLongDescription id="vim.event.HostCnxFailedTimeoutEvent"> <description> Could not connect to the host because the connection attempt timed out </description> <cause> <description> A timeout occurred while attempting to connect to the host </description> </cause> </EventLongDescription> HostComplianceCheckedEventChecked host for complianceinfoHost {host.name} checked for compliance with profile {profile.name}Host {host.name} checked for compliance with profile {profile.name}Checked host for compliance with profile {profile.name}Host {host.name} checked for compliance. 
<EventLongDescription id="vim.event.HostComplianceCheckedEvent"> <description> The host was checked for compliance with a host profile </description> <cause> <description> The user initiated a compliance check on the host against a host profile </description> </cause> <cause> <description> A scheduled task initiated a compliance check for the host against a host profile </description> </cause> </EventLongDescription> HostCompliantEventHost compliant with profileinfoHost is in compliance with the attached profile.Host {host.name} is in compliance with the attached profileHostConfigAppliedEventHost configuration changes applied to hostinfoHost configuration changes applied to {host.name}Host configuration changes applied to {host.name}Host configuration changes applied.Host configuration changes applied.HostConnectedEventHost connectedinfoConnected to {host.name}Connected to {host.name}Established a connectionConnected to {host.name} in {datacenter.name}HostConnectionLostEventHost connection losterrorHost {host.name} is not respondingHost {host.name} is not respondingHost is not respondingHost {host.name} in {datacenter.name} is not responding <EventLongDescription id="vim.event.HostConnectionLostEvent"> <description> Connection to the host has been lost </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostDasDisabledEventvSphere HA agent disabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is disabledvSphere HA agent on {host.name} is disabledvSphere HA agent on this host is disabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is disabledHostDasDisablingEventDisabling vSphere HAinfovSphere HA is being disabled on {host.name}vSphere HA is being disabled on {host.name}Disabling vSphere HAvSphere HA is being disabled on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}HostDasEnabledEventvSphere HA agent enabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is enabledvSphere HA agent on {host.name} is enabledvSphere HA agent on this host is enabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is enabledHostDasEnablingEventEnabling host vSphere HA agentwarningEnabling vSphere HA agent on {host.name}Enabling vSphere HA agent on {host.name}Enabling vSphere HA agentEnabling vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.HostDasEnablingEvent"> <description> vSphere HA is being enabled on this host. 
</description> </EventLongDescription> HostDasErrorEventvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error {message}: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}HostDasEvent<Host vSphere HA Event>info<internal>HostDasOkEventvSphere HA agent configuredinfovSphere HA agent on host {host.name} is configured correctlyvSphere HA agent on host {host.name} is configured correctlyvSphere HA agent is configured correctlyvSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} is configured correctlyHostDisconnectedEventHost disconnectedinfoDisconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from host. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name} in {datacenter.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}HostEnableAdminFailedEventCannot restore administrator permissions to hosterrorCannot restore some administrator permissions to the hostCannot restore some administrator permissions to the host {host.name}HostEvent<Host Event>info<internal>HostExtraNetworksEventHost has extra vSphere HA networkserrorHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostExtraNetworksEvent"> <description> The host being added to the vSphere HA cluster has more management networks than existing hosts in the cluster. When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, be on the same subnets. This helps to facilitate the pairing up of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnet of the joining member that are not present on the existing member. </description> <cause> <description> The host has extra networks missing on an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. 
For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for the cluster. </action> </cause> </EventLongDescription> HostGetShortNameFailedEventCannot get short host nameerrorCannot complete command 'hostname -s' or returned incorrect name formatCannot complete command 'hostname -s' on host {host.name} or returned incorrect name format <EventLongDescription id="vim.event.HostGetShortNameFailedEvent"> <description> The hostname -s command has failed on the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostInAuditModeEventHost is in audit mode.infoHost is running in audit mode.Host {host.name} is running in audit mode. The host's configuration will not be persistent across reboots.HostInventoryFullEventHost inventory fullerrorMaximum ({capacity}) number of hosts allowed for this edition of vCenter Server has been reached <EventLongDescription id="vim.event.HostInventoryFullEvent"> <description> The vCenter Server Foundation license key currently allows only three hosts to be added to the inventory. Adding extra hosts results in errors and the logging of this event. </description> <cause> <description>Attempting to add more hosts than the number allowed by the license key assigned to vCenter Server</description> <action>Assign vCenter Server a license key that allows more hosts or has no host limit</action> </cause> </EventLongDescription> HostInventoryUnreadableEventHost Inventory UnreadableinfoThe virtual machine inventory file is damaged or unreadable.The virtual machine inventory file on host {host.name} is damaged or unreadable.HostIpChangedEventHost IP changedinfoIP address changed from {oldIP} to {newIP}IP address of the host {host.name} changed from {oldIP} to {newIP} <EventLongDescription id="vim.event.HostIpChangedEvent"> <description> The IP address of the host was changed </description> <cause> <description> The IP address of the host was changed through vCenter Server </description> </cause> <cause> <description> The IP address of the host was changed through the host </description> </cause> </EventLongDescription> HostIpInconsistentEventHost IP inconsistenterrorConfiguration of host IP address is inconsistent: address resolved to {ipAddress} and {ipAddress2}Configuration of host IP address is inconsistent on host {host.name}: address resolved to {ipAddress} and {ipAddress2}HostIpToShortNameFailedEventHost IP to short name not completederrorCannot resolve IP address to short nameCannot resolve IP address to short name on host {host.name} <EventLongDescription id="vim.event.HostIpToShortNameFailedEvent"> <description> The host's IP address could not be resolved to a short name </description> <cause> <description>The host or DNS records are improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostIsolationIpPingFailedEventvSphere HA isolation address unreachableerrorvSphere HA agent on host {host.name} in cluster {computeResource.name} could not reach isolation address: 
{isolationIp}vSphere HA agent on host {host.name} could not reach isolation address: {isolationIp}vSphere HA agent on this host could not reach isolation address: {isolationIp}vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} could not reach isolation address: {isolationIp} <EventLongDescription id="vim.event.HostIsolationIpPingFailedEvent"> <description> vSphere HA was unable to ping one or more of the isolation IP addresses. The inability to ping the addresses may cause HA to incorrectly declare the host as network isolated. A host is declared as isolated if it cannot ping the configured isolation addresses and the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <cause> <description>Could not ping the isolation address</description> <action>Correct the cause of the failure to ping the address</action> <action> Use advanced options to change the addresses used by vSphere HA for determining if a host is network isolated. By default, the isolation address is the default gateway of the management network. You can override the default using advanced options, or specify additional addresses to use for determining if a host is network isolated. Set the das.useDefaultIsolationAddress advanced option to "false" if you prefer that vSphere HA not use the default gateway as the isolation address. Specify the das.isolationAddress[X] advanced option for each isolation address that you want to specify. The new values take effect when vSphere HA is reconfigured for each host. </action> </cause> </EventLongDescription> HostLicenseExpiredEventHost license expirederrorA host license for {host.name} has expired <EventLongDescription id="vim.event.HostLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of host licenses on the license server and uses this event to notify you of any host licenses that are about to expire </description> <cause> <description>Host licenses on the license server are about to expire</description> <action>Update the license server to get a new version of the host license</action> </cause> </EventLongDescription> HostLocalPortCreatedEventA host local port is created to recover from management network connectivity loss.infoA host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic}.A host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic} on the host {host.name}.HostMissingNetworksEventHost is missing vSphere HA networkserrorHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostMissingNetworksEvent"> <description> The host being added to the vSphere HA cluster has fewer management networks than existing hosts in the cluster. 
When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, have common subnets. This helps facilitate the pairing of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnets of the existing member that are not present on the joining member. </description> <cause> <description> The host does not have networks compatible with an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. After you change the host's network configuration, reconfigure vSphere HA for this host. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for this cluster. </action> </cause> </EventLongDescription> HostMonitoringStateChangedEventvSphere HA host monitoring state changedinfovSphere HA host monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'HostNoAvailableNetworksEventHost has no available networks for vSphere HA communicationerrorHost {host.name} in cluster {computeResource.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}This host currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips} <EventLongDescription id="vim.event.HostNoAvailableNetworksEvent"> <description> The host being added to the vSphere HA cluster has no management networks available for vSphere HA cluster communication. The advanced option das.allowNetwork[X] is set, but no port group names match the advanced option for this host. 
</description> <cause> <description> The host has no port groups that match the names used by the advanced options to control which port groups vSphere HA uses </description> <action> Delete the advanced options das.allowNetwork[X] to allow vSphere HA to select the default management port groups </action> <action> Correct the names of the port groups specified in the advanced options to match those to be used by vSphere HA for this host </action> <action> Specify additional das.allowNetwork[X] advanced options to match the port group names for this host </action> </cause> </EventLongDescription> HostNoHAEnabledPortGroupsEventHost has no port groups enabled for vSphere HAerrorHost {host.name} in cluster {computeResource.name} has no port groups enabled for vSphere HA communication.Host {host.name} has no port groups enabled for vSphere HA communication.This host has no port groups enabled for vSphere HA communication.Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no port groups enabled for vSphere HA communication. <EventLongDescription id="vim.event.HostNoHAEnabledPortGroupsEvent"> <description> vSphere HA has determined that there are no management networks available on the host for vSphere HA inter-agent communication. </description> <cause> <description> The host has no vSphere HA management networks available </description> <action> If this event is observed when the host is being added to a vSphere HA cluster, change the host's network configuration to enable vSphere HA traffic on one or more port groups. By default, vSphere HA will use the Service Console port groups on ESX and ESXi hosts, the port groups with the Management Traffic checkbox selected. If vSphere HA was already configured on the host, it is possible that the host's network settings have changed and invalidated the management network configuration. Review the settings to make sure the port groups configured for management network still exist on the host and for ESXi the Management Traffic option is enabled. Reconfigure vSphere HA on the host after fixing any configuration issues. </action> </cause> </EventLongDescription> HostNoRedundantManagementNetworkEventNo redundant management network for hostwarningHost {host.name} in cluster {computeResource.name} currently has no management network redundancyHost {host.name} currently has no management network redundancyThis host currently has no management network redundancyHost {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no management network redundancy <EventLongDescription id="vim.event.HostNoRedundantManagementNetworkEvent"> <description> vSphere HA has determined that there is only one path for vSphere HA management traffic, resulting in a single point of failure. Best practices require more than one path for vSphere HA to use for heartbeats and cluster communication. A host with a single path is more likely to be declared dead, network partitioned or isolated after a network failure. If declared dead, vSphere HA will not respond if the host subsequently actually fails, while if declared isolated, vSphere HA may apply the isolation response thus impacting the uptime of the virtual machines running on it. 
</description> <cause> <description>There is only one port group available for vSphere HA communication</description> <action>Configure another Service Console port group on the ESX host</action> <action> Configure another port group on the ESXi host by selecting the "Management Traffic" check box </action> <action> Use NIC teaming on the management port group to allow ESX or ESXi to direct management traffic out of more than one physical NIC in case of a path failure </action> <action> If you accept the risk of not having redundancy for vSphere HA communication, you can eliminate the configuration issue by setting the das.ignoreRedundantNetWarning advanced option to "true" </action> </cause> </EventLongDescription> HostNonCompliantEventHost non-compliant with profileerrorHost is not in compliance with the attached profile.Host {host.name} is not in compliance with the attached profile <EventLongDescription id="vim.event.HostNonCompliantEvent"> <description> The host does not comply with the host profile </description> <cause> <description> The host is not in compliance with the attached profile </description> <action> Check the Summary tab for the host in the vSphere Client to determine the possible cause(s) of noncompliance </action> </cause></EventLongDescription> HostNotInClusterEventHost not in clustererrorNot a cluster member in {datacenter.name}Host {host.name} is not a cluster member in {datacenter.name}HostOvercommittedEventHost resource overcommittederrorInsufficient capacity in host {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource configurationInsufficient capacity in host {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.HostOvercommittedEvent"> <description> A host does not have sufficient CPU and/or memory capacity to satisfy its resource configuration. The host has its own admission control, so this condition should never occur. 
</description> <cause> <description>A host has insufficient capacity for its resource configuration</description> <action>If you encounter this condition, contact VMware Support </action> </cause> </EventLongDescription> HostPrimaryAgentNotShortNameEventHost primary agent not specified as short nameerrorPrimary agent {primaryAgent} was not specified as a short namePrimary agent {primaryAgent} was not specified as a short name to host {host.name} <EventLongDescription id="vim.event.HostPrimaryAgentNotShortNameEvent"> <description> The primary agent is not specified in short name format </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostProfileAppliedEventHost profile appliedinfoProfile configuration applied to the hostProfile is applied on the host {host.name}HostReconnectionFailedEventCannot reconnect hosterrorCannot reconnect to {host.name}Cannot reconnect to {host.name}Cannot reconnectCannot reconnect to {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostReconnectionFailedEvent"> <description> Could not reestablish a connection to the host </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostRemovedEventHost removedinfoRemoved host {host.name}Removed host {host.name}Removed from inventoryRemoved host {host.name} in {datacenter.name}HostShortNameInconsistentEventHost short name inconsistenterrorHost names {shortName} and {shortName2} both resolved to the same IP address. Check the host's network configuration and DNS entries <EventLongDescription id="vim.event.HostShortNameInconsistentEvent"> <description> The name resolution check on the host returns different names for the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShortNameToIpFailedEventHost short name to IP not completederrorCannot resolve short name {shortName} to IP addressCannot resolve short name {shortName} to IP address on host {host.name} <EventLongDescription id="vim.event.HostShortNameToIpFailedEvent"> <description> The short name of the host can not be resolved to an IP address </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShutdownEventHost shut downinfoShut down of {host.name}: {reason}Shut down of {host.name}: {reason}Shut down of host: {reason}Shut down of {host.name} in {datacenter.name}: {reason}HostSpecificationChangedEventHost specification is changed on vCenterinfoHost specification of host {host.name} is changed on vCenter.Host specification of host {host.name} is changed on vCenter.Host specification is changed.Host specification of host {host.name} is changed on vCenter.HostSpecificationRequireEventPull host specification from host to vCenterinfoPull host specification of host {host.name} to vCenter.Pull host specification of host {host.name} to vCenter.Pull host specification to vCenter.Pull host specification of host {host.name} to vCenter.HostSpecificationUpdateEventHost specification is changed on hostinfoHost specification is changed on host {host.name}.Host specification is changed on host {host.name}.Host 
specification is changed.Host specification is changed on host {host.name}.HostStatusChangedEventHost status changedinfoConfiguration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.HostStatusChangedEvent"> <description> The host status has changed. This status is the status of the root resource pool that encompasses the entire host. A host status change may be accompanied by the removal of a configuration issue if one was previously detected. A host status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The host status changed to yellow</description> <action>Reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The host status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> HostSubSpecificationDeleteEventDelete host sub specification {subSpecName}infoDelete host sub specification {subSpecName} of host {host.name}.Delete host sub specification {subSpecName} of host {host.name}.Delete host sub specification.Delete host sub specification {subSpecName} of host {host.name}.HostSubSpecificationUpdateEventHost sub specification {hostSubSpec.name} is changed on hostinfoHost sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed.Host sub specification {hostSubSpec.name} is changed on host {host.name}.HostSyncFailedEventCannot synchronize hosterrorCannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. 
{reason.msg} <EventLongDescription id="vim.event.HostSyncFailedEvent"> <description> Failed to sync with the vCenter Agent on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> HostUpgradeFailedEventHost upgrade failederrorCannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostUpgradeFailedEvent"> <description> Failed to connect to the host due to an installation or upgrade issue </description> </EventLongDescription> HostUserWorldSwapNotEnabledEventThe userworld swap is not enabled on the hostwarningThe userworld swap is not enabled on the hostThe userworld swap is not enabled on the host {host.name}HostVnicConnectedToCustomizedDVPortEventSome host vNICs were reconfigured to use dvPorts with port level configuration, which might be different from the dvPort group.infoHost vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.Host {host.name} vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.HostWwnChangedEventHost WWN changedwarningWWNs are changedWWNs are changed for {host.name}HostWwnConflictEventHost WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {host.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.HostWwnConflictEvent"> <description> The WWN (World Wide Name) of this host conflicts with the WWN of another host or virtual machine </description> <cause> <description> The WWN of this host conflicts with WWN of another host </description> </cause> <cause> <description> The WWN of this host conflicts with WWN of another virtual machine</description> </cause> </EventLongDescription> IncorrectHostInformationEventIncorrect host informationerrorInformation needed to acquire the correct set of licenses not providedHost {host.name} did not provide the information needed to acquire the correct set of licenses <EventLongDescription id="vim.event.IncorrectHostInformationEvent"> <description> The host did not provide the information needed to acquire the correct set of licenses </description> <cause> <description> The cpuCores, cpuPackages or hostType information on the host is not valid </description> </cause> <cause> <description> The host information is not available because host was added as disconnected </description> </cause> </EventLongDescription> InfoUpgradeEventInformation upgradeinfo{message}InsufficientFailoverResourcesEventvSphere HA failover resources are insufficienterrorInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name}Insufficient resources to satisfy vSphere HA failover levelInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.InsufficientFailoverResourcesEvent"> <description> The cluster does not have enough unreserved capacity to satisfy the level configured for vSphere HA admission control. 
Failovers may still be performed by vSphere HA but will be on a best effort basis. </description> <cause> <description> If the "number of host failures to tolerate" policy is configured and a few virtual machines have a much higher CPU or memory reservation than the other virtual machines, vSphere HA admission control can be excessively conservative to ensure that there are enough unfragmented resources if a host fails. </description> <action> Use similar CPU and memory reservations for all virtual machines in the cluster. If this is not possible, consider using a different vSphere HA admission control policy, such as reserving a percentage of cluster resource for failover. Alternatively, you can use advanced options to specify a cap for the slot size. See the vSphere Availability Guide for details. </action> </cause> <cause> <description> Hosts with vSphere HA agent errors are not good candidates for providing failover capacity in the cluster, and their resources are not considered for vSphere HA admission control purposes. If many hosts have an vSphere HA agent error, vCenter Server generates this event. </description> <action> Check the event log of the hosts to determine the cause of the vSphere HA agent errors. After addressing any configuration issues, reconfigure vSphere HA on the affected hosts or on the cluster. </action> </cause> </EventLongDescription> InvalidEditionEventInvalid editionerrorThe license edition '{feature}' is invalid <EventLongDescription id="vim.event.InvalidEditionEvent"> <description> vCenter Server attempted to acquire an undefined feature from the license server </description> <cause> <description>Any operation that requires a feature license such as vMotion, DRS, vSphere HA might result in this event if that feature is not defined on the license server</description> <action>Verify that the feature in question is present on the license server</action> </cause> </EventLongDescription> EventExLicense downgradewarningLicense downgradeLicense downgradeLicense downgradevim.event.LicenseDowngradedEvent|License downgrade: {licenseKey} removes the following features: {lostFeatures} <EventLongDescription id="vim.event.LicenseDowngradedEvent"> <description> The installed license reduces the set of available features. Some of the features, previously available, will not be accessible with the new license. </description> <cause> <description>The license has been replaced.</description> <action>Revert to the license previously installed if it is not already expired.</action> <action>Contact VMware in order to obtain new license with the required features.</action> </cause> </EventLongDescription> LicenseEvent<License Event>info<internal>LicenseExpiredEventLicense expirederrorLicense {feature.featureName} has expiredLicenseNonComplianceEventInsufficient licenses.errorLicense inventory is not compliant. Licenses are overused <EventLongDescription id="vim.event.LicenseNonComplianceEvent"> <description> vCenter Server does not strictly enforce license usage. Instead, it checks for license overuse periodically. If vCenter Server detects overuse, it logs this event and triggers an alarm. 
</description> <cause> <description>Overuse of licenses</description> <action>Check the license reports through the vSphere Client and reduce the number of entities using the license key or add a new license key with a greater capacity</action> </cause> </EventLongDescription> LicenseRestrictedEventUnable to acquire licenses due to a restriction on the license servererrorUnable to acquire licenses due to a restriction in the option file on the license server. <EventLongDescription id="vim.event.LicenseRestrictedEvent"> <description> vCenter Server logs this event if it is unable to check out a license from the license server due to restrictions in the license file </description> <cause> <description>License file in the license server has restrictions that prevent check out</description> <action>Check the license file and remove any restrictions that you can</action> </cause> </EventLongDescription> LicenseServerAvailableEventLicense server availableinfoLicense server {licenseServer} is availableLicenseServerUnavailableEventLicense server unavailableerrorLicense server {licenseServer} is unavailable <EventLongDescription id="vim.event.LicenseServerUnavailableEvent"> <description> vCenter Server tracks the license server state and logs this event if the license server has stopped responding. </description> <cause> <description>License server is not responding and not available to vCenter Server</description> <action>Verify that the license server is running. If it is, check the connectivity between vCenter Server and the license server.</action> </cause> </EventLongDescription> LocalDatastoreCreatedEventLocal datastore createdinfoCreated local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl})Created local datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}LocalTSMEnabledEventESXi Shell is enabledinfoESXi Shell for the host has been enabledESXi Shell for the host {host.name} has been enabledLockerMisconfiguredEventLocker misconfiguredwarningDatastore {datastore} which is configured to back the locker does not existLockerReconfiguredEventLocker reconfiguredinfoLocker was reconfigured from {oldDatastore} to {newDatastore} datastoreMigrationErrorEventMigration errorerrorUnable to migrate {vm.name} from {host.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate from {host.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine from the source host failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationEvent<Migration Event>info<internal>MigrationHostErrorEventMigration host errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to host {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} to 
{dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine to the destination host or datastore failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationHostWarningEventMigration host warningwarningMigration of {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination host or datastore is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceErrorEventMigration resource errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Cannot migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceErrorEvent"> <description> A virtual machine failed to migrate due to incompatibilities with target resource pool </description> <cause> <description>Migrating a virtual machine to the destination host or datastore is not possible due to incompatibilities with the target resource pool. 
</description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceWarningEventMigration resource warningwarningMigration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination resource pool is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationWarningEventMigration warningwarningMigration of {vm.name} from {host.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration from {host.name}: {fault.msg}Migration of {vm.name} from {host.name} in {datacenter.name}: {fault.msg}MtuMatchEventThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to the physical NIC.infoThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}MtuMismatchEventThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to the physical NIC.errorThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}NASDatastoreCreatedEventNAS datastore createdinfoCreated NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl})Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}NetworkRollbackEventNetwork configuration on the host {host.name} is rolled back as it disconnects the 
host from vCenter server.errorNetwork configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.NoAccessUserEventNo access for usererrorCannot login user {userName}@{ipAddress}: no permission <EventLongDescription id="vim.event.NoAccessUserEvent"> <description> A user could not log in due to insufficient access permission </description> <cause> <description> The user account has insufficient access permission </description> <action> Log in with a user account that has the necessary access permissions or grant additional access permissions to the current user </action> </cause> </EventLongDescription> NoDatastoresConfiguredEventNo datastores configuredinfoNo datastores have been configuredNo datastores have been configured on the host {host.name}NoLicenseEventNo licenseerrorA required license {feature.featureName} is not reserved <EventLongDescription id="vim.event.NoLicenseEvent"> <description> vCenter Server logs this event if it fails to acquire a feature from the license server for an unknown reason. </description> <cause> <description>Acquiring a feature license fails for an unknown reason</description> <action>Verify that the license server has the license for the feature</action> </cause> </EventLongDescription> NoMaintenanceModeDrsRecommendationForVMNo maintenance mode DRS recommendation for the VMinfoUnable to automatically migrate {vm.name}Unable to automatically migrate from {host.name}Unable to automatically migrate {vm.name} from {host.name} <EventLongDescription id="vim.event.NoMaintenanceModeDrsRecommendationForVM"> <description> DRS failed to generate a vMotion recommendation for a virtual machine on a host entering Maintenance Mode. This condition typically occurs because no other host in the DRS cluster is compatible with the virtual machine. Unless you manually migrate or power off this virtual machine, the host will be unable to enter Maintenance Mode. </description> <cause> <description>DRS failed to evacuate a powered on virtual machine</description> <action>Manually migrate the virtual machine to another host in the cluster</action> <action>Power off the virtual machine</action> <action>Bring any hosts in Maintenance Mode out of that mode</action> <action>Cancel the task that is making the host enter Maintenance Mode </action> </cause> </EventLongDescription> NonVIWorkloadDetectedOnDatastoreEventUnmanaged workload detected on SIOC-enabled datastoreinfoAn unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.NotEnoughResourcesToStartVmEventInsufficient resources for vSphere HA to start the VM. Reason: {reason.@enum.fdm.placementFault}warningInsufficient resources to fail over {vm.name} in {computeResource.name}. vSphere HA will retry the fail over when enough resources are available. 
Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over this virtual machine. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name} in {computeResource.name} that resides in {datacenter.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault} <EventLongDescription id="vim.event.NotEnoughResourcesToStartVmEvent"> <description> This event is issued by vSphere HA when the master agent was not able to fail over a virtual machine to one of its compatible hosts. This condition is due to one or more of the causes listed below. When this condition occurs, vSphere HA will report a cause for it in the event summary, but note that additional causes might exist. It is more likely to occur if vSphere HA admission control is disabled or more hosts fail than were provisioned for. When a virtual machine cannot be placed, vSphere HA will retry placing it when the cluster state changes. Also, if vSphere DRS is enabled, it will be invoked to try to defragment the cluster or bring hosts out of Standby Mode. </description> <cause> <description> A virtual machine has bandwidth reservations for CPU, memory, vFlash cache, or virtual NICs. There was no compatible host that had enough resources to satisfy the reservations. </description> <action>Decrease the virtual machine resource reservation</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition if one exists</action> <action>If DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> <cause> <description> The cluster has vSAN enabled, and one or more hosts that contribute storage to the cluster is inaccessible, preventing vSphere HA from powering on the virtual machine. This applies to virtual machines that have one or more files on a vSAN datastore. </description> <action>Bring online any failed hosts or resolve a network partition if one exists that involves hosts that contribute storage to the vSAN cluster</action> </cause> <cause> <description>One or more datastores that are associated with a virtual machine are inaccessible by any compatible host in the cluster.</description> <action>Bring online any non-responding host that mounts the virtual machine datastores</action> <action>Fix the all-paths-down (APD) or permanent-device-loss (PDL) issues.</action> </cause> <cause> <description>vSphere HA is enforcing virtual machine to virtual machine anti-affinity rules, and the rule cannot be satisfied. </description> <action>Add more hosts to cluster</action> <action>Bring online any non-responding host or resolve a network partition if one exists</action> <action>Remove any anti-affinity rules that are restricting the placement</action> </cause> <cause> <description>The number of VMs that can run on each host is limited. 
There is no host that can power on the VM without exceeding the limit.</description> <action>Increase the limit if you have set the limitVmsPerESXHost HA advanced option.</action> <action>Bring online any non-responding host or add new hosts to the cluster</action> </cause> </EventLongDescription> OutOfSyncDvsHostThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.warningThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.The vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.PermissionAddedEventPermission addedinfoPermission created for {principal} on {entity.name}, role is {role.name}, propagation is {propagate.@enum.auth.Permission.propagate}PermissionEvent<Permission Event>info<internal>PermissionRemovedEventPermission removedinfoPermission rule removed for {principal} on {entity.name}PermissionUpdatedEventPermission updatedinfoPermission changed for '{principal}' on '{entity.name}'.
Role changed from '{prevRole.name}' to role '{role.name}'. Propagate changed from '{prevPropagate.@enum.auth.Permission.propagate}' to '{propagate.@enum.auth.Permission.propagate}'.ProfileAssociatedEventProfile attached to hostinfoProfile {profile.name} has been attached.Profile {profile.name} has been attached.Profile {profile.name} has been attached with the host.Profile {profile.name} attached.ProfileChangedEventProfile was changedinfoProfile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.ProfileCreatedEventProfile createdinfoProfile is created.ProfileDissociatedEventProfile detached from hostinfoProfile {profile.name} has been detached.Profile {profile.name} has been detached. Profile {profile.name} has been detached from the host.Profile {profile.name} detached.ProfileEventinfo<internal>ProfileReferenceHostChangedEventThe profile reference host was changedinfoProfile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.ProfileRemovedEventProfile removedinfoProfile {profile.name} was removed.Profile {profile.name} was removed.Profile was removed.RecoveryEventRecovery completed on the host.infoThe host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the management virtual NIC {vnic} by connecting to a new port {portKey} on the vSphere Distributed Switch {dvsUuid}.RemoteTSMEnabledEventSSH is enabledinfoSSH for the host has been enabledSSH for the host {host.name} has been enabledResourcePoolCreatedEventResource pool createdinfoCreated resource pool {resourcePool.name} in compute-resource {computeResource.name}Created resource pool {resourcePool.name}Created resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}ResourcePoolDestroyedEventResource pool deletedinfoRemoved resource pool {resourcePool.name} on {computeResource.name}Removed resource pool {resourcePool.name}Removed resource pool {resourcePool.name} on {computeResource.name} in {datacenter.name}ResourcePoolEvent<Resource Pool Event>info<internal>ResourcePoolMovedEventResource pool movedinfoMoved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name} in {datacenter.name}ResourcePoolReconfiguredEventResource pool reconfiguredinfoUpdated configuration for {resourcePool.name} in compute-resource {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration on {resourcePool.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration for {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ResourcePoolReconfiguredEvent"> <description> The resource pool configuration changed. The resource pool configuration includes information about the resource reservations of the resource pool and the resource reservations of its children. </description> </EventLongDescription> ResourceViolatedEventResource usage exceeds configurationerrorResource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name}Resource usage exceeds configuration on resource pool {resourcePool.name}Resource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.ResourceViolatedEvent"> <description> The cumulative CPU and/or memory resource consumption of all virtual machines in the resource pool exceeds the resource pool configuration </description> <cause> <description>You attempted to move a virtual machine from one resource pool into another bypassing vCenter Server. This condition occurs when you attempt the move using the vSphere Client directly connected to the host. </description> <action>In a DRS cluster, do not move and power on a virtual machine bypassing vCenter Server</action> </cause> </EventLongDescription> RoleAddedEventRole addedinfoNew role {role.name} createdRoleEvent<Role Event>info<internal>RoleRemovedEventRole removedinfoRole {role.name} removedRoleUpdatedEventRole updatedinfoRole modified.
Previous name: {prevRoleName}, new name: {role.name}.
Added privileges: {privilegesAdded}.
Removed privileges: {privilegesRemoved}.RollbackEventHost Network operation rolled backinfoThe Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.The operation {methodName} on the host {hostName} disconnected the host and was rolled back .The Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.ScheduledTaskCompletedEventScheduled task completedinfoTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} completed successfullyTask {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} completed successfullyScheduledTaskCreatedEventScheduled task createdinfoCreated task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name}Created task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskEmailCompletedEventSent scheduled task emailinfoTask {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} sent email to {to}ScheduledTaskEmailFailedEventScheduled task email not senterrorTask {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot send email to {to}: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskEmailFailedEvent"> <description> An error occurred while sending email notification that a scheduled task is running </description> <cause> <description>Failed to send email for the scheduled task</description> <action>Check the vCenter Server SMTP settings for sending emails</action> </cause> </EventLongDescription> ScheduledTaskEvent<Scheduled Task Event>info<internal>ScheduledTaskFailedEventCannot complete scheduled taskerrorTask {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot be completed: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskFailedEvent"> <description> An error occurred while running a scheduled task </description> <cause> <description>Failed to run a scheduled task</description> <action>Correct the failure condition</action> </cause> </EventLongDescription> ScheduledTaskReconfiguredEventScheduled task reconfiguredinfoReconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name}Reconfigured task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.ScheduledTaskRemovedEventScheduled task removedinfoRemoved task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name}Removed task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskStartedEventScheduled task startedinfoRunning task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name}Running task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ServerLicenseExpiredEventServer license expirederrorA vCenter Server license has expiredServerStartedSessionEventServer started sessioninfovCenter startedSessionEvent<Session Event>info<internal>SessionTerminatedEventSession stoppedinfoA session for user '{terminatedUsername}' has stopped <EventLongDescription id="vim.event.SessionTerminatedEvent"> <description> A session has been terminated </description> </EventLongDescription> ExtendedEventThe time-limited license on the host has expired.warningThe time-limited license on host {host.name} has expired.The time-limited license on host {host.name} has expired.The time-limited license on the host has expired.vim.event.SubscriptionLicenseExpiredEvent|The time-limited license on host {host.name} has expired. To comply with the EULA, renew the license at http://my.vmware.comTaskEventTask eventinfoTask: {info.descriptionId}TaskTimeoutEventTask time-outinfoTask: {info.descriptionId} time-out <EventLongDescription id="vim.event.TaskTimeoutEvent"> <description> A task has been cleaned up because it timed out </description> </EventLongDescription> TeamingMatchEventTeaming configuration in the vSphere Distributed Switch matches the physical switch configurationinfoTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration in {datacenter.name}. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TeamingMisMatchEventTeaming configuration in the vSphere Distributed Switch does not match the physical switch configurationerrorTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration in {datacenter.name}. 
Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TemplateBeingUpgradedEventUpgrading templateinfoUpgrading template {legacyTemplate}TemplateUpgradeEvent<Template Upgrade Event>info<internal>TemplateUpgradeFailedEventCannot upgrade templateinfoCannot upgrade template {legacyTemplate} due to: {reason.msg}TemplateUpgradedEventTemplate upgradedinfoTemplate {legacyTemplate} upgrade completedTimedOutHostOperationEventHost operation timed outwarningThe operation performed on host {host.name} timed outThe operation performed on host {host.name} timed outThe operation timed outThe operation performed on {host.name} in {datacenter.name} timed out <EventLongDescription id="vim.event.TimedOutHostOperationEvent"> <description> An operation performed on the host has timed out </description> <cause> <description> A previous event in the sequence of events will provide information on the reason for the timeout </description> </cause> </EventLongDescription> UnlicensedVirtualMachinesEventUnlicensed virtual machinesinfoThere are {unlicensed} unlicensed virtual machines on host {host} - there are only {available} licenses availableUnlicensedVirtualMachinesFoundEventUnlicensed virtual machines foundinfo{unlicensed} unlicensed virtual machines found on host {host}UpdatedAgentBeingRestartedEventRestarting updated agentinfoThe agent is updated and will soon restartThe agent on host {host.name} is updated and will soon restartUpgradeEvent<Upgrade Event>info<internal>UplinkPortMtuNotSupportEventNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.errorNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortMtuSupportEventAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.infoAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.All VLAN MTU setting on the external physical switch allows the vSphere Distributed Switch max MTU size packets passing on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}All VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanTrunkedEventThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch.infoThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.The 
configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.The configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanUntrunkedEventNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch.errorNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UserAssignedToGroupUser assigned to groupinfoUser {userLogin} was added to group {group}UserLoginSessionEventUser logininfoUser {userName}@{ipAddress} logged in as {userAgent}UserLogoutSessionEventUser logoutinfoUser {userName}@{ipAddress} logged out (login time: {loginTime}, number of API invocations: {callCount}, user agent: {userAgent})UserPasswordChangedUser password changedinfoPassword was changed for account {userLogin}Password was changed for account {userLogin} on host {host.name}UserUnassignedFromGroupUser removed from groupinfoUser {userLogin} removed from group {group}UserUpgradeEventUser upgradeuser{message} <EventLongDescription id="vim.event.UserUpgradeEvent"> <description> A general user event occurred due to an upgrade </description> </EventLongDescription> VMFSDatastoreCreatedEventVMFS datastore createdinfoCreated VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl})Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}VMFSDatastoreExpandedEventVMFS datastore expandedinfoExpanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name}Expanded VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExpandedEvent"> <description> An existing extent in a VMFS volume was grown to increase its capacity </description> <cause> <description> A user or system action caused an extent of an existing VMFS datastore to be grown. Only extents with free space immediately after them are expandable. As a result, the action filled the available adjacent capacity on the LUN. 
</description> </cause> </EventLongDescription> VMFSDatastoreExtendedEventVMFS datastore extendedinfoExtended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name}Extended VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExtendedEvent"> <description> An existing VMFS volume was extended to increase its capacity </description> <cause> <description> A user or system action caused the datastore to be extended with a partition on a LUN to increase its capacity. </description> </cause> </EventLongDescription> VMotionLicenseExpiredEventvMotion license expirederrorA vMotion license for {host.name} has expired <EventLongDescription id="vim.event.VMotionLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of vMotion licenses on the license server and uses this event to notify you of any vMotion licenses that are about to expire </description> <cause> <description>vMotion licenses on the license server are about to expire</description> <action>Update the license server to get a fresher version of the vMotion license</action> </cause> </EventLongDescription> VcAgentUninstallFailedEventCannot uninstall vCenter agenterrorCannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name} in {datacenter.name}. {reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUninstallFailedEvent"> <description> An attempt to uninstall the vCenter Agent failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUninstalledEventvCenter agent uninstalledinfovCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalledvCenter agent has been uninstalled from {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUninstalledEvent"> <description> The vCenter Agent has been uninstalled from host </description> </EventLongDescription> VcAgentUpgradeFailedEventCannot complete vCenter agent upgradeerrorCannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name} in {datacenter.name}. 
{reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUpgradeFailedEvent"> <description> A vCenter Agent upgrade attempt failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUpgradedEventvCenter agent upgradedinfovCenter agent has been upgraded on {host.name}vCenter agent has been upgraded on {host.name}vCenter agent has been upgradedvCenter agent has been upgraded on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUpgradedEvent"> <description> The vCenter Agent has been upgraded on the host </description> </EventLongDescription> VimAccountPasswordChangedEventVIM account password changedinfoVIM account password changedVIM account password was changed on host {host.name} <EventLongDescription id="vim.event.VimAccountPasswordChangedEvent"> <description> The password for the Vim account user on the host has been changed. This account is created by vCenter Server and used to manage the host. </description> <cause> <description> vCenter Server periodically changes the password of the Vim account that it uses to manage the host </description> </cause> </EventLongDescription> VmAcquiredMksTicketEventVM acquired MKS ticketinfoRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} has been openedRemote console has been opened for this virtual machine on {host.name}Remote console to {vm.name} on {host.name} in {datacenter.name} has been opened <EventLongDescription id="vim.event.VmAcquiredMksTicketEvent"> <description> Successfully acquired MKS Ticket for the virtual machine </description> <cause> <description> The MKS Ticket used to connect to the virtual machine remote console has been successfully acquired. </description> </cause> </EventLongDescription> VmAcquiredTicketEventVM acquired ticketinfoA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket of type {ticketType.@enum.VirtualMachine.TicketType} has been acquired.A ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} on {host.name} in {datacenter.name} has been acquiredVmAutoRenameEventVM auto renameinfoInvalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name}. Renamed from {oldName} to {newName}Conflicting or invalid virtual machine name detected. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name} in {datacenter.name}. Renamed from {oldName} to {newName} <EventLongDescription id="vim.event.VmAutoRenameEvent"> <description> The virtual machine was renamed because of possible name conflicts with another virtual machine </description> <cause> <description>The virtual machine might have been added to the vCenter Server inventory while scanning the datastores of hosts added to the inventory. During such an action, the newly-added virtual machine's name might have been found to be in conflict with a virtual machine name already in the inventory. To resolve this, vCenter Server renames the newly-added virtual machine. 
</description> </cause> </EventLongDescription> VmBeingClonedEventVM being clonedinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}VmBeingClonedNoFolderEventVM being cloned to a vAppinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on host {host.name}, {ds.name} in {datacenter.name} to {destName} on host {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}VmBeingCreatedEventCreating VMinfoCreating {vm.name} on {host.name}, {ds.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {ds.name} in {datacenter.name}Creating VM on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmBeingDeployedEventDeploying VMinfoDeploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} from template {srcTemplate.name}Deploying VM on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} in {datacenter.name} from template {srcTemplate.name} <EventLongDescription id="vim.event.VmBeingDeployedEvent"> <description> A virtual machine is being created from a template </description> <cause> <description> A user action prompted a virtual machine to be created from this template. 
</description> </cause> </EventLongDescription> VmBeingHotMigratedEventVM is hot migratinginfoMigrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingHotMigratedEvent"> <description> A powered-on virtual machine is being migrated with vMotion </description> <cause> <description> A user action might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> <cause> <description> A DRS recommendation might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> </EventLongDescription> VmBeingMigratedEventVM migratinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingMigratedEvent"> <description> Changing the host on which the virtual machine is executing </description> <cause> <description> A user action caused the virtual machine to be migrated to a different host </description> </cause> </EventLongDescription> VmBeingRelocatedEventVM relocatinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingRelocatedEvent"> <description> The virtual machine execution and/or storage is being relocated </description> <cause> <description> A user action might have caused the virtual machine's execution and/or storage to be changed </description> </cause> </EventLongDescription> VmCloneEvent<VM Clone Event>info<internal><internal><internal><internal><internal>VmCloneFailedEventCannot complete VM cloneerrorFailed to clone {vm.name} on {host.name}, {ds.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in 
{destFolder.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmCloneFailedEvent"> <description> Cloning a virtual machine failed </description> <cause> <description> An error prevented the virtual machine from being cloned </description> </cause> </EventLongDescription> VmClonedEventVM clonedinfo{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {ds.name} in {datacenter.name}{sourceVm.name} cloned to {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmConfigMissingEventVM configuration missinginfoConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} cannot be foundConfiguration file cannot be foundConfiguration file for {vm.name} on {host.name} in {datacenter.name} cannot be found <EventLongDescription id="vim.event.VmConfigMissingEvent"> <description> One or more configuration files for the virtual machine cannot be found </description> <cause> <description> The datastore on which this virtual machine resides may be inaccessible </description> <action> Check the connectivity of the datastore on which this virtual machine resides. If the datastore has a backing LUN, check to see if there are any transient disk failures. </action> </cause> </EventLongDescription> VmConnectedEventVM connectedinfoHost is connectedVirtual machine {vm.name} is connected <EventLongDescription id="vim.event.VmConnectedEvent"> <description> The virtual machine is in a connected state in the inventory and vCenter Server can access it </description> <cause> <description> A user or system action that resulted in operations such as creating, registering, cloning or deploying a virtual machine gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> A user or system action that resulted in operations such as adding or reconnecting a host gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> The state of the virtual machine's host changed from Not Responding to Connected and the host gave vCenter Server access to the virtual machine </description> </cause> </EventLongDescription> VmCreatedEventVM createdinfoNew virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {ds.name} in {datacenter.name}Virtual machine created on {host.name}, {ds.name} in {datacenter.name}Created virtual machine {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmDasBeingResetEventvSphere HA is resetting VMinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. 
Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode} <EventLongDescription id="vim.event.VmDasBeingResetEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA has been configured, the virtual machine might be reset because the VMware Tools heartbeat or application heartbeat status turned red. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasBeingResetWithScreenshotEventvSphere HA enabled VM reset with screenshotinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}{vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}. <EventLongDescription id="vim.event.VmDasBeingResetWithScreenshotEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA is configured, this condition can occur because the VMware Tools heartbeat or the application heartbeat status turned red. The event contains the location of the screenshot taken of the guest console before it was reset. You can use this information to determine the cause of the heartbeat failure. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> Check the screenshot image to see if the cause was a guest operating system failure. If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. 
You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasResetFailedEventvSphere HA cannot reset VMwarningvSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA cannot reset {vm.name} on {host.name}vSphere HA cannot reset {vm.name}vSphere HA cannot reset this virtual machinevSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmDasResetFailedEvent"> <description> vSphere HA attempted to reset the virtual machine because of a heartbeat failure from VMware Tools or a guest application, depending on how vSphere HA was configured. However, the reset operation failed. </description> <cause> <description> The most likely reason for the reset failure is that the virtual machine was running another task at the time the reset was initiated. </description> <action>Check to see whether the virtual machine requires attention and reset it manually if necessary.</action> </cause> </EventLongDescription> VmDasUpdateErrorEventVM vSphere HA update errorerrorUnable to update vSphere HA agents given the state of {vm.name}VmDasUpdateOkEventCompleted VM DAS updateinfovSphere HA agents have been updated with the current state of the virtual machineVmDateRolledBackEventVM date rolled backerrorDisconnecting all hosts as the date of virtual machine {vm.name} has been rolled backVmDeployFailedEventCannot deploy VM from templateerrorFailed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmDeployFailedEvent"> <description> Failed to deploy a virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine failed to deploy. This condition can occur if there is not enough disk space, the host or virtual machine loses its network connection, the host is disconnected, and so on. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. 
</action> </cause> </EventLongDescription> VmDeployedEventVM deployedinfoTemplate {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name} <EventLongDescription id="vim.event.VmDeployedEvent"> <description> A virtual machine has been created from the specified template </description> <cause> <description> A user action caused a virtual machine to be created from the template </description> </cause> <cause> <description> A scheduled task caused a virtual machine to be created from the template </description> </cause> </EventLongDescription> VmDisconnectedEventVM disconnectedinfo{vm.name} on host {host.name} is disconnected{vm.name} on host {host.name} is disconnected{vm.name} is disconnected{host.name} is disconnected{vm.name} on host {host.name} in {datacenter.name} is disconnectedVmDiscoveredEventVM discoveredinfoDiscovered {vm.name} on {host.name}Discovered {vm.name} on {host.name}Discovered {vm.name}Discovered on {host.name}Discovered {vm.name} on {host.name} in {datacenter.name}VmDiskFailedEventCannot create VM diskerrorCannot create virtual disk {disk} <EventLongDescription id="vim.event.VmDiskFailedEvent"> <description> Failed to create a virtual disk for the virtual machine for reasons described in the event message </description> <cause> <description> A virtual disk was not created for the virtual machine. This condition can occur if the operation failed to access the disk, the disk did not have enough space, you do not have permission for the operation, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that disk is accessible, has enough space, and that the permission settings allow the operation. </action> </cause> </EventLongDescription> VmEmigratingEventVM emigratinginfoMigrating {vm.name} off host {host.name}Migrating {vm.name} off host {host.name}Migrating {vm.name} off hostMigrating off host {host.name}Migrating {vm.name} off host {host.name} in {datacenter.name}VmEndRecordingEventEnd a recording sessioninfoEnd a recording sessionEnd a recording session on {vm.name}VmEndReplayingEventEnd a replay sessioninfoEnd a replay sessionEnd a replay session on {vm.name}VmEvent<VM Event>info<internal>VmFailedMigrateEventCannot migrate VMerrorCannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} to {destHost.name}, {destDatastore.name}Cannot migrate to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmFailedMigrateEvent"> <description> Failed to migrate the virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine did not migrate. This condition can occur if vMotion IPs are not configured, the source and destination hosts are not accessible, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that the vMotion IPs are configured on source and destination hosts, the hosts are accessible, and so on. 
</action> </cause> </EventLongDescription> VmFailedRelayoutEventCannot complete VM relayout.errorCannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name}: {reason.msg}Cannot complete relayout for this virtual machine on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedRelayoutEvent"> <description> Failed to lay out a virtual machine </description> <cause> <description> An attempt to lay out a virtual machine on disk failed for reasons described in the event message. This condition can occur for any of several reasons, for example inability to access the disk. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. </action> </cause> </EventLongDescription> VmFailedRelayoutOnVmfs2DatastoreEventCannot complete VM relayout on Vmfs2 datastoreerrorCannot complete relayout due to disks on a VMFS2 volumeCannot complete relayout for virtual machine {vm.name} which has disks on a VMFS2 volume. <EventLongDescription id="vim.event.VmFailedRelayoutOnVmfs2DatastoreEvent"> <description> Failed to migrate a virtual machine on VMFS2 datastore </description> <cause> <description> An attempt to migrate a virtual machine failed because the virtual machine still has disk(s) on a VMFS2 datastore. VMFS2 datastores are read-only for ESX 3.0 and later hosts. </description> <action> Upgrade the datastore(s) from VMFS2 to VMFS3 </action> </cause> </EventLongDescription> VmFailedStartingSecondaryEventvCenter cannot start the Fault Tolerance secondary VMerrorvCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason} <EventLongDescription id="vim.event.VmFailedStartingSecondaryEvent"> <description> vCenter Server could not start the Secondary VM because of an error </description> <cause> <description> The remote host is incompatible for Secondary VM. For instance, this condition can occur when the host does not have access to the virtual machine's network or datastore. </description> <action>Ensure that the hosts in the cluster are compatible for FT</action> </cause> <cause> <description>Login to a remote host failed. If the host has been newly added to the inventory or just rebooted, it might take some time for SSL thumbprints to be propagated to the hosts. 
</description> <action>If the problem persists, disconnect and re-connect the host.</action> </cause> <cause> <description>Registration of the Secondary VM on the remote host failed</description> <action>Determine whether the remote host has access to the datastore that the FT virtual machine resides on</action> </cause> <cause> <description>An error occurred while starting the Secondary VM</description> <action>Determine the cause of the migration error. vCenter Server will try to restart the Secondary VM if it can.</action> </cause> </EventLongDescription> VmFailedToPowerOffEventCannot power off the VM.errorCannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name}. {reason.msg}Cannot power off: {reason.msg}Cannot power off {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOffEvent"> <description>The virtual machine failed to power off</description> <cause> <description> The virtual machine might be performing concurrent operations </description> <action>Complete the concurrent operations and retry the power-off operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility. </description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToPowerOnEventCannot power on the VM.errorCannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOnEvent"> <description> The virtual machine failed to power on </description> <cause> <description> Virtual machine power-on attempts can fail because the virtual machine is already in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the power-on failure and fix the problem. </action> </cause> </EventLongDescription> VmFailedToRebootGuestEventVM cannot reboot the guest OS.errorCannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot the guest OS for {vm.name} on {host.name} in {datacenter.name}. {reason.msg} <EventLongDescription id="vim.event.VmFailedToRebootGuestEvent"> <description> The guest operating system on the virtual machine failed to reboot. </description> <cause> <description> Guest operating system reboot failures can occur because the virtual machine is not in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the reboot failure and fix the problem. 
</action> </cause> </EventLongDescription> VmFailedToResetEventCannot reset VMerrorCannot reset {vm.name} on {host.name}: {reason.msg}Cannot reset {vm.name} on {host.name}: {reason.msg}Cannot reset {vm.name}: {reason.msg}Cannot reset: {reason.msg}Cannot reset {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToResetEvent"> <description>The virtual machine failed to reset</description> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere Client and respond to the question or prompt </action> </cause> <cause> <description>There might not be enough available licenses to perform this operation.</description> <action> Obtain the required licenses and retry the reset operation </action> </cause> <cause> <description> Concurrent operations might be executing on the virtual machine </description> <action>Complete the concurrent operations and retry the reset operation</action> </cause> <cause> <description> The host on which the virtual machine is running is entering maintenance mode </description> <action> Wait until the host exits maintenance mode and retry the operation </action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToShutdownGuestEventCannot shut down the guest OSerrorCannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}{vm.name} cannot shut down the guest OS on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToShutdownGuestEvent"> <description> Guest operating system shutdown failed for the virtual machine </description> <cause> <description> Guest operating system shutdown can fail if VMware Tools is not installed in the virtual machine. </description> <action>Install VMware Tools.</action> </cause> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere Client and respond to the question or prompt </action> </cause> <cause> <description> Concurrent operations might be running on the virtual machine </description> <action>Complete the concurrent operations and retry the shutdown operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToStandbyGuestEventVM cannot standby the guest OSerrorCannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. 
{reason.msg}{vm.name} cannot standby the guest OS on {host.name} in {datacenter.name}: {reason.msg}VmFailedToSuspendEventCannot suspend VMerrorCannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name}: {reason.msg}Cannot suspend: {reason.msg}Cannot suspend {vm.name} on {host.name} in {datacenter.name}: {reason.msg}VmFailedUpdatingSecondaryConfigvCenter cannot update the Fault Tolerance secondary VM configurationerrorvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name}vCenter cannot update the Fault Tolerance secondary VM configurationvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFailedUpdatingSecondaryConfig"> <description> After a failover, the new Primary VM failed to update the configuration of the Secondary VM </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFailoverFailedvSphere HA virtual machine failover unsuccessfulwarningvSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over this virtual machine. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg} <EventLongDescription id="vim.event.VmFailoverFailed"> <description> vSphere HA did not fail over this virtual machine. The event includes the details of the fault that was generated when vSphere HA attempted the failover. vSphere HA will retry the failover on another host unless the maximum number of failover attempts has been exceeded. In many cases, the retry will succeed. </description> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It may also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to fail over the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or try powering on the virtual machine and investigate any returned errors. 
</action> <action> If the error reports that a file is locked, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. If any hosts have been declared dead, investigate whether a networking/storage issue may be the cause. </action> <action> If, however, the error reports that the virtual machine is in an invalid state, there may be an in-progress operation that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation that is taking a long time to complete. </action> </cause> </EventLongDescription> VmFaultToleranceStateChangedEventVM Fault Tolerance state changedinfoFault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state on {vm.name} on host {host.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState} <EventLongDescription id="vim.event.VmFaultToleranceStateChangedEvent"> <description> The Fault Tolerance state of the virtual machine changed </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceTurnedOffEventVM Fault Tolerance turned offinfoFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name}Fault Tolerance protection has been turned off for {vm.name} on host {host.name}Fault Tolerance protection has been turned off for {vm.name}Fault Tolerance protection has been turned off for this virtual machineFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFaultToleranceTurnedOffEvent"> <description> All Secondary VMs have been removed and Fault Tolerance protection is turned off for this virtual machine. </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceVmTerminatedEventFault Tolerance VM terminatedinfoThe Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} has been terminated. 
{reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason} <EventLongDescription id="vim.event.VmFaultToleranceVmTerminatedEvent"> <description> A Primary VM or Secondary VM became inactive </description> <cause> <description> The Secondary VM became inactive because its operations are no longer synchronized with those of the Primary VM</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Secondary VM became inactive because a hardware or network failure caused the Primary VM to lose the Primary-to-Secondary connection</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Fault Tolerant VM became inactive due to a partial hardware failure on the physical host</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> A user stopped the Fault Tolerant VM</description> <action>The remaining Fault Tolerant VM takes over as the Primary VM. vSphere HA will attempt to restart the Secondary VM.</action> </cause> </EventLongDescription> VmGuestOSCrashedEventGuest operating system crashederror{vm.name} on {host.name}: Guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.{vm.name}: Guest operating system has crashed.This virtual machine's guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.VmGuestRebootEventGuest rebootinfoGuest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name}Guest OS rebootGuest OS reboot for {vm.name} on {host.name} in {datacenter.name}VmGuestShutdownEventGuest OS shut downinfoGuest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name}Guest OS shut downGuest OS shut down for {vm.name} on {host.name} in {datacenter.name}VmGuestStandbyEventGuest standbyinfoGuest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name}Guest OS standbyGuest OS standby for {vm.name} on {host.name} in {datacenter.name}VmHealthMonitoringStateChangedEventvSphere HA VM monitoring state changedinfovSphere HA VM monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'VmInstanceUuidAssignedEventAssign a new instance UUIDinfoAssign a new instance UUID ({instanceUuid})Assign a new instance UUID ({instanceUuid}) to {vm.name} <EventLongDescription id="vim.event.VmInstanceUuidAssignedEvent"> <description>The virtual machine was assigned a new vCenter Server-specific instance UUID </description> <cause> <description> The user who created the virtual machine did not specify a vCenter Server-specific instance UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmInstanceUuidChangedEventInstance UUID ChangedinfoThe instance UUID has been changed from ({oldInstanceUuid}) to ({newInstanceUuid})The instance UUID of {vm.name} has been changed from ({oldInstanceUuid}) to ({newInstanceUuid}) <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine has changed </description> <cause> <description> A user action resulted in a change to the vCenter Server-specific instance UUID of the virtual machine </description> </cause> <cause> <description> vCenter Server changed the instance UUID of the virtual machine because it detected a conflict </description> </cause> </EventLongDescription> VmInstanceUuidConflictEventInstance UUIDs conflicterrorThe instance UUID ({instanceUuid}) conflicts with the instance UUID assigned to {conflictedVm.name}The instance UUID ({instanceUuid}) of {vm.name} conflicts with the instance UUID assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmInstanceUuidConflictEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine conflicted with that of another virtual machine. </description> <cause> <description> Virtual machine instance UUID conflicts can occur if you copy virtual machine files manually without using vCenter Server. </description> </cause> </EventLongDescription> VmMacAssignedEventVM MAC assignedinfoNew MAC address ({mac}) assigned to adapter {adapter}New MAC address ({mac}) assigned to adapter {adapter} for {vm.name}VmMacChangedEventVM MAC changedwarningChanged MAC address from {oldMac} to {newMac} for adapter {adapter}Changed MAC address from {oldMac} to {newMac} for adapter {adapter} for {vm.name} <EventLongDescription id="vim.event.VmMacChangedEvent"> <description> The virtual machine MAC address has changed </description> <cause> <description> A user action changed the virtual machine MAC address </description> </cause> <cause> <description> vCenter changed the virtual machine MAC address because it detected a MAC address conflict </description> </cause> </EventLongDescription> VmMacConflictEventVM MAC conflicterrorThe MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmMacConflictEvent"> <description> The virtual machine MAC address conflicts with that of another virtual machine </description> <cause> <description> This virtual machine's MAC address is the same as that of another virtual machine. Refer to the event details for more information on the virtual machine that caused the conflict. 
</description> </cause> </EventLongDescription> VmMaxFTRestartCountReachedvSphere HA reached maximum Secondary VM restart count.warningvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxFTRestartCountReached"> <description> The system reached the maximum restart limit in its attempt to restart a Secondary VM </description> <cause> <description>The system exceeded the number of allowed restart attempts for the Secondary VM when it tried to reestablish Fault Tolerance</description> <action>Check the causes for the restart failures and fix them. Then disable and re-enable Fault Tolerance protection.</action> </cause> </EventLongDescription> VmMaxRestartCountReachedvSphere HA reached maximum VM restart countwarningvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart this VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxRestartCountReached"> <description> vSphere HA has reached the maximum number of failover attempts for this virtual machine and has not been able to restart it. No further failover attempts will be made. By default, vSphere HA attempts to fail over a virtual machine 5 times. </description> <cause> <description> Failover can fail for a number of reasons, including that the configuration file of the virtual machine is corrupt or one or more of the virtual machine's datastores are not accessible by any host in the cluster due to an all paths down condition. In addition, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. </description> <action> To determine why previous failover attempts failed, search the events that are logged for the VM for occurrences of the event vSphere HA reports when a failover fails. These events will report the reason for the failed failover. vSphere HA events can be located by searching for the phrase 'vSphere HA'. To determine whether any issues still exist, try to manually power on the virtual machine. If power-on fails, investigate the error that is returned. But, if the power-on remains pending for a long time, investigate whether an all paths down condition exists. Also, if any hosts have been declared dead, investigate whether a networking or storage issue may be the cause. 
</action> </cause> </EventLongDescription> VmMessageErrorEventVM error messageerrorError message on {vm.name} on {host.name}: {message}Error message on {vm.name} on {host.name}: {message}Error message on {vm.name}: {message}Error message from {host.name}: {message}Error message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageErrorEvent"> <description> An error message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this error occurred </description> </cause> </EventLongDescription> VmMessageEventVM information messageinfoMessage on {vm.name} on {host.name}: {message}Message on {vm.name} on {host.name}: {message}Message on {vm.name}: {message}Message from {host.name}: {message}Message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageEvent"> <description> An information message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on the messages from the virtual machine </description> </cause> </EventLongDescription> VmMessageWarningEventVM warning messagewarningWarning message on {vm.name} on {host.name}: {message}Warning message on {vm.name} on {host.name}: {message}Warning message on {vm.name}: {message}Warning message from {host.name}: {message}Warning message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageWarningEvent"> <description> A warning message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this warning was issued </description> </cause> </EventLongDescription> VmMigratedEventVM migratedinfoVirtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmMigratedEvent"> <description> The virtual machine's host was changed successfully </description> <cause> <description> A user action caused the virtual machine to be successfully migrated to a different host </description> </cause> </EventLongDescription> VmNoCompatibleHostForSecondaryEventNo compatible host for the Fault Tolerance secondary VMerrorNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name}No compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name}No compatible host for the Fault Tolerance secondary VM {vm.name}No compatible host for the Fault Tolerance secondary VMNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription 
id="vim.event.VmNoCompatibleHostForSecondaryEvent"> <description> No compatible host was found when trying to place a Secondary VM </description> <cause> <description>There was no compatible host available to place a Secondary VM</description> <action>Resolve the incompatibilities and retry the operation</action> </cause> </EventLongDescription> VmNoNetworkAccessEventVM No Network AccesswarningNot all networks are accessible by {destHost.name}Not all networks for {vm.name} are accessible by {destHost.name}VmOrphanedEventVM orphanedwarning{vm.name} does not exist on {host.name}{vm.name} does not exist on {host.name}{vm.name} does not existVirtual machine does not exist on {host.name}{vm.name} does not exist on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmOrphanedEvent"> <description> The virtual machine does not exist on the host with which it is associated </description> <cause> <description> The virtual machine was deleted while its host was disconnected from vCenter Server. </description> </cause> </EventLongDescription> VmPowerOffOnIsolationEventvSphere HA powered off VM on isolated hostinfovSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off this virtual machine on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmPowerOffOnIsolationEvent"> <description> vSphere HA powered off this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmPoweredOffEventVM powered offinfo{vm.name} on {host.name} is powered off{vm.name} on {host.name} is powered off{vm.name} is powered offVirtual machine on {host.name} is powered off{vm.name} on {host.name} in {datacenter.name} is powered offVmPoweredOnEventVM powered oninfo{vm.name} on {host.name} has powered on{vm.name} on {host.name} has powered on{vm.name} has powered onVirtual machine on {host.name} has powered on{vm.name} on {host.name} in {datacenter.name} has powered onVmPoweringOnWithCustomizedDVPortEventVirtual machine powered on with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.infoVirtual machine powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.Virtual machine {vm.name} powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.VmPrimaryFailoverEventFault Tolerance VM failovererrorFault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name} in {datacenter.name}. 
{reason.@enum.VirtualMachine.NeedSecondaryReason}VmReconfiguredEventVM reconfiguredinfoReconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured virtual machine.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name} in {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}VmRegisteredEventVM registeredinfoRegistered {vm.name} on {host.name}Registered {vm.name} on {host.name} in {datacenter.name}Registered {vm.name}Registered VM on {host.name} in {datacenter.name}Registered {vm.name} on {host.name} in {datacenter.name}VmRelayoutSuccessfulEventVM relayout completedinfoRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} completedRelayout of the virtual machine completedRelayout of {vm.name} on {host.name} in {datacenter.name} completedVmRelayoutUpToDateEventVM relayout up-to-dateinfo{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} is in the correct format and relayout is not necessaryIn the correct format and relayout is not necessary{vm.name} on {host.name} in {datacenter.name} is in the correct format and relayout is not necessaryVmReloadFromPathEventVirtual machine reloaded from pathinfo{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} reloaded from new configuration {configPath}.Virtual machine on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.VmReloadFromPathFailedEventVirtual machine not reloaded from patherror{vm.name} on {host.name} could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from path {configPath}.{vm.name} could not be reloaded from {configPath}.This virtual machine could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from {configPath}. <EventLongDescription id="vim.event.VmReloadFromPathFailedEvent"> <description> Reloading the virtual machine from a new datastore path failed </description> <cause> <description>The destination datastore path was inaccessible or invalid </description> <action>Use a valid destination datastore path </action> </cause> <cause> <description>The virtual machine is in an invalid state </description> <action>Check the virtual machine's power state. 
If the virtual machine is powered on, power it off. </action> </cause> <cause> <description>The virtual machine is enabled for Fault Tolerance </description> <action>Disable Fault Tolerance for the virtual machine and retry the operation </action> </cause> </EventLongDescription> VmRelocateFailedEventFailed to relocate VMerrorFailed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmRelocateFailedEvent"> <description> Virtual machine relocation to a different host or datastore failed </description> <cause> <description> Virtual machine relocation can fail for a number of reasons, including network outages, insufficient disk space, and so on </description> <action> Consider the task related to this event, evaluate the failure reason, and take action accordingly </action> </cause> </EventLongDescription> VmRelocateSpecEvent<VM Relocate Spec Event>info<internal><internal><internal><internal><internal>VmRelocatedEventVM relocatedinfoVirtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmRelocatedEvent"> <description> The virtual machine execution and/or storage was successfully relocated </description> <cause> <description> A user action caused the virtual machine's execution and/or storage to be successfully changed </description> </cause> </EventLongDescription> VmRemoteConsoleConnectedEventVM remote console connectedinfoRemote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name}Remote console connectedRemote console connected to {vm.name} on host {host.name}VmRemoteConsoleDisconnectedEventVM remote console disconnectedinfoRemote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name}Remote console disconnectedRemote console disconnected from {vm.name} on host {host.name}VmRemovedEventVM removedinfoRemoved {vm.name} on {host.name}Removed {vm.name} on {host.name}Removed {vm.name}RemovedRemoved {vm.name} on {host.name} from {datacenter.name}VmRenamedEventVM renamedwarningRenamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName} in 
{datacenter.name}VmRequirementsExceedCurrentEVCModeEventVirtual machine is using features that exceed the capabilities of the host's current EVC mode.warningFeature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of this host's current EVC mode.Feature requirements of this virtual machine exceed capabilities of this host's current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.VmResettingEventVM resettinginfo{vm.name} on {host.name} is reset{vm.name} on {host.name} is reset{vm.name} is resetVirtual machine on {host.name} is reset{vm.name} on {host.name} in {datacenter.name} is resetVmResourcePoolMovedEventVM resource pool movedinfoMoved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name} in {datacenter.name}VmResourceReallocatedEventVM resource reallocatedinfoResource allocation changed
Modified:
{configChanges.modified}Changed resource allocation for {vm.name}
Modified:
{configChanges.modified}VmRestartedOnAlternateHostEventVM restarted on alternate hostinfoVirtual machine {vm.name} was restarted on this host since {sourceHost.name} failedVirtual machine was restarted on {host.name} since {sourceHost.name} failedVirtual machine {vm.name} was restarted on {host.name} since {sourceHost.name} failedVmResumingEventVM resuminginfo{vm.name} on {host.name} is resuming{vm.name} on {host.name} is resuming{vm.name} is resumingVirtual machine on {host.name} is resuming{vm.name} on {host.name} in {datacenter.name} is resumingVmSecondaryAddedEventFault Tolerance secondary VM addedinfoA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name}A Fault Tolerance secondary VM has been added for {vm.name} on host {host.name}A Fault Tolerance secondary VM has been added for {vm.name}A Fault Tolerance secondary VM has been added for this VMA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryDisabledBySystemEventvCenter disabled Fault ToleranceerrorvCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the Secondary VM could not be powered On. <EventLongDescription id="vim.event.VmSecondaryDisabledBySystemEvent"> <description> vCenter Server disabled a Secondary VM because it could not power on the Secondary VM </description> <cause> <description>vCenter Server failed to power on the Secondary VM </description> <action>Check the reason in the event message for more details, fix the failure, and re-enable Fault Tolerance protection to power on the Secondary VM.</action> </cause> </EventLongDescription> VmSecondaryDisabledEventDisabled Fault Tolerance secondary VMinfoDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Disabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Disabled Fault Tolerance secondary VM for {vm.name}Disabled Fault Tolerance secondary VM for this virtual machineDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryEnabledEventEnabled Fault Tolerance secondary VMinfoEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Enabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Enabled Fault Tolerance secondary VM for {vm.name}Enabled Fault Tolerance secondary VM for this VMEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryStartedEventStarted Fault Tolerance secondary VMinfoStarted Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Started Fault Tolerance secondary VM for {vm.name} on host {host.name}Started Fault Tolerance secondary VM for {vm.name}Started Fault Tolerance secondary VM for this virtual machineStarted Fault Tolerance secondary VM for {vm.name} 
on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmShutdownOnIsolationEventvSphere HA shut down VM on isolated hostinfovSphere HA shut down {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down this virtual machine on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation} <EventLongDescription id="vim.event.VmShutdownOnIsolationEvent"> <description> vSphere HA shut down this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmStartRecordingEventStart a recording sessioninfoStart a recording sessionStart a recording session on {vm.name}VmStartReplayingEventStart a replay sessioninfoStart a replay sessionStart a replay session on {vm.name}VmStartingEventVM startinginfo{vm.name} on {host.name} is starting{vm.name} on {host.name} is starting{vm.name} is startingVirtual machine is starting{vm.name} on {host.name} in {datacenter.name} is startingVmStartingSecondaryEventStarting Fault Tolerance secondary VMinfoStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Starting Fault Tolerance secondary VM for {vm.name} on host {host.name} in clusterStarting Fault Tolerance secondary VM for {vm.name}Starting Fault Tolerance secondary VM for this virtual machineStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmStaticMacConflictEventVM static MAC conflicterrorThe static MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The static MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name}VmStoppingEventVM stoppinginfo{vm.name} on {host.name} is stopping{vm.name} on {host.name} is stopping{vm.name} is stoppingVirtual machine is stopping{vm.name} on {host.name} in {datacenter.name} is stoppingVmSuspendedEventVM suspendedinfo{vm.name} on {host.name} is suspended{vm.name} on {host.name} is suspended{vm.name} is suspendedVirtual machine is suspended{vm.name} on {host.name} in {datacenter.name} is suspendedVmSuspendingEventVM being suspendedinfo{vm.name} on {host.name} is being suspended{vm.name} on {host.name} is being suspended{vm.name} is being suspendedVirtual machine is being suspended{vm.name} on {host.name} in {datacenter.name} is being suspendedVmTimedoutStartingSecondaryEventStarting the Fault Tolerance secondary VM timed outerrorStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in 
{datacenter.name} timed out within {timeout} ms <EventLongDescription id="vim.event.VmTimedoutStartingSecondaryEvent"> <description> An attempt to start a Secondary VM timed out. </description> <cause> <description>A user attempted to turn on or enable Fault Tolerance, triggering the start of the Secondary VM. The start operation timed out and, as a result, vCenter Server disables Fault Tolerance. </description> <action>Fix any problems and re-enable Fault Tolerance protection</action> </cause> <cause> <description>The secondary VM was started in response to a failure, but the start attempt timed out</description> <action> vSphere HA will attempt to power on the Secondary VM</action> </cause> </EventLongDescription> VmUnsupportedStartingEventVM unsupported guest OS is startingwarningUnsupported guest OS {guestId} for {vm.name}Unsupported guest OS {guestId} for {vm.name} on {host.name}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name}Unsupported guest OS {guestId}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUnsupportedStartingEvent"> <description> Attempting to power on a virtual machine that has an unsupported guest operating system </description> <cause> <description> A user action initiated a virtual machine power-on operation, but the virtual machine has an unsupported guest operating system. </description> </cause> </EventLongDescription> VmUpgradeCompleteEventVM upgrade completeinfoVirtual machine compatibility upgraded to {version.@enum.vm.hwVersion}VmUpgradeFailedEventCannot upgrade VMerrorCannot upgrade virtual machine compatibility.VmUpgradingEventUpgrading VMinfoUpgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} in {datacenter.name} to {version.@enum.vm.hwVersion} <EventLongDescription id="vim.event.VmUpgradingEvent"> <description>The virtual hardware on this virtual machine is being upgraded</description> <cause> <description>A user-initiated action triggered an upgrade of the virtual machine hardware</description> </cause> <cause> <description>A scheduled task started an upgrade of the virtual machine hardware</description> </cause> </EventLongDescription> VmUuidAssignedEventVM UUID assignedinfoAssigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name}Assigned new BIOS UUID ({uuid})Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidAssignedEvent"> <description>The virtual machine was assigned a new BIOS UUID</description> <cause> <description>The user who created the virtual machine did not specify a BIOS UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmUuidChangedEventVM UUID ChangedwarningChanged BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name}BIOS UUID was changed from {oldUuid} to {newUuid}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidChangedEvent"> <description>The virtual machine BIOS UUID has changed</description> <cause> <description> A user changed the virtual machine BIOS UUID directly on the host </description> </cause> </EventLongDescription> VmUuidConflictEventVM UUID ConflicterrorBIOS ID ({uuid}) conflicts with that of {conflictedVm.name}BIOS ID ({uuid}) of {vm.name} conflicts with that of {conflictedVm.name}VmVnicPoolReservationViolationClearEventVirtual NIC Network Resource Pool Reservation Violation Clear eventinfoThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedVmVnicPoolReservationViolationRaiseEventVirtual NIC Network Resource Pool Reservation Violation eventinfoThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedVmWwnAssignedEventVM WWN assignedinfoNew WWNs assignedNew WWNs assigned to {vm.name} <EventLongDescription id="vim.event.VmWwnAssignedEvent"> <description> The virtual machine was assigned a WWN (World Wide Name) </description> <cause> <description>The virtual machine was assigned a WWN because it was created with an RDM (Raw Device Mappings) disk or was reconfigured to access an RDM disk </description> </cause> </EventLongDescription> VmWwnChangedEventVM WWN changedwarningWWNs are changedWWNs are changed for {vm.name} <EventLongDescription id="vim.event.VmWwnChangedEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine was changed </description> <cause> <description>The virtual machine was assigned a new WWN, possibly due to a conflict caused by another virtual machine being assigned the same WWN </description> </cause> </EventLongDescription> VmWwnConflictEventVM WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {vm.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.VmWwnConflictEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine has a conflict </description> <cause> <description>The WWN assigned to this virtual machine was the same as that of a different virtual machine. </description> <action> Check the event details for more information on the conflict and correct the problem. </action> </cause> </EventLongDescription> WarningUpgradeEventWarning upgradewarning{message}IScsiBootFailureEventBoot from iSCSI failed.warningBooting from iSCSI failed.Booting from iSCSI failed with an error. See the VMware Knowledge Base for information on configuring iBFT networking.EventExLost Network Connectivityerrorvprob.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. 
Affected portgroups:{3}.EventExNo IPv6 TSO supporterrorvprob.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid vmknic specified in /Migrate/Vmknicwarningvprob.net.migrate.bindtovmk|The ESX advanced config option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Please update the config option with a valid vmknic or, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExVirtual NIC connection to switch failedwarningvprob.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningvprob.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. {3} uplinks still up. Affected portgroups:{4}.EventExLost Network Redundancywarningvprob.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExThin Provisioned Device Nearing Capacitywarningvprob.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold.EventExLost Storage Connectivityerrorvprob.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExDegraded Storage Path Redundancywarningvprob.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. {3} remaining active paths. Affected datastores: {4}.EventExLost Storage Path Redundancywarningvprob.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExVMFS Locked By Remote Hosterrorvprob.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.errorvprob.vmfs.extent.offline|An attached device {1} might be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system is online.infovprob.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Volume Connectivity Restoredinfovprob.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfovprob.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterrorvprob.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerrorvprob.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Opening volume in read-only metadata mode with limited write support.EventExVMFS Lock Corruption Detectederrorvprob.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). 
Other regions of the volume may be damaged too.EventExLost connection to NFS servererrorvprob.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExRestored connection to NFS serverinfovprob.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExVMFS Resource Corruption Detectederrorvprob.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExCopied Library Iteminfocom.vmware.cl.CopyLibraryItemEvent|Copied Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}({sourceLibraryItemId}), source Library {sourceLibraryName}.EventExFailed to copy Library Itemerrorcom.vmware.cl.CopyLibraryItemFailEvent|Failed to copy Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}, source Library {sourceLibraryName}.EventExCreated Libraryinfocom.vmware.cl.CreateLibraryEvent|Created Library {libraryName}EventExFailed to create Libraryerrorcom.vmware.cl.CreateLibraryFailEvent|Failed to create Library {libraryName}EventExCreated Library Iteminfocom.vmware.cl.CreateLibraryItemEvent|Created Library Item {libraryItemName} in Library {libraryName}.EventExFailed to create Library Itemerrorcom.vmware.cl.CreateLibraryItemFailEvent|Failed to create Library Item {libraryItemName} in Library {libraryName}.EventExDeleted Libraryinfocom.vmware.cl.DeleteLibraryEvent|Deleted Library {libraryName}EventExFailed to delete Libraryerrorcom.vmware.cl.DeleteLibraryFailEvent|Failed to delete Library {libraryName}EventExDeleted Library Iteminfocom.vmware.cl.DeleteLibraryItemEvent|Deleted Library Item {libraryItemName} in Library {libraryName}.EventExFailed to delete Library Itemerrorcom.vmware.cl.DeleteLibraryItemFailEvent|Failed to delete Library Item {libraryItemName} in Library {libraryName}.EventExPublished Libraryinfocom.vmware.cl.PublishLibraryEvent|Published Library {libraryName}EventExFailed to publish Libraryerrorcom.vmware.cl.PublishLibraryFailEvent|Failed to publish Library {libraryName}EventExPublished Library Iteminfocom.vmware.cl.PublishLibraryItemEvent|Published Library Item {libraryItemName} in Library {libraryName}EventExFailed to publish Library Itemerrorcom.vmware.cl.PublishLibraryItemFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName}EventExPublished Library Item to Subscriptioninfocom.vmware.cl.PublishLibraryItemSubscriptionEvent|Published Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library Item to Subscriptionerrorcom.vmware.cl.PublishLibraryItemSubscriptionFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExPublished Library to Subscriptioninfocom.vmware.cl.PublishLibrarySubscriptionEvent|Published Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library to Subscriptionerrorcom.vmware.cl.PublishLibrarySubscriptionFailEvent|Failed to publish Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExCreated 
Subscriptioninfocom.vmware.cl.SubscriptionCreateEvent|Created subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to create Subscriptionerrorcom.vmware.cl.SubscriptionCreateFailEvent|Failed to create subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExDeleted Subscriptioninfocom.vmware.cl.SubscriptionDeleteEvent|Deleted subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to delete Subscriptionerrorcom.vmware.cl.SubscriptionDeleteFailEvent|Failed to delete subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExUpdated Subscriptioninfocom.vmware.cl.SubscriptionUpdateEvent|Updated subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to update Subscriptionerrorcom.vmware.cl.SubscriptionUpdateFailEvent|Failed to update subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExSynchronized Libraryinfocom.vmware.cl.SyncLibraryEvent|Synchronized Library {libraryName}EventExFailed to Synchronize Libraryerrorcom.vmware.cl.SyncLibraryFailEvent|Failed to Synchronize Library {libraryName}EventExSynchronized Library Iteminfocom.vmware.cl.SyncLibraryItemEvent|Synchronized Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Itemerrorcom.vmware.cl.SyncLibraryItemFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Iteminfocom.vmware.cl.SyncNfcFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}. 
Failure may be due to a network error or a host entering maintenance mode.EventExUpdated Libraryinfocom.vmware.cl.UpdateLibraryEvent|Updated Library {libraryName}EventExFailed to update Libraryerrorcom.vmware.cl.UpdateLibraryFailEvent|Failed to update Library {libraryName}EventExUpdated Library Iteminfocom.vmware.cl.UpdateLibraryItemEvent|Updated Library Item {libraryItemName} in Library {libraryName}.EventExFailed to update Library Itemerrorcom.vmware.cl.UpdateLibraryItemFailEvent|Failed to update Library Item {libraryItemName} in Library {libraryName}.EventExCould not locate Library Item file on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryItemFileOnRestoreEvent|File '{fileName}' in Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library Item folder on the storage backing after restorecom.vmware.cl.restore.DeletedLibraryItemOnRestoreEvent|Folder for Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library folder on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryOnRestoreEvent|Library '{libraryName}' folder could not be located on the storage backing after restoreEventExCould not locate Library Item content after restorecom.vmware.cl.restore.MissingLibraryItemContentOnRestoreEvent|The content of Library Item '{libraryItemName}' could not be located on storage after restoreEventExNew Library Item file found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemFileOnRestoreEvent|New Library Item file '{fileName}' found on the storage backing for Library Item '{libraryItemName}' after restore. Path to the file on storage: '{filePath}'EventExNew Library Item folder found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemOnRestoreEvent|New Library Item folder '{itemFolderName}' found on the storage backing for Library '{libraryName}' after restore. 
Path to the item folder on storage: '{itemFolderPath}'ExtendedEventCancel LWD snapshotinfoCancelling LWD snapshotcom.vmware.dp.events.cancelsnapshot|Cancelling LWD snapshotExtendedEventLWD snapshot is cancelledinfoLWD snapshot is cancelledcom.vmware.dp.events.cancelsnapshotdone|LWD snapshot is cancelledExtendedEventFailed to cancel LWD snapshoterrorFailed to cancel LWD snapshotcom.vmware.dp.events.cancelsnapshotfailed|Failed to cancel LWD snapshotExtendedEventPerform 'commit' phase of LWD-based restoreinfoPerforming 'commit' phase of LWD-based restorecom.vmware.dp.events.commitrestore|Performing 'commit' phase of LWD-based restoreExtendedEvent'commit' phase of LWD-based restore is completedinfo'commit' phase of LWD-based restore is completedcom.vmware.dp.events.commitrestoredone|'commit' phase of LWD-based restore is completedExtendedEvent'commit' phase of LWD-based restore failederror'commit' phase of LWD-based restore failedcom.vmware.dp.events.commitrestorefailed|'commit' phase of LWD-based restore failedExtendedEventEnabling protection services on hosts in the clusterinfoEnabling protection services on hosts in the clusterEnabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservices|Enabling protection services on hosts in the clusterExtendedEventFinished enabling protection services on hosts in the clusterinfoFinished enabling protection services on hosts in the clusterFinished enabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesdone|Finished enabling protection services on hosts in the clusterExtendedEventFailed to enable protection services on hosts in the clustererrorFailed to enable protection services on hosts in the clusterFailed to enable protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesfailed|Failed to enable protection services on hosts in the clusterExtendedEventPerform 'prepare' phase of LWD-based restoreinfoPerforming 'prepare' phase of LWD-based restorecom.vmware.dp.events.preparerestore|Perform 'prepare' phase of LWD restoreExtendedEvent'prepare' phase of LWD-based restore is completedinfo'prepare' phase of LWD-based restore is completedcom.vmware.dp.events.preparerestoredone|'prepare' phase of LWD-based restore is completedExtendedEvent'prepare' phase of LWD-based restore failederror'prepare' phase of LWD-based restore failedcom.vmware.dp.events.preparerestorefailed|'prepare' phase of LWD-based restore failedExtendedEventEnable LWD data protectioninfoEnabling LWD data protectioncom.vmware.dp.events.protect|Enabling LWD data protectionExtendedEventLWD data protection enabledinfoLWD data protection enabledcom.vmware.dp.events.protectdone|LWD data protection enabledExtendedEventFailed to enable LWD data protectionerrorFailed to enable LWD data protectioncom.vmware.dp.events.protectfailed|Failed to enable LWD data protectionExtendedEventQuerying entity for protection infoinfoQuerying entity for protection infocom.vmware.dp.events.queryprotectedentityinfo|Querying entity for protection infoExtendedEventFinished querying entity for protection infoinfoFinished querying entity for protection infocom.vmware.dp.events.queryprotectedentityinfodone|Finished querying entity for protection infoExtendedEventFailed to query entity for protection infoerrorFailed to query entity for protection infocom.vmware.dp.events.queryprotectedentityinfofailed|Failed to query entity for protection infoExtendedEventRetire LWD snapshotinfoRetiring LWD 
snapshotcom.vmware.dp.events.retiresnapshot|Retiring LWD snapshotExtendedEventLWD snapshot is retiredinfoLWD snapshot is retiredcom.vmware.dp.events.retiresnapshotdone|LWD snapshot is retiredExtendedEventFailed to retire LWD snapshoterrorFailed to retire LWD snapshotcom.vmware.dp.events.retiresnapshotfailed|Failed to retire LWD snapshotExtendedEventTake LWD application-consistent snapshotinfoTaking LWD application-consistent snapshotcom.vmware.dp.events.snapshot.applicationconsistent|Taking LWD application-consistent snapshotExtendedEventTake LWD crash-consistent snapshotinfoTaking LWD crash-consistent snapshotcom.vmware.dp.events.snapshot.crashconsistent|Taking LWD crash-consistent snapshotExtendedEventTake LWD metadata-only snapshotinfoTaking LWD metadata-only snapshotcom.vmware.dp.events.snapshot.metadataonly|Taking LWD metadata-only snapshotExtendedEventTake LWD VSS application-consistent snapshotinfoTaking LWD VSS application-consistent snapshotcom.vmware.dp.events.snapshot.vssappconsistent|Taking LWD VSS application-consistent snapshotExtendedEventLWD application-consistent snapshot takeninfoLWD application-consistent snapshot takencom.vmware.dp.events.snapshotdone.applicationconsistent|LWD application-consistent snapshot takenExtendedEventLWD crash-consistent snapshot takeninfoLWD crash-consistent snapshot takencom.vmware.dp.events.snapshotdone.crashconsistent|LWD crash-consistent snapshot takenExtendedEventLWD metadata-only snapshot takeninfoLWD metadata-only snapshot takencom.vmware.dp.events.snapshotdone.metadataonly|LWD metadata-only snapshot takenExtendedEventLWD VSS application-consistent snapshot takeninfoLWD VSS application-consistent snapshot takencom.vmware.dp.events.snapshotdone.vssappconsistent|LWD VSS application-consistent snapshot takenExtendedEventLWD application-consistent snapshot failederrorLWD application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.applicationconsistent|LWD application-consistent snapshot failedExtendedEventLWD crash-consistent snapshot failederrorLWD crash-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.crashconsistent|LWD crash-consistent snapshot failedExtendedEventLWD metadata-only snapshot failederrorLWD metadata-only snapshot failedcom.vmware.dp.events.snapshotfailed.metadataonly|LWD metadata-only snapshot failedExtendedEventLWD VSS application-consistent snapshot failederrorLWD VSS application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.vssappconsistent|LWD VSS application-consistent snapshot failedExtendedEventPerform LWD snapshot syncinfoPerforming LWD snapshot synccom.vmware.dp.events.sync|Performing LWD snapshot syncExtendedEventLWD snapshot sync is completedinfoLWD snapshot sync is completedcom.vmware.dp.events.syncdone|LWD snapshot sync is completedExtendedEventLWD snapshot sync failederrorLWD snapshot sync failedcom.vmware.dp.events.syncfailed|LWD snapshot sync failedExtendedEventDisable LWD data protectioninfoDisabling LWD data protectioncom.vmware.dp.events.unprotect|Disabling LWD data protectionExtendedEventLWD data protection disabledinfoLWD data protection disabledcom.vmware.dp.events.unprotectdone|LWD data protection disabledExtendedEventFailed to disable LWD data protectionerrorFailed to disable LWD data protectioncom.vmware.dp.events.unprotectfailed|Failed to disable LWD data protectionEventExDeployed entity from Content Libraryinfocom.vmware.ovfs.DeployEvent|Deployed entity from Library Item {libraryItemName} in Library {libraryName}EventExFailed to deploy entity from 
Content Libraryerrorcom.vmware.ovfs.DeployFailEvent|Failed to deploy entity from Library Item {libraryItemName} in Library {libraryName}EventExCloned entity to Content Libraryinfocom.vmware.ovfs.ExportEvent|Cloned entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExFailed to clone entity to Content Libraryerrorcom.vmware.ovfs.ExportFailEvent|Failed to clone entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExinfocom.vmware.rbd.activateRuleSet|Activate Rule SetEventExwarningcom.vmware.rbd.fdmPackageMissing|A host in a HA cluster does not have the 'vmware-fdm' package in its image profileEventExwarningcom.vmware.rbd.hostProfileRuleAssocEvent|A host profile associated with one or more active rules was deleted.EventExerrorcom.vmware.rbd.hostScriptFailure|An error encountered while running a user defined script: {scriptName} on the host: {ip}. Status: {status}EventExwarningcom.vmware.rbd.ignoreMachineIdentity|Ignoring the AutoDeploy.MachineIdentity event, since the host is already provisioned through Auto DeployEventExinfocom.vmware.rbd.pxeBootNoImageRule|Unable to PXE boot host since it does not match any rulesEventExinfocom.vmware.rbd.pxeBootUnknownHost|PXE Booting unknown hostEventExinfocom.vmware.rbd.pxeProfileAssoc|Attach PXE ProfileEventExinfocom.vmware.rbd.scriptBundleAssoc|Script Bundle Name: {name} attached to moref {moref}, entity-id {entity-id}EventExerrorcom.vmware.rbd.vmcaCertGenerationFailureEvent|Failed to generate host certificates using VMCAEventExCreated Harbor registryinfocom.vmware.registry.HarborCreateEvent|Created Harbor registry {registryName} on cluster {clusterId}.EventExFailed to create Harbor registryerrorcom.vmware.registry.HarborCreateFailEvent|Failed to create Harbor registry {registryName} on cluster {clusterId}.EventExDeleted Harbor registryinfocom.vmware.registry.HarborDeleteEvent|Deleted Harbor registry {registryName} on cluster {clusterId}.EventExFailed to delete Harbor registryerrorcom.vmware.registry.HarborDeleteFailEvent|Failed to delete Harbor registry {registryName} on cluster {clusterId}.EventExCreated Harbor projectinfocom.vmware.registry.HarborProjectCreateEvent|Created Harbor project {projectName} for registry {registryId}.EventExFailed to create Harbor projecterrorcom.vmware.registry.HarborProjectCreateFailEvent|Failed to create Harbor project {projectName} for registry {registryId}.EventExDeleted Harbor projectinfocom.vmware.registry.HarborProjectDeleteEvent|Deleted Harbor project {projectName} for registry {registryId}.EventExFailed to delete Harbor projecterrorcom.vmware.registry.HarborProjectDeleteFailEvent|Failed to delete Harbor project {projectName} for registry {registryId}.EventExCreated Harbor project memberinfocom.vmware.registry.HarborProjectMemberCreateEvent|Created Harbor project member {memberName} for project {projectName}.EventExFailed to create Harbor project membererrorcom.vmware.registry.HarborProjectMemberCreateFailEvent|Failed to create Harbor project member {memberName} for project {projectName}.EventExDeleted Harbor project memberinfocom.vmware.registry.HarborProjectMemberDeleteEvent|Deleted Harbor project member {memberName} from project {projectName}.EventExFailed to delete Harbor project membererrorcom.vmware.registry.HarborProjectMemberDeleteFailEvent|Failed to delete Harbor project member {memberName} from project {projectName}.EventExUpdated Harbor project memberinfocom.vmware.registry.HarborProjectMemberUpdateEvent|Updated Harbor project member 
{memberName} for project {projectName}.EventExFailed to update Harbor project membererrorcom.vmware.registry.HarborProjectMemberUpdateFailEvent|Failed to update Harbor project member {memberName} for project {projectName}.EventExPurged Harbor projectinfocom.vmware.registry.HarborProjectPurgeEvent|Purged Harbor project {projectName} for registry {registryId}.EventExFailed to purge Harbor projecterrorcom.vmware.registry.HarborProjectPurgeFailEvent|Failed to purge Harbor project {projectName} for registry {registryId}.EventExRestoring Harbor registryinfocom.vmware.registry.HarborRestoreEvent|Restoring Harbor registry {registryName} on cluster {clusterId}.EventExFailed to restore Harbor registryerrorcom.vmware.registry.HarborRestoreFailEvent|Failed to restore Harbor registry {registryName} on cluster {clusterId}.EventExRestored Harbor registryinfocom.vmware.registry.HarborRestoreSuccessEvent|Restored Harbor registry {registryName} on cluster {clusterId}.ExtendedEventProactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!errorcom.vmware.vc.proactivehdw.DbError|Proactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!EventExProactive hardware management: Host is disabled with proactive hardware management.warningcom.vmware.vc.proactivehdw.Disabled|Host is disabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: Host is enabled with proactive hardware management.infocom.vmware.vc.proactivehdw.Enabled|Host is enabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: received a failure health update from vendor.errorcom.vmware.vc.proactivehdw.Failure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: Polled health updates from HSM are discarded due to health update response content size limit being exceeded.warningcom.vmware.vc.proactivehdw.HealthUpdatesResponseLimitExceed|Proactive hardware management: Polled health updates from HSM {VendorDisplayName} are discarded due to health update response content size limit being exceeded. Refer to vSAN health logs for more details.EventExProactive hardware management: Some health updates from HSM are discarded due to validation failures.warningcom.vmware.vc.proactivehdw.HealthUpdatesValidationFail|Proactive hardware management: Some health updates from HSM {VendorDisplayName} are discarded due to validation failures. Refer to vSAN health logs for more details.EventExProactive hardware management: Error occurred when posting host-level event for unregistration of HSMerrorcom.vmware.vc.proactivehdw.HostEventPostFailed|Proactive hardware management: After HSM {VendorDisplayName} was unregistered an internal error prevented a host event from posting. 
The following hosts are affected: {AffectedHosts}.EventExProactive hardware management: Failed to contact an HSMerrorcom.vmware.vc.proactivehdw.HsmCommunicationError|Proactive hardware management: Failed to contact HSM with vendor: {VendorDisplayName}.EventExProactive hardware management: Error occured in poll HSM requesterrorcom.vmware.vc.proactivehdw.HsmRequestError|Proactive hardware management: Internal error occurred during polling HSM from vendor {VendorDisplayName}.EventExProactive hardware management: HSM is unregistered.infocom.vmware.vc.proactivehdw.HsmUnregistration|Proactive hardware management: HSM is unregistered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: received a predictive failure health update from vendor.warningcom.vmware.vc.proactivehdw.PredictiveFailure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: HSM is unregistered but with a failure in removing resource bundle.errorcom.vmware.vc.proactivehdw.ResourceBundleCleanupError|Proactive hardware management: HSM from {VendorDisplayName} is unregistered but with a failure in removing resource bundle - likely the resource bundle is currently in use. Please refer to vSAN health logs for the underlying cause and perform manual clean up on the resource bundle.EventExProactive hardware management: Failed to create/update subscription for HSM due to a communication error with HSMerrorcom.vmware.vc.proactivehdw.SubscriptionHsmCommError|Proactive hardware management: Failed to create/update subscription for HSM {VendorDisplayName} due to a communication error with HSM.EventExProactive hardware management: Failed to create/update subscription for HSM due to internal errorerrorcom.vmware.vc.proactivehdw.SubscriptionInternalError|Proactive hardware management: Failed to perform subscription create/update for HSM {VendorDisplayName} due to an internal error. Please refer to the vSAN health logs for more details.EventExProactive hardware management: A new HSM is registered.infocom.vmware.vc.proactivehdw.registration.NewRegistration|Proactive hardware management: A new HSM is registered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: HSM registration is updated.infocom.vmware.vc.proactivehdw.registration.UpdateSuccess|Proactive hardware management: The registration information on the following HSM: '{VendorDisplayName}' has been updated. Here are its supported health update infos: '{EnabledHealthUpdateInfos}'ExtendedEventinfocom.vmware.vcIntegrity.CancelTask|Canceling task on [data.name].ExtendedEventinfocom.vmware.vcIntegrity.CheckNotification|Successfully downloaded notifications. New notifications: [data.Notifications]ExtendedEventerrorcom.vmware.vcIntegrity.CheckNotificationFailed|Could not download notifications.ExtendedEventerrorcom.vmware.vcIntegrity.CheckPXEBootHostFailure|Cannot determine whether host {host.name} is PXE booted. 
The host will be excluded for the current operation.ExtendedEventwarningcom.vmware.vcIntegrity.ClusterConfigurationOutOfCompliance|Hosts in Cluster [data.resource] are out of compliance.ExtendedEventerrorcom.vmware.vcIntegrity.ClusterOperationCancelledDueToCertRefresh|In-flight VUM task on Cluster [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventwarningcom.vmware.vcIntegrity.CriticallyLowDiskSpace|VMware vSphere Lifecycle Manager is critically low on storage space! Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.DisableToolsRemediateOnReboot|Successfully disabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.DisableToolsRemediateOnRebootFailed|Could not disable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.DownloadAlert|VMware vSphere Lifecycle Manager download alert (critical/total): ESX [data.esxCritical]/[data.esxTotal]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadFailedPatchBinary|Could not download patch packages for following patches: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestPackage|Successfully downloaded guest patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestPackageFailed|Could not download guest patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixPackage|Successfully downloaded guest patch packages for UNIX. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixPackageFailed|Could not download guest patch packages for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixUpdate|Successfully downloaded guest patch definitions for UNIX. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixUpdateFailed|Could not download guest patch definitions for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUpdate|Successfully downloaded guest patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUpdateFailed|Could not download guest patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostPackage|Successfully downloaded host patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostPackageFailed|Could not download host patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostUpdate|Successfully downloaded host patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostUpdateFailed|Could not download host patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.EnableToolsRemediateOnReboot|Successfully enabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.EnableToolsRemediateOnRebootFailed|Could not enable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.FailToLock|There are running tasks for the entity [data.name] that cannot finish within a specific time. The operation will stop.ExtendedEventcom.vmware.vcIntegrity.FtFailedEvent|ExtendedEventerrorcom.vmware.vcIntegrity.GADvdMountError|VMware vSphere Lifecycle Manager Guest Agent could not access the DVD drive on {vm.name}. 
Verify that a DVD drive is available and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAError|An internal error occurred in communication with VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Verify that the VM is powered on and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAInstallFailed|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Make sure that the VM is powered on.ExtendedEventinfocom.vmware.vcIntegrity.GAInstalled|VMware vSphere Lifecycle Manager Guest Agent successfully installed on {vm.name}.ExtendedEventerrorcom.vmware.vcIntegrity.GARuntimeError|An unknown internal error occurred during the required operation on {vm.name}. Check the logs for more details and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GATimeout|VMware vSphere Lifecycle Manager Guest Agent could not respond in time on {vm.name}. Verify that the VM is powered on and that the Guest Agent is running.ExtendedEventwarningcom.vmware.vcIntegrity.HostConfigurationOutOfCompliance|Configuration of Host [data.resource] is out of compliance.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallClose|Close [data.name] firewall ports.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallOpen|Open [data.name] firewall ports.ExtendedEventerrorcom.vmware.vcIntegrity.HostOperationCancelledDueToCertRefresh|In-flight VUM task on Host [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportCancelled|Host patch offline bundle upload is canceled by user.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportSuccess|[data.numBulletins] new bulletins uploaded successfully through offline bundle.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchBundleImportUnknownError|Host patch offline bundle upload did not succeed.ExtendedEventcom.vmware.vcIntegrity.HostPatchInputRecalledFailure|ExtendedEventcom.vmware.vcIntegrity.HostPatchPrerequisiteRecalledFailure|ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be remediated. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry remediation operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be remediated. Remove either of the patch from the baseline and retry the remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be staged. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be staged. 
Remove either of the patch from the baseline and retry the stage operation.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmFtEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster and contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmScanEvent|Cannot scan host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmStageEvent|Cannot stage host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtDiffPatchesEvent|Host {host.name} has FT enabled VMs. If you apply different patches to hosts in a cluster, FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtEvent|Cannot remediate host {host.name} because it contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtPairEvent|Host {host.name} has FT enabled VMs. The host on which the Secondary VMs reside is not selected for remediation. As a result FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedHacEvent|Cannot remediate host {host.name} because it is a part of a HA admission control enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedPxeUpgradeUnsupported|Upgrade operations are not supported on host {host.name} because it is PXE booted.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedRemovableDeviceEvent|Cannot remediate host {host.name} because it has VMs with a connected removable device. Disconnect all removable devices before remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorEsxFileDownload|Host [data.name] cannot download files from the VMware vSphere Lifecycle Manager patch store. Check the network connectivity and firewall setup, and verify that the host can access the configured patch store.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorNotInstallable|The selected patches [data.arg1] cannot be installed on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateConflictDependencies|The patches selected for remediation on the host [data.name] depend on other patches that have conflicts.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDefault|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDeviceAttached|Remediation did not succeed for [data.name]. The host has virtual machines [data.arg1] with connected removable media devices. This prevents the host from entering maintenance mode. Disconnect the removable devices and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateEnterMmode|Remediation did not succeed for [data.name]. The host could not enter maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateExitMmode|Remediation did not succeed for [data.name]. The host could not exit maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReboot|Remediation did not succeed for [data.name]. The host did not reboot after remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRebootReconnect|Remediation did not succeed for [data.name]. 
VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect after a reboot.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReconnect|Remediation did not succeed for [data.name]. VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRestoreVm|Remediation did not succeed for [data.name]. Restoring the power state or device connection state for one or more virtual machines on the host did not succeed.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateMetadataCorrupt|Remediation did not succeed for [data.name]. The patch metadata is corrupted. This might be caused by an invalid format of metadata content. You can try to re-download the patches.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateVibDownload|Remediation did not succeed for [data.name]. There were errors while downloading one or more software packages. Check the VMware vSphere Lifecycle Manager network connectivity settings.ExtendedEventcom.vmware.vcIntegrity.HostUpdateErrorVsanHealthCheckFailed|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeAgentDeployFailure|Cannot deploy upgrade agent on host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailBootDiskSize|The boot disk has a size of [data.found] MiB, the minimum requirement of the upgrade image is [data.expected] MiB.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailConflictingVibs|The upgrade contains conflicting VIBs. Remove the conflicting VIBs or use Image Builder to create a custom upgrade ISO image that contains the newer versions of the conflicting VIBs, and try to upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreakageUnsure|Cannot determine whether the upgrade breaks Cisco Nexus 1000V virtual network switch feature on the host. If the host does not have the feature, you can ignore this warning.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreaks|Cisco Nexus 1000V virtual network switch feature installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailEESXInsufficientSpaceForImage|Cannot create a ramdisk of size [data.expected]MB to store the upgrade image. Check if the host has sufficient memory.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailESXInsufficientSpaceForImage|Upgrade requires at least [data.expected]MB free space on boot partition to store the upgrade image, only [data.found]MB found. Retry after freeing up sufficient space or perform a CD-based installation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailHostHardwareMismatch|The upgrade is not supported on the host hardware. The upgrade ISO image contains VIBs that failed the host hardware compatibility check.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPInImage|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V software package [data.expected] installed on the host. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPUSupportedHost|The host is managing a DPU(s) and is a part of vLCM baselines-managed cluster, which is not supported. 
Move the host to vLCM image-managed cluster and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleHostAcceptance|The upgrade package is not compatible with the host. Use an upgrade package that meets the host's acceptance level or change the host's acceptance level to match that of the upgrade package.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePartitionLayout|The host cannot be upgraded due to incompatible partition layout.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePasswords|The passwords cannot be migrated because the password encryption scheme is incompatible.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleSphereletVersion|Spherelet version [data.found] is not compatible with ESXi 8.0 and later version. Please upgrade your WCP cluster to install a compatible Spherelet version, or remove Spherelet if the host is not in a WCP cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleWithDvsCP|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientEntropyCache|Storage entropy cache is not full. A full entropy cache is required for upgrade. Refer to KB 89854 for steps on how to refill the cache.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientMemory|Insufficient memory found on the host: [data.expected]MB required, [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientSpaceForConfig|Upgrade requires at least [data.expected]MB free space on a local VMFS datastore, only [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailLockerSpaceAvail|The system has insufficient locker space for the image profile.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPBreaksDvsCP|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image that is compatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPInImage|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image [data.found]. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingGunzipChecksumVibs|These VIB(s) on the host do not have the required sha-256 gunzip checksum for their payloads: [data.found]. This will prevent VIB security verification and secure boot from functioning properly. Please remove these VIBs and check with your vendor for a replacement of these VIBs.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeBootBank|The system image on the attached iso lacks a storage driver for the installed bootbank.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeNic|The system image on the attached iso lacks a NIC driver for the management network traffic.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMD5RootPassword|The root password is not using MD5 hashing, causing it to be authenticated up to only 8 characters. 
For instructions on how to correct this, see VMware KB 1024500 at http://kb.vmware.com/kb/1024500.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMinCpuCores|New ESXi version requires a minimum of [data.expected] processor cores.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoVt|Processor does not support hardware virtualization or it is disabled in BIOS. Virtual machine performance may be slow.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNonVmwareSoftware|The software modules [data.found] found on the host are not part of the upgrade image. These modules will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNvdsToCvdsMigration|ESXi host is not ready for NSX-T vSphere Distributed Switch (VDS) migration included with this ESXi upgrade. Please run Upgrade Readiness Tool (URT) from the NSX-T Manager managing this host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNxbitEnabled|No eXecute (NX) bit is not enabled on the host. New ESXi version requires a CPU with NX/XD bit supported and enabled.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPendingReboot|Host software configuration requires a reboot. Reboot the host and try upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPowerPathBreaks|EMC PowerPath module [data.found] installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailRRFTVMsPresent|Legacy FT is not compatible with upgraded version. Disable legacy FT.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailScriptInitFailed|Host upgrade validity checks are not successful.ExtendedEventcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailTbootRequired|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnknown|The upgrade precheck script returned unknown error.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedConfig|Error in ESX configuration file (esx.conf).ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedDevices|Unsupported devices [data.found] found on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedHostVersion|Host version [data.found] is not supported for upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedLongMode|Host CPU is unsupported. New ESXi version requires a 64-bit CPU with support for LAHF/SAHF instructions in long mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedSHA1Cert|SHA-1 signature found in host certificate {data.cert} with subject {data.subject}. Support for certificates with weak signature algorithm SHA-1 has been removed in ESXi 8.0. To proceed with upgrade, replace it with a SHA-2 signature based certificate. Refer to release notes and KB 89424 for more details.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedTPMVersion|TPM 1.2 device detected. Support for TPM version 1.2 is discontinued. Installation may proceed, but may cause the system to behave unexpectedly.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailVFATCorruption|A problem with one or more vFAT bootbank partitions was detected. 
Please refer to KB 91136 and run dosfsck on bootbank partitions.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeProgressAborted|Host upgrade installer stopped.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressAuth|Host upgrade in progress: Configuring authentication.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressBootloader|Host upgrade in progress: Boot setup.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressClearpart|Host upgrade in progress: Clearing partitions.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressComplete|Host upgrade installer completed.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressKeyboard|Host upgrade in progress: Setting keyboard.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressLanguage|Host upgrade in progress: Setting language.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMigrating|Host upgrade in progress: Migrating ESX v3 configuration to ESX v4.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMount|Host upgrade in progress: Mounting file systems.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressNetworking|Host upgrade in progress: Installing network configuration.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPackages|Host upgrade in progress: Installing packages.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartphys|Host upgrade in progress: Partitioning physical hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartvirt|Host upgrade in progress: Partitioning virtual hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPostscript|Host upgrade in progress: Running postinstallation script.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressRootpass|Host upgrade in progress: Setting root passwordExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressTimezone|Host upgrade in progress: Setting timezone.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressUnknown|Host upgrade in progress.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeRunScriptFailure|Cannot run upgrade script on host.ExtendedEventerrorcom.vmware.vcIntegrity.ImageRecommendationGenerationError|The image recommendation generation failed.ExtendedEventinfocom.vmware.vcIntegrity.ImageRecommendationGenerationFinished|The image recommendation generation finished.ExtendedEventerrorcom.vmware.vcIntegrity.IncompatibleTools|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name} because VMware Tools is not installed or is of an incompatible version. 
The required version is [data.requiredVersion] and the installed version is [data.installedVersion].ExtendedEventinfocom.vmware.vcIntegrity.InstallAddOnUpdate|The following additional patches are included to resolve a conflict for installation on [data.entityName]: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestion|To resolve a conflict for installation on [data.entityName], the following additional patches might need to be included in the baseline: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestionNotFound|VMware vSphere Lifecycle Manager could not find patches to resolve the conflict for installation on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdate|Installation of patches [data.updateId] started on host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdateComplete|Installation of patches succeeded on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.InstallUpdateError|Could not install patches on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.LinuxOffLineScanNotSupported|Cannot scan [data.name] for patches. Scan of powered off or suspended Linux VMs is not supported.ExtendedEventwarningcom.vmware.vcIntegrity.LowDiskSpace|VMware vSphere Lifecycle Manager is running out of storage space. Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.MetadataCorrupted|Patch definition for [data.name] is corrupt. Check the logs for more details. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.MetadataNotFound|Patch definitions for [data.name] are missing. Download patch definitions first.ExtendedEventerrorcom.vmware.vcIntegrity.NoRequiredLicense|There is no VMware vSphere Lifecycle Manager license for [data.name] for the required operation.ExtendedEventinfocom.vmware.vcIntegrity.NotificationCriticalInfoAlert|VMware vSphere Lifecycle Manager informative notification (critical) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationDownloadAlert|VMware vSphere Lifecycle Manager notification download alertExtendedEventinfocom.vmware.vcIntegrity.NotificationImportantInfoAlert|VMware vSphere Lifecycle Manager informative notification (important) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationModerateInfoAlert|VMware vSphere Lifecycle Manager informative notification (moderate) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallAlert|VMware vSphere Lifecycle Manager recall alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallFixAlert|VMware vSphere Lifecycle Manager recall fix alertExtendedEventerrorcom.vmware.vcIntegrity.OperationCancelledDueToCertRefresh|In-flight VUM task on [data.name] is cancelled due to VC TLS certificate replacement. 
For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventcom.vmware.vcIntegrity.PXEBootedHostEvent|ExtendedEventinfocom.vmware.vcIntegrity.PackageImport|Package [data.name] is successfully imported.ExtendedEventerrorcom.vmware.vcIntegrity.PackageImportFailure|Import of package: [data.name] did not succeed.ExtendedEventinfocom.vmware.vcIntegrity.RebootHostComplete|Host [data.entityName] is successfully rebooted.ExtendedEventerrorcom.vmware.vcIntegrity.RebootHostError|Cannot reboot host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostStart|Start rebooting host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostWait|Waiting for host [data.entityName] to reboot.ExtendedEventerrorcom.vmware.vcIntegrity.ReconfigureClusterFailedEvent|VMware vSphere Lifecycle Manager could not restore HA admission control/DPM settings for cluster {computeResource.name} to their original values. These settings have been changed for patch installation. Check the cluster settings and restore them manually.ExtendedEventinfocom.vmware.vcIntegrity.Remediate|Remediation succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedHost|Could not remediate {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedVm|Could not remediate {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDpmDisableHost|Could not remediate host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.RemediateFailed|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostInvalidPowerState|Cannot remediate the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostOnUnsupportedHost|Could not remediate {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediateOrphanedVm|Could not remediate orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.RemediateStart|Remediating object [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateVmOnUnsupportedHost|Could not remediate {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediationStatusEvent|Current progress of remediation: [data.noOfSucceededHosts] hosts completed successfully, [data.noOfFailedHosts] hosts completed with errors, [data.noOfHostsBeingRemediatedCurrently] hosts are being remediated, [data.noOfWaitingHosts] hosts are waiting to start remediation, and [data.noOfRetryHosts] hosts could not enter maintenance mode and are waiting to retry.ExtendedEventinfocom.vmware.vcIntegrity.Scan|Successfully scanned [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanCancelled|Scanning of [data.name] is canceled by user.ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedHost|Could not scan {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedVm|Could not scan {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDpmDisableHost|Could not scan host {host.name} because its power state is invalid. 
The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.ScanFailed|Could not scan [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostInvalidPowerState|Cannot scan the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostOnUnsupportedHost|Could not scan {host.name} for patches because it is of unsupported version [data.version].ExtendedEventwarningcom.vmware.vcIntegrity.ScanMissingUpdate|Found a missing patch: [data.message] when scanning [data.name]. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.ScanOrphanedVm|Could not scan orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.ScanStart|Scanning object [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.ScanUnsupportedVolume|{vm.name} contains an unsupported volume [data.volumeLabel]. Scan results for this VM might be incomplete.ExtendedEventerrorcom.vmware.vcIntegrity.ScanVmOnUnsupportedHost|Could not scan {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventerrorcom.vmware.vcIntegrity.SequentialRemediateFailedEvent|An error occured during the sequential remediation of hosts in cluster {computeResource.name}. Check the related events for more details.ExtendedEventinfocom.vmware.vcIntegrity.SkipSuspendedVm|Suspended VM {vm.name} has been skipped.ExtendedEventinfocom.vmware.vcIntegrity.Stage|Staging succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.StageDisconnectedHost|Could not stage patches to {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageDpmDisableHost|Could not stage patches to host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.StageFailed|Staging did not succeed for [data.name][data.message].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostInvalidPowerState|Cannot stage patches to the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostOnUnsupportedHost|Could not stage patches to {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.StageStart|Staging patches to host [data.name].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdate|Started staging of patches [data.updateId] on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdateComplete|Staging of patch to [data.entityName] succeeded.ExtendedEventerrorcom.vmware.vcIntegrity.StageUpdateError|Cannot stage patch [data.updateId] to [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.SysprepDisabled|Sysprep is disabled during the remediation.ExtendedEventinfocom.vmware.vcIntegrity.SysprepEnabled|Sysprep settings are restored.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepHandleFailure|Cannot access the sysprep settings for VM {vm.name}. Retry the operation after disabling sysprep for the VM.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepNotFound|Cannot locate the sysprep settings for VM {vm.name}. For Windows 7 and Windows 2008 R2, offline VM remediation is supported only if the system volume is present in the primary disk partition. 
Retry the operation after disabling sysprep for the VM.ExtendedEventinfocom.vmware.vcIntegrity.ToolsRemediate|VMware Tools upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ToolsRemediateFailed|VMware Tools upgrade did not succeed for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.ToolsScan|Successfully scanned [data.name] for VMware Tools upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.ToolsScanFailed|Could not scan [data.name] for VMware Tools upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsScanInstallNotSupported|VMware Tools is not installed on [data.name]. VMware vSphere Lifecycle Manager supports upgrading only an existing VMware Tools installation.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeRemediateSkippedOnHost|VMware Tools upgrade was not performed on {vm.name}. VMware Tools upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade is not supported for virtual appliances.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeScanSkippedOnHost|VMware Tools upgrade scan was not performed on {vm.name}. VMware Tools upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedHostRemediateSpecialVMEvent|The host [data.name] has a VM [data.vm] with VMware vSphere Lifecycle Manager or VMware vCenter Server installed. The VM must be moved to another host for the remediation to proceed.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedLinuxAction|Action is not supported for Linux VM/VA {vm.name}. VMware Tools is not installed or the machine cannot start.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedOs|Scan or remediation is not supported on [data.name] because of unsupported OS [data.os].ExtendedEventinfocom.vmware.vcIntegrity.UnsupportedPXEBootHost|Scanning, remediation, and staging are not supported on PXE booted ESXi hosts.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedSpecialVMEvent|VM [data.name] has either VMware vSphere Lifecycle Manager or VMware vCenter Server installed. This VM will be ignored for scan and remediation.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedVaAction|Action is not supported for offline or suspended virtual appliance {vm.name}. ExtendedEventerrorcom.vmware.vcIntegrity.VAAutoUpdateOn|Auto update is set to ON for virtual appliance [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADiscovery|Successfully discovered virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADiscoveryFailed|Could not discover virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadGenericFailure|Could not download virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadInvalidUrl|[data.name] is not a valid virtual appliance download URL.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadMetadataFailure|Could not download virtual appliance upgrade metadata for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADownloadSuccess|Successfully downloaded virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VARepositoryAddressNotSet|No repository address is set for virtual appliance [data.name]. 
The appliance does not support updates by vCenter Server.ExtendedEventinfocom.vmware.vcIntegrity.VAScan|Successfully scanned [data.name] for VA upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VAScanFailed|Could not scan [data.name] for VA upgrades.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeRemediate|Virtual Hardware upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateFailed|Could not perform Virtual Hardware upgrade on [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateSkippedOnHost|Virtual Hardware upgrade was not performed for {vm.name}. Virtual Hardware upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade is not supported for virtual appliances.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeScan|Successfully scanned [data.name] for Virtual Hardware upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeScanFailed|Could not scan [data.name] for Virtual Hardware upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeScanSkippedOnHost|Virtual Hardware upgrade scan was not performed for {vm.name}. Virtual Hardware upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotInstalled|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not installed. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotLatest|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not the latest version supported by the host. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnknown|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is unknown. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnmanaged|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is not managed by VMware vSphere. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsAutoUpgradeUnsupported|The version of VMware Tools installed in {vm.name} does not support automatic upgrade. Upgrade VMware Tools manually.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsNotRunning|Error while waiting for VMware Tools to respond. Verify that VMware Tools is running in VM {vm.name}.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForInstall|Patch [data.inputBulletin] was excluded from the remediation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the remediation. You can also add the baselines to a baseline group for convenience and perform the remediation.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForStage|Patch [data.inputBulletin] was excluded from the stage operation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the stage operation. 
You can also add the baselines to a baseline group for convenience and perform the stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.VmDevicesRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original removable device connection settings for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the settings for the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmMigrationFailedEvent|Cannot migrate VM {vm.name} from [data.srcHost] to [data.destHost].ExtendedEventerrorcom.vmware.vcIntegrity.VmPowerRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original power state for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the original power state of the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmotionCompatibilityCheckFailedEvent|Cannot check compatibility of the VM {vm.name} for migration with vMotion to host [data.hostName].EventExAgency createdinfocom.vmware.vim.eam.agency.create|{agencyName} created by {ownerName}EventExAgency destroyedinfocom.vmware.vim.eam.agency.destroyed|{agencyName} removed from the vSphere ESX Agent ManagerEventExAgency state changedinfocom.vmware.vim.eam.agency.goalstate|{agencyName} changed goal state from {oldGoalState} to {newGoalState}EventExAgency status changedinfocom.vmware.vim.eam.agency.statusChanged|Agency status changed from {oldStatus} to {newStatus}EventExAgency reconfiguredinfocom.vmware.vim.eam.agency.updated|Configuration updated {agencyName}EventExCluster Agent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterPowerOn|Cluster Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterProvisioning|Cluster Agent VM {vm.name} has been provisioned. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM is about to be powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailablePrePowerOn|Cluster Agent VM {vm.name} is about to be powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent added to hostinfocom.vmware.vim.eam.agent.created|Agent added to host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyed|Agent removed from host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyedNoHost|Agent removed from host ({agencyName})EventExAgent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterPowerOn|Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterProvisioning|Agent VM {vm.name} has been provisioned. 
Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent status changedinfocom.vmware.vim.eam.agent.statusChanged|Agent status changed from {oldStatus} to {newStatus}EventExAgent VM is deletedinfocom.vmware.vim.eam.agent.task.deleteVm|Agent VM {vmName} is deleted on host {host.name} ({agencyName})EventExAgent VM is provisionedinfocom.vmware.vim.eam.agent.task.deployVm|Agent VM {vm.name} is provisioned on host {host.name} ({agencyName})EventExAgent VM powered offinfocom.vmware.vim.eam.agent.task.powerOffVm|Agent VM {vm.name} powered off, on host {host.name} ({agencyName})EventExAgent VM powered oninfocom.vmware.vim.eam.agent.task.powerOnVm|Agent VM {vm.name} powered on, on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibInstalled|Agent installed VIB {vib} on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibUninstalled|Agent uninstalled VIB {vib} on host {host.name} ({agencyName})EventExwarningcom.vmware.vim.eam.issue.agencyDisabled|Agency is disabledEventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentOVF|Unable to access agent OVF package at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentVib|Unable to access agent VIB module at {url} ({agencyName})EventExcom.vmware.vim.eam.issue.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInMaintenanceMode|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterResources|Cluster Agent VM cannot be powered on due to insufficient resources on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterSpace|Cluster Agent VM on cluster {computeResource.name} cannot be provisioned due to insufficient space on cluster datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.invalidConfig|Cluster Agent VM {vm.name} on cluster {computeResource.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmDatastore|Cluster Agent VM datastore(s) {customAgentVmDatastoreName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmNetwork|Cluster Agent VM network(s) {customAgentVmNetworkName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.ovfInvalidProperty|OVF environment used to provision cluster Agent VM on cluster {computeResource.name} has one or more invalid properties ({agencyName})EventExcom.vmware.vim.eam.issue.cluster.agent.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotDeployed|Cluster Agent VM is missing on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotRemoved|Cluster Agent VM {vm.name} is provisioned when it should be removed ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOff|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOn|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmSuspended|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be 
powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostInMaintenanceMode|Agent cannot complete an operation since the host {host.name} is in maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.hostInStandbyMode|Agent cannot complete an operation since the host {host.name} is in standby mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostNotReachable|Host {host.name} must be powered on and connected to complete agent operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.immediateHostRebootRequired|Host {host.name} must be rebooted immediately to unblock agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.incompatibleHostVersion|Agent is not deployed due to incompatible host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientResources|Agent cannot be provisioned due to insufficient resources on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientSpace|Agent on {host.name} cannot be provisioned due to insufficient space on datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotDeleteSoftware|Cannot remove the Baseline associated with agency {agencyName} from VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotStageSoftware|The software defined by agency {agencyName} cannot be staged in VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.vUMUnavailable|VMware Update Manager was unavailable during agency {agencyName} operationsEventExerrorcom.vmware.vim.eam.issue.invalidConfig|Agent VM {vm.name} on host {host.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmDatastore|No agent datastore configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmNetwork|No agent network configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmDatastore|Agent datastore(s) {customAgentVmDatastoreName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmNetwork|Agent network(s) {customAgentVmNetworkName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmDatastore|Agent datastore cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmNetwork|Agent network(s) cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidFormat|OVF used to provision agent on host {host.name} has invalid format ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidProperty|OVF environment used to provision agent on host {host.name} has one or more invalid properties ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotConfigureSolutions|The required solutions defined by agency {agencyName} cannot be configured in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotUploadDepot|Software defined by agency {agencyName} cannot be uploaded in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.inaccessibleDepot|Unable to access software defined by agency {agencyName}EventExerrorcom.vmware.vim.eam.issue.personality.agency.invalidDepot|Software defined by agency {agencyName} contains invalid vSphere Lifecycle Manager related 
metadataEventExerrorcom.vmware.vim.eam.issue.personality.agency.pMUnavailable|vSphere Lifecycle Manager was unavailable during agency {agencyName} operationsEventExinfocom.vmware.vim.eam.issue.personality.agent.awaitingPMRemediation|Agent requires application of configured solutions through vSphere Lifecycle Manager on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agent.blockedByAgencyOperation|Agency issues related to vSphere Lifecycle Manager require resolution to unblock host {host.name} ({agencyName})EventExinfocom.vmware.vim.eam.issue.resolved|Issue {type} resolved (key {key})EventExerrorcom.vmware.vim.eam.issue.vibCannotPutHostInMaintenanceMode|Cannot put host into maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.vibCannotPutHostOutOfMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.vibDependenciesNotMetByHost|VIB module dependencies for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibInvalidFormat|Invalid format for VIB module at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibNotInstalled|VIB module for agent is not installed/removed on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequirementsNotMetByHost|VIB system requirements for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostInMaintenanceMode|Host must be put into maintenance mode to complete agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostReboot|Host {host.name} must be rebooted to complete agent VIB installation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualInstallation|VIB {vib} requires manual installation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualUninstallation|VIB {vib} requires manual uninstallation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmCorrupted|Agent VM {vm.name} on host {host.name} is corrupted ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmDeployed|Agent VM {vm.name} is provisioned on host {host.name} when it should be removed ({agencyName})EventExcom.vmware.vim.eam.issue.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.vmNotDeployed|Agent VM is missing on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmOrphaned|Orphaned agent VM {vm.name} on host {host.name} detected ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOff|Agent VM {vm.name} on host {host.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOn|Agent VM {vm.name} on host {host.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmRequiresHostOutOfMaintenanceMode|Agent cannot deploy Agent VM since the host {host.name} is in maintenance mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmSuspended|Agent VM {vm.name} on host {host.name} is expected to be powered on but is suspended ({agencyName})ExtendedEventInvalid loginwarningcom.vmware.vim.eam.login.invalid|Failed login to vSphere ESX Agent ManagerEventExSuccessful login to vSphere ESX Agent Managerinfocom.vmware.vim.eam.login.succeeded|Successful login by {user} into vSphere ESX Agent ManagerEventExUser logged out of vSphere ESX Agent Managerinfocom.vmware.vim.eam.logout|User {user} logged out of vSphere ESX Agent Manager by logging out of the vCenter serverEventExUnauthorized access in vSphere ESX Agent 
Managerwarningcom.vmware.vim.eam.unauthorized.access|Unauthorized access by {user} in vSphere ESX Agent ManagerEventExChecked in virtual machine into a virtual machine template iteminfocom.vmware.vmtx.LibraryItemCheckInEvent|Checked in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to check in virtual machine into a virtual machine template itemerrorcom.vmware.vmtx.LibraryItemCheckInFailEvent|Failed to check in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'EventExDeleted the virtual machine checked out from the VM template iteminfocom.vmware.vmtx.LibraryItemCheckOutDeleteEvent|Deleted the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExFailed to delete the virtual machine checked out from the VM template itemerrorcom.vmware.vmtx.LibraryItemCheckOutDeleteFailEvent|Failed to delete the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExChecked out virtual machine template item as a virtual machineinfocom.vmware.vmtx.LibraryItemCheckOutEvent|Checked out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExFailed to check out virtual machine template item as a virtual machineerrorcom.vmware.vmtx.LibraryItemCheckOutFailEvent|Failed to check out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExA virtual machine checked out from the VM template item was orphaned after restorewarningcom.vmware.vmtx.LibraryItemCheckoutOrphanedOnRestoreEvent|A virtual machine (ID: {vmId}) checked out from the VM template item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was orphaned after restoreEventExCloned virtual machine to Content Library as VM templateinfocom.vmware.vmtx.LibraryItemCreateEvent|Cloned virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExFailed to clone virtual machine to Content Library as VM templateerrorcom.vmware.vmtx.LibraryItemCreateFailEvent|Failed to clone virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExDeleted a version of the virtual machine template iteminfocom.vmware.vmtx.LibraryItemDeleteVersionEvent|Deleted VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to delete a version of the virtual machine template itemerrorcom.vmware.vmtx.LibraryItemDeleteVersionFailEvent|Failed to delete VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'EventExDeployed virtual machine from Content Libraryinfocom.vmware.vmtx.LibraryItemDeployEvent|Deployed virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExFailed to deploy virtual machine from Content Libraryerrorcom.vmware.vmtx.LibraryItemDeployFailEvent|Failed to deploy virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExRolled back virtual machine template item to a previous versioninfocom.vmware.vmtx.LibraryItemRollbackEvent|Rolled back library item '{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'ExtendedEventFailed to roll back virtual machine template item to a previous versionerrorcom.vmware.vmtx.LibraryItemRollbackFailEvent|Failed to roll back library item 
'{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'EventExA virtual machine template managed by Content Library was converted to a virtual machineerrorcom.vmware.vmtx.LibraryItemTemplateConvertedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was converted to a virtual machineEventExA virtual machine template managed by Content Library was converted to a virtual machine after restorewarningcom.vmware.vmtx.LibraryItemTemplateConvertedOnRestoreEvent|The virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was found converted to a virtual machine after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was deletedEventExCould not locate a virtual machine template managed by Content Library after restorewarningcom.vmware.vmtx.LibraryItemTemplateDeletedOnRestoreEvent|Could not locate the virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateLatestVersionDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) is rolled back to the previous version because the latest VM template (ID: {vmId}) was deletedEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplatePreviousVersionDeletedEvent|Previous VM template (ID: {vmId}) of the library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was deletedEventExA virtual machine template managed by Content Library was renamedwarningcom.vmware.vmtx.LibraryItemTemplateRenamedEvent|The name of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will change to '{newItemName}' because the virtual machine template (ID: {vmId}) that the item manages was renamedExtendedEventAdded witness host to the cluster.infoAdded witness host to the cluster.com.vmware.vsan.clusterconfig.events.witnessadditiondone|Added witness host to the cluster.ExtendedEventRemoved witness host from the cluster.infoRemoved witness host from the cluster.com.vmware.vsan.clusterconfig.events.witnessremovaldone|Removed witness host from the cluster.ExtendedEventAdd disk group back to the vSAN cluster.infoAdd disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.adddisks|Add disk group back to the vSAN cluster on host {host.name}.ExtendedEventFailed to add disk group back to the vSAN cluster.errorFailed to add disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.addfail|Failed to add disk group back to the vSAN cluster on host {host.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is done.com.vmware.vsan.diskconversion.events.formatdone|Disk format conversion is done on cluster {computeResource.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is 
done.com.vmware.vsan.diskconversion.events.formathostdone|Disk format conversion is done on host {host.name}.ExtendedEventFailed to migrate vsanSparse objects.errorFailed to migrate vsanSparse objects.com.vmware.vsan.diskconversion.events.migrationfail|Failed to migrate vsanSparse objects on cluster {computeResource.name}.ExtendedEventNo disk conversion performed, all mounted disk groups on host are compliantinfoNo disk conversion performed, all mounted disk groups on host are compliant.com.vmware.vsan.diskconversion.events.noneed|No disk conversion performed, all mounted disk groups on host {host.name} are already compliant.ExtendedEventCheck existing objects on the vSAN cluster.infoCheck existing objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objectcheck|Check existing objects on the vSAN cluster.ExtendedEventObject conversion is done.infoObject conversion is done.com.vmware.vsan.diskconversion.events.objectdone|Object conversion is done.ExtendedEventFailed to convert objects on the vSAN cluster.errorFailed to convert objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objecterror|Failed to convert objects on the vSAN cluster.ExtendedEventRemove disk group from the vSAN cluster.infoRemove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removedisks|Remove disk group from the vSAN cluster on host {host.name}.ExtendedEventFailed to remove disk group from the vSAN cluster.errorFailed to remove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removefail|Failed to remove disk group on host {host.name} from the vSAN cluster.ExtendedEventRestore disk group from last break point.infoRestore disk group from last break point.com.vmware.vsan.diskconversion.events.restore|Restore disk group from last break point.ExtendedEventNo disk conversion performed, host has no mounted disk groups.infoNo disk conversion performed, host has no mounted disk groups.com.vmware.vsan.diskconversion.events.skiphost|No disk conversion performed, host {host.name} has no mounted disk groups.ExtendedEventCheck cluster status for disk format conversion.infoCheck cluster status for disk format conversion.com.vmware.vsan.diskconversion.events.statuscheck|Check status of cluster {computeResource.name} for disk format conversion.ExtendedEventcom.vmware.vsan.diskconversion.events.syncingtimeout|ExtendedEventUpdate the vSAN cluster system settings.infoUpdate the vSAN cluster system settings.com.vmware.vsan.diskconversion.events.updatesetting|Update the vSAN cluster system settings on host {host.name}.ExtendedEventDisk format conversion failed in what if upgrade.infoDisk format conversion failed in what if upgrade check.com.vmware.vsan.diskconversion.events.whatifupgradefailed|Disk format conversion failed in what if upgrade check.EventExMark ssd(s) as capacity flash.infoMark {disks} as capacity flash.com.vmware.vsan.diskmgmt.events.tagcapacityflash|Mark {disks} as capacity flash.EventExMark ssd as hdd.infoMark ssd {disk} as hdd.com.vmware.vsan.diskmgmt.events.taghdd|Mark ssd {disk} as hdd.EventExMark remote disk as local disk.infoMark remote disk {disk} as local disk.com.vmware.vsan.diskmgmt.events.taglocal|Mark remote disk {disk} as local disk.EventExMark hdd as ssd.infoMark hdd {disk} as ssd.com.vmware.vsan.diskmgmt.events.tagssd|Mark hdd {disk} as ssd.EventExRemove capacity flash mark from ssd(s).infoRemove capacity flash mark from {disks}.com.vmware.vsan.diskmgmt.events.untagcapacityflash|Remove capacity flash mark from 
{disks}.EventExAdvisorvSAN Health Test 'Advisor' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.advisor.event|vSAN Health Test 'Advisor' changed from '{prestatus}' to '{curstatus}'EventExAudit CEIP Collected DatavSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.auditceip.event|vSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'EventExCNS Critical Alert - Patch available with important fixesvSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.cnspatchalert.event|vSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.controllercacheconfig.event|vSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExCoredump partition size checkvSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.coredumpartitionsize.event|vSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'EventExUpgrade vSphere CSI driver with cautionvSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.csidriver.event|vSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.diskusage.event|vSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dualencryption.event|vSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExProper vSAN network traffic shaping policy is configuredvSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dvsportspeedlimit.event|vSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'EventExEnd of general support for lower vSphere versionvSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.eoscheck.event|vSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'EventExImportant patch available for vSAN issuevSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.fsvlcmpatchalert.event|vSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based 
controllervSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.h730.event|vSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExHPE SAS Solid State DrivevSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.hpesasssd.event|vSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration check for large scale clustervSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.largescalecluster.event|vSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lavenderalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExvSAN critical alert regarding a potential data inconsistencyvSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilacdeltacomponenttest.event|vSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Critical Alert - Patch available for critical vSAN issuevSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilypatchalert.event|vSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.marigoldalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmode.event|vSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmodeh730.event|vSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvSAN storage policy compliance up-to-datevSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.objspbm.event|vSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN online health test 'vSAN Hosts with new patch 
available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.patchalert.event|vSAN online health test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.pnicconsistent.event|vSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExVM storage policy is not-recommendedvSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.policyupdate.event|vSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'EventExMaximum host number in vSAN over RDMAvSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.rdmanodesalert.event|vSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExESXi system logs stored outside vSAN datastorevSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.scratchconfig.event|vSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.smalldiskstest.event|vSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExThick-provisioned VMs on vSANvSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.thickprovision.event|vSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExFix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabledvSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.unmaptest.event|vSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'EventExvSAN v1 disk in usevSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.v1diskcheck.event|vSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'EventExvCenter Server up to datevSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vcuptodate.event|vSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'EventExMultiple VMs share the same vSAN home namespacevSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vmns.event|vSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Support InsightvSAN Support Insight's 
status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanenablesupportinsight.event|vSAN Support Insight's status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpefwtest.event|vSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExCustomer advisory for HPE Smart ArrayvSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpesmartarraytest.event|vSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'EventExvSAN management service resource checkvSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanmgmtresource.event|vSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.witnesshw.event|vSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Advanced Configuration Check for Urgent vSAN ESA PatchvSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.zdomadvcfgenabled.event|vSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all of the hosts in a vSAN cluster have consistent advanced configuration options.vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.advcfgsync.event|vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN host daemon liveness.vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clomdliveness.event|vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSphere cluster members match vSAN cluster members.vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clustermembership.event|vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster configuration consistencyvSAN Health Test 'vSAN cluster configuration consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.consistentconfig.event|vSAN Health Test 'vSAN configuration consistency' status changed from '{prestatus}' to '{curstatus}'EventExESA prescriptive disk claimvSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.ddsconfig.event|vSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to 
'{curstatus}'EventExvSAN disk group layoutvSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.dglayout.event|vSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN disk balance statusvSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.diskbalance.event|vSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Conversion HealthvSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.esaconversionhealth.event|vSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN extended configuration in syncvSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.extendedconfig.event|vSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Managed disk claimvSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.hcldiskclaimcheck.event|vSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'EventExCheck host maintenance mode is in sync with vSAN node decommission state.vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.mmdecominsync.event|vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'EventExvSAN optimal datastore default policy configurationvSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.optimaldsdefaultpolicy.event|vSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN with RDMA supports up to 32 hosts.vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.rdmanodes.event|vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExResync operations throttlingvSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.resynclimit.event|vSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN Cluster time sync status among hosts and VCvSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.timedrift.event|vSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'EventExvSAN disk format statusvSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradelowerhosts.event|vSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'EventExSoftware version compatibilityvSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradesoftware.event|vSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to 
'{curstatus}'EventExVMware vCenter state is authoritativevSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vcauthoritative.event|vSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Direct homogeneous disk claimingvSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsandconfigconsistency.event|vSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'EventExvSphere Lifecycle Manager (vLCM) configurationvSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsanesavlcmcheck.event|vSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks the object format status of all vSAN objects.vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objectformat.event|vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health status of all vSAN objects.vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objecthealth.event|vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'EventExpNic RX/TX PauseRX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.pausecount.event|RX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX CRC ErrorRX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxcrcerr.event|RX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Generic ErrorRX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxerr.event|RX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX FIFO ErrorRX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxfifoerr.event|RX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Missed ErrorRX missed error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxmisserr.event|RX missed error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Buffer Overflow ErrorRX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxoverr.event|RX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Carrier ErrorTX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txcarerr.event|TX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Generic ErrorTX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txerr.event|TX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExRDT Checksum Mismatch ErrorRDT Checksum Mismatch count reaches {value}. (warning threshold: {yellowThreshold}, critical threshold: {redThreshold})vsan.health.test.diagnostics.rdt.checksummismatchcount.event|RDT Checksum Mismatch count reaches {value}. 
(warning threshold: {yellowThreshold}, critical threshold: {redThreshold})EventExData-in-transit encryption configuration checkvSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.ditencryption.ditconfig.event|vSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.dualencryption.event|vSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExChecks if CPU AES-NI is disabled on hostsvSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.hostcpuaesni.event|vSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VMware vCenter or any hosts are not connected to Key Management ServersvSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.kmsconnection.event|vSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Prescriptive Disk Claim ConfigurationsHost {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. Please add host with relevant disks or update disk claim specsvsan.health.test.esaprescriptivediskclaim.noeligibledisk|Host {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. 
Please add host with relevant disks or update disk claim specsEventExCheck vSAN File Service host file server agent vm state.vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.fileserver.event|vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'EventExInfrastructure HealthvSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.host.event|vSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'EventExFile Share HealthvSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.sharehealth.event|vSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'EventExVDS compliance check for hyperconverged cluster configurationvSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.dvshciconfig.event|vSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExHost compliance check for hyperconverged cluster configurationvSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.hosthciconfig.event|vSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN health alarm enablement statusvSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hciskip.event|vSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'EventExvSAN HCL DB Auto UpdatevSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.autohclupdate.event|vSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllercacheconfig.event|vSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN disk group type (All-Flash or Hybrid) is VMware certified for the used SCSI controllervSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdiskmode.event|vSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller driver is VMware certified.vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdriver.event|vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller firmware is VMware certified.vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerfirmware.event|vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller is compatible with the VMWARE Compatibility GuidevSAN Health Test 'SCSI 
controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controlleronhcl.event|vSAN Health Test 'SCSI controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExDisplays information about whether there is any driver supported for a given controller in the release of ESXi installed.vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerreleasesupport.event|vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based controllervSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.h730.event|vSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks the age of the VMware Hardware Compatibility Guid database.vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hcldbuptodate.event|vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any host failed to return its hardware information.vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hclhostbadstate.event|vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'EventExHost physical memory compliance checkvSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hostmemcheck.event|vSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmode.event|vSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmodeh730.event|vSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvsan.health.test.hcl.nvmeonhcl.event|EventExNetwork Interface Cards (NICs) used in vSAN hosts must meet certain requirements. These NIC requirements assume that the packet loss is not more than 0.0001% in the hyper-converged environments. It's recommended to use NIC which link speed can meet the minimum requirement. 
Otherwise, there can be a drastic impact on the vSAN performance.vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.pniclinkspeed.event|vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NICs used in this RDMA enabled vSAN cluster are certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmaniciscertified.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NIC's driver and firmware combination is certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportdriverfirmware.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the current ESXi release is certified for the RDMA NIC by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportesxrelease.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.vsanhpefwtest.event|vSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExHome objectvSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsihomeobjectstatustest.event|vSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExLUN runtime healthvSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsilunruntimetest.event|vSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExNetwork configurationvSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicenetworktest.event|vSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExService runtime statusvSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicerunningtest.event|vSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster claimed capacity is more than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.error.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. 
Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExvSAN cluster claimed capacity is less than the entitled capacity.vSAN cluster claimed capacity is less than the entitled capacity.vsan.health.test.licensecapacityusage.green.event|vSAN cluster claimed capacity is less than the entitled capacity.EventExvSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.warn.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExChecks the vSAN cluster storage space utilizationvSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.diskspace.event|vSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN component limits, disk space and RC reservations assuming one host failure.vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.limit1hf.event|vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'EventExChecks the component utilization for the vSAN cluster and each host in the cluster.vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.nodecomponentlimit.event|vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN cluster read cache utilizationvSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.rcreservation.event|vSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN cluster is partitioned due to a network issue.vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.clusterpartition.event|vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExCheck if there are duplicate IP addresses configured for vmknic interfaces.vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.duplicateip.event|vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a connectivity check for vSAN Max Client Network by checking the heartbeats from each host to all other hosts in server clustervSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.externalconnectivity.event|vSAN Health Test 'vSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if API calls from VC to a host are failing while the host is in vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostconnectivity.event|vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VC has an active 
connection to all hosts in the cluster.vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostdisconnected.event|vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a network latency check via ping small packet size ping test from all hosts to all other hostsvSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostlatencycheck.event|vSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN API calls from each host can reach to other peer hosts in the clustervSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.interhostconnectivity.event|vSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExCheck if LACP is working properly.vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.lacpstatus.event|vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hostsvSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.largeping.event|vSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster receive the multicast heartbeat of the vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastdeepdive.event|vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have matching IP multicast configuration.vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsettings.event|vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any of the hosts in the vSAN cluster have IP multicast connectivity issue.vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsuspected.event|vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'EventExCheck if any host in remote vSAN client or server cluster has more than one vSAN vmknic configured.vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multiplevsanvmknic.event|vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.pnicconsistent.event|vSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExCheck if TSO is enabled for pNIC.vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.network.pnictso.event|vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to '{curstatus}'EventExCheck if the vSAN RDMA enabled physical NIC is configured for lossless traffic.vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.rdmaconfig.event|vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all hosts in client cluster have been in a single partition with all hosts in server vSAN cluster.vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.serverpartition.event|vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hostsvSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.smallping.event|vSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpinglarge.event|vSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpingsmall.event|vSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExCheck if all hosts in server cluster have a dedicated vSAN external vmknic configured.vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanexternalvmknic.event|vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have a configured vmknic with vSAN traffic enabled.vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanvmknic.event|vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'EventExCheck all remote VMware vCenter network connectivity.vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.xvcconnectivity.event|vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'EventExvSAN overall health statusvSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.overallsummary.event|vSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service data collectionvSAN Health Test 'Checks the statistics collection of the vSAN performance service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.collection.event|vSAN 
Health Test 'Checks statistics collection of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service network diagnostic mode statusvSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.diagmode.event|vSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'EventExNot all hosts are contributing stats to vSAN Performance ServicevSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.hostsmissing.event|vSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats primary electionvSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.masterexist.event|vSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service statusvSAN Health Test 'Checks status of vSAN Performance Service changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.perfsvcstatus.event|vSAN Health Test 'Checks status of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats DB object conflictsvSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.renameddirs.event|vSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the vSAN performance service statistics database objectvSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.statsdb.event|vSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service verbose mode statusvSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.verbosemode.event|vSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN has encountered an integrity issue of the metadata of a component on this disk.vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.componentmetadata.event|vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.diskusage.event|vSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on vital memory pools, needed for the correct operation of physical disks.vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.lsomheap.event|vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on the vital memory pool, needed for the operation of physical disks.vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.physicaldisks.lsomslab.event|vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to '{curstatus}'EventExStorage Vendor Reported Drive HealthvSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.phmhealth.event|vSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the free space on physical disks in the vSAN cluster.vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcapacity.event|vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the number of components on the physical disk reaches the maximum limitationvSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcomplimithealth.event|vSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN is using the disk with reduced performance.vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcongestion.event|vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'EventExChecks if there is an issue retrieving the physical disk information from hosts in the vSAN cluster.vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskhostissues.event|vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the physical disks for all hosts in the vSAN cluster.vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskoverall.event|vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.smalldiskstest.event|vSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExCluster Name is not found in ssd endurance alarmClusters - {clustername} is/are not found in alarm - vSAN Health Alarm for disk endurance check.vsan.health.test.ssdendurance.clusternotfound.event|Clusters - {clustername} is/are not found. Please edit alarm - 'vSAN Health Alarm for disk endurance check' and correct the cluster name.EventExThe stretched cluster contains multiple unicast agents. 
This means multiple unicast agents were set on non-witness hostsvSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithmultipleunicastagents.event|vSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain a valid witness hostvSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithoutonewitnesshost.event|vSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain two valid fault domainsvSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithouttwodatafaultdomains.event|vSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'EventExHost should setup unicast agent so that they are able to communicate with the witness nodevSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostunicastagentunset.event|vSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'EventExHost with an invalid unicast agentvsan.health.test.stretchedcluster.hostwithinvalidunicastagent.event|vSAN Health Test 'Invalid unicast agent' status changed from '{prestatus}' to '{curstatus}'EventExCluster contains hosts that do not support stretched clustervSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostwithnostretchedclustersupport.event|vSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'EventExUnexpected number of data hosts in shared witness cluster. 
This means more than 2 data hosts in one shared witness cluster.vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnessclusterdatahostnumexceed.event|vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'EventExPer cluster component limit scaled down for shared witness host because of insufficient memoryvSAN Health Test 'Shared witness per cluster component limit scaled down' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnesscomponentlimitscaleddown.event|vSAN Health Test 'Shared witness per-cluster component limit inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExChecks the network latency between the two fault domains and the witness hostvSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.siteconnectivity.event|vSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'EventExWitness node is managed by vSphere Lifecycle ManagervSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.vlcmwitnessconfig.event|vSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'EventExThe following witness node resides in one of the data fault domainsvSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessfaultdomaininvalid.event|vSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster incorporates a witness host inside VMware vCenter clustervSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessinsidevccluster.event|vSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'EventExThe following (witness) hosts have invalid preferred fault domainsvSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomaininvalid.event|vSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'EventExThe preferred fault domain does not exist in the cluster for the following witness hostvSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomainnotexist.event|vSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevsan.health.test.stretchedcluster.witnessupgissue.event|vSAN Health Test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExWitness appliance upgrade to vSphere 7.0 or higher with cautionvsan.health.test.stretchedcluster.witnessupgrade.event|vSAN Health Test 'Witness appliance upgrade to vSphere 7.0 or higher with caution' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster contains witness hosts with no disk claimedvSAN Health Test 'No disk 
claimed on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesswithnodiskmapping.event|vSAN Health Test 'No disk claimed on witness host' status changed from '{prestatus}' to '{curstatus}'EventExVMware Certified vSAN HardwarevSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vsanhardwarecert.event|vSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.patchalert.event|vSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExvSAN release catalog up-to-datevSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.releasecataloguptodate.event|vSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'EventExCheck configuration issues for vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumconfig.event|vSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'EventExESXi build recommended by vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumrecommendation.event|vSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'EventExThis object has the risk of PSOD issue due to improper DOM object flag leakThis object has the risk of PSOD issue due to improper DOM object flag leakvsan.health.test.zdom.leak|Objects {1} have the risk of PSOD issue due to improper DOM object flag leak. Please refer KB https://kb.vmware.com/s/article/89564VirtualMachineFaultToleranceStateFault Tolerance has not been configured for this virtual machinenotConfiguredFault Tolerance is disableddisabledFault Tolerance is enabledenabledFault Tolerant Secondary VM is not runningneedSecondaryFault Tolerance is startingstartingFault Tolerance is runningrunning
12857:20241101:185543.545 End of vmware_service_get_evt_severity() evt_severities:1989
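The 1989 entries counted here come from the EventEx/ExtendedEvent catalog dumped above; the angle-bracket XML was lost when the log was captured, so the descriptions run together. A minimal sketch, assuming the trace has been saved to a local file named vmware_collector.trace (a hypothetical name), of how the dotted event-type identifiers such as vsan.health.test.hcl.h730.event can be pulled back out of that run-together text for review. Zabbix itself parses the original SOAP XML in C, so this is only a reading aid, not the collector's logic.

```python
import re

# Reading aid only: list the dotted event-type ids (e.g. vsan.health.test.hcl.h730.event)
# that appear in the run-together EventEx catalog above. "vmware_collector.trace" is a
# hypothetical file holding this trace; Zabbix itself parses the SOAP XML, not this text.
with open("vmware_collector.trace", encoding="utf-8", errors="replace") as f:
    catalog_text = f.read()

# an id is taken to be at least three dotted lowercase/digit segments followed by '|'
event_ids = sorted(set(re.findall(r"[a-z0-9]+(?:\.[a-z0-9]+){2,}(?=\|)", catalog_text)))
print(f"distinct event type ids: {len(event_ids)}")
print("\n".join(event_ids[:10]))
```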
12857:20241101:185543.546 In vmware_service_get_hv_ds_dc_dvs_list()
12857:20241101:185543.551 vmware_service_get_hv_ds_dc_dvs_list() SOAP response:
group-d1triggeredAlarmState365.1group-d1alarm-365yellowfalse39701datacenter-3nameNTK-corptriggeredAlarmStategroup-n7triggeredAlarmStategroup-h5triggeredAlarmStatedatastore-4041datastore-4050datastore-4046datastore-2007datastore-2006datastore-2005group-v4triggeredAlarmStategroup-n4029triggeredAlarmStategroup-v11triggeredAlarmStategroup-v4027triggeredAlarmStatedvs-21nameNTK-DSwitchuuid50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbgroup-v4056triggeredAlarmStatehost-4047host-4043host-4038
12857:20241101:185543.551 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_datacenters_list'
12857:20241101:185543.551 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_datacenters_list' found:0 total:0
12857:20241101:185543.552 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_hv_ds_dc_dvs_list'
12857:20241101:185543.552 In vmware_service_alarm_details_update() alarm:alarm-365
12857:20241101:185543.554 vmware_service_alarm_details_update() SOAP response:
alarm-365
    info.description: This alarm is fired when vSphere Health detects new issues in your environment. This alarm will be retriggered even if acknowledged when new issues are detected. Go to Monitor -> Health for a detailed description of the issues.
    info.enabled: true
    info.name: Skyline Health has detected issues in your vSphere environment
    info.systemName: Skyline Health has detected issues in your vSphere environment
12857:20241101:185543.554 End of vmware_service_alarm_details_update() index:0
12857:20241101:185543.555 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_hv_ds_dc_dvs_list' found:1 total:1
12857:20241101:185543.555 End of vmware_service_get_hv_ds_dc_dvs_list():SUCCEED found hv:3 ds:6 dc:1
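At this point the collector has discovered 3 hypervisors, 6 datastores and 1 datacenter, plus the NTK-DSwitch distributed switch, from a single PropertyCollector call. Below is a minimal cross-check sketch, assuming pyVmomi is installed and using placeholder host/credentials, that counts the same object types through container views; the Zabbix collector builds its own raw SOAP requests in C, so this is only an independent way to verify the inventory numbers.

```python
# Cross-check sketch (not Zabbix code): count HostSystem/Datastore/Datacenter/DVS
# objects the way the hv:3 ds:6 dc:1 summary above counts them. Hostname, user and
# password are placeholders; pyVmomi is assumed to be installed.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()  # lab shortcut; verify certificates in production
si = SmartConnect(host="vcenter.example.local", user="monitor", pwd="secret", sslContext=ctx)
try:
    content = si.RetrieveContent()
    for label, vim_type in (("hv", vim.HostSystem), ("ds", vim.Datastore),
                            ("dc", vim.Datacenter), ("dvs", vim.DistributedVirtualSwitch)):
        view = content.viewManager.CreateContainerView(content.rootFolder, [vim_type], True)
        print(f"{label}: {len(view.view)}")
finally:
    Disconnect(si)
```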
12857:20241101:185543.555 In vmware_service_create_datastore() datastore:'datastore-4041'
12857:20241101:185543.557 vmware_service_create_datastore() SOAP response:
datastore-4041infoLocal_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/34100425523270368744177664703687441776642024-10-24T08:57:27.792Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0334252364185616396313666.8267155e10-d4545cb2-5b01-3cecef012e78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682111______8falsetruesummarydatastore-4041Local_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185543.557 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185543.557 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185543.557 End of vmware_service_create_datastore()
12857:20241101:185543.557 In vmware_service_create_datastore() datastore:'datastore-4050'
12857:20241101:185543.559 vmware_service_create_datastore() SOAP response:
datastore-4050infoLocal_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/3410042552327036874417766468169720922112703687441776642024-11-01T13:06:44.907432Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0134252364185616396313666.8267155cc9-bea5e318-19fd-ac1f6bb14c78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______8falsetruetruesummarydatastore-4050Local_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185543.559 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185543.559 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185543.559 End of vmware_service_create_datastore()
12857:20241101:185543.559 In vmware_service_create_datastore() datastore:'datastore-4046'
12857:20241101:185543.561 vmware_service_create_datastore() SOAP response:
datastore-4046infoLocal_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/34100425523270368744177664703687441776642024-11-01T11:53:36.643999Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0234252364185616396313666.8267155ba7-5e9d16d6-0733-3cecef02b6e0t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682100______8falsetruesummarydatastore-4046Local_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185543.561 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185543.561 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185543.561 End of vmware_service_create_datastore()
12857:20241101:185543.561 In vmware_service_create_datastore() datastore:'datastore-2007'
12857:20241101:185543.563 vmware_service_create_datastore() SOAP response:
datastore-2007info3PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/5031560478727036874417766468169720922112703687441776642024-11-01T13:06:44.904493Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID53153660247654416396313666.826704dec9-75e6c68a-c19e-9440c9831520naa.60002ac00000000000000054000228a31falsefalsefalsesummarydatastore-20073PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/53660247654450315604787242237661184truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185543.564 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185543.564 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185543.564 End of vmware_service_create_datastore()
12857:20241101:185543.564 In vmware_service_create_datastore() datastore:'datastore-2006'
12857:20241101:185543.565 vmware_service_create_datastore() SOAP response:
datastore-2006info3PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/1592765972487036874417766468169720922112703687441776642024-11-01T13:06:44.898963Z7036874417766468169720922112VMFS3PAR_KARTOHA_SAS_NTK_ID53516079283814416396313666.826703d63f-3516ce66-4bee-9440c9831520naa.60002ac0000000000000042f000219831falsefalsefalsesummarydatastore-20063PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/160792838144159276597248truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185543.566 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185543.566 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185543.566 End of vmware_service_create_datastore()
12857:20241101:185543.566 In vmware_service_create_datastore() datastore:'datastore-2005'
12857:20241101:185543.568 vmware_service_create_datastore() SOAP response:
datastore-2005info3PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/8543356846087036874417766468169720922112703687441776642024-11-01T18:34:30.288888Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID530_mgmt107347338854416396313666.826703d517-82086a06-cec0-9440c9831520naa.60002ac0000000000000004a000228a31falsefalsefalsesummarydatastore-20053PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/10734733885448543356846080truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185543.568 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185543.568 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185543.568 End of vmware_service_create_datastore()
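Each vmware_service_create_datastore() call above caches one datastore's info/summary block (capacity, free space, accessibility, VMFS type, maintenance mode). A small worked sketch of the usage figures typically derived from such a summary follows; the two byte values are read from the datastore-4041 response, but the tag-stripped dump runs the digits together, so the split is inferred and the numbers should be treated as illustrative.

```python
# Worked example: usage derived from a datastore summary like the ones cached above.
# capacity/freeSpace are in bytes; the values below are inferred from the datastore-4041
# dump (field boundaries in the tag-stripped text are not certain).
capacity_bytes = 342_523_641_856   # summary.capacity
free_bytes = 341_004_255_232       # summary.freeSpace

used_bytes = capacity_bytes - free_bytes
used_pct = 100.0 * used_bytes / capacity_bytes
print(f"used: {used_bytes / 2**30:.2f} GiB of {capacity_bytes / 2**30:.2f} GiB ({used_pct:.2f}%)")
```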
12857:20241101:185543.568 In vmware_service_get_clusters_and_resourcepools()
12857:20241101:185543.571 vmware_service_get_clusters_and_resourcepools() SOAP response:
domain-c1002    name: NTK-corp    triggeredAlarmState: (empty)
resgroup-1003   name: Resources   parent: domain-c1002    resourcePool: resgroup-4001, resgroup-4026
resgroup-4026   name: NTK         parent: resgroup-1003   resourcePool: (empty)
resgroup-4001   name: mgmt        parent: resgroup-1003   resourcePool: (empty)
12857:20241101:185543.572 In vmware_service_process_cluster_data()
12857:20241101:185543.572 In vmware_service_get_alarms_data(), func_parent:'vmware_service_process_cluster_data'
12857:20241101:185543.572 End of vmware_service_get_alarms_data() func_parent:'vmware_service_process_cluster_data' found:0 total:1
12857:20241101:185543.572 End of vmware_service_process_cluster_data():SUCCEED cl:1 rp:3
12857:20241101:185543.572 In vmware_service_get_cluster_state() clusterid:'domain-c1002'
12857:20241101:185543.575 vmware_service_get_cluster_state() SOAP response:
domain-c1002    datastore: datastore-2005, datastore-2006, datastore-2007, datastore-4041, datastore-4046, datastore-4050    summary.overallStatus: green
12857:20241101:185543.575 End of vmware_service_get_cluster_state():SUCCEED
12857:20241101:185543.575 End of vmware_service_get_clusters_and_resourcepools():SUCCEED found cl:1 rp:2
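The response lists three resource pools (the hidden root pool 'Resources' plus 'NTK' and 'mgmt'), vmware_service_process_cluster_data() reports rp:3, and the final count is rp:2, so the root pool is apparently dropped from the result; that interpretation is inferred from the counts, not stated in the log itself. A small sketch reconstructing the pool hierarchy recovered from the response:

```python
# Resource-pool hierarchy as recovered from the SOAP response above (cluster
# domain-c1002 with its implicit root pool and two child pools).
pools = {
    "resgroup-1003": {"name": "Resources", "parent": "domain-c1002"},
    "resgroup-4026": {"name": "NTK", "parent": "resgroup-1003"},
    "resgroup-4001": {"name": "mgmt", "parent": "resgroup-1003"},
}

def pool_path(pool_id: str) -> str:
    node = pools.get(pool_id)
    if node is None:          # reached the owning cluster object
        return pool_id
    return f"{pool_path(node['parent'])}/{node['name']}"

for pool_id in pools:
    print(f"{pool_id}: {pool_path(pool_id)}")
```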
12857:20241101:185543.575 In vmware_service_init_hv() hvid:'host-4047'
12857:20241101:185543.575 In vmware_service_get_hv_data() guesthvid:'host-4047'
12857:20241101:185543.575 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4047false
12857:20241101:185543.592 vmware_service_get_hv_data() SOAP response:
host-4047config.network.dnsConfigfalsentk-esxi-01m1.ntk-corp.ru10.50.242.78m1.ntk-corp.ruconfig.network.ipRouteConfig.defaultGateway10.50.242.1config.network.pnickey-vim.host.PhysicalNic-vmnic0vmnic00000:1c:00.0i40en1000truefalsetrueac:1f:6b:b1:4c:783ac:1f:6b:b1:4c:7800falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic1vmnic10000:1c:00.1i40en1000truefalsetrueac:1f:6b:b1:4c:793ac:1f:6b:b1:4c:7900falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic2vmnic20000:af:00.0icen25000true25000truefalsefalse50:7c:6f:20:55:a8350:7c:6f:20:55:a800falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic3vmnic30000:af:00.1icen25000true25000truefalsefalse50:7c:6f:20:55:a9350:7c:6f:20:55:a900falsefalsefalsefalsefalsetruetrueconfig.storageDevice.scsiTopologykey-vim.host.ScsiTopology.Interface-vmhba0key-vim.host.BlockHba-vmhba0key-vim.host.ScsiTopology.Interface-vmhba1key-vim.host.BlockHba-vmhba1key-vim.host.ScsiTopology.Target-vmhba1:0:00key-vim.host.ScsiTopology.Lun-0100000000533435504e43305236383139353420202020202053414d53554e0key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.ScsiTopology.Interface-vmhba2key-vim.host.FibreChannelHba-vmhba2key-vim.host.ScsiTopology.Target-vmhba2:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:22key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.ScsiTopology.Target-vmhba2:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371key-vim.host.ScsiTopology.Interface-vmhba3key-vim.host.FibreChannelHba-vmhba3key-vim.host.ScsiTopology.Target-vmhba3:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:22key-vim.hos
t.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.ScsiTopology.Target-vmhba3:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243key-vim.host.ScsiTopology.Interface-vmhba64key-vim.host.FibreChannelHba-vmhba64key-vim.host.ScsiTopology.Interface-vmhba65key-vim.host.FibreChannelHba-vmhba65config.virtualNicManagerInfo.netConfigfaultToleranceLoggingtruevmk0faultToleranceLogging.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackmanagementtruevmk0management.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackmanagement.key-vim.host.VirtualNic-vmk0nvmeRdmatruevmk0nvmeRdma.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStacknvmeTcptruevmk0nvmeTcp.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackptpfalsevmk0ptp.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereBackupNFCtruevmk0vSphereBackupNFC.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereProvisioningtruevmk0vSphereProvisioning.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereReplicationtruevmk0vSphereReplication.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereReplicationNFCtruevmk0vSphereReplicationNFC.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 
dbdvportgroup-23017870991381500truedefaultTcpipStackvmotiontruevmk0vmotion.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvmotion.key-vim.host.VirtualNic-vmk0vsantruevmk0vsan.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvsanWitnesstruevmk0vsanWitness.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackdatastoredatastore-2005datastore-2006datastore-2007datastore-4050overallStatusgreenparentdomain-c1002runtime.connectionStateconnectedruntime.healthSystemRuntime.hardwareStatusInfoMemory 0.32.2.178Physical element is functioning as expectedGreenMemory 0.32.6.182Physical element is functioning as expectedGreenMemory 0.32.26.218Physical element is functioning as expectedGreenMemory 0.8.39.55Physical element is functioning as expectedGreenMemory 0.8.41.57Physical element is functioning as expectedGreenMemory 0.8.40.56Physical element is functioning as expectedGreenMemory 0.32.24.216Physical element is functioning as expectedGreenMemory 0.32.0.176Physical element is functioning as expectedGreenMemory 0.32.20.212Physical element is functioning as expectedGreenMemory 0.32.22.214Physical element is functioning as expectedGreenMemory 0.32.18.210Physical element is functioning as expectedGreenMemory 0.8.38.54Physical element is functioning as expectedGreenMemory 0.32.8.184Physical element is functioning as expectedGreenMemory 0.32.16.208Physical element is functioning as expectedGreenProc 0.3.1.1Physical element is functioning as expectedGreenProc 0.3.2.2Physical element is functioning as expectedGreenProc 0.3.21.53Physical element is functioning as expectedGreenProc 0.3.20.52Physical element is functioning as expectedGreenruntime.inMaintenanceModefalsesummary.config.namentk-esxi-01.m1.ntk-corp.rusummary.config.product.fullNameVMware ESXi 8.0.3 build-24280767summary.config.product.version8.0.3summary.hardware.cpuMhz2800summary.hardware.cpuModelIntel(R) Xeon(R) Gold 6242 CPU @ 2.80GHzsummary.hardware.memorySize686832898048summary.hardware.modelSuper Serversummary.hardware.numCpuCores32summary.hardware.numCpuThreads64summary.hardware.uuid00000000-0000-0000-0000-ac1f6bb14c78summary.hardware.vendorSupermicrosummary.managementServerIp10.50.242.10summary.quickStats.overallCpuUsage198summary.quickStats.overallMemoryUsage16598summary.quickStats.uptime691070summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo[Device] Add-in Card 16 AOC_NIC TempThe sensor is operating under normal conditionsGreen6300-2degrees CnonetemperatureSystem Chassis 0 Chassis IntruThe sensor is operating under normal conditionsGreen00unspecifiednoneotherSystem Board 46 1.05V PCHThe sensor is operating under normal conditionsGreen107-2VoltsnonevoltageSystem Board 45 PVNN PCHThe sensor is operating under normal conditionsGreen103-2VoltsnonevoltageSystem Board 44 1.8V PCHThe sensor is operating under normal conditionsGreen184-2VoltsnonevoltageSystem Board 43 3.3VSBThe sensor is operating under normal conditionsGreen341-2VoltsnonevoltageSystem Board 42 5VSBThe sensor is operating under normal conditionsGreen516-2VoltsnonevoltageMemory Module 41 VDimmP2DEFThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 40 
VDimmP2ABCThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 39 VDimmP1DEFThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 38 VDimmP1ABCThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageProcessor 21 Vcpu2The sensor is operating under normal conditionsGreen183-2VoltsnonevoltageProcessor 20 Vcpu1The sensor is operating under normal conditionsGreen186-2VoltsnonevoltageBattery 0 VBATThe sensor is operating under normal conditionsGreen325160unspecifiednonebatterySystem Board 34 3.3VCCThe sensor is operating under normal conditionsGreen340-2VoltsnonevoltageSystem Board 33 5VCCThe sensor is operating under normal conditionsGreen510-2VoltsnonevoltageSystem Board 32 12VThe sensor is operating under normal conditionsGreen1170-2VoltsnonevoltageFan Device 6 FAN6The sensor is operating under normal conditionsGreen690000-2RPMnonefanFan Device 5 FAN5The sensor is operating under normal conditionsGreen680000-2RPMnonefanFan Device 4 FAN4The sensor is operating under normal conditionsGreen680000-2RPMnonefanFan Device 3 FAN3The sensor is operating under normal conditionsGreen650000-2RPMnonefanFan Device 1 FAN1The sensor is operating under normal conditionsGreen660000-2RPMnonefanMemory Device 26 P2-DIMMF1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 24 P2-DIMME1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 22 P2-DIMMD1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 20 P2-DIMMC1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 18 P2-DIMMB1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 16 P2-DIMMA1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 8 P1-DIMME1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 6 P1-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 2 P1-DIMMB1 TempThe sensor is operating under normal conditionsGreen3100-2degrees CnonetemperatureMemory Device 0 P1-DIMMA1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureSystem Board 21 VRMP2DEF TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 20 VRMP2ABC TempThe sensor is operating under normal conditionsGreen4800-2degrees CnonetemperatureSystem Board 19 VRMP1DEF TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 18 VRMP1ABC TempThe sensor is operating under normal conditionsGreen4300-2degrees CnonetemperatureSystem Board 17 VRMCpu2 TempThe sensor is operating under normal conditionsGreen4400-2degrees CnonetemperatureSystem Board 16 VRMCpu1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 3 Peripheral TempThe sensor is operating under normal conditionsGreen4200-2degrees CnonetemperatureSystem Board 2 System TempThe sensor is operating under normal conditionsGreen2900-2degrees CnonetemperatureSystem Board 1 PCH TempThe sensor is operating under normal conditionsGreen5100-2degrees CnonetemperatureProcessor 2 CPU2 TempThe sensor is operating under normal conditionsGreen5800-2degrees CnonetemperatureProcessor 1 CPU1 TempThe sensor is operating under normal 
conditionsGreen5300-2degrees CnonetemperaturePower Supply 87 PS2 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowerPower Supply 88 PS1 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowertriggeredAlarmStatevmvm-4060
12857:20241101:185543.593 End of vmware_service_get_hv_data():SUCCEED
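The host-4047 response above carries the numericSensorInfo readings as an integer currentReading plus a decimal unitModifier; the actual value is currentReading * 10^unitModifier. A short sketch decoding a few of the readings visible in the dump (the tuples are transcribed from the response, with field boundaries inferred from the tag-stripped text):

```python
# Decode HostNumericSensorInfo readings like those in the host-4047 dump above.
# Real value = currentReading * 10 ** unitModifier; baseUnits gives the unit string.
sensors = [
    ("Processor 1 CPU1 Temp", 5300, -2, "degrees C"),
    ("Fan Device 6 FAN6", 690000, -2, "RPM"),
    ("System Board 32 12V", 1170, -2, "Volts"),
]

for name, current_reading, unit_modifier, base_unit in sensors:
    value = current_reading * 10 ** unit_modifier
    print(f"{name}: {value:g} {base_unit}")
```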
12857:20241101:185543.594 In vmware_service_get_hv_pnics_data()
12857:20241101:185543.594 End of vmware_service_get_hv_pnics_data() found:4
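The four physical NICs counted here correspond to the config.network.pnic block in the response above: two 1 Gbps i40en uplinks and two 25 Gbps icen uplinks. A tiny sketch of that recovered table (driver names and link speeds transcribed from the dump, boundaries inferred):

```python
# Physical NICs of host-4047 as recovered from the config.network.pnic dump above.
pnics = [
    {"device": "vmnic0", "driver": "i40en", "link_mbps": 1000},
    {"device": "vmnic1", "driver": "i40en", "link_mbps": 1000},
    {"device": "vmnic2", "driver": "icen", "link_mbps": 25000},
    {"device": "vmnic3", "driver": "icen", "link_mbps": 25000},
]

for nic in pnics:
    print(f"{nic['device']}: {nic['driver']}, {nic['link_mbps']} Mbps")
```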
12857:20241101:185543.594 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12857:20241101:185543.594 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12857:20241101:185543.595 In vmware_hv_ip_search()
12857:20241101:185543.595 End of vmware_hv_ip_search() ip:10.50.242.11
12857:20241101:185543.595 In vmware_hv_get_parent_data() id:'host-4047'
12857:20241101:185543.598 vmware_hv_get_parent_data() SOAP response:
domain-c1002    name: NTK-corp
datacenter-3    name: NTK-corp    triggeredAlarmState: (empty)
12857:20241101:185543.598 End of vmware_hv_get_parent_data():SUCCEED
12857:20241101:185543.598 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4047"
12857:20241101:185543.598 In vmware_service_hv_disks_get_info() hvid:'host-4047'
12857:20241101:185543.598 vmware_service_hv_disks_get_info() count of scsiLun:21
12857:20241101:185543.612 vmware_service_hv_disks_get_info() SOAP response:
host-4047config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].canonicalNamet10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].modelSAMSUNG MZ7LH480config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].queueDepth31config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].revision904Qconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].serialNumberS45PNC0R681954 config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].vendorATA config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].canonicalNamenaa.2ff70002ac021983config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].canonicalNamenaa.2ff70002ac0228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].canonicalNamenaa.60002ac0000000000000004a000228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].modelVV 
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].canonicalNamenaa.60002ac00000000000000054000228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].canonicalNamenaa.60002ac0000000000000042f00021983config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].vendor3PARdata
12857:20241101:185543.612 In vmware_service_hv_disks_parse_info()
12857:20241101:185543.613 End of vmware_service_hv_disks_parse_info() created:6
12857:20241101:185543.613 End of vmware_service_hv_disks_get_info():SUCCEED for 6(vsan:0) / 21
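The trace above requests the per-LUN properties (canonicalName, lunType, model, operationalState, queueDepth, revision, serialNumber, vendor) for 21 scsiLun keys and creates disk records only for the entries whose lunType is "disk": 6 of the 21 reported LUNs. A minimal sketch of that filtering step, assuming the response has already been parsed into per-key property dictionaries; the helper and the sample data are illustrative, not the Zabbix implementation:

    # Illustrative sketch: keep only scsiLun entries whose lunType is "disk",
    # mirroring the "created:6 ... for 6(vsan:0) / 21" counters in the trace above.
    def filter_disks(luns):
        """luns: dict mapping scsiLun key -> dict of string properties."""
        disks = {}
        for key, props in luns.items():
            if props.get('lunType') != 'disk':
                continue  # CD-ROMs, enclosures and other LUN types are skipped
            disks[key] = {
                'canonicalName': props.get('canonicalName'),
                'model': (props.get('model') or '').strip(),
                'vendor': (props.get('vendor') or '').strip(),
                'operationalState': props.get('operationalState'),
                'queueDepth': int(props.get('queueDepth', 0)),
            }
        return disks

    # Sample values copied from the response above; the keys are shortened placeholders.
    sample = {
        'ScsiDisk-sample-1': {
            'lunType': 'disk',
            'canonicalName': 'naa.2ff70002ac021983',
            'model': 'VV              ',
            'vendor': '3PARdata',
            'operationalState': 'ok',
            'queueDepth': '64',
        },
        'ScsiLun-sample-2': {'lunType': 'cdrom'},
    }
    print(len(filter_disks(sample)))  # -> 1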
12857:20241101:185543.613 In vmware_service_hv_get_multipath_data() hvid:'host-4047'
12857:20241101:185543.626 vmware_service_hv_get_multipath_data() SOAP response:
host-4047config.storageDevice.multipathInfokey-vim.host.MultipathInfo.LogicalUnit-0100000000533435504e43305236383139353420202020202053414d53554e0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.MultipathInfo.Path-vmhba1:C0:T0:L0vmhba1:C0:T0:L0activeactivetruekey-vim.host.BlockHba-vmhba1key-vim.host.MultipathInfo.LogicalUnit-0100000000533435504e43305236383139353420202020202053414d53554eFIXEDkey-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a3565620202020020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L530vmhba2:C0:T0:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L530vmhba2:C0:T3:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L530vmhba3:C0:T1:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L530vmhba3:C0:T0:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a3565620202020020013020060002ac00000000000000054000228a3565620202020key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L531vmhba2:C0:T0:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L531vmhba2:C0:T3:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L531vmhba3:C0:T1:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L531vmhba3:C0:T0:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f00021983565620202020020017020060002ac0000000000000042f00021983565620202020key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020key-vim.host.MultipathInfo.Path-vmhba3:C0:T3:L535vmhba3:C0:T3:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.MultipathInfo.Path-vmhba3:C0:T2:L535vmhba3:C0:T2:L535activeactivetruekey-vim.host.FibreChanne
lHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T2:L535vmhba2:C0:T2:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T1:L535vmhba2:C0:T1:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202002000001002ff70002ac0228a3565620202020key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L256vmhba2:C0:T0:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L256vmhba2:C0:T3:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L256vmhba3:C0:T1:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L256vmhba3:C0:T0:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202002000001002ff70002ac021983565620202020key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.MultipathInfo.Path-vmhba3:C0:T3:L256vmhba3:C0:T3:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202023781820897040858913456231250505898371key-vim.host.MultipathInfo.Path-vmhba3:C0:T2:L256vmhba3:C0:T2:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202023061244956661579553456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T2:L256vmhba2:C0:T2:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202025222972777799417633456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T1:L256vmhba2:C0:T1:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202024502396837420138273456231250505898371VMW_PSP_RRVMW_SATP_ALUA
12857:20241101:185543.626 End of vmware_service_hv_get_multipath_data():SUCCEED
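Each MultipathInfo.LogicalUnit in the response above carries its backing ScsiDisk key plus one entry per path (vmhba2:C0:T0:L530 and so on) with state "active". A hedged sketch of counting active paths per LUN, which is the kind of aggregation the later "check multipath at ds" lines rely on; the tuple layout below is an assumption made for illustration:

    # Illustrative sketch: count active paths per LUN from (lun_key, path_name, state) tuples.
    from collections import defaultdict

    def active_paths_per_lun(paths):
        """paths: iterable of (lun_key, path_name, path_state) tuples."""
        counts = defaultdict(int)
        for lun_key, _path_name, state in paths:
            if state == 'active':
                counts[lun_key] += 1
        return dict(counts)

    # Path names taken from the response above; the LUN key is a shortened placeholder.
    sample = [
        ('LogicalUnit-L530', 'vmhba2:C0:T0:L530', 'active'),
        ('LogicalUnit-L530', 'vmhba2:C0:T3:L530', 'active'),
        ('LogicalUnit-L530', 'vmhba3:C0:T1:L530', 'active'),
        ('LogicalUnit-L530', 'vmhba3:C0:T0:L530', 'active'),
    ]
    print(active_paths_per_lun(sample))  # -> {'LogicalUnit-L530': 4}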
12857:20241101:185543.626 In vmware_hv_ds_access_update() hv id:host-4047 hv dss:4 dss:6
12857:20241101:185543.631 vmware_hv_ds_access_update() SOAP response:
datastore-2005host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-2006host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-2007host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-4050host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtrue
12857:20241101:185543.631 In vmware_hv_ds_access_parse()
12857:20241101:185543.631 In vmware_hv_get_ds_access() for DS:datastore-2005
12857:20241101:185543.631 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185543.631 In vmware_hv_get_ds_access() for DS:datastore-2006
12857:20241101:185543.631 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185543.631 In vmware_hv_get_ds_access() for DS:datastore-2007
12857:20241101:185543.631 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185543.631 In vmware_hv_get_ds_access() for DS:datastore-4050
12857:20241101:185543.631 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185543.631 End of vmware_hv_ds_access_parse() parsed:4
12857:20241101:185543.631 End of vmware_hv_ds_access_update():SUCCEED for 4 / 4
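Every datastore ends up with mountinfo:15, which looks like a bitmask built from the mountInfo flags in the response (accessMode=readWrite, accessible=true, mounted=true). The flag values used below are an assumption, not taken from the Zabbix source, but they show how such a mask can reach 15:

    # Assumed flag values (illustrative, not from the Zabbix source):
    # mounted=1, accessible=2, readable=4, writable=8.
    DS_MOUNTED, DS_ACCESSIBLE, DS_READ, DS_WRITE = 1, 2, 4, 8

    def ds_access_mask(access_mode, accessible, mounted):
        mask = 0
        if mounted:
            mask |= DS_MOUNTED
        if accessible:
            mask |= DS_ACCESSIBLE
        if access_mode in ('readOnly', 'readWrite'):
            mask |= DS_READ
        if access_mode == 'readWrite':
            mask |= DS_WRITE
        return mask

    print(ds_access_mask('readWrite', True, True))  # -> 15, matching mountinfo:15 above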
12857:20241101:185543.631 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID530_mgmt"
12857:20241101:185543.631 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_KARTOHA_SAS_NTK_ID535"
12857:20241101:185543.631 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID531"
12857:20241101:185543.631 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"Local_ntk-m1-esxi-01"
12857:20241101:185543.631 In vmware_service_create_vm() vmid:'vm-4060'
12857:20241101:185543.631 In vmware_service_get_vm_data() vmid:'vm-4060'
12857:20241101:185543.636 vmware_service_get_vm_data() SOAP response:
vm-4060availableFieldconfig.hardware218192falsefalse200IDE 00201IDE 11300PS2 controller 00600700100PCI controller 00500120001000150004000400SIO controller 00600Keyboard3000700Pointing device; Devicefalseautodetect3001500Video card100040961falsefalseautomatic26214412000Device on the virtual machine PCI bus that provides support for the virtual machine communication interface10017-1079927627falsetrue1000LSI Logic16100302000truenoSharing715000AHCI321002401600016000ISO [3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.iso[3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.isodatastore-2005truetruefalseok1500002000104,857,600 KB[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdkdatastore-2005persistentfalsefalsefalsefalse6000C29d-45c9-aa9f-3d54-a04187209ee5fa74bccac7959c5d95abe5bffffffffefalsesharingNone100001048576001073741824001000normal-11000normal05-20004000DVSwitch: 50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 db50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-400628630340996truefalsetrueok1601007assigned00:50:56:b0:80:48true050normal-1trueconfig.instanceUuid50304101-157a-f442-58f4-550f05de33feconfig.uuid42306756-2f64-b85a-a4fe-276cbfa19cb5customValuedatastoredatastore-2005guest.disk/5146047283240655228928/boot20403732481785856000guest.guestFamilylinuxGuestguest.guestFullNameUbuntu Linux (64-bit)guest.guestStaterunningguest.hostNamezabb-ntk-proxyguest.ipAddress10.50.242.76guest.netntk_dmz_vlan_112910.50.242.76fe80::250:56ff:feb0:804800:50:56:b0:80:48true400010.50.242.7628preferredfe80::250:56ff:feb0:804864unknownguest.toolsRunningStatusguestToolsRunningguest.toolsVersion12389layoutEx0[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmxconfig23822382true1[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmsdsnapshotList00true2[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdkdiskDescriptor458458true3[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk-flat.vmdkdiskExtent107374182400107374182400true4[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.nvramnvram86848684true5[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk-13083273.vswpswap85899345928589934592true6[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/vmx-zabbix-proxy-ntk-4e9138c7e268bf86a750769daba1b562730af6a5e74aa2ad704e8731824ba105-1.vswpuwswap8598323285983232true2000232024-11-01T18:26:30.170712Zparentgroup-v11resourcePoolresgroup-4001summary.config.memorySizeMB8192summary.config.namezabbix-proxy-ntksummary.config.numCpu2summary.quickStats.balloonedMemory0summary.quickStats.compressedMemory0summary.quickStats.guestMemoryUsage163summary.quickStats.hostMemoryUsage8222summary.quickStats.overallCpuUsage28summary.quickStats.privateMemory8165summary.quickStats.sharedMemory3summary.quickStats.swappedMemory0summary.quickStats.uptimeSeconds75847summary.runtime.consolidationNeededfalsesummary.runtime.powerStatepoweredOnsummary.storage.committed116050111748summary.storage.uncommitted0summary.storage.unshared107374182858triggeredAlarmStategroup-v11nameDiscovered virtual machineparentgroup-v4group-v4namevmparentdatacenter-3
12857:20241101:185543.636 End of vmware_service_get_vm_data():SUCCEED
12857:20241101:185543.638 In vmware_service_get_vm_folder() folder id:'group-v11'
12857:20241101:185543.638 End of vmware_service_get_vm_folder(): vm folder:Discovered virtual machine
12857:20241101:185543.638 In vmware_vm_get_nic_devices()
12857:20241101:185543.638 End of vmware_vm_get_nic_devices() found:1
12857:20241101:185543.638 In vmware_vm_get_disk_devices()
12857:20241101:185543.638 End of vmware_vm_get_disk_devices() found:1
12857:20241101:185543.638 In vmware_vm_get_file_systems()
12857:20241101:185543.638 End of vmware_vm_get_file_systems() found:2
12857:20241101:185543.638 In vmware_vm_get_custom_attrs()
12857:20241101:185543.638 End of vmware_vm_get_custom_attrs() attributes:0
12857:20241101:185543.638 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_vm'
12857:20241101:185543.638 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_vm' found:0 total:1
12857:20241101:185543.638 End of vmware_service_create_vm():SUCCEED
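For vm-4060 the collector walks config.hardware and the guest properties and finds 1 NIC device, 1 disk device and 2 guest file systems ("/" and "/boot"). A rough sketch of that device classification; the device type names are common vSphere ones and are assumed here rather than read from this trace:

    # Illustrative sketch: classify virtual hardware devices into NICs and disks.
    NIC_TYPES = {'VirtualVmxnet3', 'VirtualE1000', 'VirtualE1000e'}  # assumed NIC type list

    def classify_devices(devices):
        """devices: iterable of dicts with a 'type' field."""
        nics = [d for d in devices if d['type'] in NIC_TYPES]
        disks = [d for d in devices if d['type'] == 'VirtualDisk']
        return nics, disks

    # MAC address and disk capacity taken from the VM data above; device types are assumed.
    sample = [
        {'type': 'VirtualVmxnet3', 'mac': '00:50:56:b0:80:48'},
        {'type': 'VirtualDisk', 'capacityKB': 104857600},
        {'type': 'VirtualCdrom'},
    ]
    nics, disks = classify_devices(sample)
    print(len(nics), len(disks))  # -> 1 1, matching found:1 / found:1 above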
12857:20241101:185543.638 End of vmware_service_init_hv():SUCCEED
12857:20241101:185543.639 In vmware_service_init_hv() hvid:'host-4043'
12857:20241101:185543.639 In vmware_service_get_hv_data() guesthvid:'host-4043'
12857:20241101:185543.639 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4043false
12857:20241101:185543.653 vmware_service_get_hv_data() SOAP response:
host-4043config.network.dnsConfigfalsentk-esxi-02m1.ntk-corp.ru10.50.242.78m1.ntk-corp.ruconfig.network.ipRouteConfig.defaultGateway10.50.242.1config.network.pnickey-vim.host.PhysicalNic-vmnic0vmnic00000:1c:00.0i40en1000truefalsetrue3c:ec:ef:02:b6:e033c:ec:ef:02:b6:e000falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic1vmnic10000:1c:00.1i40en1000truefalsetrue3c:ec:ef:02:b6:e133c:ec:ef:02:b6:e100falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic2vmnic20000:af:00.0icen25000true25000truefalsefalse50:7c:6f:3b:d8:c6350:7c:6f:3b:d8:c600falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic3vmnic30000:af:00.1icen25000true25000truefalsefalse50:7c:6f:3b:d8:c7350:7c:6f:3b:d8:c700falsefalsefalsefalsefalsetruetrueconfig.storageDevice.scsiTopologykey-vim.host.ScsiTopology.Interface-vmhba0key-vim.host.BlockHba-vmhba0key-vim.host.ScsiTopology.Interface-vmhba1key-vim.host.BlockHba-vmhba1key-vim.host.ScsiTopology.Target-vmhba1:0:00key-vim.host.ScsiTopology.Lun-0100000000533435504e43305236383231303020202020202053414d53554e0key-vim.host.ScsiDisk-0100000000533435504e43305236383231303020202020202053414d53554ekey-vim.host.ScsiTopology.Interface-vmhba2key-vim.host.FibreChannelHba-vmhba2key-vim.host.ScsiTopology.Target-vmhba2:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:22key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.ScsiTopology.Target-vmhba2:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371key-vim.host.ScsiTopology.Interface-vmhba3key-vim.host.FibreChannelHba-vmhba3key-vim.host.ScsiTopology.Target-vmhba3:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:22key-vim.hos
t.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243key-vim.host.ScsiTopology.Target-vmhba3:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.ScsiTopology.Interface-vmhba64key-vim.host.FibreChannelHba-vmhba64key-vim.host.ScsiTopology.Interface-vmhba65key-vim.host.FibreChannelHba-vmhba65config.virtualNicManagerInfo.netConfigfaultToleranceLoggingtruevmk0faultToleranceLogging.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackmanagementtruevmk0management.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackmanagement.key-vim.host.VirtualNic-vmk0nvmeRdmatruevmk0nvmeRdma.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStacknvmeTcptruevmk0nvmeTcp.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackptpfalsevmk0ptp.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereBackupNFCtruevmk0vSphereBackupNFC.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereProvisioningtruevmk0vSphereProvisioning.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereReplicationtruevmk0vSphereReplication.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereReplicationNFCtruevmk0vSphereReplicationNFC.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 
dbdvportgroup-23117871047851500truedefaultTcpipStackvmotiontruevmk0vmotion.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvmotion.key-vim.host.VirtualNic-vmk0vsantruevmk0vsan.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvsanWitnesstruevmk0vsanWitness.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackdatastoredatastore-2005datastore-2006datastore-2007datastore-4046overallStatusgreenparentdomain-c1002runtime.connectionStateconnectedruntime.healthSystemRuntime.hardwareStatusInfoMemory 0.32.2.178Physical element is functioning as expectedGreenMemory 0.32.6.182Physical element is functioning as expectedGreenMemory 0.32.26.218Physical element is functioning as expectedGreenMemory 0.8.39.55Physical element is functioning as expectedGreenMemory 0.8.41.57Physical element is functioning as expectedGreenMemory 0.8.40.56Physical element is functioning as expectedGreenMemory 0.32.24.216Physical element is functioning as expectedGreenMemory 0.32.0.176Physical element is functioning as expectedGreenMemory 0.32.20.212Physical element is functioning as expectedGreenMemory 0.32.22.214Physical element is functioning as expectedGreenMemory 0.32.18.210Physical element is functioning as expectedGreenMemory 0.8.38.54Physical element is functioning as expectedGreenMemory 0.32.8.184Physical element is functioning as expectedGreenMemory 0.32.16.208Physical element is functioning as expectedGreenProc 0.3.1.1Physical element is functioning as expectedGreenProc 0.3.2.2Physical element is functioning as expectedGreenProc 0.3.21.53Physical element is functioning as expectedGreenProc 0.3.20.52Physical element is functioning as expectedGreenruntime.inMaintenanceModefalsesummary.config.namentk-esxi-02.m1.ntk-corp.rusummary.config.product.fullNameVMware ESXi 8.0.3 build-24280767summary.config.product.version8.0.3summary.hardware.cpuMhz2800summary.hardware.cpuModelIntel(R) Xeon(R) Gold 6242 CPU @ 2.80GHzsummary.hardware.memorySize686831919104summary.hardware.modelSYS-6019P-WTRsummary.hardware.numCpuCores32summary.hardware.numCpuThreads64summary.hardware.uuid00000000-0000-0000-0000-3cecef02b6e0summary.hardware.vendorSupermicrosummary.managementServerIp10.50.242.10summary.quickStats.overallCpuUsage397summary.quickStats.overallMemoryUsage8843summary.quickStats.uptime691193summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo[Device] Add-in Card 16 AOC_NIC TempThe sensor is operating under normal conditionsGreen6700-2degrees CnonetemperatureSystem Chassis 0 Chassis IntruThe sensor is operating under normal conditionsGreen00unspecifiednoneotherSystem Board 46 1.05V PCHThe sensor is operating under normal conditionsGreen106-2VoltsnonevoltageSystem Board 45 PVNN PCHThe sensor is operating under normal conditionsGreen102-2VoltsnonevoltageSystem Board 44 1.8V PCHThe sensor is operating under normal conditionsGreen182-2VoltsnonevoltageSystem Board 43 3.3VSBThe sensor is operating under normal conditionsGreen335-2VoltsnonevoltageSystem Board 42 5VSBThe sensor is operating under normal conditionsGreen507-2VoltsnonevoltageMemory Module 41 VDimmP2DEFThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 40 
VDimmP2ABCThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 39 VDimmP1DEFThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 38 VDimmP1ABCThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageProcessor 21 Vcpu2The sensor is operating under normal conditionsGreen184-2VoltsnonevoltageProcessor 20 Vcpu1The sensor is operating under normal conditionsGreen184-2VoltsnonevoltageBattery 0 VBATThe sensor is operating under normal conditionsGreen325160unspecifiednonebatterySystem Board 34 3.3VCCThe sensor is operating under normal conditionsGreen343-2VoltsnonevoltageSystem Board 33 5VCCThe sensor is operating under normal conditionsGreen507-2VoltsnonevoltageSystem Board 32 12VThe sensor is operating under normal conditionsGreen1164-2VoltsnonevoltageFan Device 6 FAN6The sensor is operating under normal conditionsGreen560000-2RPMnonefanFan Device 5 FAN5The sensor is operating under normal conditionsGreen590000-2RPMnonefanFan Device 3 FAN3The sensor is operating under normal conditionsGreen610000-2RPMnonefanFan Device 2 FAN2The sensor is operating under normal conditionsGreen600000-2RPMnonefanMemory Device 26 P2-DIMMF1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 24 P2-DIMME1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 22 P2-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 20 P2-DIMMC1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureMemory Device 18 P2-DIMMB1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 16 P2-DIMMA1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 8 P1-DIMME1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureMemory Device 6 P1-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 2 P1-DIMMB1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 0 P1-DIMMA1 TempThe sensor is operating under normal conditionsGreen3600-2degrees CnonetemperatureSystem Board 21 VRMP2DEF TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureSystem Board 20 VRMP2ABC TempThe sensor is operating under normal conditionsGreen4200-2degrees CnonetemperatureSystem Board 19 VRMP1DEF TempThe sensor is operating under normal conditionsGreen4000-2degrees CnonetemperatureSystem Board 18 VRMP1ABC TempThe sensor is operating under normal conditionsGreen4300-2degrees CnonetemperatureSystem Board 17 VRMCpu2 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 16 VRMCpu1 TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 3 Peripheral TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 2 System TempThe sensor is operating under normal conditionsGreen2900-2degrees CnonetemperatureSystem Board 1 PCH TempThe sensor is operating under normal conditionsGreen4600-2degrees CnonetemperatureProcessor 2 CPU2 TempThe sensor is operating under normal conditionsGreen5100-2degrees CnonetemperatureProcessor 1 CPU1 TempThe sensor is operating under normal conditionsGreen5000-2degrees CnonetemperaturePower Supply 87 PS2 StatusThe sensor is 
operating under normal conditionsGreen10sensor-discretenonepowerPower Supply 88 PS1 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowertriggeredAlarmStatevmvm-4057
12857:20241101:185543.653 End of vmware_service_get_hv_data():SUCCEED
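The SOAP request logged for host-4043 (and for host-4047 and host-4038 elsewhere in this trace) is a PropertyCollector retrieval with a fixed pathSet of HostSystem properties. A hedged sketch of assembling such a request body; element names follow the vSphere SOAP schema as commonly documented, and the helper is illustrative, not the Zabbix request builder:

    # Illustrative sketch: build a RetrievePropertiesEx body for one HostSystem.
    HOST_PATHSET = [
        'vm', 'parent', 'datastore', 'config.network.pnic',
        'summary.quickStats.overallCpuUsage', 'summary.quickStats.uptime',
        'summary.config.name', 'overallStatus', 'runtime.inMaintenanceMode',
    ]  # abbreviated; the full list is in the logged request above

    def retrieve_properties_body(moid, pathset):
        props = ''.join('<ns0:pathSet>%s</ns0:pathSet>' % p for p in pathset)
        return (
            '<ns0:RetrievePropertiesEx>'
            '<ns0:_this type="PropertyCollector">propertyCollector</ns0:_this>'
            '<ns0:specSet>'
            '<ns0:propSet><ns0:type>HostSystem</ns0:type>' + props + '</ns0:propSet>'
            '<ns0:objectSet><ns0:obj type="HostSystem">' + moid + '</ns0:obj>'
            '<ns0:skip>false</ns0:skip></ns0:objectSet>'
            '</ns0:specSet>'
            '<ns0:options/>'
            '</ns0:RetrievePropertiesEx>'
        )

    print(retrieve_properties_body('host-4043', HOST_PATHSET)[:120])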
12857:20241101:185543.655 In vmware_service_get_hv_pnics_data()
12857:20241101:185543.655 End of vmware_service_get_hv_pnics_data() found:4
12857:20241101:185543.655 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12857:20241101:185543.655 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12857:20241101:185543.655 In vmware_hv_ip_search()
12857:20241101:185543.655 End of vmware_hv_ip_search() ip:10.50.242.12
12857:20241101:185543.655 In vmware_hv_get_parent_data() id:'host-4043'
12857:20241101:185543.658 vmware_hv_get_parent_data() SOAP response:
domain-c1002nameNTK-corpdatacenter-3nameNTK-corptriggeredAlarmState
12857:20241101:185543.658 End of vmware_hv_get_parent_data():SUCCEED
12857:20241101:185543.658 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4043"
12857:20241101:185543.658 In vmware_service_hv_disks_get_info() hvid:'host-4043'
12857:20241101:185543.658 vmware_service_hv_disks_get_info() count of scsiLun:21
12859:20241101:185543.970 In vmware_job_get() queue:2
12859:20241101:185543.970 End of vmware_job_get() queue:1 type:update_perf_counters
12859:20241101:185543.970 In vmware_job_exec() type:update_perf_counters
12859:20241101:185543.970 End of vmware_job_exec() type:update_perf_counters ret:FAIL
12859:20241101:185543.970 In vmware_job_schedule() queue:1 type:update_perf_counters
12859:20241101:185543.970 End of vmware_job_schedule() type:update_perf_counters nextcheck:18:56:43
12859:20241101:185543.970 In vmware_job_get() queue:2
12859:20241101:185543.970 End of vmware_job_get() queue:2 type:none
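Meanwhile the other collector workers keep polling their shared job queue: vmware_job_get() pops the one due job (update_perf_counters, queue 2 -> 1), vmware_job_exec() fails because the service data is not yet initialized, and vmware_job_schedule() re-queues it with nextcheck 18:56:43, sixty seconds after the attempt; later polls return type:none until something becomes due again. A small sketch of a time-ordered job queue with that get/exec/reschedule shape (assumed structure, not the Zabbix one):

    # Illustrative sketch of a time-ordered job queue (not the Zabbix implementation).
    import heapq, time

    class JobQueue:
        def __init__(self):
            self._heap = []

        def schedule(self, nextcheck, job_type):
            heapq.heappush(self._heap, (nextcheck, job_type))

        def get(self, now):
            """Pop the next job if it is due, otherwise return None ("type:none" in the trace)."""
            if self._heap and self._heap[0][0] <= now:
                return heapq.heappop(self._heap)[1]
            return None

    q = JobQueue()
    now = time.time()
    q.schedule(now, 'update_perf_counters')
    job = q.get(now)               # -> 'update_perf_counters'
    ok = False                     # the exec failed in the trace above
    if job and not ok:
        q.schedule(now + 60, job)  # retry one interval later, cf. nextcheck:18:56:43
    print(job, q.get(now))         # -> update_perf_counters None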
12861:20241101:185543.970 In vmware_job_get() queue:2
12861:20241101:185543.970 End of vmware_job_get() queue:2 type:none
12855:20241101:185543.970 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001346 sec]'
12855:20241101:185543.970 In vmware_job_get() queue:2
12855:20241101:185543.970 End of vmware_job_get() queue:2 type:none
12861:20241101:185544.970 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001165 sec]'
12861:20241101:185544.970 In vmware_job_get() queue:2
12861:20241101:185544.970 End of vmware_job_get() queue:2 type:none
12859:20241101:185544.970 In vmware_job_get() queue:2
12859:20241101:185544.970 End of vmware_job_get() queue:2 type:none
12855:20241101:185544.970 In vmware_job_get() queue:2
12855:20241101:185544.970 End of vmware_job_get() queue:2 type:none
12855:20241101:185545.970 In vmware_job_get() queue:2
12855:20241101:185545.970 End of vmware_job_get() queue:2 type:none
12861:20241101:185545.970 In vmware_job_get() queue:2
12861:20241101:185545.970 End of vmware_job_get() queue:2 type:none
12859:20241101:185545.970 In vmware_job_get() queue:2
12859:20241101:185545.970 End of vmware_job_get() queue:2 type:none
12861:20241101:185546.970 In vmware_job_get() queue:2
12861:20241101:185546.970 End of vmware_job_get() queue:2 type:none
12859:20241101:185546.970 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001302 sec]'
12859:20241101:185546.970 In vmware_job_get() queue:2
12859:20241101:185546.970 End of vmware_job_get() queue:2 type:none
12855:20241101:185546.970 In vmware_job_get() queue:2
12855:20241101:185546.970 End of vmware_job_get() queue:2 type:none
12859:20241101:185547.970 In vmware_job_get() queue:2
12859:20241101:185547.971 End of vmware_job_get() queue:2 type:none
12861:20241101:185547.971 In vmware_job_get() queue:2
12861:20241101:185547.971 End of vmware_job_get() queue:2 type:none
12855:20241101:185547.971 In vmware_job_get() queue:2
12855:20241101:185547.971 End of vmware_job_get() queue:2 type:none
12837:20241101:185548.335 received configuration data from server at "10.50.242.78", datalen 437
12861:20241101:185548.971 In vmware_job_get() queue:2
12861:20241101:185548.971 End of vmware_job_get() queue:2 type:none
12859:20241101:185548.971 In vmware_job_get() queue:2
12859:20241101:185548.971 End of vmware_job_get() queue:2 type:none
12855:20241101:185548.971 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000812 sec]'
12855:20241101:185548.971 In vmware_job_get() queue:2
12855:20241101:185548.971 End of vmware_job_get() queue:2 type:none
12861:20241101:185549.971 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000903 sec]'
12861:20241101:185549.971 In vmware_job_get() queue:2
12861:20241101:185549.971 End of vmware_job_get() queue:2 type:none
12855:20241101:185549.971 In vmware_job_get() queue:2
12855:20241101:185549.971 End of vmware_job_get() queue:2 type:none
12859:20241101:185549.971 In vmware_job_get() queue:2
12859:20241101:185549.971 End of vmware_job_get() queue:2 type:none
12861:20241101:185550.971 In vmware_job_get() queue:2
12861:20241101:185550.971 End of vmware_job_get() queue:2 type:none
12855:20241101:185550.971 In vmware_job_get() queue:2
12855:20241101:185550.971 End of vmware_job_get() queue:2 type:none
12859:20241101:185550.971 In vmware_job_get() queue:2
12859:20241101:185550.971 End of vmware_job_get() queue:2 type:none
12861:20241101:185551.971 In vmware_job_get() queue:2
12861:20241101:185551.971 End of vmware_job_get() queue:2 type:none
12855:20241101:185551.972 In vmware_job_get() queue:2
12855:20241101:185551.972 End of vmware_job_get() queue:2 type:none
12859:20241101:185551.972 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000974 sec]'
12859:20241101:185551.972 In vmware_job_get() queue:2
12859:20241101:185551.972 End of vmware_job_get() queue:2 type:none
12855:20241101:185552.972 In vmware_job_get() queue:2
12855:20241101:185552.972 End of vmware_job_get() queue:2 type:none
12859:20241101:185552.972 In vmware_job_get() queue:2
12859:20241101:185552.972 End of vmware_job_get() queue:2 type:none
12861:20241101:185552.972 In vmware_job_get() queue:2
12861:20241101:185552.972 End of vmware_job_get() queue:2 type:none
12857:20241101:185553.660 End of vmware_service_hv_disks_get_info():FAIL for 0(vsan:0) / 21
12857:20241101:185553.660 End of vmware_service_init_hv():FAIL
12857:20241101:185553.660 Unable to initialize hv host-4043: Timeout was reached.
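A timeout on one hypervisor does not abort the whole service update: the failure is logged and the collector moves on to the next hvid (host-4038 below, which then times out as well). Schematically, with invented names:

    # Illustrative per-hypervisor loop: one failing host does not stop the update.
    def update_hypervisors(hv_ids, init_hv):
        hvs, failed = {}, []
        for hv_id in hv_ids:
            try:
                hvs[hv_id] = init_hv(hv_id)
            except TimeoutError as err:
                failed.append(hv_id)
                print('Unable to initialize hv %s: %s' % (hv_id, err))
        return hvs, failed

    # e.g. update_hypervisors(['host-4047', 'host-4043', 'host-4038'], some_init_fn),
    # where some_init_fn is any callable that raises TimeoutError for an unreachable host.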
12857:20241101:185553.660 In vmware_service_init_hv() hvid:'host-4038'
12857:20241101:185553.660 In vmware_service_get_hv_data() guesthvid:'host-4038'
12857:20241101:185553.660 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4038false
12855:20241101:185553.972 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001591 sec]'
12855:20241101:185553.972 In vmware_job_get() queue:2
12855:20241101:185553.972 End of vmware_job_get() queue:2 type:none
12859:20241101:185553.972 In vmware_job_get() queue:2
12859:20241101:185553.972 End of vmware_job_get() queue:2 type:none
12861:20241101:185553.972 In vmware_job_get() queue:2
12861:20241101:185553.972 End of vmware_job_get() queue:2 type:none
12855:20241101:185554.972 In vmware_job_get() queue:2
12855:20241101:185554.972 End of vmware_job_get() queue:2 type:none
12861:20241101:185554.973 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001647 sec]'
12861:20241101:185554.973 In vmware_job_get() queue:2
12861:20241101:185554.973 End of vmware_job_get() queue:2 type:none
12859:20241101:185554.973 In vmware_job_get() queue:2
12859:20241101:185554.973 End of vmware_job_get() queue:2 type:none
12855:20241101:185555.973 In vmware_job_get() queue:2
12855:20241101:185555.973 End of vmware_job_get() queue:2 type:none
12861:20241101:185555.973 In vmware_job_get() queue:2
12861:20241101:185555.973 End of vmware_job_get() queue:2 type:none
12859:20241101:185555.973 In vmware_job_get() queue:2
12859:20241101:185555.973 End of vmware_job_get() queue:2 type:none
12855:20241101:185556.973 In vmware_job_get() queue:2
12855:20241101:185556.973 End of vmware_job_get() queue:2 type:none
12861:20241101:185556.974 In vmware_job_get() queue:2
12861:20241101:185556.974 End of vmware_job_get() queue:2 type:none
12859:20241101:185556.974 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001644 sec]'
12859:20241101:185556.974 In vmware_job_get() queue:2
12859:20241101:185556.974 End of vmware_job_get() queue:2 type:none
12855:20241101:185557.974 In vmware_job_get() queue:2
12855:20241101:185557.974 End of vmware_job_get() queue:2 type:none
12859:20241101:185557.974 In vmware_job_get() queue:2
12859:20241101:185557.974 End of vmware_job_get() queue:2 type:none
12861:20241101:185557.974 In vmware_job_get() queue:2
12861:20241101:185557.974 End of vmware_job_get() queue:2 type:none
12837:20241101:185558.351 received configuration data from server at "10.50.242.78", datalen 437
12855:20241101:185558.974 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001911 sec]'
12855:20241101:185558.974 In vmware_job_get() queue:2
12855:20241101:185558.974 End of vmware_job_get() queue:2 type:none
12859:20241101:185558.974 In vmware_job_get() queue:2
12859:20241101:185558.974 End of vmware_job_get() queue:2 type:none
12861:20241101:185558.974 In vmware_job_get() queue:2
12861:20241101:185558.974 End of vmware_job_get() queue:2 type:none
12855:20241101:185559.974 In vmware_job_get() queue:2
12855:20241101:185559.974 End of vmware_job_get() queue:2 type:none
12861:20241101:185559.974 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001934 sec]'
12861:20241101:185559.974 In vmware_job_get() queue:2
12861:20241101:185559.974 End of vmware_job_get() queue:2 type:none
12859:20241101:185559.975 In vmware_job_get() queue:2
12859:20241101:185559.975 End of vmware_job_get() queue:2 type:none
12855:20241101:185600.975 In vmware_job_get() queue:2
12855:20241101:185600.975 End of vmware_job_get() queue:2 type:none
12859:20241101:185600.975 In vmware_job_get() queue:2
12859:20241101:185600.975 End of vmware_job_get() queue:2 type:none
12861:20241101:185600.975 In vmware_job_get() queue:2
12861:20241101:185600.975 End of vmware_job_get() queue:2 type:none
12855:20241101:185601.975 In vmware_job_get() queue:2
12855:20241101:185601.975 End of vmware_job_get() queue:2 type:none
12861:20241101:185601.976 In vmware_job_get() queue:2
12861:20241101:185601.976 End of vmware_job_get() queue:2 type:none
12859:20241101:185601.976 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001749 sec]'
12859:20241101:185601.976 In vmware_job_get() queue:2
12859:20241101:185601.976 End of vmware_job_get() queue:2 type:none
12855:20241101:185602.976 In vmware_job_get() queue:2
12861:20241101:185602.976 In vmware_job_get() queue:2
12861:20241101:185602.976 End of vmware_job_get() queue:2 type:none
12859:20241101:185602.976 In vmware_job_get() queue:2
12859:20241101:185602.976 End of vmware_job_get() queue:2 type:none
12855:20241101:185602.976 End of vmware_job_get() queue:2 type:none
12857:20241101:185603.662 End of vmware_service_get_hv_data():FAIL
12857:20241101:185603.662 End of vmware_service_init_hv():FAIL
12857:20241101:185603.662 Unable to initialize hv host-4038: Timeout was reached.
12857:20241101:185603.662 In vmware_service_dvswitch_load() dvs count:0
12857:20241101:185603.662 End of vmware_service_dvswitch_load() count: 0 / 0
12857:20241101:185603.662 In vmware_service_props_load() props total:0
12857:20241101:185603.662 End of vmware_service_props_load() count: 0 / 0
12857:20241101:185603.662 In vmware_service_get_maxquerymetrics()
12861:20241101:185603.976 In vmware_job_get() queue:2
12861:20241101:185603.976 End of vmware_job_get() queue:2 type:none
12855:20241101:185603.976 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001733 sec]'
12855:20241101:185603.976 In vmware_job_get() queue:2
12855:20241101:185603.976 End of vmware_job_get() queue:2 type:none
12859:20241101:185603.976 In vmware_job_get() queue:2
12859:20241101:185603.976 End of vmware_job_get() queue:2 type:none
12861:20241101:185604.976 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001645 sec]'
12859:20241101:185604.976 In vmware_job_get() queue:2
12859:20241101:185604.976 End of vmware_job_get() queue:2 type:none
12861:20241101:185604.976 In vmware_job_get() queue:2
12861:20241101:185604.976 End of vmware_job_get() queue:2 type:none
12855:20241101:185604.976 In vmware_job_get() queue:2
12855:20241101:185604.976 End of vmware_job_get() queue:2 type:none
12859:20241101:185605.976 In vmware_job_get() queue:2
12859:20241101:185605.976 End of vmware_job_get() queue:2 type:none
12855:20241101:185605.976 In vmware_job_get() queue:2
12855:20241101:185605.976 End of vmware_job_get() queue:2 type:none
12861:20241101:185605.976 In vmware_job_get() queue:2
12861:20241101:185605.976 End of vmware_job_get() queue:2 type:none
12859:20241101:185606.976 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001731 sec]'
12859:20241101:185606.977 In vmware_job_get() queue:2
12859:20241101:185606.977 End of vmware_job_get() queue:2 type:none
12855:20241101:185606.977 In vmware_job_get() queue:2
12855:20241101:185606.977 End of vmware_job_get() queue:2 type:none
12861:20241101:185606.977 In vmware_job_get() queue:2
12861:20241101:185606.977 End of vmware_job_get() queue:2 type:none
12859:20241101:185607.977 In vmware_job_get() queue:2
12859:20241101:185607.977 End of vmware_job_get() queue:2 type:none
12855:20241101:185607.977 In vmware_job_get() queue:2
12855:20241101:185607.977 End of vmware_job_get() queue:2 type:none
12861:20241101:185607.977 In vmware_job_get() queue:2
12861:20241101:185607.977 End of vmware_job_get() queue:2 type:none
12837:20241101:185608.374 received configuration data from server at "10.50.242.78", datalen 10093
12859:20241101:185608.977 In vmware_job_get() queue:2
12859:20241101:185608.977 End of vmware_job_get() queue:2 type:none
12855:20241101:185608.977 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000980 sec]'
12855:20241101:185608.977 In vmware_job_get() queue:2
12855:20241101:185608.977 End of vmware_job_get() queue:2 type:none
12861:20241101:185608.977 In vmware_job_get() queue:2
12861:20241101:185608.977 End of vmware_job_get() queue:2 type:none
12859:20241101:185609.977 In vmware_job_get() queue:2
12859:20241101:185609.977 End of vmware_job_get() queue:2 type:none
12855:20241101:185609.977 In vmware_job_get() queue:2
12855:20241101:185609.977 End of vmware_job_get() queue:2 type:none
12861:20241101:185609.977 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001091 sec]'
12861:20241101:185609.977 In vmware_job_get() queue:2
12861:20241101:185609.977 End of vmware_job_get() queue:2 type:none
12859:20241101:185610.977 In vmware_job_get() queue:2
12859:20241101:185610.977 End of vmware_job_get() queue:2 type:none
12855:20241101:185610.977 In vmware_job_get() queue:2
12855:20241101:185610.977 End of vmware_job_get() queue:2 type:none
12861:20241101:185610.977 In vmware_job_get() queue:2
12861:20241101:185610.977 End of vmware_job_get() queue:2 type:none
12859:20241101:185611.977 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000863 sec]'
12859:20241101:185611.977 In vmware_job_get() queue:2
12861:20241101:185611.979 In vmware_job_get() queue:2
12861:20241101:185611.979 End of vmware_job_get() queue:2 type:none
12855:20241101:185611.979 In vmware_job_get() queue:2
12855:20241101:185611.979 End of vmware_job_get() queue:2 type:none
12859:20241101:185611.979 End of vmware_job_get() queue:2 type:none
12861:20241101:185612.979 In vmware_job_get() queue:2
12861:20241101:185612.979 End of vmware_job_get() queue:2 type:none
12855:20241101:185612.979 In vmware_job_get() queue:2
12855:20241101:185612.979 End of vmware_job_get() queue:2 type:none
12859:20241101:185612.979 In vmware_job_get() queue:2
12859:20241101:185612.979 End of vmware_job_get() queue:2 type:none
12857:20241101:185613.664 End of vmware_service_get_maxquerymetrics():FAIL
12857:20241101:185613.664 In vmware_service_update_perf_entities()
12857:20241101:185613.664 In vmware_service_add_perf_entity() type:HostSystem id:host-4047
12857:20241101:185613.664 In zbx_vmware_service_get_perf_entity() type:HostSystem id:host-4047
12857:20241101:185613.664 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185613.664 In zbx_vmware_service_get_counterid() path:net/packetsRx[summation]
12857:20241101:185613.664 zbx_vmware_service_get_counterid() counterid:153
12857:20241101:185613.664 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.664 In zbx_vmware_service_get_counterid() path:net/packetsTx[summation]
12857:20241101:185613.664 zbx_vmware_service_get_counterid() counterid:154
12857:20241101:185613.664 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:net/received[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:155
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:net/transmitted[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:156
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:datastore/totalReadLatency[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:189
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:datastore/totalWriteLatency[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:190
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:datastore/numberReadAveraged[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:185
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:datastore/numberWriteAveraged[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:186
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:cpu/usage[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:2
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:cpu/utilization[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:398
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:power/power[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:164
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:power/powerCap[average]
12857:20241101:185613.665 zbx_vmware_service_get_counterid() counterid:165
12857:20241101:185613.665 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.665 In zbx_vmware_service_get_counterid() path:net/droppedRx[summation]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:605
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:net/droppedTx[summation]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:606
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:net/errorsRx[summation]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:613
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:net/errorsTx[summation]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:614
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:net/broadcastRx[summation]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:609
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:net/broadcastTx[summation]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:610
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 End of vmware_service_add_perf_entity() perfcounters:18
12857:20241101:185613.666 In vmware_service_add_perf_entity() type:VirtualMachine id:vm-4060
12857:20241101:185613.666 In zbx_vmware_service_get_perf_entity() type:VirtualMachine id:vm-4060
12857:20241101:185613.666 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:virtualDisk/read[average]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:180
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:virtualDisk/write[average]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:181
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:virtualDisk/numberReadAveraged[average]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:178
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:virtualDisk/numberWriteAveraged[average]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:179
12857:20241101:185613.666 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.666 In zbx_vmware_service_get_counterid() path:net/packetsRx[summation]
12857:20241101:185613.666 zbx_vmware_service_get_counterid() counterid:153
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:net/packetsTx[summation]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:154
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:net/received[average]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:155
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:net/transmitted[average]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:156
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:cpu/ready[summation]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:12
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:net/usage[average]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:150
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:cpu/usage[average]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:2
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:cpu/latency[average]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:540
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:cpu/readiness[average]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:548
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:cpu/swapwait[summation]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:531
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:sys/osUptime[latest]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:643
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:mem/consumed[average]
12857:20241101:185613.667 zbx_vmware_service_get_counterid() counterid:98
12857:20241101:185613.667 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.667 In zbx_vmware_service_get_counterid() path:mem/usage[average]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:24
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:mem/swapped[average]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:70
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:net/usage[average]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:150
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:virtualDisk/readOIO[latest]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:349
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:virtualDisk/writeOIO[latest]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:350
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:virtualDisk/totalWriteLatency[average]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:183
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:virtualDisk/totalReadLatency[average]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:182
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 End of vmware_service_add_perf_entity() perfcounters:23
12857:20241101:185613.668 vmware_service_update_perf_entities() for type: VirtualMachine hv id: host-4047 hv uuid: 00000000-0000-0000-0000-ac1f6bb14c78 linked vm id: vm-4060 vm uuid: 50304101-157a-f442-58f4-550f05de33fe
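
The trace above resolves counter paths of the form group/counter[rollup] (for example cpu/ready[summation] or virtualDisk/totalReadLatency[average]) into the numeric counter IDs assigned by vCenter's PerfMgr. A minimal sketch of that path convention in Python, not Zabbix's actual C implementation, with a small hypothetical lookup table standing in for the real counter cache; the IDs shown match the ones resolved in this log:

    import re

    # Hypothetical subset of the counter cache built from vCenter's PerfMgr data;
    # the IDs mirror the ones resolved above in this log.
    COUNTERS = {
        ("cpu", "ready", "summation"): 12,
        ("cpu", "usage", "average"): 2,
        ("net", "received", "average"): 155,
        ("virtualDisk", "totalReadLatency", "average"): 182,
    }

    def get_counterid(path):
        """Split 'group/counter[rollup]' and look it up in the counter cache."""
        m = re.fullmatch(r"([^/]+)/([^\[]+)\[([^\]]+)\]", path)
        return COUNTERS.get(m.groups()) if m else None

    print(get_counterid("cpu/ready[summation]"))  # 12, as in the log
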
12857:20241101:185613.668 In vmware_service_add_perf_entity() type:Datastore id:datastore-2005
12857:20241101:185613.668 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2005
12857:20241101:185613.668 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.668 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185613.668 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185613.668 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.669 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185613.669 vmware_service_update_perf_entities() for type: Datastore id: datastore-2005 name: 3PAR_GOROH_SSD_NTK_ID530_mgmt uuid: 6703d517-82086a06-cec0-9440c9831520
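
The three datastore counters registered for each datastore here (disk/used[latest], disk/provisioned[latest], disk/capacity[latest]) are space counters that vCenter reports in kilobytes; with thin-provisioned disks, provisioned can exceed capacity. A small illustrative calculation with made-up values (none of these numbers come from this log):

    def datastore_space(used_kb, provisioned_kb, capacity_kb):
        """Derive free space and the overcommit ratio from the three counters."""
        free_kb = capacity_kb - used_kb
        overcommit = provisioned_kb / capacity_kb
        return free_kb, overcommit

    # Hypothetical 1 TiB datastore: 600 GiB used, 1.5 TiB provisioned via thin disks.
    free_kb, ratio = datastore_space(600 * 1024**2, 1536 * 1024**2, 1024 * 1024**2)
    print(free_kb, ratio)  # 444596224 KiB free, overcommitted 1.5x
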
12857:20241101:185613.669 In vmware_service_add_perf_entity() type:Datastore id:datastore-2006
12857:20241101:185613.669 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2006
12857:20241101:185613.669 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185613.669 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185613.669 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185613.669 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.669 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185613.669 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185613.669 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.669 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185613.669 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185613.669 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.669 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185613.669 vmware_service_update_perf_entities() for type: Datastore id: datastore-2006 name: 3PAR_KARTOHA_SAS_NTK_ID535 uuid: 6703d63f-3516ce66-4bee-9440c9831520
12857:20241101:185613.669 In vmware_service_add_perf_entity() type:Datastore id:datastore-2007
12857:20241101:185613.669 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2007
12857:20241101:185613.669 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185613.669 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185613.669 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185613.669 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.669 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185613.669 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185613.669 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.669 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185613.669 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185613.669 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.669 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185613.669 vmware_service_update_perf_entities() for type: Datastore id: datastore-2007 name: 3PAR_GOROH_SSD_NTK_ID531 uuid: 6704dec9-75e6c68a-c19e-9440c9831520
12857:20241101:185613.669 In vmware_service_add_perf_entity() type:Datastore id:datastore-4046
12857:20241101:185613.669 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4046
12857:20241101:185613.669 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185613.669 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185613.669 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.670 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185613.670 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.670 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185613.670 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.670 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185613.670 vmware_service_update_perf_entities() for type: Datastore id: datastore-4046 name: Local_ntk-m1-esxi-02 uuid: 67155ba7-5e9d16d6-0733-3cecef02b6e0
12857:20241101:185613.670 In vmware_service_add_perf_entity() type:Datastore id:datastore-4050
12857:20241101:185613.670 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4050
12857:20241101:185613.670 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185613.670 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185613.670 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.670 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185613.670 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.670 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185613.670 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.670 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185613.670 vmware_service_update_perf_entities() for type: Datastore id: datastore-4050 name: Local_ntk-m1-esxi-01 uuid: 67155cc9-bea5e318-19fd-ac1f6bb14c78
12857:20241101:185613.670 In vmware_service_add_perf_entity() type:Datastore id:datastore-4041
12857:20241101:185613.670 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4041
12857:20241101:185613.670 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185613.670 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185613.670 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.670 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185613.670 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.670 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185613.670 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185613.670 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185613.671 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185613.671 vmware_service_update_perf_entities() for type: Datastore id: datastore-4041 name: Local_ntk-m1-esxi-03 uuid: 67155e10-d4545cb2-5b01-3cecef012e78
12857:20241101:185613.671 End of vmware_service_update_perf_entities() entities:8
12857:20241101:185613.671 === memory statistics for vmware cache size ===
12857:20241101:185613.671 free chunks of size >= 256 bytes: 4
12857:20241101:185613.671 min chunk size: 760 bytes
12857:20241101:185613.671 max chunk size: 1073164312 bytes
12857:20241101:185613.671 memory of total size 1073625776 bytes fragmented into 7203 chunks
12857:20241101:185613.671 of those, 1073166912 bytes are in 4 free chunks
12857:20241101:185613.671 of those, 458864 bytes are in 7199 used chunks
12857:20241101:185613.671 of those, 115232 bytes are used by allocation overhead
12857:20241101:185613.671 ================================
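
The memory-statistics block above is internally consistent: the free and used chunk sizes add up to the fragmented total, the chunk counts add up as well, and the allocation overhead is counted inside the used portion. A quick check of the arithmetic, using only the figures from this log:

    free_bytes = 1073166912   # "are in 4 free chunks"
    used_bytes = 458864       # "are in 7199 used chunks"
    overhead   = 115232       # "used by allocation overhead", counted inside used_bytes
    total      = 1073625776   # "memory of total size ... fragmented into 7203 chunks"

    assert free_bytes + used_bytes == total   # 1073166912 + 458864 = 1073625776
    assert 4 + 7199 == 7203                   # chunk counts agree as well
    print(used_bytes - overhead)              # 343632 bytes of actual cached payload
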
12857:20241101:185613.671 End of zbx_vmware_service_update():FAIL processed:1638400 bytes of data. Events:0 DC:1 DS:6 CL:1 HV:1 VM:1 DVS:1 Alarms:1 VMwareCache memory usage (free/strpool/total): 1073166912 / 3009592 / 1073741008
12857:20241101:185613.671 End of vmware_job_exec() type:update_conf ret:FAIL
12857:20241101:185613.671 In vmware_job_schedule() queue:2 type:update_conf
12857:20241101:185613.671 End of vmware_job_schedule() type:update_conf nextcheck:18:56:43
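
The four collector processes visible in this log (vmware collector #1..#4) and the roughly 1 GiB cache reported above are governed by the VMware-related parameters in zabbix_server.conf / zabbix_proxy.conf. A plausible configuration fragment for a setup like this one; only the collector count and cache size are implied by the log, the remaining values are the Zabbix defaults:

    ### VMware monitoring (zabbix_server.conf or zabbix_proxy.conf)
    StartVMwareCollectors=4      # matches collectors #1-#4 seen in this log
    VMwareCacheSize=1G           # total cache of ~1073741008 bytes reported above
    VMwareFrequency=60           # seconds between configuration/data collection runs
    VMwarePerfFrequency=60       # seconds between performance counter collection runs
    VMwareTimeout=10             # seconds to wait for a vCenter/ESXi response
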
12857:20241101:185613.671 In vmware_job_get() queue:3
12857:20241101:185613.671 End of vmware_job_get() queue:3 type:none
12857:20241101:185613.671 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 4.000000 sec during 34.348215 sec]'
12857:20241101:185613.671 In vmware_job_get() queue:3
12857:20241101:185613.671 End of vmware_job_get() queue:3 type:none
12861:20241101:185613.979 In vmware_job_get() queue:3
12861:20241101:185613.979 End of vmware_job_get() queue:3 type:none
12855:20241101:185613.979 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002517 sec]'
12855:20241101:185613.980 In vmware_job_get() queue:3
12855:20241101:185613.980 End of vmware_job_get() queue:3 type:none
12859:20241101:185613.980 In vmware_job_get() queue:3
12859:20241101:185613.980 End of vmware_job_get() queue:3 type:none
12857:20241101:185614.671 In vmware_job_get() queue:3
12857:20241101:185614.671 End of vmware_job_get() queue:3 type:none
12861:20241101:185614.980 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002369 sec]'
12861:20241101:185614.980 In vmware_job_get() queue:3
12861:20241101:185614.980 End of vmware_job_get() queue:3 type:none
12855:20241101:185614.980 In vmware_job_get() queue:3
12855:20241101:185614.980 End of vmware_job_get() queue:3 type:none
12859:20241101:185614.980 In vmware_job_get() queue:3
12859:20241101:185614.980 End of vmware_job_get() queue:3 type:none
12857:20241101:185615.672 In vmware_job_get() queue:3
12857:20241101:185615.672 End of vmware_job_get() queue:3 type:none
12861:20241101:185615.980 In vmware_job_get() queue:3
12861:20241101:185615.980 End of vmware_job_get() queue:3 type:none
12855:20241101:185615.980 In vmware_job_get() queue:3
12855:20241101:185615.980 End of vmware_job_get() queue:3 type:none
12859:20241101:185615.980 In vmware_job_get() queue:3
12859:20241101:185615.980 End of vmware_job_get() queue:3 type:none
12857:20241101:185616.672 In vmware_job_get() queue:3
12857:20241101:185616.672 End of vmware_job_get() queue:3 type:none
12861:20241101:185616.980 In vmware_job_get() queue:3
12861:20241101:185616.980 End of vmware_job_get() queue:3 type:none
12855:20241101:185616.981 In vmware_job_get() queue:3
12855:20241101:185616.981 End of vmware_job_get() queue:3 type:none
12859:20241101:185616.981 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002911 sec]'
12859:20241101:185616.981 In vmware_job_get() queue:3
12859:20241101:185616.981 End of vmware_job_get() queue:3 type:none
12857:20241101:185617.672 In vmware_job_get() queue:3
12857:20241101:185617.672 End of vmware_job_get() queue:3 type:none
12861:20241101:185617.981 In vmware_job_get() queue:3
12861:20241101:185617.981 End of vmware_job_get() queue:3 type:none
12859:20241101:185617.981 In vmware_job_get() queue:3
12859:20241101:185617.981 End of vmware_job_get() queue:3 type:none
12855:20241101:185617.981 In vmware_job_get() queue:3
12855:20241101:185617.981 End of vmware_job_get() queue:3 type:none
12837:20241101:185618.406 received configuration data from server at "10.50.242.78", datalen 10211
12857:20241101:185618.673 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001530 sec]'
12857:20241101:185618.673 In vmware_job_get() queue:3
12857:20241101:185618.673 End of vmware_job_get() queue:3 type:none
12861:20241101:185618.981 In vmware_job_get() queue:3
12861:20241101:185618.981 End of vmware_job_get() queue:3 type:none
12855:20241101:185618.982 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002141 sec]'
12855:20241101:185618.982 In vmware_job_get() queue:3
12855:20241101:185618.982 End of vmware_job_get() queue:3 type:none
12859:20241101:185618.982 In vmware_job_get() queue:3
12859:20241101:185618.982 End of vmware_job_get() queue:3 type:none
12857:20241101:185619.673 In vmware_job_get() queue:3
12857:20241101:185619.673 End of vmware_job_get() queue:3 type:none
12861:20241101:185619.982 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002156 sec]'
12861:20241101:185619.982 In vmware_job_get() queue:3
12861:20241101:185619.982 End of vmware_job_get() queue:3 type:none
12855:20241101:185619.982 In vmware_job_get() queue:3
12855:20241101:185619.982 End of vmware_job_get() queue:3 type:none
12859:20241101:185619.982 In vmware_job_get() queue:3
12859:20241101:185619.982 End of vmware_job_get() queue:3 type:none
12857:20241101:185620.673 In vmware_job_get() queue:3
12857:20241101:185620.673 End of vmware_job_get() queue:3 type:none
12861:20241101:185620.982 In vmware_job_get() queue:3
12861:20241101:185620.982 End of vmware_job_get() queue:3 type:none
12855:20241101:185620.982 In vmware_job_get() queue:3
12855:20241101:185620.982 End of vmware_job_get() queue:3 type:none
12859:20241101:185620.982 In vmware_job_get() queue:3
12859:20241101:185620.982 End of vmware_job_get() queue:3 type:none
12857:20241101:185621.673 In vmware_job_get() queue:3
12857:20241101:185621.673 End of vmware_job_get() queue:3 type:none
12861:20241101:185621.982 In vmware_job_get() queue:3
12861:20241101:185621.984 End of vmware_job_get() queue:3 type:none
12859:20241101:185621.984 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001976 sec]'
12859:20241101:185621.984 In vmware_job_get() queue:3
12859:20241101:185621.984 End of vmware_job_get() queue:3 type:none
12855:20241101:185621.984 In vmware_job_get() queue:3
12855:20241101:185621.984 End of vmware_job_get() queue:3 type:none
12857:20241101:185622.673 In vmware_job_get() queue:3
12857:20241101:185622.673 End of vmware_job_get() queue:3 type:none
12861:20241101:185622.984 In vmware_job_get() queue:3
12861:20241101:185622.984 End of vmware_job_get() queue:3 type:none
12859:20241101:185622.984 In vmware_job_get() queue:3
12859:20241101:185622.984 End of vmware_job_get() queue:3 type:none
12855:20241101:185622.984 In vmware_job_get() queue:3
12855:20241101:185622.984 End of vmware_job_get() queue:3 type:none
12857:20241101:185623.673 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000796 sec]'
12857:20241101:185623.673 In vmware_job_get() queue:3
12857:20241101:185623.674 End of vmware_job_get() queue:3 type:none
12861:20241101:185623.984 In vmware_job_get() queue:3
12861:20241101:185623.984 End of vmware_job_get() queue:3 type:none
12859:20241101:185623.984 In vmware_job_get() queue:3
12859:20241101:185623.984 End of vmware_job_get() queue:3 type:none
12855:20241101:185623.984 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002486 sec]'
12855:20241101:185623.984 In vmware_job_get() queue:3
12855:20241101:185623.984 End of vmware_job_get() queue:3 type:none
12857:20241101:185624.674 In vmware_job_get() queue:3
12857:20241101:185624.674 End of vmware_job_get() queue:3 type:none
12861:20241101:185624.984 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002439 sec]'
12861:20241101:185624.984 In vmware_job_get() queue:3
12855:20241101:185624.984 In vmware_job_get() queue:3
12855:20241101:185624.984 End of vmware_job_get() queue:3 type:none
12859:20241101:185624.984 In vmware_job_get() queue:3
12859:20241101:185624.984 End of vmware_job_get() queue:3 type:none
12861:20241101:185624.984 End of vmware_job_get() queue:3 type:none
12857:20241101:185625.674 In vmware_job_get() queue:3
12857:20241101:185625.674 End of vmware_job_get() queue:3 type:none
12859:20241101:185625.984 In vmware_job_get() queue:3
12859:20241101:185625.984 End of vmware_job_get() queue:3 type:none
12855:20241101:185625.984 In vmware_job_get() queue:3
12855:20241101:185625.985 End of vmware_job_get() queue:3 type:none
12861:20241101:185625.985 In vmware_job_get() queue:3
12861:20241101:185625.985 End of vmware_job_get() queue:3 type:none
12857:20241101:185626.674 In vmware_job_get() queue:3
12857:20241101:185626.674 End of vmware_job_get() queue:3 type:none
12859:20241101:185626.984 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002252 sec]'
12859:20241101:185626.985 In vmware_job_get() queue:3
12859:20241101:185626.985 End of vmware_job_get() queue:3 type:none
12861:20241101:185626.985 In vmware_job_get() queue:3
12861:20241101:185626.985 End of vmware_job_get() queue:3 type:none
12855:20241101:185626.985 In vmware_job_get() queue:3
12855:20241101:185626.985 End of vmware_job_get() queue:3 type:none
12857:20241101:185627.674 In vmware_job_get() queue:3
12857:20241101:185627.675 End of vmware_job_get() queue:3 type:none
12859:20241101:185627.985 In vmware_job_get() queue:3
12859:20241101:185627.985 End of vmware_job_get() queue:3 type:none
12861:20241101:185627.985 In vmware_job_get() queue:3
12861:20241101:185627.985 End of vmware_job_get() queue:3 type:none
12855:20241101:185627.985 In vmware_job_get() queue:3
12855:20241101:185627.985 End of vmware_job_get() queue:3 type:none
12837:20241101:185628.423 received configuration data from server at "10.50.242.78", datalen 437
12857:20241101:185628.675 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001295 sec]'
12857:20241101:185628.675 In vmware_job_get() queue:3
12857:20241101:185628.675 End of vmware_job_get() queue:3 type:none
12859:20241101:185628.985 In vmware_job_get() queue:3
12859:20241101:185628.985 End of vmware_job_get() queue:3 type:none
12861:20241101:185628.986 In vmware_job_get() queue:3
12861:20241101:185628.986 End of vmware_job_get() queue:3 type:none
12855:20241101:185628.986 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001577 sec]'
12855:20241101:185628.986 In vmware_job_get() queue:3
12855:20241101:185628.986 End of vmware_job_get() queue:3 type:none
12857:20241101:185629.675 In vmware_job_get() queue:3
12857:20241101:185629.676 End of vmware_job_get() queue:3 type:none
12859:20241101:185629.985 In vmware_job_get() queue:3
12859:20241101:185629.985 End of vmware_job_get() queue:3 type:none
12861:20241101:185629.986 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001639 sec]'
12861:20241101:185629.986 In vmware_job_get() queue:3
12861:20241101:185629.986 End of vmware_job_get() queue:3 type:none
12855:20241101:185629.986 In vmware_job_get() queue:3
12855:20241101:185629.986 End of vmware_job_get() queue:3 type:none
12857:20241101:185630.676 In vmware_job_get() queue:3
12857:20241101:185630.676 End of vmware_job_get() queue:3 type:none
12859:20241101:185630.986 In vmware_job_get() queue:3
12859:20241101:185630.986 End of vmware_job_get() queue:3 type:none
12861:20241101:185630.986 In vmware_job_get() queue:3
12861:20241101:185630.986 End of vmware_job_get() queue:3 type:none
12855:20241101:185630.986 In vmware_job_get() queue:3
12855:20241101:185630.986 End of vmware_job_get() queue:3 type:none
12857:20241101:185631.676 In vmware_job_get() queue:3
12857:20241101:185631.676 End of vmware_job_get() queue:3 type:none
12859:20241101:185631.986 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001328 sec]'
12859:20241101:185631.986 In vmware_job_get() queue:3
12859:20241101:185631.986 End of vmware_job_get() queue:3 type:none
12861:20241101:185631.986 In vmware_job_get() queue:3
12861:20241101:185631.986 End of vmware_job_get() queue:3 type:none
12855:20241101:185631.986 In vmware_job_get() queue:3
12855:20241101:185631.986 End of vmware_job_get() queue:3 type:none
12857:20241101:185632.677 In vmware_job_get() queue:3
12857:20241101:185632.677 End of vmware_job_get() queue:3 type:none
12859:20241101:185632.986 In vmware_job_get() queue:3
12859:20241101:185632.986 End of vmware_job_get() queue:3 type:none
12861:20241101:185632.986 In vmware_job_get() queue:3
12861:20241101:185632.986 End of vmware_job_get() queue:3 type:none
12855:20241101:185632.987 In vmware_job_get() queue:3
12855:20241101:185632.987 End of vmware_job_get() queue:3 type:none
12857:20241101:185633.677 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001995 sec]'
12857:20241101:185633.677 In vmware_job_get() queue:3
12857:20241101:185633.677 End of vmware_job_get() queue:3 type:none
12859:20241101:185633.986 In vmware_job_get() queue:3
12859:20241101:185633.987 End of vmware_job_get() queue:3 type:none
12861:20241101:185633.987 In vmware_job_get() queue:3
12861:20241101:185633.987 End of vmware_job_get() queue:3 type:none
12855:20241101:185633.987 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001015 sec]'
12855:20241101:185633.987 In vmware_job_get() queue:3
12855:20241101:185633.987 End of vmware_job_get() queue:3 type:none
12857:20241101:185634.677 In vmware_job_get() queue:3
12857:20241101:185634.677 End of vmware_job_get() queue:3 type:none
12859:20241101:185634.987 In vmware_job_get() queue:3
12859:20241101:185634.987 End of vmware_job_get() queue:3 type:none
12861:20241101:185634.987 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001094 sec]'
12861:20241101:185634.987 In vmware_job_get() queue:3
12861:20241101:185634.987 End of vmware_job_get() queue:3 type:none
12855:20241101:185634.987 In vmware_job_get() queue:3
12855:20241101:185634.987 End of vmware_job_get() queue:3 type:none
12857:20241101:185635.677 In vmware_job_get() queue:3
12857:20241101:185635.677 End of vmware_job_get() queue:3 type:none
12859:20241101:185635.987 In vmware_job_get() queue:3
12859:20241101:185635.987 End of vmware_job_get() queue:3 type:none
12861:20241101:185635.987 In vmware_job_get() queue:3
12861:20241101:185635.987 End of vmware_job_get() queue:3 type:none
12855:20241101:185635.987 In vmware_job_get() queue:3
12855:20241101:185635.987 End of vmware_job_get() queue:3 type:none
12857:20241101:185636.677 In vmware_job_get() queue:3
12857:20241101:185636.677 End of vmware_job_get() queue:3 type:none
12859:20241101:185636.987 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001500 sec]'
12859:20241101:185636.987 In vmware_job_get() queue:3
12859:20241101:185636.987 End of vmware_job_get() queue:3 type:none
12855:20241101:185636.988 In vmware_job_get() queue:3
12855:20241101:185636.988 End of vmware_job_get() queue:3 type:none
12861:20241101:185636.988 In vmware_job_get() queue:3
12861:20241101:185636.988 End of vmware_job_get() queue:3 type:none
12857:20241101:185637.677 In vmware_job_get() queue:3
12857:20241101:185637.678 End of vmware_job_get() queue:3 type:none
12859:20241101:185637.988 In vmware_job_get() queue:3
12859:20241101:185637.988 End of vmware_job_get() queue:3 type:none
12855:20241101:185637.988 In vmware_job_get() queue:3
12855:20241101:185637.988 End of vmware_job_get() queue:3 type:none
12861:20241101:185637.988 In vmware_job_get() queue:3
12861:20241101:185637.988 End of vmware_job_get() queue:3 type:none
12837:20241101:185638.438 received configuration data from server at "10.50.242.78", datalen 437
12857:20241101:185638.678 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001073 sec]'
12857:20241101:185638.678 In vmware_job_get() queue:3
12857:20241101:185638.678 End of vmware_job_get() queue:3 type:none
12859:20241101:185638.988 In vmware_job_get() queue:3
12859:20241101:185638.988 End of vmware_job_get() queue:3 type:none
12855:20241101:185638.988 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001327 sec]'
12855:20241101:185638.988 In vmware_job_get() queue:3
12855:20241101:185638.988 End of vmware_job_get() queue:3 type:none
12861:20241101:185638.988 In vmware_job_get() queue:3
12861:20241101:185638.988 End of vmware_job_get() queue:3 type:none
12857:20241101:185639.678 In vmware_job_get() queue:3
12857:20241101:185639.678 End of vmware_job_get() queue:3 type:none
12859:20241101:185639.988 In vmware_job_get() queue:3
12859:20241101:185639.988 End of vmware_job_get() queue:3 type:none
12855:20241101:185639.988 In vmware_job_get() queue:3
12855:20241101:185639.988 End of vmware_job_get() queue:3 type:none
12861:20241101:185639.988 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001393 sec]'
12861:20241101:185639.988 In vmware_job_get() queue:3
12861:20241101:185639.988 End of vmware_job_get() queue:3 type:none
12857:20241101:185640.678 In vmware_job_get() queue:3
12857:20241101:185640.679 End of vmware_job_get() queue:3 type:none
12859:20241101:185640.988 In vmware_job_get() queue:3
12859:20241101:185640.988 End of vmware_job_get() queue:3 type:none
12861:20241101:185640.988 In vmware_job_get() queue:3
12861:20241101:185640.988 End of vmware_job_get() queue:3 type:none
12855:20241101:185640.988 In vmware_job_get() queue:3
12855:20241101:185640.989 End of vmware_job_get() queue:3 type:none
12857:20241101:185641.679 In vmware_job_get() queue:3
12857:20241101:185641.679 End of vmware_job_get() queue:3 type:none
12859:20241101:185641.988 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000966 sec]'
12859:20241101:185641.988 In vmware_job_get() queue:3
12859:20241101:185641.988 End of vmware_job_get() queue:3 type:none
12861:20241101:185641.989 In vmware_job_get() queue:3
12861:20241101:185641.989 End of vmware_job_get() queue:3 type:none
12855:20241101:185641.989 In vmware_job_get() queue:3
12855:20241101:185641.989 End of vmware_job_get() queue:3 type:none
12857:20241101:185642.679 In vmware_job_get() queue:3
12857:20241101:185642.679 End of vmware_job_get() queue:3 type:none
12859:20241101:185642.988 In vmware_job_get() queue:3
12859:20241101:185642.989 End of vmware_job_get() queue:3 type:none
12861:20241101:185642.989 In vmware_job_get() queue:3
12861:20241101:185642.989 End of vmware_job_get() queue:3 type:none
12855:20241101:185642.989 In vmware_job_get() queue:3
12855:20241101:185642.989 End of vmware_job_get() queue:3 type:none
12857:20241101:185643.679 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001287 sec]'
12857:20241101:185643.679 In vmware_job_get() queue:3
12857:20241101:185643.679 End of vmware_job_get() queue:2 type:update_tags
12857:20241101:185643.679 In vmware_job_exec() type:update_tags
12857:20241101:185643.679 End of vmware_job_exec() type:update_tags ret:FAIL
12857:20241101:185643.679 In vmware_job_schedule() queue:2 type:update_tags
12857:20241101:185643.679 End of vmware_job_schedule() type:update_tags nextcheck:18:57:43
12857:20241101:185643.680 In vmware_job_get() queue:3
12857:20241101:185643.680 End of vmware_job_get() queue:2 type:update_conf
12857:20241101:185643.680 In vmware_job_exec() type:update_conf
12857:20241101:185643.680 In zbx_vmware_service_update() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12857:20241101:185643.680 In vmware_service_cust_query_prep() cust_queries:0
12857:20241101:185643.680 End of vmware_service_cust_query_prep() cq_values:0
12857:20241101:185643.680 In vmware_service_cust_query_prep() cust_queries:0
12857:20241101:185643.680 End of vmware_service_cust_query_prep() cq_values:0
12857:20241101:185643.680 In vmware_service_authenticate() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12857:20241101:185643.735 vmware_service_authenticate() SOAP response:
[SOAP LoginResponse body; XML tags lost in this capture. Recoverable fields: session key 5287cdb7-f05c-42b8-f431-94c3151cf2b8, user VSPHERE.LOCAL\zabbix, login/last-active time 2024-11-01T18:56:43.742055Z, locale en, extensionSession false, client address 10.50.242.76]
12857:20241101:185643.735 End of vmware_service_authenticate():SUCCEED
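
The successful login above is an ordinary vSphere SDK SOAP call against the endpoint from this log (https://10.50.242.10/sdk) with the user zabbix@vsphere.local. A rough, hedged sketch of such a Login request in Python: the envelope follows the standard vim25 SessionManager operation rather than Zabbix's exact C/libcurl code, and the password is a placeholder:

    import requests

    VCENTER = "https://10.50.242.10/sdk"   # SDK endpoint taken from this log
    LOGIN = """<?xml version="1.0" encoding="UTF-8"?>
    <soapenv:Envelope xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
      <soapenv:Body>
        <Login xmlns="urn:vim25">
          <_this type="SessionManager">SessionManager</_this>
          <userName>zabbix@vsphere.local</userName>
          <password>REPLACE_ME</password>
        </Login>
      </soapenv:Body>
    </soapenv:Envelope>"""

    # verify=False mirrors a lab with a self-signed certificate; avoid it in production.
    resp = requests.post(VCENTER, data=LOGIN,
                         headers={"Content-Type": "text/xml; charset=utf-8"},
                         verify=False)
    print(resp.status_code)   # 200 plus a LoginResponse/UserSession body on success
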
12857:20241101:185643.738 vmware_service_get_contents() SOAP response:
[SOAP ServiceContent body; XML tags lost in this capture. Recoverable fields: root folder group-d1, about info "VMware vCenter Server 8.0.3 build-24322831" (VMware, Inc., version 8.0.3, build 24322831, linux-x64, product vpx, apiType VirtualCenter, apiVersion 8.0.3.0, instanceUuid 9a31b4b0-64a6-48e1-919a-e9f7ca1668b6), followed by manager references such as propertyCollector, ViewManager, SessionManager, AuthorizationManager, PerfMgr, ScheduledTaskManager, AlarmManager, EventManager, TaskManager, ExtensionManager, LicenseManager, DVSManager, OvfManager, HostProfileManager, StorageResourceManager, guestOperationsManager, certificateManager and IoFilterManager]
12857:20241101:185643.738 In vmware_service_get_perf_counters()
12857:20241101:185643.759 vmware_service_get_perf_counters() SOAP response:
[SOAP response body with the PerfMgr perfCounter catalogue; XML tags lost in this capture. Each entry carries a numeric counter key, a description, the counter name, its group (cpu, mem, disk, net, sys, power, storageAdapter, storagePath, virtualDisk, datastore, managementAgent, clusterServices), a unit (percent, megaHertz, kiloBytes, kiloBytesPerSecond, megaBytesPerSecond, millisecond, microsecond, nanosecond, number, watt, joule, second), a rollup type (none, average, minimum, maximum, summation, latest) and a statistics type (absolute, rate, delta); the counter keys resolved earlier in this log all come from this catalogue]
for this VDSthroughput.pktsRxBroadcastNetworknetNumbernumberaverageabsolute33235Count of dropped transmitted packets for this VDSthroughput.droppedTxNetworknetNumbernumberaverageabsolute33236Count of dropped received packets for this VDSthroughput.droppedRxNetworknetNumbernumberaverageabsolute33237The rate of transmitted packets for this DVPortthroughput.vds.pktsTxNetworknetNumbernumberaverageabsolute33238The rate of transmitted multicast packets for this DVPortthroughput.vds.pktsTxMcastNetworknetNumbernumberaverageabsolute33239The rate of transmitted broadcast packets for this DVPortthroughput.vds.pktsTxBcastNetworknetNumbernumberaverageabsolute33240The rate of received packets for this DVPortthroughput.vds.pktsRxNetworknetNumbernumberaverageabsolute33241The rate of received multicast packets for this DVPortthroughput.vds.pktsRxMcastNetworknetNumbernumberaverageabsolute33242The rate of received broadcast packets for this DVPortthroughput.vds.pktsRxBcastNetworknetNumbernumberaverageabsolute33243Count of dropped transmitted packets for this DVPortthroughput.vds.droppedTxNetworknetNumbernumberaverageabsolute33244Count of dropped received packets for this DVPortthroughput.vds.droppedRxNetworknetNumbernumberaverageabsolute33245The rate of transmitted packets for this LAGthroughput.vds.lagTxNetworknetNumbernumberaverageabsolute33246The rate of transmitted Multicast packets for this LAGthroughput.vds.lagTxMcastNetworknetNumbernumberaverageabsolute33247The rate of transmitted Broadcast packets for this LAGthroughput.vds.lagTxBcastNetworknetNumbernumberaverageabsolute33248The rate of received packets for this LAGthroughput.vds.lagRxNetworknetNumbernumberaverageabsolute33249The rate of received multicast packets for this LAGthroughput.vds.lagRxMcastNetworknetNumbernumberaverageabsolute33250The rate of received Broadcast packets for this LAGthroughput.vds.lagRxBcastNetworknetNumbernumberaverageabsolute33251Count of dropped transmitted packets for this LAGthroughput.vds.lagDropTxNetworknetNumbernumberaverageabsolute33252Count of dropped received packets for this LAGthroughput.vds.lagDropRxNetworknetNumbernumberaverageabsolute33253Number of virtual machine power on operationsnumPoweronVirtual machine operationsvmopNumbernumberlatestabsolute13254Number of virtual machine power off operationsnumPoweroffVirtual machine operationsvmopNumbernumberlatestabsolute13255Number of virtual machine suspend operationsnumSuspendVirtual machine operationsvmopNumbernumberlatestabsolute13256Number of virtual machine reset operationsnumResetVirtual machine operationsvmopNumbernumberlatestabsolute13257Number of virtual machine guest reboot operationsnumRebootGuestVirtual machine operationsvmopNumbernumberlatestabsolute13258Number of virtual machine standby guest operationsnumStandbyGuestVirtual machine operationsvmopNumbernumberlatestabsolute13259Number of virtual machine guest shutdown operationsnumShutdownGuestVirtual machine operationsvmopNumbernumberlatestabsolute13260Number of virtual machine create operationsnumCreateVirtual machine operationsvmopNumbernumberlatestabsolute13261Number of virtual machine delete operationsnumDestroyVirtual machine operationsvmopNumbernumberlatestabsolute13262Number of virtual machine register operationsnumRegisterVirtual machine operationsvmopNumbernumberlatestabsolute13263Number of virtual machine unregister operationsnumUnregisterVirtual machine operationsvmopNumbernumberlatestabsolute13264Number of virtual machine reconfigure operationsnumReconfigureVirtual machine 
operationsvmopNumbernumberlatestabsolute13265Number of virtual machine clone operationsnumCloneVirtual machine operationsvmopNumbernumberlatestabsolute13266Number of virtual machine template deploy operationsnumDeployVirtual machine operationsvmopNumbernumberlatestabsolute13267Number of host change operations for powered-off and suspended VMsnumChangeHostVirtual machine operationsvmopNumbernumberlatestabsolute13268Number of datastore change operations for powered-off and suspended virtual machinesnumChangeDSVirtual machine operationsvmopNumbernumberlatestabsolute13269Number of host and datastore change operations for powered-off and suspended virtual machinesnumChangeHostDSVirtual machine operationsvmopNumbernumberlatestabsolute13270Number of migrations with vMotion (host change operations for powered-on VMs)numVMotionVirtual machine operationsvmopNumbernumberlatestabsolute13271Number of migrations with Storage vMotion (datastore change operations for powered-on VMs)numSVMotionVirtual machine operationsvmopNumbernumberlatestabsolute13272Number of host and datastore change operations for powered-on and suspended virtual machinesnumXVMotionVirtual machine operationsvmopNumbernumberlatestabsolute13273Total available CPU resources of all hosts within a clustereffectivecpuCluster servicesclusterServicesMegahertzmegaHertzaveragerate13274Total amount of machine memory of all hosts in the cluster that is available for use for virtual machine memory and overhead memoryeffectivememCluster servicesclusterServicesMegabytemegaBytesaverageabsolute13275Total amount of CPU resources of all hosts in the clustertotalmhzCPUcpuMegahertzmegaHertzaveragerate13276Total amount of host physical memory of all hosts in the cluster that is available for virtual machine memory (physical memory for use by the guest OS) and virtual machine overhead memorytotalmbMemorymemMegabytemegaBytesaverageabsolute13277DRS score of the clusterclusterDrsScoreCluster servicesclusterServicesPercentagepercentlatestabsolute11278vSphere HA number of failures that can be toleratedfailoverCluster servicesclusterServicesNumbernumberlatestabsolute13279The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentaverageabsolute13280The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesaverageabsolute13281The amount of GPU memory reserved in kilobytesmem.reservedGPUgpuKilobytekiloByteslatestabsolute13282The power used by a GPU in wattspower.usedGPUgpuWattwattlatestabsolute13283The temperature of a GPU in degrees celsiustemperatureGPUgpuTemperature in degrees Celsiuscelsiusaverageabsolute13284The total amount of GPU memory in kilobytesmem.totalGPUgpuKilobytekiloByteslatestabsolute13285Amount of space actually used by the virtual machine or the datastoreusedDiskdiskKilobytekiloByteslatestabsolute11286Amount of storage set aside for use by a datastore or a virtual machineprovisionedDiskdiskKilobytekiloByteslatestabsolute11287Configured size of the datastorecapacityDiskdiskKilobytekiloByteslatestabsolute13288Amount of space associated exclusively with a virtual machineunsharedDiskdiskKilobytekiloByteslatestabsolute11289Amount of disk actually used on the datastoreactualusedDiskdiskMegabytemegaByteslatestabsolute23290Storage overhead of a virtual machine or a datastore due to delta disk backingsdeltausedDiskdiskKilobytekiloByteslatestabsolute23291Virtual disk: The maximum capacity size of the virtual disk. 
Virtual machine: The provisioned size of all virtual disks plus snapshot files and the swap file, if the VM is running. Datastore: The maximum capacity of the datastore. POD: The maximum capacity of all datastores in the POD.capacity.provisionedDiskdiskKilobytekiloBytesaverageabsolute44292The amount of storage capacity currently being consumed by the entity or on the entity.capacity.usageDiskdiskKilobytekiloBytesaverageabsolute44293The amount of storage capacity overcommitment for the entity, measured in percent.capacity.contentionDiskdiskPercentagepercentaverageabsolute44294The latency of an activation operation in vCenter ServeractivationlatencystatsvCenter debugging informationvcDebugInfoMillisecondmillisecondmaximumabsolute44295The latency of an activation operation in vCenter ServeractivationlatencystatsvCenter debugging informationvcDebugInfoMillisecondmillisecondminimumabsolute44296The latency of an activation operation in vCenter ServeractivationlatencystatsvCenter debugging informationvcDebugInfoMillisecondmillisecondsummationabsolute11297Activation operations in vCenter ServeractivationstatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44298Activation operations in vCenter ServeractivationstatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44299Activation operations in vCenter ServeractivationstatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11300Total size of in-memory cache of blocks (buffer cache) read in from block devices (i.e., disk devices) on the system where vCenter Server is runningbufferszvCenter resource usage informationvcResourcesKilobytekiloBytesaverageabsolute44301Total size of in-memory caches of pages (page cache) for files from on-disk and in-memory filesystems on the system where vCenter Server is runningcacheszvCenter resource usage informationvcResourcesKilobytekiloBytesaverageabsolute44302Number of context switches per second on the system where vCenter Server is runningctxswitchesratevCenter resource usage informationvcResourcesNumbernumberaveragerate11303Disk sectors read per second over last sampling interval (typically 60 seconds) on the system where vCenter Server is runningdiskreadsectorratevCenter resource usage informationvcResourcesNumbernumberaveragerate44304Number of disk reads per second on the system where vCenter Server is runningdiskreadsratevCenter resource usage informationvcResourcesNumbernumberaveragerate11305Disk sectors written per second over last sampling interval (typically 60 seconds) on the system where vCenter Server is runningdiskwritesectorratevCenter resource usage informationvcResourcesNumbernumberaveragerate44306Number of disk writes per second on the system where vCenter Server is runningdiskwritesratevCenter resource usage informationvcResourcesNumbernumberaveragerate11307The latency of a host sync operation in vCenter ServerhostsynclatencystatsvCenter debugging informationvcDebugInfoMillisecondmillisecondmaximumabsolute44308The latency of a host sync operation in vCenter ServerhostsynclatencystatsvCenter debugging informationvcDebugInfoMillisecondmillisecondminimumabsolute44309The latency of a host sync operation in vCenter ServerhostsynclatencystatsvCenter debugging informationvcDebugInfoMillisecondmillisecondsummationabsolute11310The number of host sync operations in vCenter ServerhostsyncstatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44311The number of host sync operations in vCenter ServerhostsyncstatsvCenter debugging 
informationvcDebugInfoNumbernumberminimumabsolute44312The number of host sync operations in vCenter ServerhostsyncstatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11313vCenter Server inventory statisticsinventorystatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44314vCenter Server inventory statisticsinventorystatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44315vCenter Server inventory statisticsinventorystatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11316vCenter Server locking statisticslockstatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44317vCenter Server locking statisticslockstatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44318vCenter Server locking statisticslockstatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11319vCenter Server LRO statisticslrostatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44320vCenter Server LRO statisticslrostatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44321vCenter Server LRO statisticslrostatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11322Miscellaneous statisticsmiscstatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44323Miscellaneous statisticsmiscstatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44324Miscellaneous statisticsmiscstatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11325Managed object reference counts in vCenter ServermorefregstatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44326Managed object reference counts in vCenter ServermorefregstatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44327Managed object reference counts in vCenter ServermorefregstatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11328Rate of the number of total packets received per second on the system where vCenter Server is runningpacketrecvratevCenter resource usage informationvcResourcesNumbernumberaveragerate11329Number of total packets sent per second on the system where vCenter Server is runningpacketsentratevCenter resource usage informationvcResourcesNumbernumberaveragerate11330Total system CPU used on the system where vCenter Server in runningsystemcpuusagevCenter resource usage informationvcResourcesPercentagepercentaveragerate11331Number of page faults per second on the system where vCenter Server is runningpagefaultratevCenter resource usage informationvcResourcesNumbernumberaveragerate11332Physical memory used by vCenterphysicalmemusagevCenter resource usage informationvcResourcesKilobytekiloBytesaverageabsolute11333CPU used by vCenter Server in privileged modepriviledgedcpuusagevCenter resource usage informationvcResourcesPercentagepercentaveragerate11334Object counts in vCenter ServerscoreboardvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44335Object counts in vCenter ServerscoreboardvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44336Object counts in vCenter ServerscoreboardvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute33337The statistics of client sessions connected to vCenter ServersessionstatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44338The statistics of client sessions connected to vCenter ServersessionstatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44339The statistics of 
client sessions connected to vCenter ServersessionstatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11340Number of systems calls made per second on the system where vCenter Server is runningsyscallsratevCenter resource usage informationvcResourcesNumbernumberaveragerate11341The statistics of vCenter Server as a running system such as thread statistics and heap statisticssystemstatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44342The statistics of vCenter Server as a running system such as thread statistics and heap statisticssystemstatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44343The statistics of vCenter Server as a running system such as thread statistics and heap statisticssystemstatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11344CPU used by vCenter Server in user modeusercpuusagevCenter resource usage informationvcResourcesPercentagepercentaveragerate11345vCenter service statistics such as events, alarms, and tasksvcservicestatsvCenter debugging informationvcDebugInfoNumbernumbermaximumabsolute44346vCenter service statistics such as events, alarms, and tasksvcservicestatsvCenter debugging informationvcDebugInfoNumbernumberminimumabsolute44347vCenter service statistics such as events, alarms, and tasksvcservicestatsvCenter debugging informationvcDebugInfoNumbernumbersummationabsolute11348Virtual memory used by vCenter ServervirtualmemusagevCenter resource usage informationvcResourcesKilobytekiloBytesaverageabsolute11349Average number of outstanding read requests to the virtual disk during the collection intervalreadOIOVirtual diskvirtualDiskNumbernumberlatestabsolute22350Average number of outstanding write requests to the virtual disk during the collection intervalwriteOIOVirtual diskvirtualDiskNumbernumberlatestabsolute22351Storage DRS virtual disk metric for the read workload modelreadLoadMetricVirtual diskvirtualDiskNumbernumberlatestabsolute22352Storage DRS virtual disk metric for the write workload modelwriteLoadMetricVirtual diskvirtualDiskNumbernumberlatestabsolute22353CPU active average over 1 minuteactav1Resource group CPUrescpuPercentagepercentlatestabsolute33354Storage DRS datastore bytes readdatastoreReadBytesDatastoredatastoreNumbernumberlatestabsolute22355Storage DRS datastore bytes writtendatastoreWriteBytesDatastoredatastoreNumbernumberlatestabsolute22356Storage DRS datastore read I/O ratedatastoreReadIopsDatastoredatastoreNumbernumberlatestabsolute13357Storage DRS datastore write I/O ratedatastoreWriteIopsDatastoredatastoreNumbernumberlatestabsolute13358Storage DRS datastore outstanding read requestsdatastoreReadOIODatastoredatastoreNumbernumberlatestabsolute13359Storage DRS datastore outstanding write requestsdatastoreWriteOIODatastoredatastoreNumbernumberlatestabsolute13360Storage DRS datastore normalized read latencydatastoreNormalReadLatencyDatastoredatastoreNumbernumberlatestabsolute22361Storage DRS datastore normalized write latencydatastoreNormalWriteLatencyDatastoredatastoreNumbernumberlatestabsolute22362Storage DRS datastore metric for read workload modeldatastoreReadLoadMetricDatastoredatastoreNumbernumberlatestabsolute44363Storage DRS datastore metric for write workload modeldatastoreWriteLoadMetricDatastoredatastoreNumbernumberlatestabsolute44364The average datastore latency as seen by virtual machinesdatastoreVMObservedLatencyDatastoredatastoreMicrosecondmicrosecondlatestabsolute13365Number of Storage reservation conflicts for the LUN as a percent of total 
commands during the collection intervalscsiReservationCnflctsPctDiskdiskPercentagepercentaveragerate44366Average number of kilobytes read from the disk each second during the collection intervalreadDiskdiskNumbernumberlatestabsolute44367Number of failed reads on the diskreadFailedDiskdiskNumbernumberlatestabsolute44368Average number of kilobytes written to disk each second during the collection intervalwriteDiskdiskNumbernumberlatestabsolute44369Number of failed writes on the diskwriteFailedDiskdiskNumbernumberlatestabsolute44370Number of successful commands on the diskcommands.successDiskdiskNumbernumberlatestabsolute44371Number of failed commands on the diskcommands.failedDiskdiskNumbernumberlatestabsolute44372Number of queued commands on the diskcommands.queuedDiskdiskNumbernumberlatestabsolute44373Number of active commands on the diskcommands.activeDiskdiskNumbernumberlatestabsolute44374Current state of devicestateDiskdiskNumbernumberlatestabsolute44375Total number of aborts on a diskTM.abortDiskdiskNumbernumberlatestabsolute44376Total number of aborts retries on a diskTM.abortRetryDiskdiskNumbernumberlatestabsolute44377Total number of failed aborts on a diskTM.abortFailedDiskdiskNumbernumberlatestabsolute44378Total number of virt resets TM.virtResetDiskdiskNumbernumberlatestabsolute44379Total number of virt-reset retries TM.virtResetRetryDiskdiskNumbernumberlatestabsolute44380Total number of failed virt-resetsTM.virtResetFailedDiskdiskNumbernumberlatestabsolute44381Total number of lun resets TM.lunResetDiskdiskNumbernumberlatestabsolute44382Total number of lun-reset retries TM.lunResetRetryDiskdiskNumbernumberlatestabsolute44383Total number of failed lun-resetsTM.lunResetFailedDiskdiskNumbernumberlatestabsolute44384Total number of device resets TM.deviceResetDiskdiskNumbernumberlatestabsolute44385Total number of device-reset retries TM.deviceResetRetryDiskdiskNumbernumberlatestabsolute44386Total number of failed device-resetsTM.deviceResetFailedDiskdiskNumbernumberlatestabsolute44387Total number of bus resets TM.busResetDiskdiskNumbernumberlatestabsolute44388Total number of bus-reset retries TM.busResetRetryDiskdiskNumbernumberlatestabsolute44389Total number of failed bus-resetsTM.busResetFailedDiskdiskNumbernumberlatestabsolute44390Average time, in microseconds, spent by Queue to process each Storage commandlatency.qavgDiskdiskMicrosecondmicrosecondlatestabsolute44391Average time, in microseconds, spent by Device to process each Storage commandlatency.davgDiskdiskMicrosecondmicrosecondlatestabsolute44392Average time, in microseconds, spent by kernel to process each Storage commandlatency.kavgDiskdiskMicrosecondmicrosecondlatestabsolute44393Average time, in microseconds, spent by Guest to process each Storage commandlatency.gavgDiskdiskMicrosecondmicrosecondlatestabsolute44394The number of I/Os that have been issued but have not yet completedoutstandingIOsStorage adapterstorageAdapterNumbernumberlatestabsolute44395The current number of I/Os that are waiting to be issuedqueuedStorage adapterstorageAdapterNumbernumberlatestabsolute44396The maximum number of I/Os that can be outstanding at a given timequeueDepthStorage adapterstorageAdapterNumbernumberlatestabsolute44397The percentage HT partner usage per physical CPUpartnerBusyTimeCPUcpuPercentagepercentaveragerate44398CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentaveragerate23399The number of 
virtual processors provisioned to the entitycorecount.provisionedCPUcpuNumbernumberlatestabsolute44400The amount of L3 cache the VM usescache.l3.occupancyCPUcpuKilobytekiloBytesaverageabsolute44401The number of virtual processors running on the hostcorecount.usageCPUcpuNumbernumberlatestabsolute44402CPU load average over the past 1 minute, sampled on every 6 secondsload.avg1minCPUcpuPercentagepercentlatestabsolute44403CPU load average over the past 5 minutes, sampled on every 6 secondsload.avg5minCPUcpuPercentagepercentlatestabsolute44404CPU load average over the past 15 minutes, sampled on every 6 secondsload.avg15minCPUcpuPercentagepercentlatestabsolute44405Total amount of memory available to the hostcapacity.provisionedMemorymemMegabytemegaByteslatestabsolute44406Percent of memory that has been reserved either through VMkernel use, by userworlds or due to VM memory reservationsreservedCapacityPctMemorymemPercentagepercentlatestabsolute44407Ratio of total requested memory and the managed memory minus 1 over the past 1 minuteovercommit.avg1minMemorymemNumbernumberlatestabsolute44408Ratio of total requested memory and the managed memory minus 1 over the past 5 minutesovercommit.avg5minMemorymemNumbernumberlatestabsolute44409Ratio of total requested memory and the managed memory minus 1 over the past 15 minutesovercommit.avg15minMemorymemNumbernumberlatestabsolute44410Total amount of machine memory on the ESXi hostphysical.totalMemorymemMegabytemegaByteslatestabsolute44411Amount of machine memory being used by everything other than VMkernelphysical.userMemorymemMegabytemegaByteslatestabsolute44412Amount of machine memory that is free on the ESXi hostphysical.freeMemorymemMegabytemegaByteslatestabsolute44413Total amount of machine memory managed by VMkernelkernel.managedMemorymemMegabytemegaByteslatestabsolute44414Mininum amount of machine memory that VMkernel likes to keep freekernel.minfreeMemorymemMegabytemegaByteslatestabsolute44415Amount of machine memory that is currently unreservedkernel.unreservedMemorymemMegabytemegaByteslatestabsolute44416Amount of physical memory that is being sharedpshare.sharedMemorymemMegabytemegaByteslatestabsolute44417Amount of machine memory that is common across World(s)pshare.commonMemorymemMegabytemegaByteslatestabsolute44418Amount of machine memory saved due to page-sharingpshare.sharedSaveMemorymemMegabytemegaByteslatestabsolute44419Current swap usageswap.currentMemorymemMegabytemegaByteslatestabsolute44420Where ESXi expects the reclaimed memory using swapping and compression to beswap.targetMemorymemMegabytemegaByteslatestabsolute44421Rate at which memory is swapped in by ESXi from diskswap.readrateMemorymemMegabytes per secondmegaBytesPerSecondaveragerate44422Rate at which memory is swapped to disk by the ESXiswap.writerateMemorymemMegabytes per secondmegaBytesPerSecondaveragerate44423Total compressed physical memoryzip.zippedMemorymemMegabytemegaByteslatestabsolute44424Saved memory by compressionzip.savedMemorymemMegabytemegaByteslatestabsolute44425Total amount of physical memory reclaimed using the vmmemctl modulesmemctl.currentMemorymemMegabytemegaByteslatestabsolute44426Total amount of physical memory ESXi would like to reclaim using the vmmemctl modulesmemctl.targetMemorymemMegabytemegaByteslatestabsolute44427Maximum amount of physical memory ESXi can reclaim using the vmmemctl modulesmemctl.maxMemorymemMegabytemegaByteslatestabsolute44428Memory reservation health state, 2->Red, 
1->Greenhealth.reservationStateMemorymemNumbernumberlatestabsolute44429Amount of Overhead memory actively usedcapacity.overheadMemorymemMegabytemegaBytesaverageabsolute44430Amount of OverheadResv memorycapacity.overheadResvMemorymemMegabytemegaBytesaverageabsolute44431Per tier consumed memory. This value is expressed in megabytescapacity.consumedMemorymemMegabytemegaByteslatestabsolute44432Per tier active memory. This value is expressed in megabytescapacity.activeMemorymemMegabytemegaByteslatestabsolute44433Current CPU power usagecapacity.usageCpuPowerpowerWattwattaverageabsolute44434Current memory power usagecapacity.usageMemPowerpowerWattwattaverageabsolute44435Current other power usagecapacity.usageOtherPowerpowerWattwattaverageabsolute44436vmkernel.downtimeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44437downtimeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44438precopy.timeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44439rttMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44440dst.migration.timeMigration of powered on VMvmotionSecondsecondlatestabsolute44441mem.sizembMigration of powered on VMvmotionMegabytemegaByteslatestabsolute44442Current number of replicated virtual machinesvmsvSphere ReplicationhbrNumbernumberlatestabsolute44443Average amount of data received per secondthroughput.hbr.inboundNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44444Average amount of data transmitted per secondthroughput.hbr.outboundNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44445Average disk read latency seen by vSphere Replicationhbr.readLatencyMSVirtual diskvirtualDiskMillisecondmillisecondlatestabsolute44446Average guest I/O stall introduced by vSphere Replicationhbr.stallLatencyMSVirtual diskvirtualDiskMillisecondmillisecondlatestabsolute44447Average latency seen by vSphere Replicationlatency.hbr.outboundNetworknetMillisecondmillisecondlatestabsolute44448Number of Lightweight Delta (LWD) snapshots takennumSnapshotsvSphere Data Protection (LWD)lwdNumbernumberlatestabsolute44449APD state of the nfs volumeapdStateNFSnfsNumbernumberlatestabsolute44450Cumulative read issue time on NFS volumereadIssueTimeNFSnfsMicrosecondmicrosecondlatestabsolute44451Cumulative write issue time on NFS volumewriteIssueTimeNFSnfsMicrosecondmicrosecondlatestabsolute44452Total reads on NFS volumetotalReadsNFSnfsNumbernumberlatestabsolute44453Total reads failed on NFS volumereadsFailedNFSnfsNumbernumberlatestabsolute44454Total writes on NFS volumetotalWritesNFSnfsNumbernumberlatestabsolute44455Total writes failed on NFS volumewritesFailedNFSnfsNumbernumberlatestabsolute44456Cumulative readTime on NFS volumereadTimeNFSnfsMicrosecondmicrosecondlatestabsolute44457Cumulative writeTime on NFS volumewriteTimeNFSnfsMicrosecondmicrosecondlatestabsolute44458Total IO requests queued in NFS volumeioRequestsQueuedNFSnfsNumbernumberlatestabsolute44459Total create calls on NFS volumetotalCreateNFSnfsNumbernumberlatestabsolute44460Total create calls failed on NFS volumecreateFailedNFSnfsNumbernumberlatestabsolute44461Number of times we hit into socket buffer out of space condition for NFS volumesocketBufferFullNFSnfsNumbernumberlatestabsolute44462Total journal transactions on VMFS volumevmfs.totalTxnDatastoredatastoreNumbernumberlatestabsolute44463Total cancelled journal transactions on VMFS volumevmfs.cancelledTxnDatastoredatastoreNumbernumberlatestabsolute44464Current APD state of the VMFS 
volumevmfs.apdStateDatastoredatastoreNumbernumberlatestabsolute44465Total apd timeout events received on the VMFS volumevmfs.apdCountDatastoredatastoreNumbernumberlatestabsolute44466vVol PE is accessiblepe.isaccessiblevVol object related statsvvolNumbernumberlatestabsolute44467Total no. of read cmds done on vVol PEpe.reads.donevVol object related statsvvolNumbernumberlatestabsolute44468Total no. of write cmds done on vVol PEpe.writes.donevVol object related statsvvolNumbernumberlatestabsolute44469Total no. of cmds done on vVol PEpe.total.donevVol object related statsvvolNumbernumberlatestabsolute44470Total no. of read cmds sent on vVol PEpe.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44471Total no. of write cmds sent on vVol PEpe.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44472Total no. of cmds sent on vVol PEpe.total.sentvVol object related statsvvolNumbernumberlatestabsolute44473No. of read cmds issued on vVol PE that failedpe.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44474No. of write cmds issued on vVol PE that failedpe.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44475Total no. of cmds issued on vVol PE that failedpe.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44476Total no. of read cmds failed on vVol PEpe.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44477Total no. of write cmds failed on vVol PEpe.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44478Total no. of cmds failed on vVol PEpe.total.failedvVol object related statsvvolNumbernumberlatestabsolute44479Cumulative latency of successful reads on vVol PEpe.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44480Cumulative latency of successful writes on vVol PEpe.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44481Cumulative latency of cmds that failed before issue on vVol PEpe.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44482Cumulative latency of all issued cmds on vVol PEpe.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44483Total no. of cancel cmds sent on vVol PEpe.cancel.sentvVol object related statsvvolNumbernumberlatestabsolute44484Total no. of cancel cmds failed on vVol PEpe.cancel.failedvVol object related statsvvolNumbernumberlatestabsolute44485Total no. of device reset cmds sent on vVol PEpe.deviceresets.sentvVol object related statsvvolNumbernumberlatestabsolute44486Total no. of device reset cmds failed on vVol PEpe.deviceresets.failedvVol object related statsvvolNumbernumberlatestabsolute44487Total no. of reset cmds sent on vVol PEpe.resets.sentvVol object related statsvvolNumbernumberlatestabsolute44488Total no. of reset cmds failed on vVol PEpe.resets.failedvVol object related statsvvolNumbernumberlatestabsolute44489Total no. of unmap cmds sent on vVol PEpe.unmaps.sentvVol object related statsvvolNumbernumberlatestabsolute44490Total no. of unmap cmds failed on vVol PEpe.unmaps.failedvVol object related statsvvolNumbernumberlatestabsolute44491Total no. of read cmds done by vVol Containercontainer.reads.donevVol object related statsvvolNumbernumberlatestabsolute44492Total no. of write cmds done by vVol Containercontainer.writes.donevVol object related statsvvolNumbernumberlatestabsolute44493Total no. of cmds done by vVol Containercontainer.total.donevVol object related statsvvolNumbernumberlatestabsolute44494Total no. 
of read cmds sent by vVol Containercontainer.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44495Total no. of write cmds sent by vVol Containercontainer.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44496Total no. of cmds sent by vVol Containercontainer.total.sentvVol object related statsvvolNumbernumberlatestabsolute44497No. of read cmds issued by vVol Container that failedcontainer.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44498No. of write cmds issued by vVol Container that failedcontainer.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44499Total no. of cmds issued by vVol Container that failedcontainer.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44500Total no. of read cmds failed by vVol Containercontainer.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44501Container:Total no. of write cmds failed by vVol Containercontainer.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44502Total no. of cmds failed by vVol Containercontainer.total.failedvVol object related statsvvolNumbernumberlatestabsolute44503Cumulative latency of successful reads by vVol Containercontainer.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44504Cumulative latency of successful writes by vVol Containercontainer.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44505Cumulative latency of cmds that failed before issue by vVol Containercontainer.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44506Cumulative latency of all issued cmds by vVol Containercontainer.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44507Total no. of read cmds done by vVol Devicedevice.reads.donevVol object related statsvvolNumbernumberlatestabsolute44508Total no. of write cmds done by vVol Devicedevice.writes.donevVol object related statsvvolNumbernumberlatestabsolute44509Total no. of cmds done by vVol Devicedevice.total.donevVol object related statsvvolNumbernumberlatestabsolute44510Total no. of read cmds sent by vVol Devicedevice.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44511Total no. of write cmds sent by vVol Devicedevice.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44512Total no. of cmds sent by vVol Devicedevice.total.sentvVol object related statsvvolNumbernumberlatestabsolute44513No. of read cmds issued by vVol Device that faileddevice.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44514No. of write cmds issued by vVol Device that faileddevice.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44515Total no. of cmds issued by vVol Device that faileddevice.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44516Total no. of read cmds failed by vVol Devicedevice.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44517Total no. of write cmds failed by vVol Devicedevice.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44518Total no. 
of cmds failed by vVol Devicedevice.total.failedvVol object related statsvvolNumbernumberlatestabsolute44519Cumulative latency of successful reads by vVol Devicedevice.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44520Cumulative latency of successful writes by vVol Devicedevice.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44521Cumulative latency of cmds that failed before issue by vVol Devicedevice.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44522Cumulative latency of all issued cmds by vVol Devicedevice.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44523Total no. of cancel cmds sent by vVol Devicedevice.cancel.sentvVol object related statsvvolNumbernumberlatestabsolute44524Total no. of cancel cmds failed by vVol Devicedevice.cancel.failedvVol object related statsvvolNumbernumberlatestabsolute44525Total no. of device reset cmds sent by vVol Devicedevice.deviceresets.sentvVol object related statsvvolNumbernumberlatestabsolute44526Total no. of device reset cmds failed by vVol Devicedevice.deviceresets.failedvVol object related statsvvolNumbernumberlatestabsolute44527Total no. of reset cmds sent by vVol Devicedevice.resets.sentvVol object related statsvvolNumbernumberlatestabsolute44528Total no. of reset cmds failed by vVol Devicedevice.resets.failedvVol object related statsvvolNumbernumberlatestabsolute44529Total no. of unmap cmds sent by vVol Devicedevice.unmaps.sentvVol object related statsvvolNumbernumberlatestabsolute44530Total no. of unmap cmds failed by vVol Devicedevice.unmaps.failedvVol object related statsvvolNumbernumberlatestabsolute44531CPU time spent waiting for swap-inswapwaitCPUcpuMillisecondmillisecondsummationdelta33532CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentnonerate44533CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentmaximumrate44534CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentminimumrate44535CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentnonerate44536CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentaveragerate23537CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentmaximumrate44538CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentminimumrate44539Total CPU capacity reserved by and available for virtual machinestotalCapacityCPUcpuMegahertzmegaHertzaverageabsolute23540Percent of time the virtual machine is unable to run because it is contending for 
access to the physical CPU(s)latencyCPUcpuPercentagepercentaveragerate23541CPU resources devoted by the ESX schedulerentitlementCPUcpuMegahertzmegaHertzlatestabsolute23542The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limitdemandCPUcpuMegahertzmegaHertzaverageabsolute23543Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraintscostopCPUcpuMillisecondmillisecondsummationdelta23544Time the virtual machine is ready to run, but is not run due to maxing out its CPU limit settingmaxlimitedCPUcpuMillisecondmillisecondsummationdelta23545Time the virtual machine was interrupted to perform system services on behalf of itself or other virtual machinesoverlapCPUcpuMillisecondmillisecondsummationdelta33546Time the virtual machine is scheduled to runrunCPUcpuMillisecondmillisecondsummationdelta23547CPU resource entitlement to CPU demand ratio (in percents)demandEntitlementRatioCPUcpuPercentagepercentlatestabsolute44548Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPUreadinessCPUcpuPercentagepercentaveragerate44549Virtual CPU usage as a percentage during the intervalusage.vcpusCPUcpuPercentagepercentaveragerate44550Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesnoneabsolute44551Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesaverageabsolute23552Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesmaximumabsolute44553Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesminimumabsolute44554Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesnoneabsolute44555Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesaverageabsolute23556Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesmaximumabsolute44557Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesminimumabsolute44558Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesnoneabsolute44559Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesaverageabsolute23560Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesmaximumabsolute44561Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesminimumabsolute44562Amount of guest physical memory that is being actively written by guest. 
Activeness is estimated by ESXiactivewriteMemorymemKilobytekiloBytesaverageabsolute23563Host physical memory reserved by ESXi, for its data structures, for running the virtual machineoverheadMaxMemorymemKilobytekiloBytesaverageabsolute23564Total reservation, available and consumed, for powered-on virtual machinestotalCapacityMemorymemMegabytemegaBytesaverageabsolute23565Amount of guest physical memory pages compressed by ESXizippedMemorymemKilobytekiloByteslatestabsolute23566Host physical memory, reclaimed from a virtual machine, by memory compression. This value is less than the value of 'Compressed' memoryzipSavedMemorymemKilobytekiloByteslatestabsolute23567Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memorylatencyMemorymemPercentagepercentaverageabsolute23568Amount of host physical memory the virtual machine deserves, as determined by ESXientitlementMemorymemKilobytekiloBytesaverageabsolute23569Threshold of free host physical memory below which ESXi will begin actively reclaiming memory from virtual machines by swapping, compression and ballooninglowfreethresholdMemorymemKilobytekiloBytesaverageabsolute23570Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesnoneabsolute44571Rate at which guest physical memory is swapped in from the host swap cachellSwapInRateMemorymemKilobytes per secondkiloBytesPerSecondaveragerate23572Rate at which guest physical memory is swapped out to the host swap cachellSwapOutRateMemorymemKilobytes per secondkiloBytesPerSecondaveragerate23573Estimate of the host physical memory, from Overhead consumed, that is actively read or written to by ESXioverheadTouchedMemorymemKilobytekiloBytesaverageabsolute44574Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesaverageabsolute44575Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesmaximumabsolute44576Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesminimumabsolute44577Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesnoneabsolute44578Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesaverageabsolute44579Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesmaximumabsolute44580Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesminimumabsolute44581Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesnoneabsolute44582Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesaverageabsolute44583Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesmaximumabsolute44584Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesminimumabsolute44585Space used for holding VMFS Pointer Blocks in memoryvmfs.pbc.sizeMemorymemMegabytemegaByteslatestabsolute44586Maximum size the VMFS Pointer Block Cache can grow tovmfs.pbc.sizeMaxMemorymemMegabytemegaByteslatestabsolute44587Amount of file blocks whose addresses are cached in the VMFS PB Cachevmfs.pbc.workingSetMemorymemTerabyteteraByteslatestabsolute44588Maximum amount of file blocks whose 
[tag-stripped fragment of the vCenter PerformanceManager counter catalog returned to the collector; each flattened entry carries a description, counter name, group label/key, unit label/key, rollup type, stats type, level, per-device level, and numeric counter ID. The fragment begins and ends mid-entry; its complete entries span counter IDs 589-731 and cover the vmfs.pbc, disk, net, sys (resource*), rescpu, managementAgent, storagePath, virtualDisk, datastore, hbr (vSphere Replication), vflashModule, vsanDomObj, gpu, pmem, and vmx counter groups.]
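The lines that follow show the collector registering each counter under two keys, "group/name[rollup]" and "group/name[rollup,statstype]", with both keys resolving to the same numeric counter ID reported by vCenter (for example cpu/usage[average] and cpu/usage[average,rate] both map to 2). A minimal Python sketch of that indexing pattern, using only values visible in this log; CounterDef and build_counter_index are hypothetical names for illustration, not Zabbix's actual internals:

    # Illustrative sketch: index counters by both the short and the full path,
    # mirroring the paired "adding performance counter" log lines below.
    from dataclasses import dataclass

    @dataclass
    class CounterDef:
        group: str        # e.g. "cpu"
        name: str         # e.g. "usage"
        rollup: str       # e.g. "average"
        stats_type: str   # e.g. "rate"
        counter_id: int   # numeric ID reported by vCenter

    def build_counter_index(defs):
        """Map both "group/name[rollup]" and "group/name[rollup,statstype]" to the counter ID."""
        index = {}
        for d in defs:
            path = f"{d.group}/{d.name}"
            index[f"{path}[{d.rollup}]"] = d.counter_id
            index[f"{path}[{d.rollup},{d.stats_type}]"] = d.counter_id
        return index

    if __name__ == "__main__":
        # Sample entries taken from the log lines below (IDs 2 and 98).
        defs = [
            CounterDef("cpu", "usage", "average", "rate", 2),
            CounterDef("mem", "consumed", "average", "absolute", 98),
        ]
        for key, cid in build_counter_index(defs).items():
            print(f"adding performance counter {key}:{cid}")

Run as-is, the sketch prints the same "adding performance counter cpu/usage[average]:2" style pairs that the collector logs below.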
12857:20241101:185643.766 adding performance counter cpu/usage[none]:1
12857:20241101:185643.766 adding performance counter cpu/usage[none,rate]:1
12857:20241101:185643.766 adding performance counter cpu/usage[average]:2
12857:20241101:185643.766 adding performance counter cpu/usage[average,rate]:2
12857:20241101:185643.766 adding performance counter cpu/usage[minimum]:3
12857:20241101:185643.766 adding performance counter cpu/usage[minimum,rate]:3
12857:20241101:185643.766 adding performance counter cpu/usage[maximum]:4
12857:20241101:185643.766 adding performance counter cpu/usage[maximum,rate]:4
12857:20241101:185643.767 adding performance counter cpu/usagemhz[none]:5
12857:20241101:185643.767 adding performance counter cpu/usagemhz[none,rate]:5
12857:20241101:185643.767 adding performance counter cpu/usagemhz[average]:6
12857:20241101:185643.767 adding performance counter cpu/usagemhz[average,rate]:6
12857:20241101:185643.767 adding performance counter cpu/usagemhz[minimum]:7
12857:20241101:185643.767 adding performance counter cpu/usagemhz[minimum,rate]:7
12857:20241101:185643.767 adding performance counter cpu/usagemhz[maximum]:8
12857:20241101:185643.767 adding performance counter cpu/usagemhz[maximum,rate]:8
12857:20241101:185643.767 adding performance counter cpu/reservedCapacity[average]:9
12857:20241101:185643.767 adding performance counter cpu/reservedCapacity[average,absolute]:9
12857:20241101:185643.767 adding performance counter cpu/system[summation]:10
12857:20241101:185643.767 adding performance counter cpu/system[summation,delta]:10
12857:20241101:185643.767 adding performance counter cpu/wait[summation]:11
12857:20241101:185643.767 adding performance counter cpu/wait[summation,delta]:11
12857:20241101:185643.767 adding performance counter cpu/ready[summation]:12
12857:20241101:185643.767 adding performance counter cpu/ready[summation,delta]:12
12857:20241101:185643.767 adding performance counter cpu/idle[summation]:13
12857:20241101:185643.767 adding performance counter cpu/idle[summation,delta]:13
12857:20241101:185643.767 adding performance counter cpu/used[summation]:14
12857:20241101:185643.767 adding performance counter cpu/used[summation,delta]:14
12857:20241101:185643.768 adding performance counter cpu/capacity.provisioned[average]:15
12857:20241101:185643.768 adding performance counter cpu/capacity.provisioned[average,absolute]:15
12857:20241101:185643.768 adding performance counter cpu/capacity.entitlement[average]:16
12857:20241101:185643.768 adding performance counter cpu/capacity.entitlement[average,absolute]:16
12857:20241101:185643.768 adding performance counter cpu/capacity.usage[average]:17
12857:20241101:185643.768 adding performance counter cpu/capacity.usage[average,rate]:17
12857:20241101:185643.768 adding performance counter cpu/capacity.demand[average]:18
12857:20241101:185643.768 adding performance counter cpu/capacity.demand[average,absolute]:18
12857:20241101:185643.768 adding performance counter cpu/capacity.contention[average]:19
12857:20241101:185643.768 adding performance counter cpu/capacity.contention[average,rate]:19
12857:20241101:185643.768 adding performance counter cpu/corecount.provisioned[average]:20
12857:20241101:185643.768 adding performance counter cpu/corecount.provisioned[average,absolute]:20
12857:20241101:185643.768 adding performance counter cpu/corecount.usage[average]:21
12857:20241101:185643.768 adding performance counter cpu/corecount.usage[average,absolute]:21
12857:20241101:185643.768 adding performance counter cpu/corecount.contention[average]:22
12857:20241101:185643.768 adding performance counter cpu/corecount.contention[average,rate]:22
12857:20241101:185643.768 adding performance counter mem/usage[none]:23
12857:20241101:185643.768 adding performance counter mem/usage[none,absolute]:23
12857:20241101:185643.768 adding performance counter mem/usage[average]:24
12857:20241101:185643.768 adding performance counter mem/usage[average,absolute]:24
12857:20241101:185643.769 adding performance counter mem/usage[minimum]:25
12857:20241101:185643.769 adding performance counter mem/usage[minimum,absolute]:25
12857:20241101:185643.769 adding performance counter mem/usage[maximum]:26
12857:20241101:185643.769 adding performance counter mem/usage[maximum,absolute]:26
12857:20241101:185643.769 adding performance counter mem/reservedCapacity[average]:27
12857:20241101:185643.769 adding performance counter mem/reservedCapacity[average,absolute]:27
12857:20241101:185643.769 adding performance counter mem/granted[none]:28
12857:20241101:185643.769 adding performance counter mem/granted[none,absolute]:28
12857:20241101:185643.769 adding performance counter mem/granted[average]:29
12857:20241101:185643.769 adding performance counter mem/granted[average,absolute]:29
12857:20241101:185643.769 adding performance counter mem/granted[minimum]:30
12857:20241101:185643.769 adding performance counter mem/granted[minimum,absolute]:30
12857:20241101:185643.769 adding performance counter mem/granted[maximum]:31
12857:20241101:185643.769 adding performance counter mem/granted[maximum,absolute]:31
12857:20241101:185643.769 adding performance counter mem/active[none]:32
12857:20241101:185643.769 adding performance counter mem/active[none,absolute]:32
12857:20241101:185643.769 adding performance counter mem/active[average]:33
12857:20241101:185643.769 adding performance counter mem/active[average,absolute]:33
12857:20241101:185643.769 adding performance counter mem/active[minimum]:34
12857:20241101:185643.769 adding performance counter mem/active[minimum,absolute]:34
12857:20241101:185643.770 adding performance counter mem/active[maximum]:35
12857:20241101:185643.770 adding performance counter mem/active[maximum,absolute]:35
12857:20241101:185643.770 adding performance counter mem/shared[none]:36
12857:20241101:185643.770 adding performance counter mem/shared[none,absolute]:36
12857:20241101:185643.770 adding performance counter mem/shared[average]:37
12857:20241101:185643.770 adding performance counter mem/shared[average,absolute]:37
12857:20241101:185643.770 adding performance counter mem/shared[minimum]:38
12857:20241101:185643.770 adding performance counter mem/shared[minimum,absolute]:38
12857:20241101:185643.770 adding performance counter mem/shared[maximum]:39
12857:20241101:185643.770 adding performance counter mem/shared[maximum,absolute]:39
12857:20241101:185643.770 adding performance counter mem/zero[none]:40
12857:20241101:185643.770 adding performance counter mem/zero[none,absolute]:40
12857:20241101:185643.770 adding performance counter mem/zero[average]:41
12857:20241101:185643.770 adding performance counter mem/zero[average,absolute]:41
12857:20241101:185643.770 adding performance counter mem/zero[minimum]:42
12857:20241101:185643.770 adding performance counter mem/zero[minimum,absolute]:42
12857:20241101:185643.770 adding performance counter mem/zero[maximum]:43
12857:20241101:185643.770 adding performance counter mem/zero[maximum,absolute]:43
12857:20241101:185643.770 adding performance counter mem/unreserved[none]:44
12857:20241101:185643.770 adding performance counter mem/unreserved[none,absolute]:44
12857:20241101:185643.771 adding performance counter mem/unreserved[average]:45
12857:20241101:185643.771 adding performance counter mem/unreserved[average,absolute]:45
12857:20241101:185643.771 adding performance counter mem/unreserved[minimum]:46
12857:20241101:185643.771 adding performance counter mem/unreserved[minimum,absolute]:46
12857:20241101:185643.771 adding performance counter mem/unreserved[maximum]:47
12857:20241101:185643.771 adding performance counter mem/unreserved[maximum,absolute]:47
12857:20241101:185643.771 adding performance counter mem/swapused[none]:48
12857:20241101:185643.771 adding performance counter mem/swapused[none,absolute]:48
12857:20241101:185643.771 adding performance counter mem/swapused[average]:49
12857:20241101:185643.771 adding performance counter mem/swapused[average,absolute]:49
12857:20241101:185643.771 adding performance counter mem/swapused[minimum]:50
12857:20241101:185643.771 adding performance counter mem/swapused[minimum,absolute]:50
12857:20241101:185643.771 adding performance counter mem/swapused[maximum]:51
12857:20241101:185643.771 adding performance counter mem/swapused[maximum,absolute]:51
12857:20241101:185643.771 adding performance counter mem/swapunreserved[none]:52
12857:20241101:185643.771 adding performance counter mem/swapunreserved[none,absolute]:52
12857:20241101:185643.771 adding performance counter mem/swapunreserved[average]:53
12857:20241101:185643.771 adding performance counter mem/swapunreserved[average,absolute]:53
12857:20241101:185643.771 adding performance counter mem/swapunreserved[minimum]:54
12857:20241101:185643.771 adding performance counter mem/swapunreserved[minimum,absolute]:54
12857:20241101:185643.771 adding performance counter mem/swapunreserved[maximum]:55
12857:20241101:185643.772 adding performance counter mem/swapunreserved[maximum,absolute]:55
12857:20241101:185643.772 adding performance counter mem/sharedcommon[none]:56
12857:20241101:185643.772 adding performance counter mem/sharedcommon[none,absolute]:56
12857:20241101:185643.772 adding performance counter mem/sharedcommon[average]:57
12857:20241101:185643.772 adding performance counter mem/sharedcommon[average,absolute]:57
12857:20241101:185643.772 adding performance counter mem/sharedcommon[minimum]:58
12857:20241101:185643.772 adding performance counter mem/sharedcommon[minimum,absolute]:58
12857:20241101:185643.772 adding performance counter mem/sharedcommon[maximum]:59
12857:20241101:185643.772 adding performance counter mem/sharedcommon[maximum,absolute]:59
12857:20241101:185643.772 adding performance counter mem/heap[none]:60
12857:20241101:185643.772 adding performance counter mem/heap[none,absolute]:60
12857:20241101:185643.772 adding performance counter mem/heap[average]:61
12857:20241101:185643.772 adding performance counter mem/heap[average,absolute]:61
12857:20241101:185643.772 adding performance counter mem/heap[minimum]:62
12857:20241101:185643.772 adding performance counter mem/heap[minimum,absolute]:62
12857:20241101:185643.772 adding performance counter mem/heap[maximum]:63
12857:20241101:185643.772 adding performance counter mem/heap[maximum,absolute]:63
12857:20241101:185643.772 adding performance counter mem/heapfree[none]:64
12857:20241101:185643.772 adding performance counter mem/heapfree[none,absolute]:64
12857:20241101:185643.773 adding performance counter mem/heapfree[average]:65
12857:20241101:185643.773 adding performance counter mem/heapfree[average,absolute]:65
12857:20241101:185643.773 adding performance counter mem/heapfree[minimum]:66
12857:20241101:185643.773 adding performance counter mem/heapfree[minimum,absolute]:66
12857:20241101:185643.773 adding performance counter mem/heapfree[maximum]:67
12857:20241101:185643.773 adding performance counter mem/heapfree[maximum,absolute]:67
12857:20241101:185643.773 adding performance counter mem/state[latest]:68
12857:20241101:185643.773 adding performance counter mem/state[latest,absolute]:68
12857:20241101:185643.773 adding performance counter mem/swapped[none]:69
12857:20241101:185643.773 adding performance counter mem/swapped[none,absolute]:69
12857:20241101:185643.773 adding performance counter mem/swapped[average]:70
12857:20241101:185643.773 adding performance counter mem/swapped[average,absolute]:70
12857:20241101:185643.773 adding performance counter mem/swapped[minimum]:71
12857:20241101:185643.773 adding performance counter mem/swapped[minimum,absolute]:71
12857:20241101:185643.773 adding performance counter mem/swapped[maximum]:72
12857:20241101:185643.773 adding performance counter mem/swapped[maximum,absolute]:72
12857:20241101:185643.773 adding performance counter mem/swaptarget[none]:73
12857:20241101:185643.773 adding performance counter mem/swaptarget[none,absolute]:73
12857:20241101:185643.773 adding performance counter mem/swaptarget[average]:74
12857:20241101:185643.773 adding performance counter mem/swaptarget[average,absolute]:74
12857:20241101:185643.773 adding performance counter mem/swaptarget[minimum]:75
12857:20241101:185643.774 adding performance counter mem/swaptarget[minimum,absolute]:75
12857:20241101:185643.774 adding performance counter mem/swaptarget[maximum]:76
12857:20241101:185643.774 adding performance counter mem/swaptarget[maximum,absolute]:76
12857:20241101:185643.774 adding performance counter mem/swapIn[none]:77
12857:20241101:185643.774 adding performance counter mem/swapIn[none,absolute]:77
12857:20241101:185643.774 adding performance counter mem/swapIn[average]:78
12857:20241101:185643.774 adding performance counter mem/swapIn[average,absolute]:78
12857:20241101:185643.774 adding performance counter mem/swapIn[minimum]:79
12857:20241101:185643.774 adding performance counter mem/swapIn[minimum,absolute]:79
12857:20241101:185643.774 adding performance counter mem/swapIn[maximum]:80
12857:20241101:185643.774 adding performance counter mem/swapIn[maximum,absolute]:80
12857:20241101:185643.774 adding performance counter mem/swapOut[none]:81
12857:20241101:185643.774 adding performance counter mem/swapOut[none,absolute]:81
12857:20241101:185643.774 adding performance counter mem/swapOut[average]:82
12857:20241101:185643.774 adding performance counter mem/swapOut[average,absolute]:82
12857:20241101:185643.774 adding performance counter mem/swapOut[minimum]:83
12857:20241101:185643.774 adding performance counter mem/swapOut[minimum,absolute]:83
12857:20241101:185643.774 adding performance counter mem/swapOut[maximum]:84
12857:20241101:185643.774 adding performance counter mem/swapOut[maximum,absolute]:84
12857:20241101:185643.774 adding performance counter mem/swapinRate[average]:85
12857:20241101:185643.774 adding performance counter mem/swapinRate[average,rate]:85
12857:20241101:185643.775 adding performance counter mem/swapoutRate[average]:86
12857:20241101:185643.775 adding performance counter mem/swapoutRate[average,rate]:86
12857:20241101:185643.775 adding performance counter managementAgent/swapOut[average]:87
12857:20241101:185643.775 adding performance counter managementAgent/swapOut[average,rate]:87
12857:20241101:185643.775 adding performance counter managementAgent/swapIn[average]:88
12857:20241101:185643.775 adding performance counter managementAgent/swapIn[average,rate]:88
12857:20241101:185643.775 adding performance counter mem/vmmemctl[none]:89
12857:20241101:185643.775 adding performance counter mem/vmmemctl[none,absolute]:89
12857:20241101:185643.775 adding performance counter mem/vmmemctl[average]:90
12857:20241101:185643.775 adding performance counter mem/vmmemctl[average,absolute]:90
12857:20241101:185643.775 adding performance counter mem/vmmemctl[minimum]:91
12857:20241101:185643.775 adding performance counter mem/vmmemctl[minimum,absolute]:91
12857:20241101:185643.775 adding performance counter mem/vmmemctl[maximum]:92
12857:20241101:185643.775 adding performance counter mem/vmmemctl[maximum,absolute]:92
12857:20241101:185643.775 adding performance counter mem/vmmemctltarget[none]:93
12857:20241101:185643.775 adding performance counter mem/vmmemctltarget[none,absolute]:93
12857:20241101:185643.775 adding performance counter mem/vmmemctltarget[average]:94
12857:20241101:185643.775 adding performance counter mem/vmmemctltarget[average,absolute]:94
12857:20241101:185643.775 adding performance counter mem/vmmemctltarget[minimum]:95
12857:20241101:185643.775 adding performance counter mem/vmmemctltarget[minimum,absolute]:95
12857:20241101:185643.776 adding performance counter mem/vmmemctltarget[maximum]:96
12857:20241101:185643.776 adding performance counter mem/vmmemctltarget[maximum,absolute]:96
12857:20241101:185643.776 adding performance counter mem/consumed[none]:97
12857:20241101:185643.776 adding performance counter mem/consumed[none,absolute]:97
12857:20241101:185643.776 adding performance counter mem/consumed[average]:98
12857:20241101:185643.776 adding performance counter mem/consumed[average,absolute]:98
12857:20241101:185643.776 adding performance counter mem/consumed[minimum]:99
12857:20241101:185643.776 adding performance counter mem/consumed[minimum,absolute]:99
12857:20241101:185643.776 adding performance counter mem/consumed[maximum]:100
12857:20241101:185643.776 adding performance counter mem/consumed[maximum,absolute]:100
12857:20241101:185643.776 adding performance counter mem/overhead[none]:101
12857:20241101:185643.776 adding performance counter mem/overhead[none,absolute]:101
12857:20241101:185643.776 adding performance counter mem/overhead[average]:102
12857:20241101:185643.776 adding performance counter mem/overhead[average,absolute]:102
12857:20241101:185643.776 adding performance counter mem/overhead[minimum]:103
12857:20241101:185643.776 adding performance counter mem/overhead[minimum,absolute]:103
12857:20241101:185643.776 adding performance counter mem/overhead[maximum]:104
12857:20241101:185643.776 adding performance counter mem/overhead[maximum,absolute]:104
12857:20241101:185643.776 adding performance counter mem/compressed[average]:105
12857:20241101:185643.776 adding performance counter mem/compressed[average,absolute]:105
12857:20241101:185643.776 adding performance counter mem/compressionRate[average]:106
12857:20241101:185643.776 adding performance counter mem/compressionRate[average,rate]:106
12857:20241101:185643.777 adding performance counter mem/decompressionRate[average]:107
12857:20241101:185643.777 adding performance counter mem/decompressionRate[average,rate]:107
12857:20241101:185643.777 adding performance counter mem/capacity.provisioned[average]:108
12857:20241101:185643.777 adding performance counter mem/capacity.provisioned[average,absolute]:108
12857:20241101:185643.777 adding performance counter mem/capacity.entitlement[average]:109
12857:20241101:185643.777 adding performance counter mem/capacity.entitlement[average,absolute]:109
12857:20241101:185643.777 adding performance counter mem/capacity.usable[average]:110
12857:20241101:185643.777 adding performance counter mem/capacity.usable[average,absolute]:110
12857:20241101:185643.777 adding performance counter mem/capacity.usage[average]:111
12857:20241101:185643.777 adding performance counter mem/capacity.usage[average,absolute]:111
12857:20241101:185643.777 adding performance counter mem/capacity.contention[average]:112
12857:20241101:185643.777 adding performance counter mem/capacity.contention[average,rate]:112
12857:20241101:185643.777 adding performance counter mem/capacity.usage.vm[average]:113
12857:20241101:185643.777 adding performance counter mem/capacity.usage.vm[average,absolute]:113
12857:20241101:185643.777 adding performance counter mem/capacity.usage.vmOvrhd[average]:114
12857:20241101:185643.777 adding performance counter mem/capacity.usage.vmOvrhd[average,absolute]:114
12857:20241101:185643.777 adding performance counter mem/capacity.usage.vmkOvrhd[average]:115
12857:20241101:185643.777 adding performance counter mem/capacity.usage.vmkOvrhd[average,absolute]:115
12857:20241101:185643.777 adding performance counter mem/capacity.usage.userworld[average]:116
12857:20241101:185643.777 adding performance counter mem/capacity.usage.userworld[average,absolute]:116
12857:20241101:185643.778 adding performance counter mem/reservedCapacity.vm[average]:117
12857:20241101:185643.778 adding performance counter mem/reservedCapacity.vm[average,absolute]:117
12857:20241101:185643.778 adding performance counter mem/reservedCapacity.vmOvhd[average]:118
12857:20241101:185643.778 adding performance counter mem/reservedCapacity.vmOvhd[average,absolute]:118
12857:20241101:185643.778 adding performance counter mem/reservedCapacity.vmkOvrhd[average]:119
12857:20241101:185643.778 adding performance counter mem/reservedCapacity.vmkOvrhd[average,absolute]:119
12857:20241101:185643.778 adding performance counter mem/reservedCapacity.userworld[average]:120
12857:20241101:185643.778 adding performance counter mem/reservedCapacity.userworld[average,absolute]:120
12857:20241101:185643.778 adding performance counter mem/reservedCapacityPct[average]:121
12857:20241101:185643.778 adding performance counter mem/reservedCapacityPct[average,absolute]:121
12857:20241101:185643.778 adding performance counter mem/consumed.vms[average]:122
12857:20241101:185643.778 adding performance counter mem/consumed.vms[average,absolute]:122
12857:20241101:185643.778 adding performance counter mem/consumed.userworlds[average]:123
12857:20241101:185643.778 adding performance counter mem/consumed.userworlds[average,absolute]:123
12857:20241101:185643.778 adding performance counter mem/bandwidth.read[latest]:124
12857:20241101:185643.778 adding performance counter mem/bandwidth.read[latest,absolute]:124
12857:20241101:185643.778 adding performance counter mem/bandwidth.write[latest]:125
12857:20241101:185643.778 adding performance counter mem/bandwidth.write[latest,absolute]:125
12857:20241101:185643.778 adding performance counter mem/bandwidth.total[latest]:126
12857:20241101:185643.778 adding performance counter mem/bandwidth.total[latest,absolute]:126
12857:20241101:185643.778 adding performance counter mem/vm.bandwidth.read[latest]:127
12857:20241101:185643.779 adding performance counter mem/vm.bandwidth.read[latest,absolute]:127
12857:20241101:185643.779 adding performance counter mem/missrate[latest]:128
12857:20241101:185643.779 adding performance counter mem/missrate[latest,absolute]:128
12857:20241101:185643.779 adding performance counter mem/latency.read[latest]:129
12857:20241101:185643.779 adding performance counter mem/latency.read[latest,absolute]:129
12857:20241101:185643.779 adding performance counter mem/latency.write[latest]:130
12857:20241101:185643.779 adding performance counter mem/latency.write[latest,absolute]:130
12857:20241101:185643.779 adding performance counter disk/usage[none]:131
12857:20241101:185643.779 adding performance counter disk/usage[none,rate]:131
12857:20241101:185643.779 adding performance counter disk/usage[average]:132
12857:20241101:185643.779 adding performance counter disk/usage[average,rate]:132
12857:20241101:185643.779 adding performance counter disk/usage[minimum]:133
12857:20241101:185643.779 adding performance counter disk/usage[minimum,rate]:133
12857:20241101:185643.779 adding performance counter disk/usage[maximum]:134
12857:20241101:185643.779 adding performance counter disk/usage[maximum,rate]:134
12857:20241101:185643.779 adding performance counter disk/numberRead[summation]:135
12857:20241101:185643.779 adding performance counter disk/numberRead[summation,delta]:135
12857:20241101:185643.779 adding performance counter disk/numberWrite[summation]:136
12857:20241101:185643.779 adding performance counter disk/numberWrite[summation,delta]:136
12857:20241101:185643.779 adding performance counter disk/read[average]:137
12857:20241101:185643.779 adding performance counter disk/read[average,rate]:137
12857:20241101:185643.780 adding performance counter disk/write[average]:138
12857:20241101:185643.780 adding performance counter disk/write[average,rate]:138
12857:20241101:185643.780 adding performance counter disk/totalLatency[average]:139
12857:20241101:185643.780 adding performance counter disk/totalLatency[average,absolute]:139
12857:20241101:185643.780 adding performance counter disk/maxTotalLatency[latest]:140
12857:20241101:185643.780 adding performance counter disk/maxTotalLatency[latest,absolute]:140
12857:20241101:185643.780 adding performance counter disk/commandsAborted[summation]:141
12857:20241101:185643.780 adding performance counter disk/commandsAborted[summation,delta]:141
12857:20241101:185643.780 adding performance counter disk/busResets[summation]:142
12857:20241101:185643.780 adding performance counter disk/busResets[summation,delta]:142
12857:20241101:185643.780 adding performance counter disk/numberReadAveraged[average]:143
12857:20241101:185643.780 adding performance counter disk/numberReadAveraged[average,rate]:143
12857:20241101:185643.780 adding performance counter disk/numberWriteAveraged[average]:144
12857:20241101:185643.780 adding performance counter disk/numberWriteAveraged[average,rate]:144
12857:20241101:185643.780 adding performance counter disk/throughput.usage[average]:145
12857:20241101:185643.780 adding performance counter disk/throughput.usage[average,rate]:145
12857:20241101:185643.780 adding performance counter disk/throughput.contention[average]:146
12857:20241101:185643.780 adding performance counter disk/throughput.contention[average,absolute]:146
12857:20241101:185643.780 adding performance counter disk/scsiReservationConflicts[summation]:147
12857:20241101:185643.780 adding performance counter disk/scsiReservationConflicts[summation,delta]:147
12857:20241101:185643.780 adding performance counter disk/scsiReservationCnflctsPct[average]:148
12857:20241101:185643.781 adding performance counter disk/scsiReservationCnflctsPct[average,absolute]:148
12857:20241101:185643.781 adding performance counter net/usage[none]:149
12857:20241101:185643.781 adding performance counter net/usage[none,rate]:149
12857:20241101:185643.781 adding performance counter net/usage[average]:150
12857:20241101:185643.781 adding performance counter net/usage[average,rate]:150
12857:20241101:185643.781 adding performance counter net/usage[minimum]:151
12857:20241101:185643.781 adding performance counter net/usage[minimum,rate]:151
12857:20241101:185643.781 adding performance counter net/usage[maximum]:152
12857:20241101:185643.781 adding performance counter net/usage[maximum,rate]:152
12857:20241101:185643.781 adding performance counter net/packetsRx[summation]:153
12857:20241101:185643.781 adding performance counter net/packetsRx[summation,delta]:153
12857:20241101:185643.781 adding performance counter net/packetsTx[summation]:154
12857:20241101:185643.781 adding performance counter net/packetsTx[summation,delta]:154
12857:20241101:185643.781 adding performance counter net/received[average]:155
12857:20241101:185643.781 adding performance counter net/received[average,rate]:155
12857:20241101:185643.781 adding performance counter net/transmitted[average]:156
12857:20241101:185643.781 adding performance counter net/transmitted[average,rate]:156
12857:20241101:185643.781 adding performance counter net/throughput.provisioned[average]:157
12857:20241101:185643.781 adding performance counter net/throughput.provisioned[average,absolute]:157
12857:20241101:185643.781 adding performance counter net/throughput.usable[average]:158
12857:20241101:185643.781 adding performance counter net/throughput.usable[average,absolute]:158
12857:20241101:185643.782 adding performance counter net/throughput.usage[average]:159
12857:20241101:185643.782 adding performance counter net/throughput.usage[average,rate]:159
12857:20241101:185643.782 adding performance counter net/throughput.contention[summation]:160
12857:20241101:185643.782 adding performance counter net/throughput.contention[summation,delta]:160
12857:20241101:185643.782 adding performance counter net/throughput.packetsPerSec[average]:161
12857:20241101:185643.782 adding performance counter net/throughput.packetsPerSec[average,rate]:161
12857:20241101:185643.782 adding performance counter sys/uptime[latest]:162
12857:20241101:185643.782 adding performance counter sys/uptime[latest,absolute]:162
12857:20241101:185643.782 adding performance counter sys/heartbeat[summation]:163
12857:20241101:185643.782 adding performance counter sys/heartbeat[summation,delta]:163
12857:20241101:185643.782 adding performance counter power/power[average]:164
12857:20241101:185643.782 adding performance counter power/power[average,rate]:164
12857:20241101:185643.782 adding performance counter power/powerCap[average]:165
12857:20241101:185643.782 adding performance counter power/powerCap[average,absolute]:165
12857:20241101:185643.782 adding performance counter power/energy[summation]:166
12857:20241101:185643.782 adding performance counter power/energy[summation,delta]:166
12857:20241101:185643.782 adding performance counter power/capacity.usagePct[average]:167
12857:20241101:185643.782 adding performance counter power/capacity.usagePct[average,absolute]:167
12857:20241101:185643.782 adding performance counter storageAdapter/commandsAveraged[average]:168
12857:20241101:185643.782 adding performance counter storageAdapter/commandsAveraged[average,rate]:168
12857:20241101:185643.783 adding performance counter storageAdapter/numberReadAveraged[average]:169
12857:20241101:185643.783 adding performance counter storageAdapter/numberReadAveraged[average,rate]:169
12857:20241101:185643.783 adding performance counter storageAdapter/numberWriteAveraged[average]:170
12857:20241101:185643.783 adding performance counter storageAdapter/numberWriteAveraged[average,rate]:170
12857:20241101:185643.783 adding performance counter storageAdapter/read[average]:171
12857:20241101:185643.783 adding performance counter storageAdapter/read[average,rate]:171
12857:20241101:185643.783 adding performance counter storageAdapter/write[average]:172
12857:20241101:185643.783 adding performance counter storageAdapter/write[average,rate]:172
12857:20241101:185643.783 adding performance counter storageAdapter/totalReadLatency[average]:173
12857:20241101:185643.783 adding performance counter storageAdapter/totalReadLatency[average,absolute]:173
12857:20241101:185643.783 adding performance counter storageAdapter/totalWriteLatency[average]:174
12857:20241101:185643.783 adding performance counter storageAdapter/totalWriteLatency[average,absolute]:174
12857:20241101:185643.783 adding performance counter storageAdapter/maxTotalLatency[latest]:175
12857:20241101:185643.783 adding performance counter storageAdapter/maxTotalLatency[latest,absolute]:175
12857:20241101:185643.783 adding performance counter storageAdapter/throughput.cont[average]:176
12857:20241101:185643.783 adding performance counter storageAdapter/throughput.cont[average,absolute]:176
12857:20241101:185643.783 adding performance counter storageAdapter/OIOsPct[average]:177
12857:20241101:185643.783 adding performance counter storageAdapter/OIOsPct[average,absolute]:177
12857:20241101:185643.783 adding performance counter virtualDisk/numberReadAveraged[average]:178
12857:20241101:185643.783 adding performance counter virtualDisk/numberReadAveraged[average,rate]:178
12857:20241101:185643.783 adding performance counter virtualDisk/numberWriteAveraged[average]:179
12857:20241101:185643.783 adding performance counter virtualDisk/numberWriteAveraged[average,rate]:179
12857:20241101:185643.784 adding performance counter virtualDisk/read[average]:180
12857:20241101:185643.784 adding performance counter virtualDisk/read[average,rate]:180
12857:20241101:185643.784 adding performance counter virtualDisk/write[average]:181
12857:20241101:185643.784 adding performance counter virtualDisk/write[average,rate]:181
12857:20241101:185643.784 adding performance counter virtualDisk/totalReadLatency[average]:182
12857:20241101:185643.784 adding performance counter virtualDisk/totalReadLatency[average,absolute]:182
12857:20241101:185643.784 adding performance counter virtualDisk/totalWriteLatency[average]:183
12857:20241101:185643.784 adding performance counter virtualDisk/totalWriteLatency[average,absolute]:183
12857:20241101:185643.784 adding performance counter virtualDisk/throughput.cont[average]:184
12857:20241101:185643.784 adding performance counter virtualDisk/throughput.cont[average,absolute]:184
12857:20241101:185643.784 adding performance counter datastore/numberReadAveraged[average]:185
12857:20241101:185643.784 adding performance counter datastore/numberReadAveraged[average,rate]:185
12857:20241101:185643.784 adding performance counter datastore/numberWriteAveraged[average]:186
12857:20241101:185643.784 adding performance counter datastore/numberWriteAveraged[average,rate]:186
12857:20241101:185643.784 adding performance counter datastore/read[average]:187
12857:20241101:185643.784 adding performance counter datastore/read[average,rate]:187
12857:20241101:185643.784 adding performance counter datastore/write[average]:188
12857:20241101:185643.784 adding performance counter datastore/write[average,rate]:188
12857:20241101:185643.784 adding performance counter datastore/totalReadLatency[average]:189
12857:20241101:185643.784 adding performance counter datastore/totalReadLatency[average,absolute]:189
12857:20241101:185643.785 adding performance counter datastore/totalWriteLatency[average]:190
12857:20241101:185643.785 adding performance counter datastore/totalWriteLatency[average,absolute]:190
12857:20241101:185643.785 adding performance counter datastore/maxTotalLatency[latest]:191
12857:20241101:185643.785 adding performance counter datastore/maxTotalLatency[latest,absolute]:191
12857:20241101:185643.785 adding performance counter datastore/datastoreIops[average]:192
12857:20241101:185643.785 adding performance counter datastore/datastoreIops[average,absolute]:192
12857:20241101:185643.785 adding performance counter datastore/sizeNormalizedDatastoreLatency[average]:193
12857:20241101:185643.785 adding performance counter datastore/sizeNormalizedDatastoreLatency[average,absolute]:193
12857:20241101:185643.785 adding performance counter datastore/throughput.usage[average]:194
12857:20241101:185643.785 adding performance counter datastore/throughput.usage[average,absolute]:194
12857:20241101:185643.785 adding performance counter datastore/throughput.contention[average]:195
12857:20241101:185643.785 adding performance counter datastore/throughput.contention[average,absolute]:195
12857:20241101:185643.785 adding performance counter datastore/busResets[summation]:196
12857:20241101:185643.785 adding performance counter datastore/busResets[summation,delta]:196
12857:20241101:185643.785 adding performance counter datastore/commandsAborted[summation]:197
12857:20241101:185643.785 adding performance counter datastore/commandsAborted[summation,delta]:197
12857:20241101:185643.785 adding performance counter datastore/siocActiveTimePercentage[average]:198
12857:20241101:185643.785 adding performance counter datastore/siocActiveTimePercentage[average,absolute]:198
12857:20241101:185643.785 adding performance counter storagePath/throughput.cont[average]:199
12857:20241101:185643.785 adding performance counter storagePath/throughput.cont[average,absolute]:199
12857:20241101:185643.786 adding performance counter storagePath/maxTotalLatency[latest]:200
12857:20241101:185643.786 adding performance counter storagePath/maxTotalLatency[latest,absolute]:200
12857:20241101:185643.786 adding performance counter virtualDisk/throughput.usage[average]:201
12857:20241101:185643.786 adding performance counter virtualDisk/throughput.usage[average,rate]:201
12857:20241101:185643.786 adding performance counter virtualDisk/commandsAborted[summation]:202
12857:20241101:185643.786 adding performance counter virtualDisk/commandsAborted[summation,delta]:202
12857:20241101:185643.786 adding performance counter virtualDisk/busResets[summation]:203
12857:20241101:185643.786 adding performance counter virtualDisk/busResets[summation,delta]:203
12857:20241101:185643.786 adding performance counter storageAdapter/outstandingIOs[average]:204
12857:20241101:185643.786 adding performance counter storageAdapter/outstandingIOs[average,absolute]:204
12857:20241101:185643.786 adding performance counter storageAdapter/queued[average]:205
12857:20241101:185643.786 adding performance counter storageAdapter/queued[average,absolute]:205
12857:20241101:185643.786 adding performance counter storageAdapter/queueDepth[average]:206
12857:20241101:185643.786 adding performance counter storageAdapter/queueDepth[average,absolute]:206
12857:20241101:185643.786 adding performance counter storageAdapter/queueLatency[average]:207
12857:20241101:185643.786 adding performance counter storageAdapter/queueLatency[average,absolute]:207
12857:20241101:185643.786 adding performance counter storageAdapter/throughput.usag[average]:208
12857:20241101:185643.786 adding performance counter storageAdapter/throughput.usag[average,rate]:208
12857:20241101:185643.786 adding performance counter storagePath/busResets[summation]:209
12857:20241101:185643.786 adding performance counter storagePath/busResets[summation,delta]:209
12857:20241101:185643.787 adding performance counter storagePath/commandsAborted[summation]:210
12857:20241101:185643.787 adding performance counter storagePath/commandsAborted[summation,delta]:210
12857:20241101:185643.787 adding performance counter storagePath/throughput.usage[average]:211
12857:20241101:185643.787 adding performance counter storagePath/throughput.usage[average,rate]:211
12857:20241101:185643.787 adding performance counter net/throughput.usage.vm[average]:212
12857:20241101:185643.787 adding performance counter net/throughput.usage.vm[average,rate]:212
12857:20241101:185643.787 adding performance counter net/throughput.usage.nfs[average]:213
12857:20241101:185643.787 adding performance counter net/throughput.usage.nfs[average,rate]:213
12857:20241101:185643.787 adding performance counter net/throughput.usage.vmotion[average]:214
12857:20241101:185643.787 adding performance counter net/throughput.usage.vmotion[average,rate]:214
12857:20241101:185643.787 adding performance counter net/throughput.usage.ft[average]:215
12857:20241101:185643.787 adding performance counter net/throughput.usage.ft[average,rate]:215
12857:20241101:185643.787 adding performance counter net/throughput.usage.iscsi[average]:216
12857:20241101:185643.787 adding performance counter net/throughput.usage.iscsi[average,rate]:216
12857:20241101:185643.787 adding performance counter net/throughput.usage.hbr[average]:217
12857:20241101:185643.787 adding performance counter net/throughput.usage.hbr[average,rate]:217
12857:20241101:185643.787 adding performance counter power/capacity.usable[average]:218
12857:20241101:185643.787 adding performance counter power/capacity.usable[average,absolute]:218
12857:20241101:185643.787 adding performance counter power/capacity.usage[average]:219
12857:20241101:185643.787 adding performance counter power/capacity.usage[average,absolute]:219
12857:20241101:185643.787 adding performance counter power/capacity.usageIdle[average]:220
12857:20241101:185643.787 adding performance counter power/capacity.usageIdle[average,absolute]:220
12857:20241101:185643.788 adding performance counter power/capacity.usageSystem[average]:221
12857:20241101:185643.788 adding performance counter power/capacity.usageSystem[average,absolute]:221
12857:20241101:185643.788 adding performance counter power/capacity.usageVm[average]:222
12857:20241101:185643.788 adding performance counter power/capacity.usageVm[average,absolute]:222
12857:20241101:185643.788 adding performance counter power/capacity.usageStatic[average]:223
12857:20241101:185643.788 adding performance counter power/capacity.usageStatic[average,absolute]:223
12857:20241101:185643.788 adding performance counter cpu/cpuentitlement[latest]:224
12857:20241101:185643.788 adding performance counter cpu/cpuentitlement[latest,absolute]:224
12857:20241101:185643.788 adding performance counter mem/mementitlement[latest]:225
12857:20241101:185643.788 adding performance counter mem/mementitlement[latest,absolute]:225
12857:20241101:185643.788 adding performance counter clusterServices/vmDrsScore[latest]:226
12857:20241101:185643.788 adding performance counter clusterServices/vmDrsScore[latest,absolute]:226
12857:20241101:185643.788 adding performance counter clusterServices/cpufairness[latest]:227
12857:20241101:185643.788 adding performance counter clusterServices/cpufairness[latest,absolute]:227
12857:20241101:185643.788 adding performance counter clusterServices/memfairness[latest]:228
12857:20241101:185643.788 adding performance counter clusterServices/memfairness[latest,absolute]:228
12857:20241101:185643.788 adding performance counter net/throughput.pktsTx[average]:229
12857:20241101:185643.788 adding performance counter net/throughput.pktsTx[average,absolute]:229
12857:20241101:185643.788 adding performance counter net/throughput.pktsTxMulticast[average]:230
12857:20241101:185643.788 adding performance counter net/throughput.pktsTxMulticast[average,absolute]:230
12857:20241101:185643.789 adding performance counter net/throughput.pktsTxBroadcast[average]:231
12857:20241101:185643.789 adding performance counter net/throughput.pktsTxBroadcast[average,absolute]:231
12857:20241101:185643.789 adding performance counter net/throughput.pktsRx[average]:232
12857:20241101:185643.789 adding performance counter net/throughput.pktsRx[average,absolute]:232
12857:20241101:185643.789 adding performance counter net/throughput.pktsRxMulticast[average]:233
12857:20241101:185643.789 adding performance counter net/throughput.pktsRxMulticast[average,absolute]:233
12857:20241101:185643.789 adding performance counter net/throughput.pktsRxBroadcast[average]:234
12857:20241101:185643.789 adding performance counter net/throughput.pktsRxBroadcast[average,absolute]:234
12857:20241101:185643.789 adding performance counter net/throughput.droppedTx[average]:235
12857:20241101:185643.789 adding performance counter net/throughput.droppedTx[average,absolute]:235
12857:20241101:185643.789 adding performance counter net/throughput.droppedRx[average]:236
12857:20241101:185643.789 adding performance counter net/throughput.droppedRx[average,absolute]:236
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsTx[average]:237
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsTx[average,absolute]:237
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsTxMcast[average]:238
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsTxMcast[average,absolute]:238
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsTxBcast[average]:239
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsTxBcast[average,absolute]:239
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsRx[average]:240
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsRx[average,absolute]:240
12857:20241101:185643.789 adding performance counter net/throughput.vds.pktsRxMcast[average]:241
12857:20241101:185643.790 adding performance counter net/throughput.vds.pktsRxMcast[average,absolute]:241
12857:20241101:185643.790 adding performance counter net/throughput.vds.pktsRxBcast[average]:242
12857:20241101:185643.790 adding performance counter net/throughput.vds.pktsRxBcast[average,absolute]:242
12857:20241101:185643.790 adding performance counter net/throughput.vds.droppedTx[average]:243
12857:20241101:185643.790 adding performance counter net/throughput.vds.droppedTx[average,absolute]:243
12857:20241101:185643.790 adding performance counter net/throughput.vds.droppedRx[average]:244
12857:20241101:185643.790 adding performance counter net/throughput.vds.droppedRx[average,absolute]:244
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagTx[average]:245
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagTx[average,absolute]:245
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagTxMcast[average]:246
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagTxMcast[average,absolute]:246
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagTxBcast[average]:247
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagTxBcast[average,absolute]:247
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagRx[average]:248
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagRx[average,absolute]:248
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagRxMcast[average]:249
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagRxMcast[average,absolute]:249
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagRxBcast[average]:250
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagRxBcast[average,absolute]:250
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagDropTx[average]:251
12857:20241101:185643.790 adding performance counter net/throughput.vds.lagDropTx[average,absolute]:251
12857:20241101:185643.791 adding performance counter net/throughput.vds.lagDropRx[average]:252
12857:20241101:185643.791 adding performance counter net/throughput.vds.lagDropRx[average,absolute]:252
12857:20241101:185643.791 adding performance counter vmop/numPoweron[latest]:253
12857:20241101:185643.791 adding performance counter vmop/numPoweron[latest,absolute]:253
12857:20241101:185643.791 adding performance counter vmop/numPoweroff[latest]:254
12857:20241101:185643.791 adding performance counter vmop/numPoweroff[latest,absolute]:254
12857:20241101:185643.791 adding performance counter vmop/numSuspend[latest]:255
12857:20241101:185643.791 adding performance counter vmop/numSuspend[latest,absolute]:255
12857:20241101:185643.791 adding performance counter vmop/numReset[latest]:256
12857:20241101:185643.791 adding performance counter vmop/numReset[latest,absolute]:256
12857:20241101:185643.791 adding performance counter vmop/numRebootGuest[latest]:257
12857:20241101:185643.791 adding performance counter vmop/numRebootGuest[latest,absolute]:257
12857:20241101:185643.791 adding performance counter vmop/numStandbyGuest[latest]:258
12857:20241101:185643.791 adding performance counter vmop/numStandbyGuest[latest,absolute]:258
12857:20241101:185643.791 adding performance counter vmop/numShutdownGuest[latest]:259
12857:20241101:185643.791 adding performance counter vmop/numShutdownGuest[latest,absolute]:259
12857:20241101:185643.791 adding performance counter vmop/numCreate[latest]:260
12857:20241101:185643.791 adding performance counter vmop/numCreate[latest,absolute]:260
12857:20241101:185643.791 adding performance counter vmop/numDestroy[latest]:261
12857:20241101:185643.791 adding performance counter vmop/numDestroy[latest,absolute]:261
12857:20241101:185643.792 adding performance counter vmop/numRegister[latest]:262
12857:20241101:185643.792 adding performance counter vmop/numRegister[latest,absolute]:262
12857:20241101:185643.792 adding performance counter vmop/numUnregister[latest]:263
12857:20241101:185643.792 adding performance counter vmop/numUnregister[latest,absolute]:263
12857:20241101:185643.792 adding performance counter vmop/numReconfigure[latest]:264
12857:20241101:185643.792 adding performance counter vmop/numReconfigure[latest,absolute]:264
12857:20241101:185643.792 adding performance counter vmop/numClone[latest]:265
12857:20241101:185643.792 adding performance counter vmop/numClone[latest,absolute]:265
12857:20241101:185643.792 adding performance counter vmop/numDeploy[latest]:266
12857:20241101:185643.792 adding performance counter vmop/numDeploy[latest,absolute]:266
12857:20241101:185643.792 adding performance counter vmop/numChangeHost[latest]:267
12857:20241101:185643.792 adding performance counter vmop/numChangeHost[latest,absolute]:267
12857:20241101:185643.792 adding performance counter vmop/numChangeDS[latest]:268
12857:20241101:185643.792 adding performance counter vmop/numChangeDS[latest,absolute]:268
12857:20241101:185643.792 adding performance counter vmop/numChangeHostDS[latest]:269
12857:20241101:185643.792 adding performance counter vmop/numChangeHostDS[latest,absolute]:269
12857:20241101:185643.792 adding performance counter vmop/numVMotion[latest]:270
12857:20241101:185643.792 adding performance counter vmop/numVMotion[latest,absolute]:270
12857:20241101:185643.792 adding performance counter vmop/numSVMotion[latest]:271
12857:20241101:185643.792 adding performance counter vmop/numSVMotion[latest,absolute]:271
12857:20241101:185643.792 adding performance counter vmop/numXVMotion[latest]:272
12857:20241101:185643.792 adding performance counter vmop/numXVMotion[latest,absolute]:272
12857:20241101:185643.793 adding performance counter clusterServices/effectivecpu[average]:273
12857:20241101:185643.793 adding performance counter clusterServices/effectivecpu[average,rate]:273
12857:20241101:185643.793 adding performance counter clusterServices/effectivemem[average]:274
12857:20241101:185643.793 adding performance counter clusterServices/effectivemem[average,absolute]:274
12857:20241101:185643.793 adding performance counter cpu/totalmhz[average]:275
12857:20241101:185643.793 adding performance counter cpu/totalmhz[average,rate]:275
12857:20241101:185643.793 adding performance counter mem/totalmb[average]:276
12857:20241101:185643.793 adding performance counter mem/totalmb[average,absolute]:276
12857:20241101:185643.793 adding performance counter clusterServices/clusterDrsScore[latest]:277
12857:20241101:185643.793 adding performance counter clusterServices/clusterDrsScore[latest,absolute]:277
12857:20241101:185643.793 adding performance counter clusterServices/failover[latest]:278
12857:20241101:185643.793 adding performance counter clusterServices/failover[latest,absolute]:278
12857:20241101:185643.793 adding performance counter gpu/utilization[average]:279
12857:20241101:185643.793 adding performance counter gpu/utilization[average,absolute]:279
12857:20241101:185643.793 adding performance counter gpu/mem.used[average]:280
12857:20241101:185643.793 adding performance counter gpu/mem.used[average,absolute]:280
12857:20241101:185643.793 adding performance counter gpu/mem.reserved[latest]:281
12857:20241101:185643.793 adding performance counter gpu/mem.reserved[latest,absolute]:281
12857:20241101:185643.793 adding performance counter gpu/power.used[latest]:282
12857:20241101:185643.793 adding performance counter gpu/power.used[latest,absolute]:282
12857:20241101:185643.794 adding performance counter gpu/temperature[average]:283
12857:20241101:185643.794 adding performance counter gpu/temperature[average,absolute]:283
12857:20241101:185643.794 adding performance counter gpu/mem.total[latest]:284
12857:20241101:185643.794 adding performance counter gpu/mem.total[latest,absolute]:284
12857:20241101:185643.794 adding performance counter disk/used[latest]:285
12857:20241101:185643.794 adding performance counter disk/used[latest,absolute]:285
12857:20241101:185643.794 adding performance counter disk/provisioned[latest]:286
12857:20241101:185643.794 adding performance counter disk/provisioned[latest,absolute]:286
12857:20241101:185643.794 adding performance counter disk/capacity[latest]:287
12857:20241101:185643.794 adding performance counter disk/capacity[latest,absolute]:287
12857:20241101:185643.794 adding performance counter disk/unshared[latest]:288
12857:20241101:185643.794 adding performance counter disk/unshared[latest,absolute]:288
12857:20241101:185643.794 adding performance counter disk/actualused[latest]:289
12857:20241101:185643.794 adding performance counter disk/actualused[latest,absolute]:289
12857:20241101:185643.794 adding performance counter disk/deltaused[latest]:290
12857:20241101:185643.794 adding performance counter disk/deltaused[latest,absolute]:290
12857:20241101:185643.794 adding performance counter disk/capacity.provisioned[average]:291
12857:20241101:185643.794 adding performance counter disk/capacity.provisioned[average,absolute]:291
12857:20241101:185643.794 adding performance counter disk/capacity.usage[average]:292
12857:20241101:185643.794 adding performance counter disk/capacity.usage[average,absolute]:292
12857:20241101:185643.794 adding performance counter disk/capacity.contention[average]:293
12857:20241101:185643.795 adding performance counter disk/capacity.contention[average,absolute]:293
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationlatencystats[maximum]:294
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationlatencystats[maximum,absolute]:294
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationlatencystats[minimum]:295
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationlatencystats[minimum,absolute]:295
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationlatencystats[summation]:296
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationlatencystats[summation,absolute]:296
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationstats[maximum]:297
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationstats[maximum,absolute]:297
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationstats[minimum]:298
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationstats[minimum,absolute]:298
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationstats[summation]:299
12857:20241101:185643.795 adding performance counter vcDebugInfo/activationstats[summation,absolute]:299
12857:20241101:185643.795 adding performance counter vcResources/buffersz[average]:300
12857:20241101:185643.795 adding performance counter vcResources/buffersz[average,absolute]:300
12857:20241101:185643.795 adding performance counter vcResources/cachesz[average]:301
12857:20241101:185643.795 adding performance counter vcResources/cachesz[average,absolute]:301
12857:20241101:185643.795 adding performance counter vcResources/ctxswitchesrate[average]:302
12857:20241101:185643.795 adding performance counter vcResources/ctxswitchesrate[average,rate]:302
12857:20241101:185643.795 adding performance counter vcResources/diskreadsectorrate[average]:303
12857:20241101:185643.795 adding performance counter vcResources/diskreadsectorrate[average,rate]:303
12857:20241101:185643.796 adding performance counter vcResources/diskreadsrate[average]:304
12857:20241101:185643.796 adding performance counter vcResources/diskreadsrate[average,rate]:304
12857:20241101:185643.796 adding performance counter vcResources/diskwritesectorrate[average]:305
12857:20241101:185643.796 adding performance counter vcResources/diskwritesectorrate[average,rate]:305
12857:20241101:185643.796 adding performance counter vcResources/diskwritesrate[average]:306
12857:20241101:185643.796 adding performance counter vcResources/diskwritesrate[average,rate]:306
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsynclatencystats[maximum]:307
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsynclatencystats[maximum,absolute]:307
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsynclatencystats[minimum]:308
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsynclatencystats[minimum,absolute]:308
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsynclatencystats[summation]:309
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsynclatencystats[summation,absolute]:309
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsyncstats[maximum]:310
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsyncstats[maximum,absolute]:310
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsyncstats[minimum]:311
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsyncstats[minimum,absolute]:311
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsyncstats[summation]:312
12857:20241101:185643.796 adding performance counter vcDebugInfo/hostsyncstats[summation,absolute]:312
12857:20241101:185643.796 adding performance counter vcDebugInfo/inventorystats[maximum]:313
12857:20241101:185643.796 adding performance counter vcDebugInfo/inventorystats[maximum,absolute]:313
12857:20241101:185643.796 adding performance counter vcDebugInfo/inventorystats[minimum]:314
12857:20241101:185643.796 adding performance counter vcDebugInfo/inventorystats[minimum,absolute]:314
12857:20241101:185643.797 adding performance counter vcDebugInfo/inventorystats[summation]:315
12857:20241101:185643.797 adding performance counter vcDebugInfo/inventorystats[summation,absolute]:315
12857:20241101:185643.797 adding performance counter vcDebugInfo/lockstats[maximum]:316
12857:20241101:185643.797 adding performance counter vcDebugInfo/lockstats[maximum,absolute]:316
12857:20241101:185643.797 adding performance counter vcDebugInfo/lockstats[minimum]:317
12857:20241101:185643.797 adding performance counter vcDebugInfo/lockstats[minimum,absolute]:317
12857:20241101:185643.797 adding performance counter vcDebugInfo/lockstats[summation]:318
12857:20241101:185643.797 adding performance counter vcDebugInfo/lockstats[summation,absolute]:318
12857:20241101:185643.797 adding performance counter vcDebugInfo/lrostats[maximum]:319
12857:20241101:185643.797 adding performance counter vcDebugInfo/lrostats[maximum,absolute]:319
12857:20241101:185643.797 adding performance counter vcDebugInfo/lrostats[minimum]:320
12857:20241101:185643.797 adding performance counter vcDebugInfo/lrostats[minimum,absolute]:320
12857:20241101:185643.797 adding performance counter vcDebugInfo/lrostats[summation]:321
12857:20241101:185643.797 adding performance counter vcDebugInfo/lrostats[summation,absolute]:321
12857:20241101:185643.797 adding performance counter vcDebugInfo/miscstats[maximum]:322
12857:20241101:185643.797 adding performance counter vcDebugInfo/miscstats[maximum,absolute]:322
12857:20241101:185643.797 adding performance counter vcDebugInfo/miscstats[minimum]:323
12857:20241101:185643.797 adding performance counter vcDebugInfo/miscstats[minimum,absolute]:323
12857:20241101:185643.797 adding performance counter vcDebugInfo/miscstats[summation]:324
12857:20241101:185643.797 adding performance counter vcDebugInfo/miscstats[summation,absolute]:324
12857:20241101:185643.798 adding performance counter vcDebugInfo/morefregstats[maximum]:325
12857:20241101:185643.798 adding performance counter vcDebugInfo/morefregstats[maximum,absolute]:325
12857:20241101:185643.798 adding performance counter vcDebugInfo/morefregstats[minimum]:326
12857:20241101:185643.798 adding performance counter vcDebugInfo/morefregstats[minimum,absolute]:326
12857:20241101:185643.798 adding performance counter vcDebugInfo/morefregstats[summation]:327
12857:20241101:185643.798 adding performance counter vcDebugInfo/morefregstats[summation,absolute]:327
12857:20241101:185643.798 adding performance counter vcResources/packetrecvrate[average]:328
12857:20241101:185643.798 adding performance counter vcResources/packetrecvrate[average,rate]:328
12857:20241101:185643.798 adding performance counter vcResources/packetsentrate[average]:329
12857:20241101:185643.798 adding performance counter vcResources/packetsentrate[average,rate]:329
12857:20241101:185643.798 adding performance counter vcResources/systemcpuusage[average]:330
12857:20241101:185643.798 adding performance counter vcResources/systemcpuusage[average,rate]:330
12857:20241101:185643.798 adding performance counter vcResources/pagefaultrate[average]:331
12857:20241101:185643.798 adding performance counter vcResources/pagefaultrate[average,rate]:331
12857:20241101:185643.798 adding performance counter vcResources/physicalmemusage[average]:332
12857:20241101:185643.798 adding performance counter vcResources/physicalmemusage[average,absolute]:332
12857:20241101:185643.798 adding performance counter vcResources/priviledgedcpuusage[average]:333
12857:20241101:185643.798 adding performance counter vcResources/priviledgedcpuusage[average,rate]:333
12857:20241101:185643.798 adding performance counter vcDebugInfo/scoreboard[maximum]:334
12857:20241101:185643.798 adding performance counter vcDebugInfo/scoreboard[maximum,absolute]:334
12857:20241101:185643.799 adding performance counter vcDebugInfo/scoreboard[minimum]:335
12857:20241101:185643.799 adding performance counter vcDebugInfo/scoreboard[minimum,absolute]:335
12857:20241101:185643.799 adding performance counter vcDebugInfo/scoreboard[summation]:336
12857:20241101:185643.799 adding performance counter vcDebugInfo/scoreboard[summation,absolute]:336
12857:20241101:185643.799 adding performance counter vcDebugInfo/sessionstats[maximum]:337
12857:20241101:185643.799 adding performance counter vcDebugInfo/sessionstats[maximum,absolute]:337
12857:20241101:185643.799 adding performance counter vcDebugInfo/sessionstats[minimum]:338
12857:20241101:185643.799 adding performance counter vcDebugInfo/sessionstats[minimum,absolute]:338
12857:20241101:185643.799 adding performance counter vcDebugInfo/sessionstats[summation]:339
12857:20241101:185643.799 adding performance counter vcDebugInfo/sessionstats[summation,absolute]:339
12857:20241101:185643.799 adding performance counter vcResources/syscallsrate[average]:340
12857:20241101:185643.799 adding performance counter vcResources/syscallsrate[average,rate]:340
12857:20241101:185643.799 adding performance counter vcDebugInfo/systemstats[maximum]:341
12857:20241101:185643.799 adding performance counter vcDebugInfo/systemstats[maximum,absolute]:341
12857:20241101:185643.799 adding performance counter vcDebugInfo/systemstats[minimum]:342
12857:20241101:185643.799 adding performance counter vcDebugInfo/systemstats[minimum,absolute]:342
12857:20241101:185643.799 adding performance counter vcDebugInfo/systemstats[summation]:343
12857:20241101:185643.799 adding performance counter vcDebugInfo/systemstats[summation,absolute]:343
12857:20241101:185643.799 adding performance counter vcResources/usercpuusage[average]:344
12857:20241101:185643.799 adding performance counter vcResources/usercpuusage[average,rate]:344
12857:20241101:185643.800 adding performance counter vcDebugInfo/vcservicestats[maximum]:345
12857:20241101:185643.800 adding performance counter vcDebugInfo/vcservicestats[maximum,absolute]:345
12857:20241101:185643.800 adding performance counter vcDebugInfo/vcservicestats[minimum]:346
12857:20241101:185643.800 adding performance counter vcDebugInfo/vcservicestats[minimum,absolute]:346
12857:20241101:185643.800 adding performance counter vcDebugInfo/vcservicestats[summation]:347
12857:20241101:185643.800 adding performance counter vcDebugInfo/vcservicestats[summation,absolute]:347
12857:20241101:185643.800 adding performance counter vcResources/virtualmemusage[average]:348
12857:20241101:185643.800 adding performance counter vcResources/virtualmemusage[average,absolute]:348
12857:20241101:185643.800 adding performance counter virtualDisk/readOIO[latest]:349
12857:20241101:185643.800 adding performance counter virtualDisk/readOIO[latest,absolute]:349
12857:20241101:185643.800 adding performance counter virtualDisk/writeOIO[latest]:350
12857:20241101:185643.800 adding performance counter virtualDisk/writeOIO[latest,absolute]:350
12857:20241101:185643.800 adding performance counter virtualDisk/readLoadMetric[latest]:351
12857:20241101:185643.800 adding performance counter virtualDisk/readLoadMetric[latest,absolute]:351
12857:20241101:185643.800 adding performance counter virtualDisk/writeLoadMetric[latest]:352
12857:20241101:185643.800 adding performance counter virtualDisk/writeLoadMetric[latest,absolute]:352
12857:20241101:185643.800 adding performance counter rescpu/actav1[latest]:353
12857:20241101:185643.800 adding performance counter rescpu/actav1[latest,absolute]:353
12857:20241101:185643.800 adding performance counter datastore/datastoreReadBytes[latest]:354
12857:20241101:185643.800 adding performance counter datastore/datastoreReadBytes[latest,absolute]:354
12857:20241101:185643.800 adding performance counter datastore/datastoreWriteBytes[latest]:355
12857:20241101:185643.800 adding performance counter datastore/datastoreWriteBytes[latest,absolute]:355
12857:20241101:185643.801 adding performance counter datastore/datastoreReadIops[latest]:356
12857:20241101:185643.801 adding performance counter datastore/datastoreReadIops[latest,absolute]:356
12857:20241101:185643.801 adding performance counter datastore/datastoreWriteIops[latest]:357
12857:20241101:185643.801 adding performance counter datastore/datastoreWriteIops[latest,absolute]:357
12857:20241101:185643.801 adding performance counter datastore/datastoreReadOIO[latest]:358
12857:20241101:185643.801 adding performance counter datastore/datastoreReadOIO[latest,absolute]:358
12857:20241101:185643.801 adding performance counter datastore/datastoreWriteOIO[latest]:359
12857:20241101:185643.801 adding performance counter datastore/datastoreWriteOIO[latest,absolute]:359
12857:20241101:185643.801 adding performance counter datastore/datastoreNormalReadLatency[latest]:360
12857:20241101:185643.801 adding performance counter datastore/datastoreNormalReadLatency[latest,absolute]:360
12857:20241101:185643.801 adding performance counter datastore/datastoreNormalWriteLatency[latest]:361
12857:20241101:185643.801 adding performance counter datastore/datastoreNormalWriteLatency[latest,absolute]:361
12857:20241101:185643.801 adding performance counter datastore/datastoreReadLoadMetric[latest]:362
12857:20241101:185643.801 adding performance counter datastore/datastoreReadLoadMetric[latest,absolute]:362
12857:20241101:185643.801 adding performance counter datastore/datastoreWriteLoadMetric[latest]:363
12857:20241101:185643.801 adding performance counter datastore/datastoreWriteLoadMetric[latest,absolute]:363
12857:20241101:185643.801 adding performance counter datastore/datastoreVMObservedLatency[latest]:364
12857:20241101:185643.801 adding performance counter datastore/datastoreVMObservedLatency[latest,absolute]:364
12857:20241101:185643.801 adding performance counter disk/scsiReservationCnflctsPct[average]:365
12857:20241101:185643.802 adding performance counter disk/scsiReservationCnflctsPct[average,rate]:365
12857:20241101:185643.802 adding performance counter disk/read[latest]:366
12857:20241101:185643.802 adding performance counter disk/read[latest,absolute]:366
12857:20241101:185643.802 adding performance counter disk/readFailed[latest]:367
12857:20241101:185643.802 adding performance counter disk/readFailed[latest,absolute]:367
12857:20241101:185643.802 adding performance counter disk/write[latest]:368
12857:20241101:185643.802 adding performance counter disk/write[latest,absolute]:368
12857:20241101:185643.802 adding performance counter disk/writeFailed[latest]:369
12857:20241101:185643.802 adding performance counter disk/writeFailed[latest,absolute]:369
12857:20241101:185643.802 adding performance counter disk/commands.success[latest]:370
12857:20241101:185643.802 adding performance counter disk/commands.success[latest,absolute]:370
12857:20241101:185643.802 adding performance counter disk/commands.failed[latest]:371
12857:20241101:185643.802 adding performance counter disk/commands.failed[latest,absolute]:371
12857:20241101:185643.802 adding performance counter disk/commands.queued[latest]:372
12857:20241101:185643.802 adding performance counter disk/commands.queued[latest,absolute]:372
12857:20241101:185643.802 adding performance counter disk/commands.active[latest]:373
12857:20241101:185643.802 adding performance counter disk/commands.active[latest,absolute]:373
12857:20241101:185643.802 adding performance counter disk/state[latest]:374
12857:20241101:185643.802 adding performance counter disk/state[latest,absolute]:374
12857:20241101:185643.802 adding performance counter disk/TM.abort[latest]:375
12857:20241101:185643.802 adding performance counter disk/TM.abort[latest,absolute]:375
12857:20241101:185643.803 adding performance counter disk/TM.abortRetry[latest]:376
12857:20241101:185643.803 adding performance counter disk/TM.abortRetry[latest,absolute]:376
12857:20241101:185643.803 adding performance counter disk/TM.abortFailed[latest]:377
12857:20241101:185643.803 adding performance counter disk/TM.abortFailed[latest,absolute]:377
12857:20241101:185643.803 adding performance counter disk/TM.virtReset[latest]:378
12857:20241101:185643.803 adding performance counter disk/TM.virtReset[latest,absolute]:378
12857:20241101:185643.803 adding performance counter disk/TM.virtResetRetry[latest]:379
12857:20241101:185643.803 adding performance counter disk/TM.virtResetRetry[latest,absolute]:379
12857:20241101:185643.803 adding performance counter disk/TM.virtResetFailed[latest]:380
12857:20241101:185643.803 adding performance counter disk/TM.virtResetFailed[latest,absolute]:380
12857:20241101:185643.803 adding performance counter disk/TM.lunReset[latest]:381
12857:20241101:185643.803 adding performance counter disk/TM.lunReset[latest,absolute]:381
12857:20241101:185643.803 adding performance counter disk/TM.lunResetRetry[latest]:382
12857:20241101:185643.803 adding performance counter disk/TM.lunResetRetry[latest,absolute]:382
12857:20241101:185643.803 adding performance counter disk/TM.lunResetFailed[latest]:383
12857:20241101:185643.803 adding performance counter disk/TM.lunResetFailed[latest,absolute]:383
12857:20241101:185643.803 adding performance counter disk/TM.deviceReset[latest]:384
12857:20241101:185643.803 adding performance counter disk/TM.deviceReset[latest,absolute]:384
12857:20241101:185643.803 adding performance counter disk/TM.deviceResetRetry[latest]:385
12857:20241101:185643.803 adding performance counter disk/TM.deviceResetRetry[latest,absolute]:385
12857:20241101:185643.804 adding performance counter disk/TM.deviceResetFailed[latest]:386
12857:20241101:185643.804 adding performance counter disk/TM.deviceResetFailed[latest,absolute]:386
12857:20241101:185643.804 adding performance counter disk/TM.busReset[latest]:387
12857:20241101:185643.804 adding performance counter disk/TM.busReset[latest,absolute]:387
12857:20241101:185643.804 adding performance counter disk/TM.busResetRetry[latest]:388
12857:20241101:185643.804 adding performance counter disk/TM.busResetRetry[latest,absolute]:388
12857:20241101:185643.804 adding performance counter disk/TM.busResetFailed[latest]:389
12857:20241101:185643.804 adding performance counter disk/TM.busResetFailed[latest,absolute]:389
12857:20241101:185643.804 adding performance counter disk/latency.qavg[latest]:390
12857:20241101:185643.804 adding performance counter disk/latency.qavg[latest,absolute]:390
12857:20241101:185643.804 adding performance counter disk/latency.davg[latest]:391
12857:20241101:185643.804 adding performance counter disk/latency.davg[latest,absolute]:391
12857:20241101:185643.804 adding performance counter disk/latency.kavg[latest]:392
12857:20241101:185643.804 adding performance counter disk/latency.kavg[latest,absolute]:392
12857:20241101:185643.804 adding performance counter disk/latency.gavg[latest]:393
12857:20241101:185643.804 adding performance counter disk/latency.gavg[latest,absolute]:393
12857:20241101:185643.804 adding performance counter storageAdapter/outstandingIOs[latest]:394
12857:20241101:185643.804 adding performance counter storageAdapter/outstandingIOs[latest,absolute]:394
12857:20241101:185643.804 adding performance counter storageAdapter/queued[latest]:395
12857:20241101:185643.804 adding performance counter storageAdapter/queued[latest,absolute]:395
12857:20241101:185643.804 adding performance counter storageAdapter/queueDepth[latest]:396
12857:20241101:185643.804 adding performance counter storageAdapter/queueDepth[latest,absolute]:396
12857:20241101:185643.805 adding performance counter cpu/partnerBusyTime[average]:397
12857:20241101:185643.805 adding performance counter cpu/partnerBusyTime[average,rate]:397
12857:20241101:185643.805 adding performance counter cpu/utilization[average]:398
12857:20241101:185643.805 adding performance counter cpu/utilization[average,rate]:398
12857:20241101:185643.805 adding performance counter cpu/corecount.provisioned[latest]:399
12857:20241101:185643.805 adding performance counter cpu/corecount.provisioned[latest,absolute]:399
12857:20241101:185643.805 adding performance counter cpu/cache.l3.occupancy[average]:400
12857:20241101:185643.805 adding performance counter cpu/cache.l3.occupancy[average,absolute]:400
12857:20241101:185643.805 adding performance counter cpu/corecount.usage[latest]:401
12857:20241101:185643.805 adding performance counter cpu/corecount.usage[latest,absolute]:401
12857:20241101:185643.805 adding performance counter cpu/load.avg1min[latest]:402
12857:20241101:185643.805 adding performance counter cpu/load.avg1min[latest,absolute]:402
12857:20241101:185643.805 adding performance counter cpu/load.avg5min[latest]:403
12857:20241101:185643.805 adding performance counter cpu/load.avg5min[latest,absolute]:403
12857:20241101:185643.805 adding performance counter cpu/load.avg15min[latest]:404
12857:20241101:185643.805 adding performance counter cpu/load.avg15min[latest,absolute]:404
12857:20241101:185643.805 adding performance counter mem/capacity.provisioned[latest]:405
12857:20241101:185643.805 adding performance counter mem/capacity.provisioned[latest,absolute]:405
12857:20241101:185643.805 adding performance counter mem/reservedCapacityPct[latest]:406
12857:20241101:185643.805 adding performance counter mem/reservedCapacityPct[latest,absolute]:406
12857:20241101:185643.806 adding performance counter mem/overcommit.avg1min[latest]:407
12857:20241101:185643.806 adding performance counter mem/overcommit.avg1min[latest,absolute]:407
12857:20241101:185643.806 adding performance counter mem/overcommit.avg5min[latest]:408
12857:20241101:185643.806 adding performance counter mem/overcommit.avg5min[latest,absolute]:408
12857:20241101:185643.806 adding performance counter mem/overcommit.avg15min[latest]:409
12857:20241101:185643.806 adding performance counter mem/overcommit.avg15min[latest,absolute]:409
12857:20241101:185643.806 adding performance counter mem/physical.total[latest]:410
12857:20241101:185643.806 adding performance counter mem/physical.total[latest,absolute]:410
12857:20241101:185643.806 adding performance counter mem/physical.user[latest]:411
12857:20241101:185643.806 adding performance counter mem/physical.user[latest,absolute]:411
12857:20241101:185643.806 adding performance counter mem/physical.free[latest]:412
12857:20241101:185643.806 adding performance counter mem/physical.free[latest,absolute]:412
12857:20241101:185643.806 adding performance counter mem/kernel.managed[latest]:413
12857:20241101:185643.806 adding performance counter mem/kernel.managed[latest,absolute]:413
12857:20241101:185643.806 adding performance counter mem/kernel.minfree[latest]:414
12857:20241101:185643.806 adding performance counter mem/kernel.minfree[latest,absolute]:414
12857:20241101:185643.806 adding performance counter mem/kernel.unreserved[latest]:415
12857:20241101:185643.806 adding performance counter mem/kernel.unreserved[latest,absolute]:415
12857:20241101:185643.806 adding performance counter mem/pshare.shared[latest]:416
12857:20241101:185643.806 adding performance counter mem/pshare.shared[latest,absolute]:416
12857:20241101:185643.806 adding performance counter mem/pshare.common[latest]:417
12857:20241101:185643.807 adding performance counter mem/pshare.common[latest,absolute]:417
12857:20241101:185643.807 adding performance counter mem/pshare.sharedSave[latest]:418
12857:20241101:185643.807 adding performance counter mem/pshare.sharedSave[latest,absolute]:418
12857:20241101:185643.807 adding performance counter mem/swap.current[latest]:419
12857:20241101:185643.807 adding performance counter mem/swap.current[latest,absolute]:419
12857:20241101:185643.807 adding performance counter mem/swap.target[latest]:420
12857:20241101:185643.807 adding performance counter mem/swap.target[latest,absolute]:420
12857:20241101:185643.807 adding performance counter mem/swap.readrate[average]:421
12857:20241101:185643.807 adding performance counter mem/swap.readrate[average,rate]:421
12857:20241101:185643.807 adding performance counter mem/swap.writerate[average]:422
12857:20241101:185643.807 adding performance counter mem/swap.writerate[average,rate]:422
12857:20241101:185643.807 adding performance counter mem/zip.zipped[latest]:423
12857:20241101:185643.807 adding performance counter mem/zip.zipped[latest,absolute]:423
12857:20241101:185643.807 adding performance counter mem/zip.saved[latest]:424
12857:20241101:185643.807 adding performance counter mem/zip.saved[latest,absolute]:424
12857:20241101:185643.807 adding performance counter mem/memctl.current[latest]:425
12857:20241101:185643.807 adding performance counter mem/memctl.current[latest,absolute]:425
12857:20241101:185643.807 adding performance counter mem/memctl.target[latest]:426
12857:20241101:185643.807 adding performance counter mem/memctl.target[latest,absolute]:426
12857:20241101:185643.807 adding performance counter mem/memctl.max[latest]:427
12857:20241101:185643.807 adding performance counter mem/memctl.max[latest,absolute]:427
12857:20241101:185643.808 adding performance counter mem/health.reservationState[latest]:428
12857:20241101:185643.808 adding performance counter mem/health.reservationState[latest,absolute]:428
12857:20241101:185643.808 adding performance counter mem/capacity.overhead[average]:429
12857:20241101:185643.808 adding performance counter mem/capacity.overhead[average,absolute]:429
12857:20241101:185643.808 adding performance counter mem/capacity.overheadResv[average]:430
12857:20241101:185643.808 adding performance counter mem/capacity.overheadResv[average,absolute]:430
12857:20241101:185643.808 adding performance counter mem/capacity.consumed[latest]:431
12857:20241101:185643.808 adding performance counter mem/capacity.consumed[latest,absolute]:431
12857:20241101:185643.808 adding performance counter mem/capacity.active[latest]:432
12857:20241101:185643.808 adding performance counter mem/capacity.active[latest,absolute]:432
12857:20241101:185643.808 adding performance counter power/capacity.usageCpu[average]:433
12857:20241101:185643.808 adding performance counter power/capacity.usageCpu[average,absolute]:433
12857:20241101:185643.808 adding performance counter power/capacity.usageMem[average]:434
12857:20241101:185643.808 adding performance counter power/capacity.usageMem[average,absolute]:434
12857:20241101:185643.808 adding performance counter power/capacity.usageOther[average]:435
12857:20241101:185643.808 adding performance counter power/capacity.usageOther[average,absolute]:435
12857:20241101:185643.808 adding performance counter vmotion/vmkernel.downtime[latest]:436
12857:20241101:185643.808 adding performance counter vmotion/vmkernel.downtime[latest,absolute]:436
12857:20241101:185643.808 adding performance counter vmotion/downtime[latest]:437
12857:20241101:185643.808 adding performance counter vmotion/downtime[latest,absolute]:437
12857:20241101:185643.809 adding performance counter vmotion/precopy.time[latest]:438
12857:20241101:185643.809 adding performance counter vmotion/precopy.time[latest,absolute]:438
12857:20241101:185643.809 adding performance counter vmotion/rtt[latest]:439
12857:20241101:185643.809 adding performance counter vmotion/rtt[latest,absolute]:439
12857:20241101:185643.809 adding performance counter vmotion/dst.migration.time[latest]:440
12857:20241101:185643.809 adding performance counter vmotion/dst.migration.time[latest,absolute]:440
12857:20241101:185643.809 adding performance counter vmotion/mem.sizemb[latest]:441
12857:20241101:185643.809 adding performance counter vmotion/mem.sizemb[latest,absolute]:441
12857:20241101:185643.809 adding performance counter hbr/vms[latest]:442
12857:20241101:185643.809 adding performance counter hbr/vms[latest,absolute]:442
12857:20241101:185643.809 adding performance counter net/throughput.hbr.inbound[average]:443
12857:20241101:185643.809 adding performance counter net/throughput.hbr.inbound[average,rate]:443
12857:20241101:185643.809 adding performance counter net/throughput.hbr.outbound[average]:444
12857:20241101:185643.809 adding performance counter net/throughput.hbr.outbound[average,rate]:444
12857:20241101:185643.809 adding performance counter virtualDisk/hbr.readLatencyMS[latest]:445
12857:20241101:185643.809 adding performance counter virtualDisk/hbr.readLatencyMS[latest,absolute]:445
12857:20241101:185643.809 adding performance counter virtualDisk/hbr.stallLatencyMS[latest]:446
12857:20241101:185643.809 adding performance counter virtualDisk/hbr.stallLatencyMS[latest,absolute]:446
12857:20241101:185643.809 adding performance counter net/latency.hbr.outbound[latest]:447
12857:20241101:185643.809 adding performance counter net/latency.hbr.outbound[latest,absolute]:447
12857:20241101:185643.810 adding performance counter lwd/numSnapshots[latest]:448
12857:20241101:185643.810 adding performance counter lwd/numSnapshots[latest,absolute]:448
12857:20241101:185643.810 adding performance counter nfs/apdState[latest]:449
12857:20241101:185643.810 adding performance counter nfs/apdState[latest,absolute]:449
12857:20241101:185643.810 adding performance counter nfs/readIssueTime[latest]:450
12857:20241101:185643.810 adding performance counter nfs/readIssueTime[latest,absolute]:450
12857:20241101:185643.810 adding performance counter nfs/writeIssueTime[latest]:451
12857:20241101:185643.810 adding performance counter nfs/writeIssueTime[latest,absolute]:451
12857:20241101:185643.810 adding performance counter nfs/totalReads[latest]:452
12857:20241101:185643.810 adding performance counter nfs/totalReads[latest,absolute]:452
12857:20241101:185643.810 adding performance counter nfs/readsFailed[latest]:453
12857:20241101:185643.810 adding performance counter nfs/readsFailed[latest,absolute]:453
12857:20241101:185643.810 adding performance counter nfs/totalWrites[latest]:454
12857:20241101:185643.810 adding performance counter nfs/totalWrites[latest,absolute]:454
12857:20241101:185643.810 adding performance counter nfs/writesFailed[latest]:455
12857:20241101:185643.810 adding performance counter nfs/writesFailed[latest,absolute]:455
12857:20241101:185643.810 adding performance counter nfs/readTime[latest]:456
12857:20241101:185643.810 adding performance counter nfs/readTime[latest,absolute]:456
12857:20241101:185643.810 adding performance counter nfs/writeTime[latest]:457
12857:20241101:185643.810 adding performance counter nfs/writeTime[latest,absolute]:457
12857:20241101:185643.810 adding performance counter nfs/ioRequestsQueued[latest]:458
12857:20241101:185643.811 adding performance counter nfs/ioRequestsQueued[latest,absolute]:458
12857:20241101:185643.811 adding performance counter nfs/totalCreate[latest]:459
12857:20241101:185643.811 adding performance counter nfs/totalCreate[latest,absolute]:459
12857:20241101:185643.811 adding performance counter nfs/createFailed[latest]:460
12857:20241101:185643.811 adding performance counter nfs/createFailed[latest,absolute]:460
12857:20241101:185643.811 adding performance counter nfs/socketBufferFull[latest]:461
12857:20241101:185643.811 adding performance counter nfs/socketBufferFull[latest,absolute]:461
12857:20241101:185643.811 adding performance counter datastore/vmfs.totalTxn[latest]:462
12857:20241101:185643.811 adding performance counter datastore/vmfs.totalTxn[latest,absolute]:462
12857:20241101:185643.811 adding performance counter datastore/vmfs.cancelledTxn[latest]:463
12857:20241101:185643.811 adding performance counter datastore/vmfs.cancelledTxn[latest,absolute]:463
12857:20241101:185643.811 adding performance counter datastore/vmfs.apdState[latest]:464
12857:20241101:185643.811 adding performance counter datastore/vmfs.apdState[latest,absolute]:464
12857:20241101:185643.811 adding performance counter datastore/vmfs.apdCount[latest]:465
12857:20241101:185643.811 adding performance counter datastore/vmfs.apdCount[latest,absolute]:465
12857:20241101:185643.811 adding performance counter vvol/pe.isaccessible[latest]:466
12857:20241101:185643.811 adding performance counter vvol/pe.isaccessible[latest,absolute]:466
12857:20241101:185643.811 adding performance counter vvol/pe.reads.done[latest]:467
12857:20241101:185643.811 adding performance counter vvol/pe.reads.done[latest,absolute]:467
12857:20241101:185643.811 adding performance counter vvol/pe.writes.done[latest]:468
12857:20241101:185643.811 adding performance counter vvol/pe.writes.done[latest,absolute]:468
12857:20241101:185643.812 adding performance counter vvol/pe.total.done[latest]:469
12857:20241101:185643.812 adding performance counter vvol/pe.total.done[latest,absolute]:469
12857:20241101:185643.812 adding performance counter vvol/pe.reads.sent[latest]:470
12857:20241101:185643.812 adding performance counter vvol/pe.reads.sent[latest,absolute]:470
12857:20241101:185643.812 adding performance counter vvol/pe.writes.sent[latest]:471
12857:20241101:185643.812 adding performance counter vvol/pe.writes.sent[latest,absolute]:471
12857:20241101:185643.812 adding performance counter vvol/pe.total.sent[latest]:472
12857:20241101:185643.812 adding performance counter vvol/pe.total.sent[latest,absolute]:472
12857:20241101:185643.812 adding performance counter vvol/pe.readsissued.failed[latest]:473
12857:20241101:185643.812 adding performance counter vvol/pe.readsissued.failed[latest,absolute]:473
12857:20241101:185643.812 adding performance counter vvol/pe.writesissued.failed[latest]:474
12857:20241101:185643.812 adding performance counter vvol/pe.writesissued.failed[latest,absolute]:474
12857:20241101:185643.812 adding performance counter vvol/pe.totalissued.failed[latest]:475
12857:20241101:185643.812 adding performance counter vvol/pe.totalissued.failed[latest,absolute]:475
12857:20241101:185643.812 adding performance counter vvol/pe.reads.failed[latest]:476
12857:20241101:185643.812 adding performance counter vvol/pe.reads.failed[latest,absolute]:476
12857:20241101:185643.812 adding performance counter vvol/pe.writes.failed[latest]:477
12857:20241101:185643.812 adding performance counter vvol/pe.writes.failed[latest,absolute]:477
12857:20241101:185643.812 adding performance counter vvol/pe.total.failed[latest]:478
12857:20241101:185643.812 adding performance counter vvol/pe.total.failed[latest,absolute]:478
12857:20241101:185643.812 adding performance counter vvol/pe.read.latency[latest]:479
12857:20241101:185643.813 adding performance counter vvol/pe.read.latency[latest,absolute]:479
12857:20241101:185643.813 adding performance counter vvol/pe.write.latency[latest]:480
12857:20241101:185643.813 adding performance counter vvol/pe.write.latency[latest,absolute]:480
12857:20241101:185643.813 adding performance counter vvol/pe.issue.latency[latest]:481
12857:20241101:185643.813 adding performance counter vvol/pe.issue.latency[latest,absolute]:481
12857:20241101:185643.813 adding performance counter vvol/pe.total.latency[latest]:482
12857:20241101:185643.813 adding performance counter vvol/pe.total.latency[latest,absolute]:482
12857:20241101:185643.813 adding performance counter vvol/pe.cancel.sent[latest]:483
12857:20241101:185643.813 adding performance counter vvol/pe.cancel.sent[latest,absolute]:483
12857:20241101:185643.813 adding performance counter vvol/pe.cancel.failed[latest]:484
12857:20241101:185643.813 adding performance counter vvol/pe.cancel.failed[latest,absolute]:484
12857:20241101:185643.813 adding performance counter vvol/pe.deviceresets.sent[latest]:485
12857:20241101:185643.813 adding performance counter vvol/pe.deviceresets.sent[latest,absolute]:485
12857:20241101:185643.813 adding performance counter vvol/pe.deviceresets.failed[latest]:486
12857:20241101:185643.813 adding performance counter vvol/pe.deviceresets.failed[latest,absolute]:486
12857:20241101:185643.813 adding performance counter vvol/pe.resets.sent[latest]:487
12857:20241101:185643.813 adding performance counter vvol/pe.resets.sent[latest,absolute]:487
12857:20241101:185643.813 adding performance counter vvol/pe.resets.failed[latest]:488
12857:20241101:185643.813 adding performance counter vvol/pe.resets.failed[latest,absolute]:488
12857:20241101:185643.813 adding performance counter vvol/pe.unmaps.sent[latest]:489
12857:20241101:185643.814 adding performance counter vvol/pe.unmaps.sent[latest,absolute]:489
12857:20241101:185643.814 adding performance counter vvol/pe.unmaps.failed[latest]:490
12857:20241101:185643.814 adding performance counter vvol/pe.unmaps.failed[latest,absolute]:490
12857:20241101:185643.814 adding performance counter vvol/container.reads.done[latest]:491
12857:20241101:185643.814 adding performance counter vvol/container.reads.done[latest,absolute]:491
12857:20241101:185643.814 adding performance counter vvol/container.writes.done[latest]:492
12857:20241101:185643.814 adding performance counter vvol/container.writes.done[latest,absolute]:492
12857:20241101:185643.814 adding performance counter vvol/container.total.done[latest]:493
12857:20241101:185643.814 adding performance counter vvol/container.total.done[latest,absolute]:493
12857:20241101:185643.814 adding performance counter vvol/container.reads.sent[latest]:494
12857:20241101:185643.814 adding performance counter vvol/container.reads.sent[latest,absolute]:494
12857:20241101:185643.814 adding performance counter vvol/container.writes.sent[latest]:495
12857:20241101:185643.814 adding performance counter vvol/container.writes.sent[latest,absolute]:495
12857:20241101:185643.814 adding performance counter vvol/container.total.sent[latest]:496
12857:20241101:185643.814 adding performance counter vvol/container.total.sent[latest,absolute]:496
12857:20241101:185643.814 adding performance counter vvol/container.readsissued.failed[latest]:497
12857:20241101:185643.814 adding performance counter vvol/container.readsissued.failed[latest,absolute]:497
12857:20241101:185643.814 adding performance counter vvol/container.writesissued.failed[latest]:498
12857:20241101:185643.814 adding performance counter vvol/container.writesissued.failed[latest,absolute]:498
12857:20241101:185643.814 adding performance counter vvol/container.totalissued.failed[latest]:499
12857:20241101:185643.815 adding performance counter vvol/container.totalissued.failed[latest,absolute]:499
12857:20241101:185643.815 adding performance counter vvol/container.reads.failed[latest]:500
12857:20241101:185643.815 adding performance counter vvol/container.reads.failed[latest,absolute]:500
12857:20241101:185643.815 adding performance counter vvol/container.writes.failed[latest]:501
12857:20241101:185643.815 adding performance counter vvol/container.writes.failed[latest,absolute]:501
12857:20241101:185643.815 adding performance counter vvol/container.total.failed[latest]:502
12857:20241101:185643.815 adding performance counter vvol/container.total.failed[latest,absolute]:502
12857:20241101:185643.815 adding performance counter vvol/container.read.latency[latest]:503
12857:20241101:185643.815 adding performance counter vvol/container.read.latency[latest,absolute]:503
12857:20241101:185643.815 adding performance counter vvol/container.write.latency[latest]:504
12857:20241101:185643.815 adding performance counter vvol/container.write.latency[latest,absolute]:504
12857:20241101:185643.815 adding performance counter vvol/container.issue.latency[latest]:505
12857:20241101:185643.815 adding performance counter vvol/container.issue.latency[latest,absolute]:505
12857:20241101:185643.815 adding performance counter vvol/container.total.latency[latest]:506
12857:20241101:185643.815 adding performance counter vvol/container.total.latency[latest,absolute]:506
12857:20241101:185643.815 adding performance counter vvol/device.reads.done[latest]:507
12857:20241101:185643.815 adding performance counter vvol/device.reads.done[latest,absolute]:507
12857:20241101:185643.815 adding performance counter vvol/device.writes.done[latest]:508
12857:20241101:185643.815 adding performance counter vvol/device.writes.done[latest,absolute]:508
12857:20241101:185643.815 adding performance counter vvol/device.total.done[latest]:509
12857:20241101:185643.815 adding performance counter vvol/device.total.done[latest,absolute]:509
12857:20241101:185643.816 adding performance counter vvol/device.reads.sent[latest]:510
12857:20241101:185643.816 adding performance counter vvol/device.reads.sent[latest,absolute]:510
12857:20241101:185643.816 adding performance counter vvol/device.writes.sent[latest]:511
12857:20241101:185643.816 adding performance counter vvol/device.writes.sent[latest,absolute]:511
12857:20241101:185643.816 adding performance counter vvol/device.total.sent[latest]:512
12857:20241101:185643.816 adding performance counter vvol/device.total.sent[latest,absolute]:512
12857:20241101:185643.816 adding performance counter vvol/device.readsissued.failed[latest]:513
12857:20241101:185643.816 adding performance counter vvol/device.readsissued.failed[latest,absolute]:513
12857:20241101:185643.816 adding performance counter vvol/device.writesissued.failed[latest]:514
12857:20241101:185643.816 adding performance counter vvol/device.writesissued.failed[latest,absolute]:514
12857:20241101:185643.816 adding performance counter vvol/device.totalissued.failed[latest]:515
12857:20241101:185643.816 adding performance counter vvol/device.totalissued.failed[latest,absolute]:515
12857:20241101:185643.816 adding performance counter vvol/device.reads.failed[latest]:516
12857:20241101:185643.816 adding performance counter vvol/device.reads.failed[latest,absolute]:516
12857:20241101:185643.816 adding performance counter vvol/device.writes.failed[latest]:517
12857:20241101:185643.816 adding performance counter vvol/device.writes.failed[latest,absolute]:517
12857:20241101:185643.816 adding performance counter vvol/device.total.failed[latest]:518
12857:20241101:185643.816 adding performance counter vvol/device.total.failed[latest,absolute]:518
12857:20241101:185643.816 adding performance counter vvol/device.read.latency[latest]:519
12857:20241101:185643.816 adding performance counter vvol/device.read.latency[latest,absolute]:519
12857:20241101:185643.817 adding performance counter vvol/device.write.latency[latest]:520
12857:20241101:185643.817 adding performance counter vvol/device.write.latency[latest,absolute]:520
12857:20241101:185643.817 adding performance counter vvol/device.issue.latency[latest]:521
12857:20241101:185643.817 adding performance counter vvol/device.issue.latency[latest,absolute]:521
12857:20241101:185643.817 adding performance counter vvol/device.total.latency[latest]:522
12857:20241101:185643.817 adding performance counter vvol/device.total.latency[latest,absolute]:522
12857:20241101:185643.817 adding performance counter vvol/device.cancel.sent[latest]:523
12857:20241101:185643.817 adding performance counter vvol/device.cancel.sent[latest,absolute]:523
12857:20241101:185643.817 adding performance counter vvol/device.cancel.failed[latest]:524
12857:20241101:185643.817 adding performance counter vvol/device.cancel.failed[latest,absolute]:524
12857:20241101:185643.817 adding performance counter vvol/device.deviceresets.sent[latest]:525
12857:20241101:185643.817 adding performance counter vvol/device.deviceresets.sent[latest,absolute]:525
12857:20241101:185643.817 adding performance counter vvol/device.deviceresets.failed[latest]:526
12857:20241101:185643.817 adding performance counter vvol/device.deviceresets.failed[latest,absolute]:526
12857:20241101:185643.817 adding performance counter vvol/device.resets.sent[latest]:527
12857:20241101:185643.817 adding performance counter vvol/device.resets.sent[latest,absolute]:527
12857:20241101:185643.817 adding performance counter vvol/device.resets.failed[latest]:528
12857:20241101:185643.817 adding performance counter vvol/device.resets.failed[latest,absolute]:528
12857:20241101:185643.817 adding performance counter vvol/device.unmaps.sent[latest]:529
12857:20241101:185643.817 adding performance counter vvol/device.unmaps.sent[latest,absolute]:529
12857:20241101:185643.818 adding performance counter vvol/device.unmaps.failed[latest]:530
12857:20241101:185643.818 adding performance counter vvol/device.unmaps.failed[latest,absolute]:530
12857:20241101:185643.818 adding performance counter cpu/swapwait[summation]:531
12857:20241101:185643.818 adding performance counter cpu/swapwait[summation,delta]:531
12857:20241101:185643.818 adding performance counter cpu/utilization[none]:532
12857:20241101:185643.818 adding performance counter cpu/utilization[none,rate]:532
12857:20241101:185643.818 adding performance counter cpu/utilization[maximum]:533
12857:20241101:185643.818 adding performance counter cpu/utilization[maximum,rate]:533
12857:20241101:185643.818 adding performance counter cpu/utilization[minimum]:534
12857:20241101:185643.818 adding performance counter cpu/utilization[minimum,rate]:534
12857:20241101:185643.818 adding performance counter cpu/coreUtilization[none]:535
12857:20241101:185643.818 adding performance counter cpu/coreUtilization[none,rate]:535
12857:20241101:185643.818 adding performance counter cpu/coreUtilization[average]:536
12857:20241101:185643.818 adding performance counter cpu/coreUtilization[average,rate]:536
12857:20241101:185643.818 adding performance counter cpu/coreUtilization[maximum]:537
12857:20241101:185643.818 adding performance counter cpu/coreUtilization[maximum,rate]:537
12857:20241101:185643.818 adding performance counter cpu/coreUtilization[minimum]:538
12857:20241101:185643.818 adding performance counter cpu/coreUtilization[minimum,rate]:538
12857:20241101:185643.818 adding performance counter cpu/totalCapacity[average]:539
12857:20241101:185643.818 adding performance counter cpu/totalCapacity[average,absolute]:539
12857:20241101:185643.818 adding performance counter cpu/latency[average]:540
12857:20241101:185643.819 adding performance counter cpu/latency[average,rate]:540
12857:20241101:185643.819 adding performance counter cpu/entitlement[latest]:541
12857:20241101:185643.819 adding performance counter cpu/entitlement[latest,absolute]:541
12857:20241101:185643.819 adding performance counter cpu/demand[average]:542
12857:20241101:185643.819 adding performance counter cpu/demand[average,absolute]:542
12857:20241101:185643.819 adding performance counter cpu/costop[summation]:543
12857:20241101:185643.819 adding performance counter cpu/costop[summation,delta]:543
12857:20241101:185643.819 adding performance counter cpu/maxlimited[summation]:544
12857:20241101:185643.819 adding performance counter cpu/maxlimited[summation,delta]:544
12857:20241101:185643.819 adding performance counter cpu/overlap[summation]:545
12857:20241101:185643.819 adding performance counter cpu/overlap[summation,delta]:545
12857:20241101:185643.819 adding performance counter cpu/run[summation]:546
12857:20241101:185643.819 adding performance counter cpu/run[summation,delta]:546
12857:20241101:185643.819 adding performance counter cpu/demandEntitlementRatio[latest]:547
12857:20241101:185643.819 adding performance counter cpu/demandEntitlementRatio[latest,absolute]:547
12857:20241101:185643.819 adding performance counter cpu/readiness[average]:548
12857:20241101:185643.819 adding performance counter cpu/readiness[average,rate]:548
12857:20241101:185643.819 adding performance counter cpu/usage.vcpus[average]:549
12857:20241101:185643.819 adding performance counter cpu/usage.vcpus[average,rate]:549
12857:20241101:185643.819 adding performance counter mem/swapin[none]:550
12857:20241101:185643.819 adding performance counter mem/swapin[none,absolute]:550
12857:20241101:185643.820 adding performance counter mem/swapin[average]:551
12857:20241101:185643.820 adding performance counter mem/swapin[average,absolute]:551
12857:20241101:185643.820 adding performance counter mem/swapin[maximum]:552
12857:20241101:185643.820 adding performance counter mem/swapin[maximum,absolute]:552
12857:20241101:185643.820 adding performance counter mem/swapin[minimum]:553
12857:20241101:185643.820 adding performance counter mem/swapin[minimum,absolute]:553
12857:20241101:185643.820 adding performance counter mem/swapout[none]:554
12857:20241101:185643.820 adding performance counter mem/swapout[none,absolute]:554
12857:20241101:185643.820 adding performance counter mem/swapout[average]:555
12857:20241101:185643.820 adding performance counter mem/swapout[average,absolute]:555
12857:20241101:185643.820 adding performance counter mem/swapout[maximum]:556
12857:20241101:185643.820 adding performance counter mem/swapout[maximum,absolute]:556
12857:20241101:185643.820 adding performance counter mem/swapout[minimum]:557
12857:20241101:185643.820 adding performance counter mem/swapout[minimum,absolute]:557
12857:20241101:185643.820 adding performance counter mem/sysUsage[none]:558
12857:20241101:185643.820 adding performance counter mem/sysUsage[none,absolute]:558
12857:20241101:185643.820 adding performance counter mem/sysUsage[average]:559
12857:20241101:185643.820 adding performance counter mem/sysUsage[average,absolute]:559
12857:20241101:185643.820 adding performance counter mem/sysUsage[maximum]:560
12857:20241101:185643.820 adding performance counter mem/sysUsage[maximum,absolute]:560
12857:20241101:185643.821 adding performance counter mem/sysUsage[minimum]:561
12857:20241101:185643.821 adding performance counter mem/sysUsage[minimum,absolute]:561
12857:20241101:185643.821 adding performance counter mem/activewrite[average]:562
12857:20241101:185643.821 adding performance counter mem/activewrite[average,absolute]:562
12857:20241101:185643.821 adding performance counter mem/overheadMax[average]:563
12857:20241101:185643.821 adding performance counter mem/overheadMax[average,absolute]:563
12857:20241101:185643.821 adding performance counter mem/totalCapacity[average]:564
12857:20241101:185643.821 adding performance counter mem/totalCapacity[average,absolute]:564
12857:20241101:185643.821 adding performance counter mem/zipped[latest]:565
12857:20241101:185643.821 adding performance counter mem/zipped[latest,absolute]:565
12857:20241101:185643.821 adding performance counter mem/zipSaved[latest]:566
12857:20241101:185643.821 adding performance counter mem/zipSaved[latest,absolute]:566
12857:20241101:185643.821 adding performance counter mem/latency[average]:567
12857:20241101:185643.821 adding performance counter mem/latency[average,absolute]:567
12857:20241101:185643.821 adding performance counter mem/entitlement[average]:568
12857:20241101:185643.821 adding performance counter mem/entitlement[average,absolute]:568
12857:20241101:185643.821 adding performance counter mem/lowfreethreshold[average]:569
12857:20241101:185643.821 adding performance counter mem/lowfreethreshold[average,absolute]:569
12857:20241101:185643.821 adding performance counter mem/llSwapUsed[none]:570
12857:20241101:185643.821 adding performance counter mem/llSwapUsed[none,absolute]:570
12857:20241101:185643.821 adding performance counter mem/llSwapInRate[average]:571
12857:20241101:185643.822 adding performance counter mem/llSwapInRate[average,rate]:571
12857:20241101:185643.822 adding performance counter mem/llSwapOutRate[average]:572
12857:20241101:185643.822 adding performance counter mem/llSwapOutRate[average,rate]:572
12857:20241101:185643.822 adding performance counter mem/overheadTouched[average]:573
12857:20241101:185643.822 adding performance counter mem/overheadTouched[average,absolute]:573
12857:20241101:185643.822 adding performance counter mem/llSwapUsed[average]:574
12857:20241101:185643.822 adding performance counter mem/llSwapUsed[average,absolute]:574
12857:20241101:185643.822 adding performance counter mem/llSwapUsed[maximum]:575
12857:20241101:185643.822 adding performance counter mem/llSwapUsed[maximum,absolute]:575
12857:20241101:185643.822 adding performance counter mem/llSwapUsed[minimum]:576
12857:20241101:185643.822 adding performance counter mem/llSwapUsed[minimum,absolute]:576
12857:20241101:185643.822 adding performance counter mem/llSwapIn[none]:577
12857:20241101:185643.822 adding performance counter mem/llSwapIn[none,absolute]:577
12857:20241101:185643.822 adding performance counter mem/llSwapIn[average]:578
12857:20241101:185643.822 adding performance counter mem/llSwapIn[average,absolute]:578
12857:20241101:185643.822 adding performance counter mem/llSwapIn[maximum]:579
12857:20241101:185643.822 adding performance counter mem/llSwapIn[maximum,absolute]:579
12857:20241101:185643.822 adding performance counter mem/llSwapIn[minimum]:580
12857:20241101:185643.822 adding performance counter mem/llSwapIn[minimum,absolute]:580
12857:20241101:185643.822 adding performance counter mem/llSwapOut[none]:581
12857:20241101:185643.822 adding performance counter mem/llSwapOut[none,absolute]:581
12857:20241101:185643.823 adding performance counter mem/llSwapOut[average]:582
12857:20241101:185643.823 adding performance counter mem/llSwapOut[average,absolute]:582
12857:20241101:185643.823 adding performance counter mem/llSwapOut[maximum]:583
12857:20241101:185643.823 adding performance counter mem/llSwapOut[maximum,absolute]:583
12857:20241101:185643.823 adding performance counter mem/llSwapOut[minimum]:584
12857:20241101:185643.823 adding performance counter mem/llSwapOut[minimum,absolute]:584
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.size[latest]:585
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.size[latest,absolute]:585
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.sizeMax[latest]:586
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.sizeMax[latest,absolute]:586
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.workingSet[latest]:587
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.workingSet[latest,absolute]:587
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.workingSetMax[latest]:588
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.workingSetMax[latest,absolute]:588
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.overhead[latest]:589
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.overhead[latest,absolute]:589
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.capMissRatio[latest]:590
12857:20241101:185643.823 adding performance counter mem/vmfs.pbc.capMissRatio[latest,absolute]:590
12857:20241101:185643.823 adding performance counter disk/commands[summation]:591
12857:20241101:185643.823 adding performance counter disk/commands[summation,delta]:591
12857:20241101:185643.824 adding performance counter disk/deviceReadLatency[average]:592
12857:20241101:185643.824 adding performance counter disk/deviceReadLatency[average,absolute]:592
12857:20241101:185643.824 adding performance counter disk/kernelReadLatency[average]:593
12857:20241101:185643.824 adding performance counter disk/kernelReadLatency[average,absolute]:593
12857:20241101:185643.824 adding performance counter disk/totalReadLatency[average]:594
12857:20241101:185643.824 adding performance counter disk/totalReadLatency[average,absolute]:594
12857:20241101:185643.824 adding performance counter disk/queueReadLatency[average]:595
12857:20241101:185643.824 adding performance counter disk/queueReadLatency[average,absolute]:595
12857:20241101:185643.824 adding performance counter disk/deviceWriteLatency[average]:596
12857:20241101:185643.824 adding performance counter disk/deviceWriteLatency[average,absolute]:596
12857:20241101:185643.824 adding performance counter disk/kernelWriteLatency[average]:597
12857:20241101:185643.824 adding performance counter disk/kernelWriteLatency[average,absolute]:597
12857:20241101:185643.824 adding performance counter disk/totalWriteLatency[average]:598
12857:20241101:185643.824 adding performance counter disk/totalWriteLatency[average,absolute]:598
12857:20241101:185643.824 adding performance counter disk/queueWriteLatency[average]:599
12857:20241101:185643.824 adding performance counter disk/queueWriteLatency[average,absolute]:599
12857:20241101:185643.824 adding performance counter disk/deviceLatency[average]:600
12857:20241101:185643.824 adding performance counter disk/deviceLatency[average,absolute]:600
12857:20241101:185643.824 adding performance counter disk/kernelLatency[average]:601
12857:20241101:185643.824 adding performance counter disk/kernelLatency[average,absolute]:601
12857:20241101:185643.824 adding performance counter disk/queueLatency[average]:602
12857:20241101:185643.825 adding performance counter disk/queueLatency[average,absolute]:602
12857:20241101:185643.825 adding performance counter disk/maxQueueDepth[average]:603
12857:20241101:185643.825 adding performance counter disk/maxQueueDepth[average,absolute]:603
12857:20241101:185643.825 adding performance counter disk/commandsAveraged[average]:604
12857:20241101:185643.825 adding performance counter disk/commandsAveraged[average,rate]:604
12857:20241101:185643.825 adding performance counter net/droppedRx[summation]:605
12857:20241101:185643.825 adding performance counter net/droppedRx[summation,delta]:605
12857:20241101:185643.825 adding performance counter net/droppedTx[summation]:606
12857:20241101:185643.825 adding performance counter net/droppedTx[summation,delta]:606
12857:20241101:185643.825 adding performance counter net/bytesRx[average]:607
12857:20241101:185643.825 adding performance counter net/bytesRx[average,rate]:607
12857:20241101:185643.825 adding performance counter net/bytesTx[average]:608
12857:20241101:185643.825 adding performance counter net/bytesTx[average,rate]:608
12857:20241101:185643.825 adding performance counter net/broadcastRx[summation]:609
12857:20241101:185643.825 adding performance counter net/broadcastRx[summation,delta]:609
12857:20241101:185643.825 adding performance counter net/broadcastTx[summation]:610
12857:20241101:185643.825 adding performance counter net/broadcastTx[summation,delta]:610
12857:20241101:185643.825 adding performance counter net/multicastRx[summation]:611
12857:20241101:185643.825 adding performance counter net/multicastRx[summation,delta]:611
12857:20241101:185643.825 adding performance counter net/multicastTx[summation]:612
12857:20241101:185643.825 adding performance counter net/multicastTx[summation,delta]:612
12857:20241101:185643.826 adding performance counter net/errorsRx[summation]:613
12857:20241101:185643.826 adding performance counter net/errorsRx[summation,delta]:613
12857:20241101:185643.826 adding performance counter net/errorsTx[summation]:614
12857:20241101:185643.826 adding performance counter net/errorsTx[summation,delta]:614
12857:20241101:185643.826 adding performance counter net/unknownProtos[summation]:615
12857:20241101:185643.826 adding performance counter net/unknownProtos[summation,delta]:615
12857:20241101:185643.826 adding performance counter net/pnicBytesRx[average]:616
12857:20241101:185643.826 adding performance counter net/pnicBytesRx[average,rate]:616
12857:20241101:185643.826 adding performance counter net/pnicBytesTx[average]:617
12857:20241101:185643.826 adding performance counter net/pnicBytesTx[average,rate]:617
12857:20241101:185643.826 adding performance counter sys/heartbeat[latest]:618
12857:20241101:185643.826 adding performance counter sys/heartbeat[latest,absolute]:618
12857:20241101:185643.826 adding performance counter sys/diskUsage[latest]:619
12857:20241101:185643.826 adding performance counter sys/diskUsage[latest,absolute]:619
12857:20241101:185643.826 adding performance counter sys/resourceCpuUsage[none]:620
12857:20241101:185643.826 adding performance counter sys/resourceCpuUsage[none,rate]:620
12857:20241101:185643.826 adding performance counter sys/resourceCpuUsage[average]:621
12857:20241101:185643.826 adding performance counter sys/resourceCpuUsage[average,rate]:621
12857:20241101:185643.826 adding performance counter sys/resourceCpuUsage[maximum]:622
12857:20241101:185643.826 adding performance counter sys/resourceCpuUsage[maximum,rate]:622
12857:20241101:185643.827 adding performance counter sys/resourceCpuUsage[minimum]:623
12857:20241101:185643.827 adding performance counter sys/resourceCpuUsage[minimum,rate]:623
12857:20241101:185643.827 adding performance counter sys/resourceMemTouched[latest]:624
12857:20241101:185643.827 adding performance counter sys/resourceMemTouched[latest,absolute]:624
12857:20241101:185643.827 adding performance counter sys/resourceMemMapped[latest]:625
12857:20241101:185643.827 adding performance counter sys/resourceMemMapped[latest,absolute]:625
12857:20241101:185643.827 adding performance counter sys/resourceMemShared[latest]:626
12857:20241101:185643.827 adding performance counter sys/resourceMemShared[latest,absolute]:626
12857:20241101:185643.827 adding performance counter sys/resourceMemSwapped[latest]:627
12857:20241101:185643.827 adding performance counter sys/resourceMemSwapped[latest,absolute]:627
12857:20241101:185643.827 adding performance counter sys/resourceMemOverhead[latest]:628
12857:20241101:185643.827 adding performance counter sys/resourceMemOverhead[latest,absolute]:628
12857:20241101:185643.827 adding performance counter sys/resourceMemCow[latest]:629
12857:20241101:185643.827 adding performance counter sys/resourceMemCow[latest,absolute]:629
12857:20241101:185643.827 adding performance counter sys/resourceMemZero[latest]:630
12857:20241101:185643.827 adding performance counter sys/resourceMemZero[latest,absolute]:630
12857:20241101:185643.827 adding performance counter sys/resourceCpuRun1[latest]:631
12857:20241101:185643.827 adding performance counter sys/resourceCpuRun1[latest,absolute]:631
12857:20241101:185643.827 adding performance counter sys/resourceCpuAct1[latest]:632
12857:20241101:185643.827 adding performance counter sys/resourceCpuAct1[latest,absolute]:632
12857:20241101:185643.827 adding performance counter sys/resourceCpuMaxLimited1[latest]:633
12857:20241101:185643.828 adding performance counter sys/resourceCpuMaxLimited1[latest,absolute]:633
12857:20241101:185643.828 adding performance counter sys/resourceCpuRun5[latest]:634
12857:20241101:185643.828 adding performance counter sys/resourceCpuRun5[latest,absolute]:634
12857:20241101:185643.828 adding performance counter sys/resourceCpuAct5[latest]:635
12857:20241101:185643.828 adding performance counter sys/resourceCpuAct5[latest,absolute]:635
12857:20241101:185643.828 adding performance counter sys/resourceCpuMaxLimited5[latest]:636
12857:20241101:185643.828 adding performance counter sys/resourceCpuMaxLimited5[latest,absolute]:636
12857:20241101:185643.828 adding performance counter sys/resourceCpuAllocMin[latest]:637
12857:20241101:185643.828 adding performance counter sys/resourceCpuAllocMin[latest,absolute]:637
12857:20241101:185643.828 adding performance counter sys/resourceCpuAllocMax[latest]:638
12857:20241101:185643.828 adding performance counter sys/resourceCpuAllocMax[latest,absolute]:638
12857:20241101:185643.828 adding performance counter sys/resourceCpuAllocShares[latest]:639
12857:20241101:185643.828 adding performance counter sys/resourceCpuAllocShares[latest,absolute]:639
12857:20241101:185643.828 adding performance counter sys/resourceMemAllocMin[latest]:640
12857:20241101:185643.828 adding performance counter sys/resourceMemAllocMin[latest,absolute]:640
12857:20241101:185643.828 adding performance counter sys/resourceMemAllocMax[latest]:641
12857:20241101:185643.828 adding performance counter sys/resourceMemAllocMax[latest,absolute]:641
12857:20241101:185643.828 adding performance counter sys/resourceMemAllocShares[latest]:642
12857:20241101:185643.828 adding performance counter sys/resourceMemAllocShares[latest,absolute]:642
12857:20241101:185643.828 adding performance counter sys/osUptime[latest]:643
12857:20241101:185643.828 adding performance counter sys/osUptime[latest,absolute]:643
12857:20241101:185643.829 adding performance counter sys/resourceMemConsumed[latest]:644
12857:20241101:185643.829 adding performance counter sys/resourceMemConsumed[latest,absolute]:644
12857:20241101:185643.829 adding performance counter sys/resourceFdUsage[latest]:645
12857:20241101:185643.829 adding performance counter sys/resourceFdUsage[latest,absolute]:645
12857:20241101:185643.829 adding performance counter rescpu/actpk1[latest]:646
12857:20241101:185643.829 adding performance counter rescpu/actpk1[latest,absolute]:646
12857:20241101:185643.829 adding performance counter rescpu/runav1[latest]:647
12857:20241101:185643.829 adding performance counter rescpu/runav1[latest,absolute]:647
12857:20241101:185643.829 adding performance counter rescpu/actav5[latest]:648
12857:20241101:185643.829 adding performance counter rescpu/actav5[latest,absolute]:648
12857:20241101:185643.829 adding performance counter rescpu/actpk5[latest]:649
12857:20241101:185643.829 adding performance counter rescpu/actpk5[latest,absolute]:649
12857:20241101:185643.829 adding performance counter rescpu/runav5[latest]:650
12857:20241101:185643.829 adding performance counter rescpu/runav5[latest,absolute]:650
12857:20241101:185643.829 adding performance counter rescpu/actav15[latest]:651
12857:20241101:185643.829 adding performance counter rescpu/actav15[latest,absolute]:651
12857:20241101:185643.829 adding performance counter rescpu/actpk15[latest]:652
12857:20241101:185643.829 adding performance counter rescpu/actpk15[latest,absolute]:652
12857:20241101:185643.829 adding performance counter rescpu/runav15[latest]:653
12857:20241101:185643.829 adding performance counter rescpu/runav15[latest,absolute]:653
12857:20241101:185643.830 adding performance counter rescpu/runpk1[latest]:654
12857:20241101:185643.830 adding performance counter rescpu/runpk1[latest,absolute]:654
12857:20241101:185643.830 adding performance counter rescpu/maxLimited1[latest]:655
12857:20241101:185643.830 adding performance counter rescpu/maxLimited1[latest,absolute]:655
12857:20241101:185643.830 adding performance counter rescpu/runpk5[latest]:656
12857:20241101:185643.830 adding performance counter rescpu/runpk5[latest,absolute]:656
12857:20241101:185643.830 adding performance counter rescpu/maxLimited5[latest]:657
12857:20241101:185643.830 adding performance counter rescpu/maxLimited5[latest,absolute]:657
12857:20241101:185643.830 adding performance counter rescpu/runpk15[latest]:658
12857:20241101:185643.830 adding performance counter rescpu/runpk15[latest,absolute]:658
12857:20241101:185643.830 adding performance counter rescpu/maxLimited15[latest]:659
12857:20241101:185643.830 adding performance counter rescpu/maxLimited15[latest,absolute]:659
12857:20241101:185643.830 adding performance counter rescpu/sampleCount[latest]:660
12857:20241101:185643.830 adding performance counter rescpu/sampleCount[latest,absolute]:660
12857:20241101:185643.830 adding performance counter rescpu/samplePeriod[latest]:661
12857:20241101:185643.830 adding performance counter rescpu/samplePeriod[latest,absolute]:661
12857:20241101:185643.830 adding performance counter managementAgent/memUsed[average]:662
12857:20241101:185643.830 adding performance counter managementAgent/memUsed[average,absolute]:662
12857:20241101:185643.830 adding performance counter managementAgent/swapUsed[average]:663
12857:20241101:185643.830 adding performance counter managementAgent/swapUsed[average,absolute]:663
12857:20241101:185643.830 adding performance counter managementAgent/cpuUsage[average]:664
12857:20241101:185643.831 adding performance counter managementAgent/cpuUsage[average,rate]:664
12857:20241101:185643.831 adding performance counter storagePath/commandsAveraged[average]:665
12857:20241101:185643.831 adding performance counter storagePath/commandsAveraged[average,rate]:665
12857:20241101:185643.831 adding performance counter storagePath/numberReadAveraged[average]:666
12857:20241101:185643.831 adding performance counter storagePath/numberReadAveraged[average,rate]:666
12857:20241101:185643.831 adding performance counter storagePath/numberWriteAveraged[average]:667
12857:20241101:185643.831 adding performance counter storagePath/numberWriteAveraged[average,rate]:667
12857:20241101:185643.831 adding performance counter storagePath/read[average]:668
12857:20241101:185643.831 adding performance counter storagePath/read[average,rate]:668
12857:20241101:185643.831 adding performance counter storagePath/write[average]:669
12857:20241101:185643.831 adding performance counter storagePath/write[average,rate]:669
12857:20241101:185643.831 adding performance counter storagePath/totalReadLatency[average]:670
12857:20241101:185643.831 adding performance counter storagePath/totalReadLatency[average,absolute]:670
12857:20241101:185643.831 adding performance counter storagePath/totalWriteLatency[average]:671
12857:20241101:185643.831 adding performance counter storagePath/totalWriteLatency[average,absolute]:671
12857:20241101:185643.831 adding performance counter virtualDisk/readIOSize[latest]:672
12857:20241101:185643.831 adding performance counter virtualDisk/readIOSize[latest,absolute]:672
12857:20241101:185643.831 adding performance counter virtualDisk/writeIOSize[latest]:673
12857:20241101:185643.831 adding performance counter virtualDisk/writeIOSize[latest,absolute]:673
12857:20241101:185643.831 adding performance counter virtualDisk/smallSeeks[latest]:674
12857:20241101:185643.831 adding performance counter virtualDisk/smallSeeks[latest,absolute]:674
12857:20241101:185643.832 adding performance counter virtualDisk/mediumSeeks[latest]:675
12857:20241101:185643.832 adding performance counter virtualDisk/mediumSeeks[latest,absolute]:675
12857:20241101:185643.832 adding performance counter virtualDisk/largeSeeks[latest]:676
12857:20241101:185643.832 adding performance counter virtualDisk/largeSeeks[latest,absolute]:676
12857:20241101:185643.832 adding performance counter virtualDisk/readLatencyUS[latest]:677
12857:20241101:185643.832 adding performance counter virtualDisk/readLatencyUS[latest,absolute]:677
12857:20241101:185643.832 adding performance counter virtualDisk/writeLatencyUS[latest]:678
12857:20241101:185643.832 adding performance counter virtualDisk/writeLatencyUS[latest,absolute]:678
12857:20241101:185643.832 adding performance counter datastore/datastoreMaxQueueDepth[latest]:679
12857:20241101:185643.832 adding performance counter datastore/datastoreMaxQueueDepth[latest,absolute]:679
12857:20241101:185643.832 adding performance counter datastore/unmapSize[summation]:680
12857:20241101:185643.832 adding performance counter datastore/unmapSize[summation,delta]:680
12857:20241101:185643.832 adding performance counter datastore/unmapIOs[summation]:681
12857:20241101:185643.832 adding performance counter datastore/unmapIOs[summation,delta]:681
12857:20241101:185643.832 adding performance counter hbr/hbrNumVms[average]:682
12857:20241101:185643.832 adding performance counter hbr/hbrNumVms[average,absolute]:682
12857:20241101:185643.832 adding performance counter hbr/hbrNetRx[average]:683
12857:20241101:185643.832 adding performance counter hbr/hbrNetRx[average,rate]:683
12857:20241101:185643.832 adding performance counter hbr/hbrNetTx[average]:684
12857:20241101:185643.832 adding performance counter hbr/hbrNetTx[average,rate]:684
12857:20241101:185643.833 adding performance counter hbr/hbrNetLatency[average]:685
12857:20241101:185643.833 adding performance counter hbr/hbrNetLatency[average,absolute]:685
12857:20241101:185643.833 adding performance counter hbr/hbrDiskReadLatency[average]:686
12857:20241101:185643.833 adding performance counter hbr/hbrDiskReadLatency[average,absolute]:686
12857:20241101:185643.833 adding performance counter hbr/hbrDiskStallLatency[average]:687
12857:20241101:185643.833 adding performance counter hbr/hbrDiskStallLatency[average,absolute]:687
12857:20241101:185643.833 adding performance counter hbr/hbrDiskTransferSuccess[average]:688
12857:20241101:185643.833 adding performance counter hbr/hbrDiskTransferSuccess[average,absolute]:688
12857:20241101:185643.833 adding performance counter hbr/hbrDiskTransferIdle[average]:689
12857:20241101:185643.833 adding performance counter hbr/hbrDiskTransferIdle[average,absolute]:689
12857:20241101:185643.833 adding performance counter hbr/hbrDiskTransferBytes[average]:690
12857:20241101:185643.833 adding performance counter hbr/hbrDiskTransferBytes[average,absolute]:690
12857:20241101:185643.833 adding performance counter vflashModule/numActiveVMDKs[latest]:691
12857:20241101:185643.833 adding performance counter vflashModule/numActiveVMDKs[latest,absolute]:691
12857:20241101:185643.833 adding performance counter vsanDomObj/readIops[average]:692
12857:20241101:185643.833 adding performance counter vsanDomObj/readIops[average,rate]:692
12857:20241101:185643.833 adding performance counter vsanDomObj/readThroughput[average]:693
12857:20241101:185643.833 adding performance counter vsanDomObj/readThroughput[average,rate]:693
12857:20241101:185643.833 adding performance counter vsanDomObj/readAvgLatency[average]:694
12857:20241101:185643.833 adding performance counter vsanDomObj/readAvgLatency[average,absolute]:694
12857:20241101:185643.833 adding performance counter vsanDomObj/readMaxLatency[latest]:695
12857:20241101:185643.834 adding performance counter vsanDomObj/readMaxLatency[latest,absolute]:695
12857:20241101:185643.834 adding performance counter vsanDomObj/readCacheHitRate[latest]:696
12857:20241101:185643.834 adding performance counter vsanDomObj/readCacheHitRate[latest,absolute]:696
12857:20241101:185643.834 adding performance counter vsanDomObj/readCongestion[average]:697
12857:20241101:185643.834 adding performance counter vsanDomObj/readCongestion[average,rate]:697
12857:20241101:185643.834 adding performance counter vsanDomObj/writeIops[average]:698
12857:20241101:185643.834 adding performance counter vsanDomObj/writeIops[average,rate]:698
12857:20241101:185643.834 adding performance counter vsanDomObj/writeThroughput[average]:699
12857:20241101:185643.834 adding performance counter vsanDomObj/writeThroughput[average,rate]:699
12857:20241101:185643.834 adding performance counter vsanDomObj/writeAvgLatency[average]:700
12857:20241101:185643.834 adding performance counter vsanDomObj/writeAvgLatency[average,absolute]:700
12857:20241101:185643.834 adding performance counter vsanDomObj/writeMaxLatency[latest]:701
12857:20241101:185643.834 adding performance counter vsanDomObj/writeMaxLatency[latest,absolute]:701
12857:20241101:185643.834 adding performance counter vsanDomObj/writeCongestion[average]:702
12857:20241101:185643.834 adding performance counter vsanDomObj/writeCongestion[average,rate]:702
12857:20241101:185643.834 adding performance counter vsanDomObj/recoveryWriteIops[average]:703
12857:20241101:185643.834 adding performance counter vsanDomObj/recoveryWriteIops[average,rate]:703
12857:20241101:185643.834 adding performance counter vsanDomObj/recoveryWriteThroughput[average]:704
12857:20241101:185643.834 adding performance counter vsanDomObj/recoveryWriteThroughput[average,rate]:704
12857:20241101:185643.834 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average]:705
12857:20241101:185643.834 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average,absolute]:705
12857:20241101:185643.835 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest]:706
12857:20241101:185643.835 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest,absolute]:706
12857:20241101:185643.835 adding performance counter vsanDomObj/recoveryWriteCongestion[average]:707
12857:20241101:185643.835 adding performance counter vsanDomObj/recoveryWriteCongestion[average,rate]:707
12857:20241101:185643.835 adding performance counter gpu/utilization[none]:708
12857:20241101:185643.835 adding performance counter gpu/utilization[none,absolute]:708
12857:20241101:185643.835 adding performance counter gpu/utilization[maximum]:709
12857:20241101:185643.835 adding performance counter gpu/utilization[maximum,absolute]:709
12857:20241101:185643.835 adding performance counter gpu/utilization[minimum]:710
12857:20241101:185643.835 adding performance counter gpu/utilization[minimum,absolute]:710
12857:20241101:185643.835 adding performance counter gpu/mem.used[none]:711
12857:20241101:185643.835 adding performance counter gpu/mem.used[none,absolute]:711
12857:20241101:185643.835 adding performance counter gpu/mem.used[maximum]:712
12857:20241101:185643.835 adding performance counter gpu/mem.used[maximum,absolute]:712
12857:20241101:185643.835 adding performance counter gpu/mem.used[minimum]:713
12857:20241101:185643.835 adding performance counter gpu/mem.used[minimum,absolute]:713
12857:20241101:185643.835 adding performance counter gpu/mem.usage[none]:714
12857:20241101:185643.835 adding performance counter gpu/mem.usage[none,absolute]:714
12857:20241101:185643.835 adding performance counter gpu/mem.usage[average]:715
12857:20241101:185643.835 adding performance counter gpu/mem.usage[average,absolute]:715
12857:20241101:185643.835 adding performance counter gpu/mem.usage[maximum]:716
12857:20241101:185643.836 adding performance counter gpu/mem.usage[maximum,absolute]:716
12857:20241101:185643.836 adding performance counter gpu/mem.usage[minimum]:717
12857:20241101:185643.836 adding performance counter gpu/mem.usage[minimum,absolute]:717
12857:20241101:185643.836 Unknown performance counter 718 type of unitInfo:gigaBytes
12857:20241101:185643.836 adding performance counter gpu/mem.used.gb[latest]:718
12857:20241101:185643.836 Unknown performance counter 718 type of unitInfo:gigaBytes
12857:20241101:185643.836 adding performance counter gpu/mem.used.gb[latest,absolute]:718
12857:20241101:185643.836 Unknown performance counter 719 type of unitInfo:gigaBytes
12857:20241101:185643.836 adding performance counter gpu/mem.reserved.gb[latest]:719
12857:20241101:185643.836 Unknown performance counter 719 type of unitInfo:gigaBytes
12857:20241101:185643.836 adding performance counter gpu/mem.reserved.gb[latest,absolute]:719
12857:20241101:185643.836 Unknown performance counter 720 type of unitInfo:gigaBytes
12857:20241101:185643.836 adding performance counter gpu/mem.total.gb[latest]:720
12857:20241101:185643.836 Unknown performance counter 720 type of unitInfo:gigaBytes
12857:20241101:185643.836 adding performance counter gpu/mem.total.gb[latest,absolute]:720
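The "Unknown performance counter ... type of unitInfo:gigaBytes" messages above show that counters 718-720 report their values in a unit the collector's lookup does not recognize; as the following "adding performance counter" lines show, the counters are still registered and only their unit stays undefined. Below is a minimal sketch of such a unit lookup, assuming the collector compares the counter's unitInfo key against a fixed list of known units and logs a warning on a miss; the function name, unit list and unit ids are illustrative, not the actual Zabbix implementation.

    #include <stdio.h>
    #include <string.h>

    /* Illustrative lookup: map a vSphere PerfCounterInfo unitInfo key to an
     * internal unit id; "gigaBytes" is deliberately absent from the table. */
    static int vmware_unit_from_string(int counterid, const char *unit)
    {
        static const char *known[] = {"number", "percent", "second", "millisecond",
                "kiloBytes", "megaBytes", "megaHertz", NULL};

        for (int i = 0; NULL != known[i]; i++)
        {
            if (0 == strcmp(unit, known[i]))
                return i + 1;   /* illustrative internal unit id */
        }

        /* mirrors the warning seen in the trace above */
        printf("Unknown performance counter %d type of unitInfo:%s\n", counterid, unit);

        return 0;   /* undefined unit; the counter itself is still added */
    }

    int main(void)
    {
        vmware_unit_from_string(718, "gigaBytes");
        return 0;
    }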
12857:20241101:185643.836 adding performance counter pmem/available.reservation[latest]:721
12857:20241101:185643.836 adding performance counter pmem/available.reservation[latest,absolute]:721
12857:20241101:185643.836 adding performance counter pmem/drsmanaged.reservation[latest]:722
12857:20241101:185643.836 adding performance counter pmem/drsmanaged.reservation[latest,absolute]:722
12857:20241101:185643.836 adding performance counter vmx/numVCPUs[latest]:723
12857:20241101:185643.836 adding performance counter vmx/numVCPUs[latest,absolute]:723
12857:20241101:185643.836 adding performance counter vmx/vcpusMhzMin[latest]:724
12857:20241101:185643.836 adding performance counter vmx/vcpusMhzMin[latest,absolute]:724
12857:20241101:185643.837 adding performance counter vmx/vcpusMhzMax[latest]:725
12857:20241101:185643.837 adding performance counter vmx/vcpusMhzMax[latest,absolute]:725
12857:20241101:185643.837 adding performance counter vmx/vcpusMhzMean[latest]:726
12857:20241101:185643.837 adding performance counter vmx/vcpusMhzMean[latest,absolute]:726
12857:20241101:185643.837 adding performance counter vmx/cpuSpeed[latest]:727
12857:20241101:185643.837 adding performance counter vmx/cpuSpeed[latest,absolute]:727
12857:20241101:185643.837 adding performance counter vmx/overheadMemSizeMin[latest]:728
12857:20241101:185643.837 adding performance counter vmx/overheadMemSizeMin[latest,absolute]:728
12857:20241101:185643.837 adding performance counter vmx/overheadMemSizeMax[latest]:729
12857:20241101:185643.837 adding performance counter vmx/overheadMemSizeMax[latest,absolute]:729
12857:20241101:185643.837 adding performance counter vmx/vigor.opsTotal[latest]:730
12857:20241101:185643.837 adding performance counter vmx/vigor.opsTotal[latest,absolute]:730
12857:20241101:185643.837 adding performance counter vmx/poll.itersPerS[latest]:731
12857:20241101:185643.837 adding performance counter vmx/poll.itersPerS[latest,absolute]:731
12857:20241101:185643.837 adding performance counter vmx/userRpc.opsPerS[latest]:732
12857:20241101:185643.837 adding performance counter vmx/userRpc.opsPerS[latest,absolute]:732
12857:20241101:185643.838 End of vmware_service_get_perf_counters():SUCCEED
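Each "adding performance counter" line above registers one counter path of the form group/counter[rollup] (plus its group/counter[rollup,statsType] variant) together with the numeric counter id assigned by vCenter. These paths are what Zabbix VMware performance-counter item keys refer to. As a sketch built on counters 607 and 677 from this list, item keys could look like the lines below; the {$VMWARE.URL} macro and the uuid/instance placeholders are illustrative, following the convention of the stock VMware templates rather than this particular setup.

    vmware.hv.perfcounter[{$VMWARE.URL},<hypervisor uuid>,"net/bytesRx[average]"]
    vmware.vm.perfcounter[{$VMWARE.URL},<vm uuid>,"virtualDisk/readLatencyUS[latest]","scsi0:0"]

The optional fourth parameter selects a specific counter instance (for example a NIC, a disk or a device path); left empty, the aggregate value for the counter is collected.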
12857:20241101:185643.838 In vmware_service_get_evt_severity()
12857:20241101:185643.905 vmware_service_get_evt_severity() SOAP response:
EventManagerdescriptionInformationinfoWarningwarningErrorerrorUseruserExtendedEventImport certificate successinfoImport certificate succeeded.Import certificate succeeded.Import certificate succeeded.Import certificate succeeded.ad.event.ImportCertEvent|Import certificate succeeded. <EventLongDescription id="ad.event.ImportCertEvent"> <description> Import certificate succeeded </description> </EventLongDescription> ExtendedEventImport certificate failureerrorImport certificate failed.Import certificate failed.Import certificate failed.Import certificate failed.ad.event.ImportCertFailedEvent|Import certificate failed. <EventLongDescription id="ad.event.ImportCertFailedEvent"> <description> Import certificate failed </description> </EventLongDescription> ExtendedEventJoin domain successinfoJoin domain succeeded.Join domain succeeded.Join domain succeeded.Join domain succeeded.ad.event.JoinDomainEvent|Join domain succeeded. <EventLongDescription id="ad.event.JoinDomainEvent"> <description> Join domain succeeded </description> </EventLongDescription> ExtendedEventJoin domain failureerrorJoin domain failed.Join domain failed.Join domain failed.Join domain failed.ad.event.JoinDomainFailedEvent|Join domain failed. <EventLongDescription id="ad.event.JoinDomainFailedEvent"> <description> Join domain failed </description> </EventLongDescription> ExtendedEventLeave domain successinfoLeave domain succeeded.Leave domain succeeded.Leave domain succeeded.Leave domain succeeded.ad.event.LeaveDomainEvent|Leave domain succeeded. <EventLongDescription id="ad.event.LeaveDomainEvent"> <description> Leave domain succeeded </description> </EventLongDescription> ExtendedEventLeave domain failureerrorLeave domain failed.Leave domain failed.Leave domain failed.Leave domain failed.ad.event.LeaveDomainFailedEvent|Leave domain failed. 
<EventLongDescription id="ad.event.LeaveDomainFailedEvent"> <description> Leave domain failed </description> </EventLongDescription> ExtendedEventBackup job failederrorcom.vmware.applmgmt.backup.job.failed.event|Backup job failed <EventLongDescription id="com.vmware.applmgmt.backup.job.failed.event"> <description> Backup job failed </description> <cause> <description> Backup job failed </description> <action> Check backup server connectivity and available space </action> </cause> </EventLongDescription> ExtendedEventBackup job finished successfullyinfocom.vmware.applmgmt.backup.job.finished.event|Backup job finished successfully <EventLongDescription id="com.vmware.applmgmt.backup.job.finished.event"> <description> Backup job finished successfully </description> <cause> <description> Backup job finished successfully </description> </cause> </EventLongDescription> ExtendedEventGlobal Permission created for user with role and propagation.infocom.vmware.cis.CreateGlobalPermission|Global Permission created for user {User} with role {Role} and propagation {Propagation}.EventExPermission created for user on item with role.infocom.vmware.cis.CreatePermission|Permission created for user {User} on item {DocType} with role {Role}.EventExGlobal Permission removed for user.infocom.vmware.cis.RemoveGlobalPermission|Global Permission removed for user {User}.EventExPermission removed for user on iteminfocom.vmware.cis.RemovePermission|Permission removed for user {User} on item {DocType}EventExUser attached tag(s) to object(s)com.vmware.cis.tagging.attach|User {User} attached tag(s) {Tag} to object(s) {Object}EventExUser detached tag(s) from object(s)com.vmware.cis.tagging.detach|User {User} detached tag(s) {Tag} from object(s) {Object}ExtendedEventHttpNfc service disabled - missing configurationerrorHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationcom.vmware.configuration.httpnfc.missing|HttpNfc service is disabled because of missing configuration. Please check vpxa configuration file and correct the error and reconnect host. <EventLongDescription id="com.vmware.configuration.httpnfc.missing"> <description> The HttpNfc service is disabled because of missing configuration section in vpxa.cfg. Please check vpxa configuration file and correct the error and reconnect host. </description> <cause> <description>The vpxa configuration file requires a configuration section for HttpNfc</description> <action>Please check vpxa configuration file and correct the error and reconnect host.</action> </cause> </EventLongDescription> EventExAdded Licenseinfocom.vmware.license.AddLicenseEvent|License {licenseKey} added to VirtualCenterEventExAssigned Licenseinfocom.vmware.license.AssignLicenseEvent|License {licenseKey} assigned to asset {entityName} with id {entityId}EventExDownload License Informationwarningcom.vmware.license.DLFDownloadFailedEvent|Failed to download license information from the host {hostname} due to {errorReason.@enum.com.vmware.license.DLFDownloadFailedEvent.DLFDownloadFailedReason}EventExDefault License Keys Updatedinfocom.vmware.license.DefaultLicenseKeysUpdatedEvent|Default License Keys for asset {entityName} have been updatedEventExHost License Edition Not Allowedwarningcom.vmware.license.HostLicenseEditionNotAllowedEvent|The host is licensed with {edition}. 
The license edition of vCenter Server does not support {edition}.ExtendedEventHost license or evaluation period has expiredwarningcom.vmware.license.HostLicenseExpiredEvent|Expired host license or evaluation period. <EventLongDescription id="com.vmware.license.HostLicenseExpiredEvent"> <description> Host license or evaluation period has expired. </description> <cause> <description>Expired host license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventHost time-limited license has expiredwarningcom.vmware.license.HostSubscriptionLicenseExpiredEvent|Expired host time-limited license. <EventLongDescription id="com.vmware.license.HostSubscriptionLicenseExpiredEvent"> <description> Host time-limited license has expired. </description> <cause> <description>Expired host time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> EventExLicense assignment faultsinfocom.vmware.license.LicenseAssignFailedEvent|License assignment on the host fails. Reasons: {errorMessage.@enum.com.vmware.license.LicenseAssignError}. <EventLongDescription id="com.vmware.license.LicenseAssignFailedEvent"> <description> The host license assignment succeeds on vCenter Server but can not be successfully pushed down to the host. Any license assignment to a host proceeds in two stages. In the first stage vCenter Server does preliminary checks on the license key, the license state of the host and determines if the requested assignment is valid. If so, it stores this assignment locally in its database. In the second stage, vCenter Server pushes the newly assigned license to the host. During the second stage the host might reject the assignment under certain circumstances. These circumstances usually result from a mismatch of the information available to vCenter Server and the host concerned. Any such discrepancies are notified to the user via this event. This event lists the reason because of which it was logged and also shows up as a configuration issue on the vSphere Client. </description> <cause> <description>License expiry information mismatch between vCenter Server and host</description> <action>If the system time on the machine running vCenter Server and host are not in sync then put them in sync</action> </cause> <cause> <description>The license key is a per Virtual Machine key and the number of powered on Virtual Machines is larger than the maximum limit of the key</description> <action>Use a different key with a larger capacity</action> </cause> </EventLongDescription> EventExLicense Capacity Exceededwarningcom.vmware.license.LicenseCapacityExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the license capacity ({capacity} {costUnitText})EventExLicense ExpirywarningYour host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires.com.vmware.license.LicenseExpiryEvent|Your host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires. <EventLongDescription id="com.vmware.license.LicenseExpiryEvent"> <description> If a host is assigned a temporary license (a license key with an expiry), this event is logged in order to provide users an advanced warning on the imminent expiry of the license key. The event logging starts 15 days prior to the expiry of the license key. 
This event also shows up on the host summary page as a configuration issue on the vSphere Client. </description> <cause> <description>License key is about to expire or has expired</description> <action>Assign a different license key</action> </cause> </EventLongDescription> EventExLicense User Threshold Exceededwarningcom.vmware.license.LicenseUserThresholdExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the user-defined threshold ({threshold} {costUnitText}) <EventLongDescription id="com.vmware.license.LicenseUserThresholdExceededEvent"> <description> Users can define thresholds to monitor overuse of the product license. This event is logged when the license usage threshold defined by the user for a product edition is exceeded. </description> <cause> <description> License usage of a product edition has exceeded the user-defined threshold </description> <action> Review license assignments and usage </action> </cause> </EventLongDescription> EventExRemoved Licenseinfocom.vmware.license.RemoveLicenseEvent|License {licenseKey} removed from VirtualCenterEventExUnassigned Licenseinfocom.vmware.license.UnassignLicenseEvent|License unassigned from asset {entityName} with id {entityId}ExtendedEventvCenter Server license or evaluation period has expiredwarningcom.vmware.license.VcLicenseExpiredEvent|Expired vCenter Server license or evaluation period. <EventLongDescription id="com.vmware.license.VcLicenseExpiredEvent"> <description> vCenter Server license or evaluation period has expired. </description> <cause> <description>Expired vCenter Server license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventvCenter Server time-limited license has expiredwarningcom.vmware.license.VcSubscriptionLicenseExpiredEvent|Expired vCenter Server time-limited license. <EventLongDescription id="com.vmware.license.VcSubscriptionLicenseExpiredEvent"> <description> vCenter Server time-limited license has expired. </description> <cause> <description>Expired vCenter Server time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventSome in-use features are not supported by current licensewarningcom.vmware.license.vsan.FeatureBeyondCapability|In-use vSAN features {feature} are not supported by current license.ExtendedEventHost flash capacity exceeds the licensed limit for vSANwarningcom.vmware.license.vsan.HostSsdOverUsageEvent|The capacity of the flash disks on the host exceeds the limit of the vSAN license. <EventLongDescription id="com.vmware.license.vsan.HostSsdOverUsageEvent"> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <cause> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> ExtendedEventvSAN license or evaluation period has expiredwarningcom.vmware.license.vsan.LicenseExpiryEvent|Expired vSAN license or evaluation period. <EventLongDescription id="com.vmware.license.vsan.LicenseExpiryEvent"> <description> Expired vSAN license or evaluation period. </description> <cause> <description> Expired vSAN license or evaluation period. </description> <action> Review cluster license assignments. 
</action> </cause> </EventLongDescription> ExtendedEventvSAN time-limited license has expiredwarningcom.vmware.license.vsan.SubscriptionLicenseExpiredEvent|Expired vSAN time-limited license. <EventLongDescription id="com.vmware.license.vsan.SubscriptionLicenseExpiredEvent"> <description> Expired vSAN time-limited license. </description> <cause> <description> Expired vSAN time-limited license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> EventExStorage policy associatedinfoAssociated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}com.vmware.pbm.profile.associate|Associated storage policy: {ProfileId} with entity: {EntityId}EventExStorage policy createdinfoStorage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.create|Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}EventExStorage policy deletedinfoDeleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}com.vmware.pbm.profile.delete|Deleted storage policy: {ProfileId}EventExStorage policy dissociatedinfoDissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}com.vmware.pbm.profile.dissociate|Dissociated storage policy: {ProfileId} from entity: {EntityId}EventExStorage policy updatedinfoStorage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.update|Storage policy updated for {ProfileId}. Policy name: {ProfileName}EventExStorage policy name updatedinfoStorage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}com.vmware.pbm.profile.updateName|Storage policy name updated for {ProfileId}. 
New name: {NewProfileName}EventExCertificate Manager event in SSOinfocom.vmware.sso.CertificateManager|Certificate Manager event by {userName} at {timestamp} : {description}EventExConfiguration Management event in SSOinfocom.vmware.sso.ConfigurationManagement|Configuration Management event by {userName} at {timestamp} : {description}EventExDomain Management event in SSOinfocom.vmware.sso.DomainManagement|Domain Management event by {userName} at {timestamp} : {description}EventExIdentity Source Management event in SSOinfocom.vmware.sso.IdentitySourceManagement|Identity Source Management event by {userName} at {timestamp} : {description}EventExIdentity Source LDAP Certificate is about to expireinfocom.vmware.sso.LDAPCertExpiry|Renew Identity Source LDAP Certificate: {description}EventExLockout Policy event in SSOinfocom.vmware.sso.LockoutPolicy|Lockout Policy event by {userName} at {timestamp} : {description}EventExFailed login attempt event in SSOerrorcom.vmware.sso.LoginFailure|Failed login {userName} from {userIp} at {timestamp} in SSOEventExSuccessful login attempt event in SSOinfocom.vmware.sso.LoginSuccess|Successful login {userName} from {userIp} at {timestamp} in SSOEventExLogout attempt event in SSOinfocom.vmware.sso.Logout|Logout event by {userName} from {userIp} at {timestamp} in SSOEventExPassword Policy event in SSOinfocom.vmware.sso.PasswordPolicy|Password Policy event by {userName} at {timestamp} : {description}EventExPrincipal Management event in SSOinfocom.vmware.sso.PrincipalManagement|Principal Management event by {userName} at {timestamp} : {description}EventExRole Management event in SSOinfocom.vmware.sso.RoleManagement|Role Management event by {userName} at {timestamp} : {description}EventExSTS Signing Certificates are about to expireinfocom.vmware.sso.STSCertExpiry|Renew STS Signing Certificates: {description}EventExSMTP Configuration event in SSOinfocom.vmware.sso.SmtpConfiguration|SMTP Configuration event by {userName} at {timestamp} : {description}EventExSystem Management event in SSOinfocom.vmware.sso.SystemManagement|System Management event by {userName} at {timestamp} : {description}EventExvCenter Identity event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentity|vCenter Identity event by {userName} at {timestamp} : {description}EventExvCenter Identity Providers event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentityProviders|vCenter Identity Providers event by {userName} at {timestamp} : {description}EventExvCenter Trusts event in Trustmanagementinfocom.vmware.trustmanagement.VcTrusts|vCenter Trusts event by {userName} at {timestamp} : {description}EventExIdentity Provider SSL Trust Certificate is about to expireinfocom.vmware.trustmanagement.WS1SSLCertExpiry|Renew Identity Provider SSL Trust Certificate: {description}EventExIdentity Provider Users and Groups token is about to expireinfocom.vmware.trustmanagement.WS1SyncTokenExpiry|Renew Identity Provider Users and Groups token: {description}EventExReports that a stage from autonomous cluster creation has failedwarningcom.vmware.vc.A8sCluster.CreateStageFailedEvent|Autonomous cluster creation stage: {stage} failed: {reason}EventExReports that a stage from autonomous cluster creation has completed successfullyinfocom.vmware.vc.A8sCluster.CreateStageSuccessEvent|Autonomous cluster creation stage: {stage} succeededEventExAutonomous cluster health is degraded.warningcom.vmware.vc.A8sCluster.HealthDegradedEvent|Autonomous cluster health is degraded. 
Reason: {reason}ExtendedEventAutonomous cluster is healthy.infocom.vmware.vc.A8sCluster.HealthHealthyEvent|Autonomous cluster is healthy.EventExAutonomous cluster is unhealthy.warningcom.vmware.vc.A8sCluster.HealthUnhealthyEvent|Autonomous cluster is unhealthy. Reason: {reason}ExtendedEventAuthz service is not running. Authorization data might not be synchronized.errorcom.vmware.vc.AuthzDataNotSynced|Authz service is not running. Authorization data might not be synchronized.ExtendedEventAuthz service is running. Authorization data is being synchronized.infocom.vmware.vc.AuthzDataSynced|Authz service is running. Authorization data is being synchronized.ExtendedEventEvent sequence ID reached its max value and was reset.infocom.vmware.vc.EventIdOverflow|Event sequence ID reached its max value and was reset.ExtendedEventcom.vmware.vc.FailedToApplyPermissionsEvent|ExtendedEventvSphere HA agent can reach all cluster management addressesinfoThe vSphere HA agent on the host {host.name} in cluster {computeResource.name} can reach all the cluster management addressesThe vSphere HA agent on the host {host.name} can reach all the cluster management addressesThe vSphere HA agent on this host can reach all the cluster management addressescom.vmware.vc.HA.AllHostAddrsPingable|The vSphere HA agent on the host {host.name} in cluster {computeResource.name} in {datacenter.name} can reach all the cluster management addresses <EventLongDescription id="com.vmware.vc.HA.AllHostAddrsPingable"> <description> The host is able to ping all of the vSphere HA management addresses of every other cluster host. </description> </EventLongDescription> ExtendedEventvSphere HA agent can reach all isolation addressesinfoAll vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name}All vSphere HA isolation addresses are reachable by this hostAll vSphere HA isolation addresses are reachable by hostcom.vmware.vc.HA.AllIsoAddrsPingable|All vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.AllIsoAddrsPingable"> <description> The host is able to ping all of the vSphere HA isolation addresses. </description> </EventLongDescription> ExtendedEventvSphere HA answered a lock-lost question on a virtual machinewarningvSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name}vSphere HA answered the lock-lost question on virtual machine {vm.name}vSphere HA answered the lock-lost question on this virtual machinecom.vmware.vc.HA.AnsweredVmLockLostQuestionEvent|vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmLockLostQuestionEvent"> <description> The virtual machine running on this host lost the exclusive lock of its files on disk. This will occur if another instance of this virtual machine is running on a different host. This situation can happen if a host loses access to both its storage and management networks but is not configured to shutdown its virtual machines on isolation. The virtual machines on this host will continue to run without access to their disks, while vSphere HA will start a new instance of the virtual machines on another host in the cluster. 
When the isolated host regains access to the storage network, it will try to reacquire the disk locks. This will fail since the disk locks are held by another host. The host will then issue a question on the virtual machine indicating that disk locks have been lost. vSphere HA will automatically answer this question to allow the virtual machine instance without the disk locks to power off. <description> </EventLongDescription> ExtendedEventvSphere HA answered a question from the host about terminating a virtual machinewarningvSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name}vSphere HA answered a question from host {host.name} about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating this virtual machinecom.vmware.vc.HA.AnsweredVmTerminatePDLEvent|vSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmTerminatePDLEvent"> <description> The virtual machine running on this host had a virtual disk which experienced permenant device loss. The host will issue a question if it is configured to terminate the VM automatically under such condition. This event indicates that vSphere HA answered the question. After the VM is terminated, vSphere HA will make a best effort to restart it. <description> </EventLongDescription> ExtendedEventvSphere HA disabled the automatic VM Startup/Shutdown featureinfovSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on the host {host.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature. Automatic VM restarts will interfere with HA when reacting to a host failure.com.vmware.vc.HA.AutoStartDisabled|vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Automatic VM restarts will interfere with HA when reacting to a host failure. <EventLongDescription id="com.vmware.vc.HA.AutoStartDisabled"> <description> Virtual Machine Startup/Shutdown has been disabled by HA. A host which is contained in an vSphere HA cluster is not permitted to have automatic virtual machine startup and shutdown since it may conflict with HA's attempts to relocate the virtual machines if a host fails. 
</description> </EventLongDescription> ExtendedEventvSphere HA did not reset a VM which had files on inaccessible datastore(s)warningvSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on host {host.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on this host because the VM had files on inaccessible datastore(s)vSphere HA did not reset this VM because the VM had file(s) on inaccessible datastore(s)com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore|vSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the VM had files on inaccessible datastore(s) <EventLongDescription id=" com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore"> <description> This event is logged when vSphere HA did not reset a VM affected by an inaccessible datastore. It will attempt to reset the VM after storage failure is cleared. </description> <cause> <description> The VM is affected by an inaccessible datastore due to storage connectivity loss. Resetting such a VM might cause the VM to be powered off and not restarted by vSphere HA. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA cluster contains incompatible hosts.warningvSphere HA Cluster {computeResource.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.vSphere HA Cluster contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.com.vmware.vc.HA.ClusterContainsIncompatibleHosts|vSphere HA Cluster {computeResource.name} in {datacenter.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported. <EventLongDescription id="com.vmware.vc.HA.ClusterContainsIncompatibleHosts"> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions. </description> <cause> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions, which isn't fully supported. Failover of VMs from ESX/ESXi 3.5 hosts to newer hosts is not guaranteed. </description> <action> Place ESX/ESXi 3.5 hosts into a separate vSphere HA cluster from hosts with more recent ESX versions. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA completed a failover actioninfovSphere HA completed a virtual machine failover action in cluster {computeResource.name}vSphere HA completed a virtual machine failover actioncom.vmware.vc.HA.ClusterFailoverActionCompletedEvent|vSphere HA completed a virtual machine failover action in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA initiated a failover actionwarningvSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name}vSphere HA initiated a failover action on {pendingVms} virtual machinescom.vmware.vc.HA.ClusterFailoverActionInitiatedEvent|vSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA failover operation in progressWarningvSphere HA failover operation in progress in cluster {computeResource.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMsvSphere HA failover operation in progress: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMscom.vmware.vc.HA.ClusterFailoverInProgressEvent|vSphere HA failover operation in progress in cluster {computeResource.name} in datacenter {datacenter.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMs <EventLongDescription id="com.vmware.vc.HA.ClusterFailoverInProgressEvent"> <description> This event is logged when a vSphere HA failover operation is in progress for virtual machines in the cluster. It also reports the number of virtual machines that are being restarted. There are four different categories of such VMs. (1) VMs being placed: vSphere HA is in the process of trying to restart these VMs; (2) VMs awaiting retry: a previous restart attempt failed, and vSphere HA is waiting for a timeout to expire before trying again; (3) VMs requiring additional resources: insufficient resources are available to restart these VMs. vSphere HA will retry when more resources become available (such as a host comes back on line); (4) Inaccessible vSAN VMs: vSphere HA cannot restart these vSAN VMs because they are not accessible. It will retry when there is a change in accessibility. </description> <cause> <description> vSphere HA is attempting to restart failed virtual machines in the cluster. It might be that the virtual machine restart is pending and has not yet completed. </description> <action> vSphere HA will retry the failover on another host unless the maximum number of failover attempts has been reached. A subsequent retry may succeed in powering on the virtual machine so allow the vSphere HA failover operation to be declared a success or failure. </action> </cause> <cause> <description> This event might also be generated when a required resource in the cluster becomes temporarily unavailabile due to network reconfiguration, hardware upgrade, software update, host overload, etc. which can cause vSphere HA to lose its network or storage hearbeats to certain hosts or virtual machines and mark them inaccessible. </description> <action> In many cases, this may be a temporary condition. 
If the cluster soon stabilizes to its normal condition vSphere HA will detect the host and virtual machines to be live and discard any failover attempts. In such cases, this event may be treated as a soft alarm caused by such changes. </action> </cause> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It can also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to fail over the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or trying powering on the virtual machine and investigate any returned errors. </action> <action> If the error reports that a file is locked, the VM might be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it might have been powered on by a user on a host. If any hosts have been declared dead, investigate whether a networking or storage issue is the cause. </action> <action> If the error reports that the virtual machine is in an invalid state, there might be an operation in progress that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation, that are taking a long time to complete. </action> </cause> </EventLongDescription> ExtendedEventHost connected to a vSphere HA masterinfovSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}com.vmware.vc.HA.ConnectedToMaster|vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.ConnectedToMaster"> <description> This event is logged whenever a host in a vSphere HA cluster transitions to a slave host state and establishes a connection with a master host. </description> </EventLongDescription> ExtendedEventvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}errorvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}com.vmware.vc.HA.CreateConfigVvolFailedEvent|vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. 
Error: {fault} <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolFailedEvent"> <description> vSphere HA failed to create a config vvol on the datastore </description> <cause> <description>A possible VP, host, network, or lack of resources prevented vSphere HA from creating a config vvol</description> <action>Look for errors in the environment, then re-enable vSphere HA</action> </cause> </EventLongDescription> ExtendedEventvSphere HA successfully created a configuration vVol after the previous failureinfovSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurecom.vmware.vc.HA.CreateConfigVvolSucceededEvent|vSphere HA successfully created a configuration vVol after the previous failure <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolSucceededEvent"> <description> vSphere HA successfully created a config vvol on the datastore. If there was a failed config vvol datastore configuration issue, it is being cleared </description> <cause> <description> There were no errors during creation of the config vvol on the datastore</description> </cause> </EventLongDescription> ExtendedEventvSphere HA agent is runninginfovSphere HA agent on host {host.name} in cluster {computeResource.name} is runningvSphere HA agent on host {host.name} is runningvSphere HA agent is runningcom.vmware.vc.HA.DasAgentRunningEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is running <EventLongDescription id=" com.vmware.vc.HA.DasAgentRunningEvent"> <description> This event is logged when the vSphere HA agent is running on a host. </description> <cause> <description> This event is reported after vSphere HA is configured on a host or after the vSphere HA agent on a host starts, such as after a host reboot. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected an HA cluster state version inconsistencywarningvSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name}vSphere HA detected an HA cluster state version inconsistencycom.vmware.vc.HA.DasClusterVersionInconsistentEvent|vSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasClusterVersionInconsistentEvent"> <description> This event is logged when vSphere HA cluster has a version inconsistency for cluster state(HostList, ClusterConfiguration, VM protection state). </description> <cause> <description> This situation could primarily occur if vCenter has been restored to an older backed up state causing vCenter to rollback to older version for the vSphere HA cluster state (HostList, ClusterConfiguration, VM protection state) while the hosts on the cluster have the latest version for the cluster state. As a result, protection state for VMs will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster, any new cluster configuration state will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster and if hosts were added or removed to/from this vSphere HA cluster after vCenter backup and before vCenter Restore, VMs could potentially failover to hosts not being managed by vCenter but which are still part of the HA cluster. </description> <action> Step 1. 
If hosts were added or removed to/from the vSphere HA cluster after vCenter backup and before vCenter Restore, please add or remove those respective hosts back to the vSphere HA cluster so that the list of hosts in the vSphere HA cluster is identical to the list of hosts in the cluster before vCenter was last restored. If you do not want to add hosts to the cluster, stop the vSphere HA process on the hosts that were added to vCenter after the backup. If this is not done, in case of a failure, VMs could potentially fail over to hosts not being managed by vCenter but which are still part of the HA cluster. </action> <action> Step 2. Disable vSphere HA on the cluster and then re-enable vSphere HA on the cluster. This will make sure that vCenter's version for the vSphere HA cluster state (HostList, ClusterConfiguration, VM protection state) is reset with a new fault domain id for the HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a failed failover hosterrorvSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name}vSphere HA detected a possible failure of failover host {host.name}vSphere HA detected a possible failure of this failover hostcom.vmware.vc.HA.DasFailoverHostFailedEvent|vSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostFailedEvent"> <description> This event is logged when vSphere HA has detected the failure of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the failure of a failover host. A host is considered to have failed by a vSphere HA master agent if it loses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the failover host failure, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is not running and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-isolated failover hosterrorvSphere HA detected that failover host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that failover host {host.name} is network isolated from the clustervSphere HA detected that this failover host is network isolated from the clustercom.vmware.vc.HA.DasFailoverHostIsolatedEvent|Host {host.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the network isolation of a failover host. vSphere HA reports a host as isolated if there are no heartbeats received from the HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the host's datastore heartbeats.
</description> <action> Determine the cause of the failover host isolation, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is isolated and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-partitioned failover hostwarningvSphere HA detected that failover host {host.name} in {computeResource.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that failover host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this failover host is in a different network partition than the mastercom.vmware.vc.HA.DasFailoverHostPartitionedEvent|Failover Host {host.name} in {computeResource.name} in {datacenter.name} is in a different network partition than the master <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostPartitionedEvent"> <description> This event is logged when vSphere HA has detected a designated failover host is network partitioned. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if a vSphere HA master agent detects a failover host is network partitioned. vSphere HA reports a host as partitioned if it cannot communicate with a subset of hosts in the cluster, yet can determine that the host is alive via its datastore heartbeats. </description> <action> Determine the cause of the partitioned failover host, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if a failover host is partitioned and a host failure occurs. See the product documentation for troubleshooting tips.
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA agent on a failover host is unreachableerrorThe vSphere HA agent on the failover host {host.name} in {computeResource.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on the failover host {host.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on this failover host is not reachable but host responds to ICMP pingscom.vmware.vc.HA.DasFailoverHostUnreachableEvent|The vSphere HA agent on the failover host {host.name} in cluster {computeResource.name} in {datacenter.name} is not reachable but host responds to ICMP pingsEventExHost complete datastore failureerrorAll shared datastores failed on the host {hostName} in cluster {computeResource.name}All shared datastores failed on the host {hostName}All shared datastores failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent|All shared datastores failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent"> <description> A host in a Component Protection-enabled cluster has lost connectivity to all shared datastores </description> <cause> <description>Connectivity to all shared datastores has been lost</description> <action>Reconnect at least one shared datastore</action> </cause> </EventLongDescription> EventExHost complete network failureerrorAll VM networks failed on the host {hostName} in cluster {computeResource.name}All VM networks failed on the host {hostName}All VM networks failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent|All VM networks failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent"> <description> A host in a Component Protection-enabled cluster has lost connectivity to all virtual machine networks </description> <cause> <description>Connectivity to all virtual machine networks has been lost</description> <action>Reconnect at least one virtual machine network</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a host failureerrorvSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name}vSphere HA detected a possible host failure of host {host.name}vSphere HA detected a possible host failure of this hostcom.vmware.vc.HA.DasHostFailedEvent|vSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostFailedEvent"> <description> This event is logged when vSphere HA detects a possible host failure. </description> <cause> <description> A host is considered to have failed by a vSphere HA master agent if it loses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the host failure, and correct. See the product documentation for troubleshooting tips.
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network isolated hosterrorvSphere HA detected that host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that host {host.name} is network isolated from the clustervSphere HA detected that this host is network isolated from the clustercom.vmware.vc.HA.DasHostIsolatedEvent|vSphere HA detected that host {host.name} is isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a host. </description> <cause> <description> This event will be generated if there are no heartbeats received from the vSphere HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the host's datastore heartbeats. </description> <action> Determine the cause of the host isolation, and correct. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA host monitoring is disabledwarningvSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name}vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabledcom.vmware.vc.HA.DasHostMonitoringDisabledEvent|vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostMonitoringDisabledEvent"> <description> This event is logged when host monitoring has been disabled in a vSphere HA cluster. </description> <cause> <description> Host monitoring is disabled, so vSphere HA will not perform any failover actions. This event is generated to inform the user that their cluster is temporarily not being protected against host or VM failures. If host or VM failures occur while host monitoring is disabled, HA will not attempt to restart the VMs that were running on the failed hosts. Other vSphere HA features are not impacted by whether host monitoring is disabled. </description> <action> Enable host monitoring to resume host monitoring. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA failed to restart a network isolated virtual machineerrorvSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart this virtual machine after it was powered off in response to a network isolation eventcom.vmware.vc.HA.FailedRestartAfterIsolationEvent|vSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} after it was powered off in response to a network isolation event.
The virtual machine should be manually powered back on.EventExRunning VMs utilization cannot satisfy the configured failover resources on the cluster.warningRunning VMs utilization cannot satisfy the configured failover resources on cluster {computeResource.name}Running VMs utilization cannot satisfy the configured failover resources on the cluster.com.vmware.vc.HA.FailoverResourcesViolationEvent|Running VMs utilization cannot satisfy the configured failover resources on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.FailoverResourcesViolationEvent"> <description> This event is logged when the total utilization of the running VMs cannot satisfy the configured failover resources on a vSphere HA admission controlled cluster. </description> <cause> <description> The total utilization of the running VMs on this cluster is unable to satisfy the configured failover resources in the cluster. This event is generated to inform the user that their cluster will be running in a compromised state during failover and would not have sufficient failover resources to ensure the optimal functioning of the VMs and their workloads. The side-effect of this situation is that VMs won't be working optimally even though we ensure required failover capacity in case of failures. Other vSphere HA features are not impacted by this and this warning doesn't affect any VM related operations like power-on, vMotion, etc. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to ensure that there is sufficient failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA changed a host's heartbeat datastoresinfoDatastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on this hostcom.vmware.vc.HA.HeartbeatDatastoreChanged|Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreSelected"> <description> A datastore is selected or deselected for storage heartbeating monitored by the vSphere HA agent on this host. vSphere HA employs storage heartbeating to detect host failures when there is a network partition.
</description> </EventLongDescription> EventExvSphere HA heartbeat datastore number for a host is insufficientwarningThe number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for host {host.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for this host is {selectedNum}, which is less than required: {requiredNum}com.vmware.vc.HA.HeartbeatDatastoreNotSufficient|The number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} in {datacenter.name} is {selectedNum}, which is less than required: {requiredNum} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreNotSufficient"> <description> The number of heartbeat datastores used for this host is less than required. Multiple heartbeat datastores are needed to tolerate storage failures. The host summary page will report a configuration issue in this case. To ignore the configuration issue, use the vSphere HA cluster advanced option, das.ignoreInsufficientHbDatastore. </description> <cause> <description> The host does not have sufficient number of accessible datastores that are shared among other hosts in the cluster. </description> <action> Add more shared datastores to the host or check if any of its datastore is currently inaccessible. </action> </cause> </EventLongDescription> EventExvSphere HA agent on a host has an errorwarningvSphere HA agent for host {host.name} has an error in {computeResource.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for this host has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}com.vmware.vc.HA.HostAgentErrorEvent|vSphere HA agent for host {host.name} has an error in {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason} <EventLongDescription id="com.vmware.vc.HA.AgentErrorEvent"> <description> This event is logged when the vSphere HA agent for the host has an error. </description> <action> See product documentation for troubleshooting tips. </action> </EventLongDescription> ExtendedEventvSphere HA agent is healthyinfovSphere HA agent on host {host.name} in cluster {computeResource.name} is healthyvSphere HA agent on host {host.name} is healthyvSphere HA agent is healthycom.vmware.vc.HA.HostDasAgentHealthyEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is healthy <EventLongDescription id=" com.vmware.vc.HA.HostDasAgentHealthyEvent"> <description> This event is logged when the vSphere HA agent on a host transitions to a healthy state. </description> <cause> <description> vSphere HA reports this event when the vSphere HA agent on the host is either a master or a slave that is connected to the master over the management network. </description> </cause> </EventLongDescription> EventExvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error. 
{reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}com.vmware.vc.HA.HostDasErrorEvent|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason} <EventLongDescription id="com.vmware.vc.HA.HostDasErrorEvent"> <description> The vSphere HA agent on this host has an error. The event may provide details with extra information indicating the cause of the error. </description> <cause> <description>There was an error configuring the vSphere HA agent on the host</description> <action> Look at the task details for the configure vSphere HA task that failed. That will provide more details about why the failure occurred. Address the problem and reconfigure vSphere HA on the host. </action> </cause> <cause> <description> There was a timeout while communicating with the vSphere HA agent. This can occur if there is a high rate of operations being performed on virtual machines in the cluster resulting in the vSphere HA agents not being able to process the changes fast enough. </description> <action> Verify that this is a transient problem by stopping operations on virtual machines in the cluster for a few minutes to give time to the vSphere HA agents to process all their pending messages. If this resolves the problem, consider reducing the rate of operations performed on the cluster. </action> </cause> <cause> <description>The vSphere HA agent is in a shutdown or failed state</description> <action>Reconfigure vSphere HA on the host. If this fails, reconfigure vSphere HA on the cluster</action> </cause> </EventLongDescription> EventExvSphere HA detected a datastore failurewarningvSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of datastore {arg1} on host {host.name}vSphere HA detected a failure of datastore {arg1}com.vmware.vc.HA.HostDatastoreFailedEvent|vSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventUnsupported vSphere HA and vCloud Distributed Storage configurationerrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured on host {host.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured because vCloud Distributed Storage is enabled but the host does not support that featurecom.vmware.vc.HA.HostDoesNotSupportVsan|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in {datacenter.name} because vCloud Distributed Storage is enabled but the host does not support that featureExtendedEventHost has no vSphere HA isolation addresseserrorHost {host.name} in cluster {computeResource.name} has no isolation addresses defined as required by vSphere HAHost {host.name} has no isolation addresses defined as required by vSphere HAThis host has no isolation addresses defined as required by vSphere HAcom.vmware.vc.HA.HostHasNoIsolationAddrsDefined|Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no isolation addresses defined as required by vSphere HA.
<EventLongDescription id="com.vmware.vc.HA.HostHasNoIsolationAddrsDefined"> <description> The host has an vSphere HA configuration issue because there were no IP addresses that vSphere HA could use for detecting network isolation. Without at least one, the host will not take any isolation response. HA, by default, will use the host's default gateway (defined in the host's networking configuration), or use the addresses that were specified in the cluster's advanced settings. </description> <action> Define a default gateway in the host's networking configuration. </action> <action> If the cluster advanced setting das.usedefaultisolationaddress is false, you must define at least one isolation address using the advanced options. </action> <action> Define one or more cluster advanced options, each containing an IP address to be pinged by vSphere HA to detect if it is network-isolated when it no longer receives communication with other hosts in the cluster. The advanced option is das.isolationAddress[n], where 'n' is a number from 1 to 9. You may specify multiple addresses. </action> </EventLongDescription> ExtendedEventvSphere HA cannot be configured on this host because there are no mounted datastores.errorvSphere HA cannot be configured on {host.name} in cluster {computeResource.name} because there are no mounted datastores.vSphere HA cannot be configured on {host.name} because there are no mounted datastores.vSphere HA cannot be configured on this host because there are no mounted datastores.com.vmware.vc.HA.HostHasNoMountedDatastores|vSphere HA cannot be configured on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because there are no mounted datastores.ExtendedEventvSphere HA requires a SSL Thumbprint for hosterrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on {host.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on this host because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for this host has been verified.com.vmware.vc.HA.HostHasNoSslThumbprint|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified. <EventLongDescription id="com.vmware.vc.HA.HostHasNoSslThumbprint"> <description> The host has an vSphere HA configuration issue because it does not have a verified ssl thumbprint. Hosts need verified SSL thumbprints for secure vSphere HA communications. </description> <action> If the host is using self-signed certificates, check that vCenter Server is configured to verify SSL certificates, and verify the thumbprints for the hosts in the vSphere HA cluster. 
</action> </EventLongDescription> ExtendedEventHost is incompatible with vSphere HAerrorThe product version of host {host.name} in cluster {computeResource.name} is incompatible with vSphere HA.The product version of host {host.name} is incompatible with vSphere HA.The product version of this host is incompatible with vSphere HA.com.vmware.vc.HA.HostIncompatibleWithHA|The product version of host {host.name} in cluster {computeResource.name} in {datacenter.name} is incompatible with vSphere HA. <EventLongDescription id="com.vmware.vc.HA.HostIncompatibleWithHA"> <description> The host is in a vSphere HA cluster but its product version is incompatible with HA. </description> <action> To fix the situation the host should either be moved out of the vSphere HA cluster or upgraded to a version supporting HA. </action> </EventLongDescription> EventExvSphere HA detected a network failurewarningvSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of network {network} on host {host.name}vSphere HA detected a failure of network {network}com.vmware.vc.HA.HostNetworkFailedEvent|vSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventvSphere HA detected a network-partitioned hostwarningvSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connected in {computeResource.name}vSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this host is in a different network partition than the master to which vCenter Server is connectedcom.vmware.vc.HA.HostPartitionedFromMasterEvent|vSphere HA detected that host {host.name} is in a different network partition than the master {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HostPartitionedFromMasterEvent"> <description> This event is logged when the host is in a different partition than the master. </description> </EventLongDescription> EventExThe vSphere HA host availability state changedinfoThe vSphere HA availability state of the host {host.name} in cluster {computeResource.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of the host {host.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of this host has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}com.vmware.vc.HA.HostStateChangedEvent|The vSphere HA availability state of the host {host.name} in cluster in {computeResource.name} in {datacenter.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState} <EventLongDescription id="com.vmware.vc.HA.HostStateChangedEvent"> <description> This event is logged when the availability state of a host has changed. </description> </EventLongDescription> ExtendedEventvSphere HA agent unconfigure failed on hostwarningThere was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on host {host.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on this host. 
To solve this problem, reconnect the host to vCenter Server.com.vmware.vc.HA.HostUnconfigureError|There was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name}. To solve this problem, reconnect the host to vCenter Server. <EventLongDescription id="com.vmware.vc.HA.HostUnconfigureError"> <description> There was an error unconfiguring the vSphere HA agent on this host. </description> <cause> <description> The vSphere HA unconfiguration task failed to send the updated hostList to the vSphere HA agent on the host. This condition may interfere with the vSphere HA cluster to which the host used to belong and should be corrected. </description> <action> Add the host back to a vCenter Server of version 5.0 or later. </action> </cause> </EventLongDescription> EventExA disconnected host has vSphere HA protected VMserrorHost {host.name} in cluster {computeResource.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)Host {host.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)This host is disconnected from vCenter Server, but contains {protectedVmCount} vSphere HA protected virtual machine(s)com.vmware.vc.HA.HostUnconfiguredWithProtectedVms|Host {host.name} in cluster {computeResource.name} in {datacenter.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s) <EventLongDescription id="com.vmware.vc.HA.HostUnconfiguredWithProtectedVms"> <description> This host is disconnected and contains one or more virtual machine(s) that are still protected by vSphere HA. Consequently, these virtual machines could be failed over to another host if this host should fail. </description> <cause> <description> If a vSphere HA-enabled host is disconnected and is unable to unprotect the virtual machines currently running on it (perhaps due to datastores being unavailable, or not being able to communicate with the vSphere HA master host) then these virtual machines would still be protected, but reside on the disconnected host. Also, if a virtual machine is migrated using vMotion to a vSphere HA-enabled host that is currently in the process of disconnecting, this can lead to the same result. </description> <action> To correct this situation, ensure that the host has access to the datastores used by these virtual machines, and then reconnect the host to a vSphere HA-enabled cluster. The virtual machines should become unprotected shortly after vSphere HA is configured on the host. </action> </cause> </EventLongDescription> EventExvSphere HA configured failover resources are insufficient to satisfy desired failover levelwarningInsufficient configured resources to satisfy the desired vSphere HA failover level on cluster {computeResource.name}Insufficient configured resources to satisfy the desired vSphere HA failover levelcom.vmware.vc.HA.InsufficientFailoverLevelEvent|Insufficient configured resources to satisfy the desired vSphere HA failover level on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.InsufficientFailoverLevelEvent"> <description> The cluster does not have enough failover capacity to satisfy the desired host failures to tolerate for vSphere HA. Failovers may still be performed by vSphere HA but will be on a best effort basis and configured resources may not be sufficient to respect the desired host failures to tolerate.
</description> <cause> <description> The desired host failures to tolerate setting might not be completely respected since the cluster does not have the required failover capacity to satisfy the failover of the largest desired number of hosts. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to reserve more failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA detected an invalid master agentwarningvSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.com.vmware.vc.HA.InvalidMaster|vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised. <EventLongDescription id="com.vmware.vc.HA.InvalidMaster"> <description> A host in a vSphere HA cluster that is claiming to be a master has been determined to be invalid by another master host. This occurs when an existing master gets a message from another master in the same cluster. The existing master verifies that the other master is actually a valid master before it considers abdicating to the other master. An invalid master is an indication that there may be a compromised host on the network that is attempting to disrupt the HA cluster. The offending host should be examined to determine if it has been compromised. It's also possible that a compromised host is impersonating a valid host, so the reported host may not be the actual host that is compromised. </description> </EventLongDescription> ExtendedEventvSphere HA could not identify lock owner host on VM with duplicatesinfovSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name}vSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on this VM with duplicatescom.vmware.vc.HA.LockOwnerUnKnownForDupVms|vSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.LockOwnerUnKnownForDupVms"> <description> The vSphere HA agent could not identify the lock owner host on duplicate VMs. </description> <cause> <description> This occurs when vSphere HA fails over the VM to another host but is unable to bring down the VM on the failed host. This results in multiple instances of a VM running in the cluster if the failed host rejoins the cluster. </description> <action> Could not determine the lock owner host on the duplicate VM.
</action> </cause> </EventLongDescription> EventExvSphere HA agent cannot reach some cluster management addressesinfovSphere HA agent on {host.name} in cluster {computeResource.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on {host.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on host cannot reach some management network addresses of other hosts: {unpingableAddrs}com.vmware.vc.HA.NotAllHostAddrsPingable|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} cannot reach some management network addresses of other hosts: {unpingableAddrs} <EventLongDescription id="com.vmware.vc.HA.NotAllIsoAddrsPingable"> <description> The vSphere HA agent on the host cannot reach some of the management network addresses of other hosts, and vSphere HA may not be able to restart VMs if a host failure occurs. </description> <cause> <description> There is a network issue preventing this host from communicating with some or all of the hosts in the cluster over their vSphere HA management networks. vSphere HA reliability is currently compromised in the cluster and failover may not reliably occur if a host or hosts should fail during this condition. </description> <action> Determine and correct the source of the communication problem. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not terminate the VM that was selected for preemptionerrorvSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name}vSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate this VM that was selected for preemptioncom.vmware.vc.HA.PreemptionFailedWithMaxRetry|vSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.PreemptionFailedWithMaxRetry"> <description> vSphere HA could not terminate the VM that was selected for preemption. </description> <cause> <description> This occurs when vSphere HA receives an InsufficientResourcesFault for a VM with a fault reason indicating the presence of a preemptible VM. vSphere HA terminates the appropriate preemptible VM to free up resources. </description> <action> Terminate the preemptible VM manually to free up resources. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA remediated duplicates of VMinfovSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of this VMcom.vmware.vc.HA.RemediatedDupVMs|vSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediatedDupVMs"> <description> The vSphere HA agent on the host remediated the duplicate VM. </description> <cause> <description> This occurs when vSphere HA fails over the VM to another host but is unable to bring down the VM on the failed host. This results in multiple instances of a VM running in the cluster if the failed host rejoins the cluster. </description> <action> The VM was kept running on the host that holds the lock on the datastore, and the VM was terminated on the rest of the hosts where it was running.
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not remediate duplicates of VMwarningvSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of this VMcom.vmware.vc.HA.RemediationFailedForDupVMs|vSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediationFailedForDupVMs"> <description> The vSphere HA agent on host could not remediate duplicate VM. </description> <cause> <description> Instances when vSphere HA failovers the VM to another host, and unable to bring down the VM from the failed host. This results in multiple instances of a VM running in the cluster if the failed host joins back the cluster. </description> <action> Duplicates of VM running on multiple hosts could not be terminated. </action> </cause> </EventLongDescription> EventExvSphere HA failed to start a Fault Tolerance secondary VM.errorvSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.com.vmware.vc.HA.StartFTSecondaryFailedEvent|vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name} in {datacenter.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out. <EventLongDescription id="com.vmware.vc.HA.StartFTSecondaryFailedEvent"> <description> vSphere HA agent failed to start a Fault Tolerance secondary VM. vSphere HA will retry until either the operation succeeds or until the maximum number of restart attempts is reached. </description> </EventLongDescription> EventExvSphere HA successfully started a Fault Tolerance secondary VM.infovSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}.com.vmware.vc.HA.StartFTSecondarySucceededEvent|vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. 
<EventLongDescription id="com.vmware.vc.HA.StartFTSecondarySucceededEvent"> <description> vSphere HA agent successfully started a Fault Tolerance secondary virtual machine. </description> </EventLongDescription> EventExvSphere HA removed a datastore from preferred heartbeat datastoreswarningvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} because the datastore is removed from inventoryvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster because the datastore is removed from inventorycom.vmware.vc.HA.UserHeartbeatDatastoreRemoved|vSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} in {datacenter.name} because the datastore is removed from inventory <EventLongDescription id="com.vmware.vc.HA.UserHeartbeatDatastoreRemoved"> <description> The datastore is removed from the set of preferred heartbeat datastores selected for this cluster. </description> <cause> <description> The datastore does not exist in the inventory. This happens when the datastore is removed from a host in the cluster manually or via a rescan. </description> <action> Choose a different datastore by reconfiguring the vSphere HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA did not perform an isolation response for vm because its VM restart priority is DisabledinfovSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} because its VM restart priorirty is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is Disabled"vSphere HA did not perform an isolation response because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledIsolationEvent|vSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledIsolationEvent"> <description> This event is logged when a host in a vSphere HA cluster was isolated and no isolation response was taken. </description> <cause> <description> The VM restart priority setting is set to disabled, so vSphere HA did not perform any action on this VM when the host became isolated. If the restart priority is disabled, HA will not attempt to restart the VM on another host, so HA will take no action for this VM on the isolated host. This event is informational only. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA did not attempt to restart vm because its VM restart priority is DisabledinfovSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is Disabled"vSphere HA did not attempt to restart vm because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledRestartEvent|vSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledRestartEvent"> <description> This event is logged when a failed VM in a vSphere HA cluster will not be restarted because its VM restart priority setting is set to disabled. </description> <cause> <description> The restart priority for the cluster or VM is disabled, so vSphere HA did not perform any action on this VM failed. This event is informational only. </description> </cause> </EventLongDescription> EventExvCenter Server cannot communicate with the master vSphere HA agentwarningvCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name}vCenter Server cannot communicate with the master vSphere HA agent on {hostname}com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent|vCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent"> <description> This event is logged when vCenter Server cannot communicate with a vSphere HA master agent. </description> <cause> <description> This event is reported when vCenter Server is not able to communicate with a vSphere HA master agent on the host, but it can communicate with other vSphere HA agents in the cluster and these are reporting the host is a master. </description> <action> Correct the networking issue that is preventing vCenter Server from communicating with the host listed in the event. This problem can occur, for example, if the physical NIC in use by this network connection has failed. </action> </cause> </EventLongDescription> ExtendedEventvCenter Server is unable to find a master vSphere HA agentwarningvCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name}vCenter Server is unable to find a master vSphere HA agentcom.vmware.vc.HA.VcCannotFindMasterEvent|vCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotFindMasterEvent"> <description> This event is logged when vCenter Server is unable to find a master vSphere HA agent. 
</description> <cause> <description> </description> <action> </action> </cause> </EventLongDescription> EventExvCenter Server connected to a vSphere HA master agentinfovCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is connected to a master HA agent running on host {hostname}com.vmware.vc.HA.VcConnectedToMasterEvent|vCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcConnectedToMasterEvent"> <description> This event is logged when vCenter Server is connected with a master vSphere HA agent. </description> </EventLongDescription> EventExvCenter Server disconnected from a master vSphere HA agentwarningvCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is disconnected from a master HA agent running on host {hostname}com.vmware.vc.HA.VcDisconnectedFromMasterEvent|vCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcDisconnectedFromMasterEvent"> <description> This event is logged when vCenter Server is disconnected from a master vSphere HA agent. </description> </EventLongDescription> ExtendedEventvSphere HA was unable to reset a VM after it exhausted the retrieserrorvSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on host {host.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on this host after {retryTimes} retriesvSphere HA was unable to reset this VM after {retryTimes} retriescom.vmware.vc.HA.VmDasResetAbortedEvent|vSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmDasResetAbortedEvent"> <description> This event is logged when vSphere HA was unable to reset a VM. </description> <cause> <description> The operation to reset the VM continued to fail. vSphere HA stopped resetting the VM after it exhausted the retries. </description> <action>Ensure that the host system is manageable, for example host agent is not hung. Check if there are no other concurrent tasks running for the VM.</action> </cause> </EventLongDescription> ExtendedEventVirtual machine failed to become vSphere HA ProtectederrorVirtual machine {vm.name} in cluster {computeResource.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.This virtual machine failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.com.vmware.vc.HA.VmNotProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure. 
<EventLongDescription id="com.vmware.vc.HA.VmNotProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation but the VM has not transitioned to vSphere HA Protected in the time period expected. This condition exists because the master vSphere HA agent has not yet persisted that the VM successfully powered on or vCenter is unaware that it did. Consequently, vSphere HA may not restart the VM after a failure. </description> <action> There are a number of reasons why a VM may remain not protected for a period of time. First, the system may be heavily loaded, in which case the transition will just take longer. Second, vCenter may be unable to communicate with the vSphere HA master agent. Examine the inventory to see if any hosts in the cluster are not responding. Third, the the management network may be partitioned, which is preventing the master that owns the VM from protecting it or reporting this information to vCenter. The cluster summary page may report a config issue in this case or hosts in the VM inventory will be reported as not responding. Finally, the vSphere HA master election is taking too long to complete. The cluster summary page will report if this situation exists. See the product documentation for additional troubleshooting tips. </action> </EventLongDescription> ExtendedEventVirtual machine is vSphere HA protectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.This virtual machine is vSphere HA Protected and HA will attempt to restart it after a failure.com.vmware.vc.HA.VmProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is vSphere HA Protected and HA will attempt to restart it after a failure. <EventLongDescription id="com.vmware.vc.HA.VmProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation and vSphere HA has persisted this fact. Consequently, vSphere HA will attempt to restart the VM after a failure. </description> </EventLongDescription> ExtendedEventVirtual machine is not vSphere HA ProtectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.This virtual machine is not vSphere HA Protected.com.vmware.vc.HA.VmUnprotectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is not vSphere HA Protected. <EventLongDescription id="com.vmware.vc.HA.VmUnprotectedEvent"> <description> The virtual machine transitioned from the vSphere HA protected to unprotected state. This transition is a result of a user powering off the virtual machine, disabling vSphere HA, disconnecting the host on which the virtual machine is running, or destroying the cluster in which the virtual machine resides. 
</description> </EventLongDescription> ExtendedEventvSphere HA has unprotected out-of-disk-space VMinfovSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected this virtual machine because it ran out of disk spacecom.vmware.vc.HA.VmUnprotectedOnDiskSpaceFull|vSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} because it ran out of disk spaceExtendedEventvSphere HA did not terminate a VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}warningvSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate this VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore|vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore} <EventLongDescription id=" com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore"> <description> This event is logged when a VM affected by an inaccessible datastore in a vSphere HA cluster was not terminated. </description> <cause> <description> VM Component Protection is configured to not terminate the VM, or vSphere HA host monitoring is disabled, or VM restart priority is disabled, or the VM is an agent VM, or there are insufficient resources to fail over the VM. In the case of insufficient resources, vSphere HA will attempt to terminate the VM when resources become available. </description> <action>Select VM Component Protection option to terminate VM</action> <action>Enable host monitoring</action> <action>Enable VM Restart priority</action> <action>Reduce resource reservations of other VMs in the cluster</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition or isolation if one exists</action> <action>If vSphere DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> </EventLongDescription> ExtendedEventDatastore {ds.name} mounted on this host was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleinfoDatastore {ds.name} mounted on host {host.name} in cluster {computeResource.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on host {host.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on this host was inaccessible. 
vSphere HA detected that the condition was cleared and the datastore is now accessiblecom.vmware.vc.HA.VmcpStorageFailureCleared|Datastore {ds.name} mounted on host {host.name} was inaccessible. The condition was cleared and the datastore is now accessible <EventLongDescription id=" com.vmware.vc.HA.VmcpStorageFailureCleared"> <description> This event is logged when datastore connectivity was restored. The host can have the following storage access failures: All Paths Down (APD) and Permanent Device Loss (PDL). The datastore was shown as unavailable/inaccessible in the storage view. </description> <cause> <description> A datastore on this host was inaccessible. The condition was cleared and the datastore is now accessible. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected that a datastore was inaccessible. This affected the VM with files on the datastorewarningvSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on host {host.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on this host was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected the VM with files on the datastorecom.vmware.vc.HA.VmcpStorageFailureDetectedForVm|vSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} in {datacenter.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastore <EventLongDescription id="com.vmware.vc.HA.VmcpStorageFailureDetectedForVm"> <description> This event is logged when a VM's files were not accessible due to a storage connectivity failure. vSphere HA will take action if VM Component Protection is enabled for the VM. </description> <cause> <description> A datastore was inaccessible due to a storage connectivity loss of All Paths Down or Permanent Device Loss. A VM was affected because it had files on the inaccessible datastore. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA was unable to terminate VM affected by an inaccessible datastore after it exhausted the retrieserrorvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on this host after {retryTimes} retriesvSphere HA was unable to terminate this VM affected by an inaccessible datastore after {retryTimes} retriescom.vmware.vc.HA.VmcpTerminateVmAborted|vSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminateVmAborted"> <description> This event is logged when vSphere HA was unable to terminate a VM affected by an inaccessible datastore. </description> <cause> <description> The operation to terminate the VM continued to fail. vSphere HA stopped terminating the VM after it exhausted the retries. </description> <action> Ensure that the host system is manageable, for example host agent is not hung. Check if there are other concurrent tasks running for the VM.</action> <action> Reset the VM if guest application is not operational after the datastore becomes accessible.</action> </cause> </EventLongDescription> ExtendedEventvSphere HA attempted to terminate a VM affected by an inaccessible datastorewarningvSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on host{host.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on this host because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate this VM because the VM was affected by an inaccessible datastorecom.vmware.vc.HA.VmcpTerminatingVm|vSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} in {datacenter.name} because the VM was affected by an inaccessible datastore <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminatingVm"> <description> This event is logged when vSphere HA attempted to terminate a VM affected by an inaccessible datastore. A VM is terminated by issuing a SIGKILL to the vmx process. </description> <cause> <description> The VM was affected by an inaccessible datastore. vSphere HA VM Component Protection attempted to terminate the VM. </description> </cause> </EventLongDescription> EventExHardware Health Status Changedinfocom.vmware.vc.HardwareSensorEvent|Sensor {sensorNumber} type {sensorType}, Description {sensorName} state {status} for {message}. 
Part Name/Number {partName} {partNumber} Manufacturer {manufacturer}EventExStatus of each Hardware Health Sensor Groupinfocom.vmware.vc.HardwareSensorGroupStatus|Hardware Sensor Status: Processor {processor}, Memory {memory}, Fan {fan}, Voltage {voltage}, Temperature {temperature}, Power {power}, System Board {systemBoard}, Battery {battery}, Storage {storage}, Other {other}ExtendedEventHost configuration is TPM encrypted.warningcom.vmware.vc.HostTpmConfigEncryptionEvent|Host configuration is TPM encrypted.EventExOperation cleanup encountered errorsinfoOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup with task {taskId} encountered errorscom.vmware.vc.OperationCleanupErrorsEvent|Operation cleanup for {vm.name} with task {taskId} encountered errorsExtendedEventThe user does not have permission to view the entity associated with this event.infocom.vmware.vc.RestrictedAccess|The user does not have permission to view the entity associated with this event.EventExFailed to register host with Intel® SGX Registration Service.errorFailed to register host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.FailedRegistration|Failed to register host {host.name} with Intel® SGX Registration Service {registrationUrl}. The service responded with {statusCode}, {errorCode}: {errorMessage}.EventExSending registration request to Intel® SGX Registration Service.infoSending registration request to Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.InitiatingRegistration|Sending registration request for host {host.name} to Intel® SGX Registration Service {registrationUrl}.EventExSuccessfully registered host with Intel® SGX Registration Service.infoSuccessfully registered host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.SuccessfulRegistration|Successfully registered host {host.name} with Intel® SGX Registration Service {registrationUrl}.EventExStateless Alarm TriggeredinfoAlarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'com.vmware.vc.StatelessAlarmTriggeredEvent|Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'ExtendedEventTrusted Host attestation failed.errorcom.vmware.vc.TaHostAttestFailEvent|Trusted Host attestation failed.ExtendedEventTrusted Host attestation passed.infocom.vmware.vc.TaHostAttestPassEvent|Trusted Host attestation passed.ExtendedEventTrusted Host attestation status unset.infocom.vmware.vc.TaHostAttestUnsetEvent|Trusted Host attestation status unset.EventExHost Time Synchronization establishedinfocom.vmware.vc.TimeSyncEvent|Time service {serviceName} has synchronized with remote time source, details: {message}.EventExHost Time Synchronization losterrorcom.vmware.vc.TimeSyncFailedEvent|Time service {serviceName} is not synchronized with the remote time source, details: {message}.ExtendedEventHost must be decommissioned when moved out of a Trusted Infrastructure cluster.errorHost {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host 
{host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.com.vmware.vc.TrustAuthority.DecommissionHost|Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.ExtendedEventHost is not configured for vSphere Trust Authority.errorHost {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.com.vmware.vc.TrustAuthority.HostNotConfigured|Host {host.name} is not configured for vSphere Trust Authority.EventExThe client certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSClientCertExpirationEvent|The client certificate for the Key Provider {keyProviderId} in the Trust Authority Host {hostName} will expire in {dayNum} day(s).EventExThe server certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSServerCertExpirationEvent|The server cetificate of key server {serverName} in the Trusted Key Provider {keyProviderId} will expire in {dayNum} day(s).ExtendedEventCertificates have changed. Trust authority cluster needs to be reconfigured.errorcom.vmware.vc.TrustAuthority.StsCertificatesChange|Certificates have changed. Trust authority cluster needs to be reconfigured.EventExvCenter Service Overall Health Changedinfocom.vmware.vc.VCHealthStateChangedEvent|vCenter Service overall health changed from '{oldState}' to '{newState}' <EventLongDescription id="com.vmware.vc.VCHealthStateChangedEvent"> <description> This event is logged when the overall health of vCenter Service has changed or become unavailable. </description> <cause> <description> The vCenter Service overall health state has changed or become unavailable </description> <action> Examine the vCenter Service health state and make sure the VimWebServices service is up and running on the vCenter Server </action> </cause> </EventLongDescription> EventExDatastore is in healthy state within the clusterinfoDatastore {dsName} is in healthy state within the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreHealthy|Datastore {dsName} is in healthy state within the cluster {computeResource.name}EventExDatastore is not accessible on the host(s)warningDatastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreInaccessible|Datastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}EventExDatastore unmount is failederrorUnmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreUnmountFailed|Unmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}EventExDatastore in desired configuration is missing on the host(s)warningDatastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}com.vmware.vc.VMCStorage.DesiredDatastoreMissing|Datastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}EventExHost(s) mounted with the datastore which is not present in desired configurationerrorHost(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired configuration on {computeResource.name}com.vmware.vc.VMCStorage.NotDesiredDatastorePresent|Host(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired 
configuration on {computeResource.name}EventExExecuting VM Instant CloneinfoExecuting Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} to {destVmName}Executing Instant Clone to {destVmName}com.vmware.vc.VmBeginInstantCloneEvent|Executing Instant Clone of {vm.name} on {host.name} to {destVmName}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneFailedInvalidDestinationEvent|Cannot clone {vm.name} as {destVmName} to invalid or non-existent destination with ID {invalidMoRef}: {fault}EventExRestarting VM CloneinfoRestarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} to {destVmName} with task {taskId}Restarting VM Clone to {destVmName} with task {taskId}com.vmware.vc.VmCloneRestartEvent|Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneToResourcePoolFailedEvent|Cannot clone {vm.name} as {destVmName} to resource pool {destResourcePool}: {fault}EventExFailed to create virtual machineerrorFailed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName}Failed to create virtual machine on {host.name}com.vmware.vc.VmCreateFailedEvent|Failed to create virtual machine {vmName} on {host.name}ExtendedEventVirtual machine disks consolidation succeeded.infoVirtual machine {vm.name} disks consolidation succeeded on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation succeeded on {host.name}.Virtual machine {vm.name} disks consolidation succeeded.Virtual machine disks consolidation succeeded.com.vmware.vc.VmDiskConsolidatedEvent|Virtual machine {vm.name} disks consolidated successfully on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation needed.warningVirtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is needed on {host.name}.Virtual machine {vm.name} disks consolidation is needed.Virtual machine disks consolidation is needed.com.vmware.vc.VmDiskConsolidationNeeded|Virtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation no longer needed.infoVirtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is no longer needed on {host.name}.Virtual machine {vm.name} disks consolidation is no longer needed.Virtual machine disks consolidation is no longer needed.com.vmware.vc.VmDiskConsolidationNoLongerNeeded|Virtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation failed.warningVirtual machine {vm.name} disks consolidation failed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation failed on {host.name}.Virtual machine {vm.name} disks consolidation failed.Virtual machine disks consolidation failed.com.vmware.vc.VmDiskFailedToConsolidateEvent|Virtual machine {vm.name} 
disks consolidation failed on {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExcom.vmware.vc.VmForkFailedInvalidDestinationEvent|EventExCannot complete Instant Clone of VMerrorCannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone to {destVmName}. Reason : {fault.msg}com.vmware.vc.VmInstantCloneFailedEvent|Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}EventExInstant Clone WarningwarningInstant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning - {warning}com.vmware.vc.VmInstantCloneWarningEvent|Instant Clone Warning for {vmName} - {warning}EventExInstant Clone of VM has completedinfoInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} has completedInstant Clone of {srcVmName} has completedcom.vmware.vc.VmInstantClonedEvent|Instant Clone of {srcVmName} on {host.name} has completedEventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.infocom.vmware.vc.VpxdMemoryUsageClearEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status}.EventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.errorcom.vmware.vc.VpxdMemoryUsageErrorEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status} (used: {usedMemory}%, soft limit: {limit}%).EventExOperation enabledinfocom.vmware.vc.authorization.MethodEnabled|The operation {MethodName} on the {EntityName} of type {EntityType} is enabled.EventExPrivilege check failedwarningPrivilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}com.vmware.vc.authorization.NoPermission|Privilege check failed for user {User} for missing permission {Permission}. 
Session user performing the check: {SessionUser}ExtendedEventErrors occurred during automatic CPVM certificate rotation.errorcom.vmware.vc.certificatemanagement.CPVMCertificateUpdateFailedEvent|Errors occurred during automatic CPVM certificate rotation.ExtendedEventCPVM successfully performed automatic certificate rotation.infocom.vmware.vc.certificatemanagement.CPVMCertificateUpdateHealthyEvent|CPVM successfully performed automatic certificate rotation.ExtendedEventErrors occurred during automatic Spherelet certificate rotation.errorcom.vmware.vc.certificatemanagement.SphereletCertificateUpdateFailedEvent|Errors occurred during automatic Spherelet certificate rotation.ExtendedEventNo errors found during automatic Spherelet certificate rotation.infocom.vmware.vc.certificatemanagement.SphereletCertificateUpdateHealthyEvent|No errors found during automatic Spherelet certificate rotation.ExtendedEventTRUSTED ROOT certificates imported successfully.infocom.vmware.vc.certificatemanagement.TrustedRootsImportedEvent|TRUSTED ROOT certificates imported successfully.ExtendedEventTRUSTED ROOT certificates imported successfully, but with warnings.warningcom.vmware.vc.certificatemanagement.TrustedRootsImportedWithWarningsEvent|TRUSTED ROOT certificates imported successfully, but with warnings.ExtendedEventvCenter Server TLS certificate replaced successfully.infocom.vmware.vc.certificatemanagement.VcCertificateReplacedEvent|vCenter Server TLS certificate replaced successfully.ExtendedEventvCenter Server TLS certificate replaced successfully, but there are warnings detected.warningcom.vmware.vc.certificatemanagement.VcCertificateReplacedWithWarningsEvent|vCenter Server TLS certificate replaced successfully, but there are warnings detected.EventExFailed to update the vCenter server certificate.warningcom.vmware.vc.certificatemanagement.VcServerCertificateUpdateFailureEvent|{cause} for the {serviceName}. Remediation suggested: {remediation}. For more details, please refer to {kbLink}.EventExCA Certificates were updated on hostinfoCA Certificates were updated on {hostname}com.vmware.vc.certmgr.HostCaCertsAndCrlsUpdatedEvent|CA Certificates were updated on {hostname}EventExHost Certificate expiration is imminentwarningHost Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpirationImminentEvent|Host Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}EventExHost Certificate is nearing expirationwarningHost Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringEvent|Host Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}EventExHost Certificate will expire soonwarningHost Certificate on {hostname} will expire soon. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringShortlyEvent|Host Certificate on {hostname} will expire soon. 
Expiration Date: {expiryDate}ExtendedEventHost Certificate Management Mode changedinfocom.vmware.vc.certmgr.HostCertManagementModeChangedEvent|Host Certificate Management Mode changed from {previousMode} to {presentMode}ExtendedEventHost Certificate Management Metadata changedinfocom.vmware.vc.certmgr.HostCertMetadataChangedEvent|Host Certificate Management Metadata changedEventExHost Certificate revokedwarningHost Certificate on {hostname} is revoked.com.vmware.vc.certmgr.HostCertRevokedEvent|Host Certificate on {hostname} is revoked.EventExHost Certificate was updatedinfoHost Certificate was updated on {hostname}, new thumbprint: {thumbprint}com.vmware.vc.certmgr.HostCertUpdatedEvent|Host Certificate was updated on {hostname}, new thumbprint: {thumbprint}EventExAdding host to cluster store failederrorAdding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.AddHostFailed|Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}EventExInitializing cluster store member cache failederrorInitializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.InitializeMemberCacheFailed|Initializing cluster store member cache failed. Fault Reason : {errorMessage}EventExRemoving host from cluster store failederrorRemoving host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.RemoveHostFailed|Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}EventExUpdating host encryption keyinfocom.vmware.vc.crypto.HostKeyUpdatedEvent|Host encryption key set to {newKey}. 
Old key: {oldKey}EventExcom.vmware.vc.crypto.IntegrityCheckFailed|EventExcom.vmware.vc.crypto.IntegrityCheckPassed|EventExCrypto operation audit eventinfocom.vmware.vc.crypto.Operation|Cryptographic operations during {description}{operation}{diskOperations}EventExFailed to update VM fileserrorFailed to update VM files on datastore {ds.name}com.vmware.vc.datastore.UpdateVmFilesFailedEvent|Failed to update VM files on datastore {ds.name} using host {hostName}EventExUpdated VM filesinfoUpdated VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatedVmFilesEvent|Updated VM files on datastore {ds.name} using host {hostName}EventExUpdating VM FilesinfoUpdating VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatingVmFilesEvent|Updating VM files on datastore {ds.name} using host {hostName}ExtendedEventLink Aggregation Control Protocol configuration is inconsistentinfoSingle Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.com.vmware.vc.dvs.LacpConfigInconsistentEvent|Single Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.ExtendedEventFault Tolerance VM restart disabledwarningvSphere HA has been disabled in cluster {computeResource.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart this VM or its Secondary VM after a failure.com.vmware.vc.ft.VmAffectedByDasDisabledEvent|vSphere HA has been disabled in cluster {computeResource.name} of datacenter {datacenter.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure. <EventLongDescription id="com.vmware.vc.ft.VmAffectedByDasDisabledEvent"> <description> When vSphere HA is disabled in a cluster, you cannot restart a Primary VM or its Secondary VM after a failure. This event is issued when vSphere HA is disabled and a Fault Tolerant virtual machine is powered on. The event alerts you of the risk to the Fault Tolerant virtual machine that results from disabling vSphere HA. 
</description> <cause> <description>vSphere HA was disabled when a Fault Tolerant virtual machine was powered on</description> <action>Re-enable vSphere HA</action> </cause> </EventLongDescription> EventExGuest operationinfoGuest operation {operationName.@enum.com.vmware.vc.guestOp} performed.com.vmware.vc.guestOperations.GuestOperation|Guest operation {operationName.@enum.com.vmware.vc.guestOp} performed on Virtual machine {vm.name}.EventExGuest operation authentication failurewarningGuest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp}.com.vmware.vc.guestOperations.GuestOperationAuthFailure|Guest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp} on Virtual machine {vm.name}.ExtendedEventvSphere HA restarted a virtual machinewarningvSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA restarted virtual machine {vm.name} on host {host.name}vSphere HA restarted virtual machine {vm.name}vSphere HA restarted this virtual machinecom.vmware.vc.ha.VmRestartedByHAEvent|vSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.ha.VmRestartedByHAEvent"> <description> The virtual machine was restarted automatically by vSphere HA on this host. This response may be triggered by a failure of the host the virtual machine was originally running on or by an unclean power-off of the virtual machine (eg. if the vmx process was killed). </description> </EventLongDescription> ExtendedEventAutostart power on failederrorPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on this host failedcom.vmware.vc.host.AutoStartPowerOnFailedEvent|Powering on virtual machines according to autostart rules on host {host.name} in datacenter {datacenter.name} failedExtendedEventAutostart rules reconfigure failederrorReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on this host failedcom.vmware.vc.host.AutoStartReconfigureFailedEvent|Reconfiguring autostart rules for virtual machines on {host.name} in datacenter {datacenter.name} failedEventExEncryption mode is enabled on host.infoEncryption mode is enabled on host.com.vmware.vc.host.Crypto.Enabled|Encryption mode is enabled on host {hostName}.EventExThe operation is not supported on hosts which have encryption disabled.errorcom.vmware.vc.host.Crypto.HostCryptoDisabled|The operation is not supported on host {hostName} because encryption is disabled.EventExHost key is being renewed because an error occurred on the key provider.warningHost key is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KMSClusterError|Host key of {hostName} is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.EventExHost key is being renewed because key was missing on the key provider.warningHost key is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. 
The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KeyMissingOnKMS|Host key of {hostName} is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. The new key is {newKey}.EventExHost requires encryption mode enabled and the key provider is not available.errorHost requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KMSClusterError|Host {hostName} requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExHost requires encryption mode enabled and the key is not available on the key provider.errorHost requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KeyMissingOnKMS|Host {hostName} requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExFailed to send keys to host because of host error.errorcom.vmware.vc.host.Crypto.SendKeyError.HostError|Failed to send keys {keys} to host {hostName}. Please check host connection.EventExHost profile {operation} failed with error: {error}.errorHost profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.com.vmware.vc.host.HPOperationFailed|Host profile {operation} failed with error: {error}.ExtendedEventHost booted from stateless cache.warningHost booted from stateless cache.Host booted from stateless cache.Host booted from stateless cache.com.vmware.vc.host.HostBootedFromStatelessCacheEvent|Host booted from stateless cache.EventExHost IP address conflict detectederrorHost IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}com.vmware.vc.host.HostIpConflictEvent|Host IP address conflict detected. 
{changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}ExtendedEventHost in Memory Mode and active DRAM usage is normalinfo{host.name} is in Memory Mode and its active DRAM usage is normal{host.name} is in Memory Mode and its active DRAM usage is normalThe host is in Memory Mode and its active DRAM usage is normalcom.vmware.vc.host.MemoryModeActiveDRAMGreen|Host {host.name} is in Memory Mode and its active DRAM usage is normalExtendedEventHost in Memory Mode and active DRAM usage is highwarningHost {host.name} is in Memory Mode and its active DRAM usage is highHost {host.name} is in Memory Mode and its active DRAM usage is highThe host is in Memory Mode and its active DRAM usage is highcom.vmware.vc.host.MemoryModeActiveDRAMYellow|Host {host.name} is in Memory Mode and its active DRAM usage is highExtendedEventNSX installation failed on host.errorNSX installation failed on host.NSX installation failed on host.NSX installation failed on host.com.vmware.vc.host.NsxInstallFailed|NSX installation failed on host.ExtendedEventNSX installation successful on host.infoNSX installation successful on host.NSX installation successful on host.NSX installation successful on host.com.vmware.vc.host.NsxInstallSuccess|NSX installation successful on host.ExtendedEventPartial maintenance mode status has changed.infoHost status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.com.vmware.vc.host.PartialMaintenanceModeStatusChanged|Host status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}com.vmware.vc.host.StatelessHPApplyEarlyBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}com.vmware.vc.host.StatelessHPApplyFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}com.vmware.vc.host.StatelessHPApplyPostBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. 
{error}EventExHost TPM attestation failederrorHost TPM attestation failed for host {host.name}: {1}Host TPM attestation failed for host {host.name}: {1}Host TPM attestation failed: {1}com.vmware.vc.host.TPMAttestationFailedEvent|Host TPM attestation failed for host {host.name} in datacenter {datacenter.name}: {1}ExtendedEventActive DRAM usage of the memory tiered host is normalinfoHost {host.name} is a memory tiered host and its active DRAM usage is normalHost {host.name} is a memory tiered host and its active DRAM usage is normalActive DRAM usage of the memory tiered host is normalcom.vmware.vc.host.TieringMemoryActiveDRAMGreen|Host {host.name} is a memory tiered host and its active DRAM usage is normalExtendedEventActive DRAM usage of the memory tiered host is highwarningHost {host.name} is a memory tiered host and its active DRAM usage is highHost {host.name} is a memory tiered host and its active DRAM usage is highActive DRAM usage of the memory tiered host is highcom.vmware.vc.host.TieringMemoryActiveDRAMYellow|Host {host.name} is a memory tiered host and its active DRAM usage is highExtendedEventNew TPM host endorsement key doesn't match the one in the DBerrorThe new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DBcom.vmware.vc.host.TpmEndorsementKeyMismatch|The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name} in datacenter {datacenter.name}ExtendedEventHost's virtual flash resource is accessible.infoHost's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.com.vmware.vc.host.clear.vFlashResource.inaccessible|Host's virtual flash resource is restored to be accessible.EventExHost's virtual flash resource usage dropped below the threshold.infoHost's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.com.vmware.vc.host.clear.vFlashResource.reachthreshold|Host's virtual flash resource usage dropped below {1}%.ExtendedEventDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.warningDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound|Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.ExtendedEventDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostswarningDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. 
Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostscom.vmware.vc.host.problem.DeprecatedVMFSVolumeFoundAfterVMFS3EOL|Deprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsExtendedEventImproved virtual disk infrastructure's catalog management turned unhealthywarningcom.vmware.vc.host.problem.VStorageObjectInfraCatalogUnhealthy|Improved virtual disk infrastructure's catalog management turned unhealthyExtendedEventImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.warningImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent|Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. <EventLongDescription id="com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent"> <description> Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. </description> <cause> <description> This is caused by creating improved virtual disk infrastructure namespaces with empty storage policy. </description> <action> Update infrastructure namespaces storage policy. </action> </cause> </EventLongDescription> ExtendedEventHost's virtual flash resource is inaccessible.warningHost's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.com.vmware.vc.host.problem.vFlashResource.inaccessible|Host's virtual flash resource is inaccessible. <EventLongDescription id="com.vmware.vc.host.problem.vFlashResource.inaccessible"> <description> Inaccessible host virtual flash resource indicates that its backing VFFS volume is inaccessible. Due to inaccessible host virtual flash resource, virtual machines with vSphere Flash Read Cache configured cannot be powered on or might experience unpredicted behavior if powered on. </description> <cause> <description> This might be caused by an unmounted VFFS volume or an APD/PDL on the VFFS volume. </description> <action> Check the backing VFFS volume connection status. For example, mount the unmounted volume or resolve the APD/PDL issues. The host virtual flash resource is accessible as long as the backing VFFS volume is accessible. 
</action> </cause> </EventLongDescription> EventExHost's virtual flash resource usage exceeds the threshold.warningHost's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.com.vmware.vc.host.problem.vFlashResource.reachthreshold|Host's virtual flash resource usage is more than {1}%.ExtendedEventVirtual flash resource is configured on the hostinfoVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostcom.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent|Virtual flash resource is configured on the hostExtendedEventVirtual flash resource is removed from the hostinfoVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostcom.vmware.vc.host.vFlash.VFlashResourceRemovedEvent|Virtual flash resource is removed from the hostEventExDefault virtual flash module is changed to {vFlashModule} on the hostinfoDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostcom.vmware.vc.host.vFlash.defaultModuleChangedEvent|Any new virtual Flash Read Cache configuration request will use {vFlashModule} as default virtual flash module. All existing virtual Flash Read Cache configurations remain unchanged. <EventLongDescription id="com.vmware.vc.host.vFlash.defaultModuleChangedEvent"> <description> The default virtual flash module has been changed. Any new virtual Flash Read Cache configuration uses the new default virtual flash module if undefined in configuration. All existing configurations will remain unchanged. </description> </EventLongDescription> ExtendedEventVirtual flash modules are loaded or reloaded on the hostinfoVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostcom.vmware.vc.host.vFlash.modulesLoadedEvent|Virtual flash modules are loaded or reloaded on the hostEventExEntity became healthyinfo{entityName} became healthycom.vmware.vc.infraUpdateHa.GreenHealthEvent|{entityName} became healthyEventExProvider has posted invalid health updateswarningProvider {providerName} has posted invalid health updatesProvider {providerName} has posted invalid health updatescom.vmware.vc.infraUpdateHa.InvalidUpdatesEvent|Provider {providerName} has posted invalid health updatesEventExProvider reported a healthy statusinfo{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}com.vmware.vc.infraUpdateHa.PostGreenHealthUpdateEvent|{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}EventExProvider reported a severely degraded statuswarning{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostRedHealthUpdateEvent|{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. 
Remediation suggested by {providerName}: {remediation}EventExProvider reported a moderately degraded statuswarning{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostYellowHealthUpdateEvent|{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}EventExEntity has entered quarantine modewarning{entityName} has entered quarantine modecom.vmware.vc.infraUpdateHa.QuarantineEvent|{entityName} has entered quarantine modeEventExEntity has exited quarantine modeinfo{entityName} has exited quarantine modecom.vmware.vc.infraUpdateHa.QuarantineRemovedEvent|{entityName} has exited quarantine modeEventExEntity became severely degradedwarning{entityName} became severely degradedcom.vmware.vc.infraUpdateHa.RedHealthEvent|{entityName} became severely degradedEventExProvider has stale updateswarningProvider {providerName} has not posted an update in {timeout} secondsProvider {providerName} has not posted an update in {timeout} secondscom.vmware.vc.infraUpdateHa.StaleUpdatesEvent|Provider {providerName} has not posted an update in {timeout} secondsEventExEntity has unknown health statewarning{entityName} has unknown health statecom.vmware.vc.infraUpdateHa.UnknownHealthEvent|{entityName} has unknown health stateEventExEntity became moderately degradedwarning{entityName} became moderately degradedcom.vmware.vc.infraUpdateHa.YellowHealthEvent|{entityName} became moderately degradedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters has failederrorvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterInstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterInstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters has failederrorvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUninstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster 
{computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} are successfulcom.vmware.vc.iofilter.FilterUninstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters has failederrorvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} and in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUpgradeFailedEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters is successfulinfovSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterUpgradeSuccessEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has succeededEventExvSphere APIs for I/O Filters (VAIO) host vendor provider registration has failed.errorvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderRegistrationFailedEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully registeredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredcom.vmware.vc.iofilter.HostVendorProviderRegistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredEventExFailed to unregister vSphere APIs for I/O Filters (VAIO) host vendor provider.errorFailed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderUnregistrationFailedEvent|Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. 
Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully unregisteredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredcom.vmware.vc.iofilter.HostVendorProviderUnregistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredExtendedEventIoFilterManager API invoked with untrusted certificate SSL trust policywarningIoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name}com.vmware.vc.iofilter.UntrustedCertificateEvent|IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventKey providers are backed up.infocom.vmware.vc.kms.crypto.AllBackedUp|All key providers are backed up.EventExKey creation failed on key provider.errorcom.vmware.vc.kms.crypto.KeyGenerateFail|Key creation failed on key provider {clusterName} with error code {errorCode}. Check log for details.EventExKey provider(s) are not backed up.errorcom.vmware.vc.kms.crypto.NotBackedUp|Key provider(s) {providerIds} are not backed up.EventExKey provider backup is suggested after it is updated.warningcom.vmware.vc.kms.crypto.NotBackedUpAfterUpdate|Key provider(s) {providerIds} are not backed up. Backup is suggested after updating a provider.EventExFailed to send keys because of key provider error.errorcom.vmware.vc.kms.crypto.SendKeyError.KMSClusterError|Failed to send keys {keys} because of KMS connection error.EventExFailed to send keys because keys are missing on key provider.errorcom.vmware.vc.kms.crypto.SendKeyError.KeyMissingOnKMS|Failed to send keys {keys} because of keys missing on key provider.EventExThe Trusted Key Provider is not available.warningcom.vmware.vc.kms.crypto.TrustAuthority.ClusterNotAvailable|The Trusted Key Provider {keyProviderId} is not available.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.ClusterUnhealthy|The Trusted Key Provider {keyProviderId} is unhealthy. Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.KmsUnhealthy|The key server {serverName} in the Trusted Key Provider {keyProviderId} is unhealthy. 
Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExKey Management Server is unreachableerrorcom.vmware.vc.kms.crypto.Unreachable|Key Management Server {serverName}({address}) is unreachableEventExRetrieved Key Management Server vendor information.infocom.vmware.vc.kms.crypto.Vendor|Key Management Server {serverName}({address}) vendor: {vendor}EventExVirtual NIC entered passthrough modeinfoNetwork passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is active on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent|Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExVirtual NIC exited passthrough modeinfoNetwork passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is inactive on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterExitedPassthroughEvent|Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExFailed to clone state for entity on extensionerrorFailed to clone state on extension {extensionName}com.vmware.vc.ovfconsumers.CloneOvfConsumerStateErrorEvent|Failed to clone state for the entity '{entityName}' on extension {extensionName}EventExFailed to retrieve OVF environment sections for VM on extensionerrorFailed to retrieve OVF environment sections from extension {extensionName}com.vmware.vc.ovfconsumers.GetOvfEnvironmentSectionsErrorEvent|Failed to retrieve OVF environment sections for VM '{vm.name}' from extension {extensionName}EventExUnable to power on VM after cloningerrorPowering on after cloning was blocked by an extension. Message: {description}com.vmware.vc.ovfconsumers.PowerOnAfterCloneErrorEvent|Powering on VM '{vm.name}' after cloning was blocked by an extension. 
Message: {description}EventExFailed to register entity on extensionerrorcom.vmware.vc.ovfconsumers.RegisterEntityErrorEvent|Failed to register entity '{entityName}' on extension {extensionName}EventExFailed to unregister entities on extensionerrorcom.vmware.vc.ovfconsumers.UnregisterEntitiesErrorEvent|Failed to unregister entities on extension {extensionName}EventExFailed to validate OVF descriptor on extensionerrorcom.vmware.vc.ovfconsumers.ValidateOstErrorEvent|Failed to validate OVF descriptor on extension {extensionName}ExtendedEventAnswer file exportedinfoAnswer file for host {host.name} has been exportedAnswer file for host {host.name} has been exportedAnswer file exportedcom.vmware.vc.profile.AnswerFileExportedEvent|Answer file for host {host.name} in datacenter {datacenter.name} has been exportedExtendedEventHost customization settings updatedinfoHost customization settings for host {host.name} have been updatedHost customization settings for host {host.name} have been updatedHost customization settings updatedcom.vmware.vc.profile.AnswerFileUpdatedEvent|Host customization settings for host {host.name} in datacenter {datacenter.name} have been updatedEventExResource pool renamedinfoResource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'com.vmware.vc.rp.ResourcePoolRenamedEvent|Resource pool '{oldName}' has been renamed to '{newName}'ExtendedEventDatastore maintenance mode operation canceledinfoThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledcom.vmware.vc.sdrs.CanceledDatastoreMaintenanceModeEvent|The datastore maintenance mode operation has been canceledExtendedEventDatastore cluster is healthyinfoDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthycom.vmware.vc.sdrs.ClearDatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} is healthyExtendedEventConfigured storage DRSinfoConfigured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.ConfiguredStorageDrsOnPodEvent|Configured storage DRS on datastore cluster {objectName}ExtendedEventDatastore cluster has datastores that belong to different SRM Consistency GroupswarningDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency Groupscom.vmware.vc.sdrs.ConsistencyGroupViolationEvent|Datastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsExtendedEventDatastore entered maintenance modeinfoDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modecom.vmware.vc.sdrs.DatastoreEnteredMaintenanceModeEvent|Datastore {ds.name} 
has entered maintenance modeExtendedEventDatastore is entering maintenance modeinfoDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modecom.vmware.vc.sdrs.DatastoreEnteringMaintenanceModeEvent|Datastore {ds.name} is entering maintenance modeExtendedEventDatastore exited maintenance modeinfoDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modecom.vmware.vc.sdrs.DatastoreExitedMaintenanceModeEvent|Datastore {ds.name} has exited maintenance modeEventExDatastore cluster has datastores shared across multiple datacenterswarningDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacenterscom.vmware.vc.sdrs.DatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersExtendedEventErrors encountered while datastore entering into maintenance modeerrorDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modecom.vmware.vc.sdrs.DatastoreMaintenanceModeErrorsEvent|Datastore {ds.name} encountered errors while entering maintenance modeExtendedEventStorage DRS disabledinfoDisabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsDisabledEvent|Disabled storage DRS on datastore cluster {objectName}EventExStorage DRS enabledinfoEnabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}com.vmware.vc.sdrs.StorageDrsEnabledEvent|Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}ExtendedEventStorage DRS invocation failederrorStorage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsInvocationFailedEvent|Storage DRS invocation failed on datastore cluster {objectName}ExtendedEventNew storage DRS recommendation generatedinfoA new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster 
{objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsNewRecommendationPendingEvent|A new storage DRS recommendation has been generated on datastore cluster {objectName}EventExDatastore cluster connected to host(s) that do not support storage DRSwarningDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRScom.vmware.vc.sdrs.StorageDrsNotSupportedHostConnectedToPodEvent|Datastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSExtendedEventPending storage recommendations were appliedinfoAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedcom.vmware.vc.sdrs.StorageDrsRecommendationApplied|All pending recommendations on datastore cluster {objectName} were appliedEventExStorage DRS migrated VM disksinfoStorage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStorageMigrationEvent|Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}EventExStorage DRS placed VM disksinfoStorage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStoragePlacementEvent|Storage DRS placed disks of VM {vm.name} on datastore {ds.name}EventExDatastore cluster createdinfoCreated datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodCreatedEvent|Created datastore cluster {objectName}EventExDatastore cluster deletedinfoRemoved datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodDestroyedEvent|Removed datastore cluster {objectName}EventExSIOC: pre-4.1 host connected to SIOC-enabled datastorewarningSIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. 
This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.com.vmware.vc.sioc.NotSupportedHostConnectedToDatastoreEvent|SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.ExtendedEventESXi VASA client certificate provision has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateProvisionFailure|ESXi VASA client certificate provision has failedExtendedEventESXi VASA client certificate provision has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateProvisionSuccess|ESXi VASA client certificate provision has succeededExtendedEventESXi VASA client certificate register to some/all VP(s) has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateRegisterFailure|ESXi VASA client certificate register to some/all VP(s) has failedExtendedEventESXi VASA client certificate register to VP(s) has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateRegisterSuccess|ESXi VASA client certificate register to VP(s) has succeededEventExSystem capability warning from storage providerwarningcom.vmware.vc.sms.LunCapabilityInitEvent|Storage provider [{providerName}] : system capability warning for {eventSubjectId} : {msgTxt}EventExSystem capability normal event from storage providerinfocom.vmware.vc.sms.LunCapabilityMetEvent|Storage provider [{providerName}] : system capability normal for {eventSubjectId}EventExSystem capability alert from storage providererrorcom.vmware.vc.sms.LunCapabilityNotMetEvent|Storage provider [{providerName}] : system capability alert for {eventSubjectId} : {msgTxt}EventExA Storage Alarm of type 'Object' cleared by the VASA providerinfocom.vmware.vc.sms.ObjectTypeAlarmClearedEvent|Storage provider [{providerName}] cleared a Storage Alarm of type 'Object' on {eventSubjectId} : {msgTxt}EventExAn alert on an object raised by the VASA providererrorcom.vmware.vc.sms.ObjectTypeAlarmErrorEvent|Storage provider [{providerName}] raised an alert type 'Object' on {eventSubjectId} : {msgTxt}EventExA warning on an object raised by the VASA providerwarningcom.vmware.vc.sms.ObjectTypeAlarmWarningEvent|Storage provider [{providerName}] raised a warning of type 'Object' on {eventSubjectId} : {msgTxt}EventExRegistering renewed VC Client Certificate failed for the VASA provider.errorcom.vmware.vc.sms.RegisterVcClientCertOnRenewalFailure|Registering renewed VC Client Certificate failed for VASA provider with url : {provider}.ExtendedEventRegistering renewed VC Client Certificate succeeded for all the VASA providers.infocom.vmware.vc.sms.RegisterVcClientCertOnRenewalSuccess|Registering renewed VC Client Certificate succeeded for all the VASA providers.EventExThin provisioning capacity threshold normal event from storage providerinfocom.vmware.vc.sms.ThinProvisionedLunThresholdClearedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold normal for {eventSubjectId}EventExThin provisioning capacity threshold alert from storage providererrorcom.vmware.vc.sms.ThinProvisionedLunThresholdCrossedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold alert for {eventSubjectId}EventExThin provisioning capacity threshold warning from storage providerwarningcom.vmware.vc.sms.ThinProvisionedLunThresholdInitEvent|Storage provider [{providerName}] : thin 
provisioning capacity threshold warning for {eventSubjectId}EventExStorage provider certificate will expire very shortlyerrorcom.vmware.vc.sms.VasaProviderCertificateHardLimitReachedEvent|Certificate for storage provider {providerName} will expire very shortly. Expiration date : {expiryDate}EventExVASA Provider certificate is renewedinfocom.vmware.vc.sms.VasaProviderCertificateRenewalEvent|VASA Provider certificate for {providerName} is renewedEventExStorage provider certificate will expire soonwarningcom.vmware.vc.sms.VasaProviderCertificateSoftLimitReachedEvent|Certificate for storage provider {providerName} will expire soon. Expiration date : {expiryDate}EventExStorage provider certificate is validinfocom.vmware.vc.sms.VasaProviderCertificateValidEvent|Certificate for storage provider {providerName} is validEventExStorage provider is connectedinfocom.vmware.vc.sms.VasaProviderConnectedEvent|Storage provider {providerName} is connectedEventExStorage provider is disconnectederrorcom.vmware.vc.sms.VasaProviderDisconnectedEvent|Storage provider {providerName} is disconnectedEventExRefreshing CA certificates and CRLs failed for some VASA providerserrorcom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsFailure|Refreshing CA certificates and CRLs failed for VASA providers with url : {providerUrls}ExtendedEventRefreshing CA certificates and CRLs succeeded for all registered VASA providers.infocom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsSuccess|Refreshing CA certificates and CRLs succeeded for all registered VASA providers.EventExOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate failed for the VASA 5.0 or greater VASA providers.errorcom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationFailure|On VMCA Root Certificate rotation, register and refresh certificates failed for VASA 5.0 or greater VASA provider : {provider}ExtendedEventOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate succeeded for all the VASA 5.0 or greater VASA providers.infocom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationSuccess|On VMCA Root Certificate rotation, register and refresh certificates succeeded for all the VASA 5.0 or greater VASA providers.EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vmName} connected to {datastore.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vmName} connected to datastore {datastore.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vmName} connected to {datastore.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vmName} connected to {datastore.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in 
{datacenter.name} compliance status is unknown from storage provider {providerName}.EventExHealth event from storage providerinfocom.vmware.vc.sms.provider.health.event|Storage provider [{providerName}] : health event for {eventSubjectId} : {msgTxt}EventExSystem event from storage providerinfocom.vmware.vc.sms.provider.system.event|Storage provider [{providerName}] : system event : {msgTxt}EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} compliance status is unknown from storage provider {providerName}.EventExProfile association/dissociation failederrorProfile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}com.vmware.vc.spbm.ProfileAssociationFailedEvent|Profile association/dissociation failed for {entityName}EventExConfiguring storage policy failed.errorConfiguring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}com.vmware.vc.spbm.ServiceErrorEvent|Configuring storage policy failed for VM {entityName}. 
Verify that SPBM service is healthy. Fault Reason : {errorMessage}ExtendedEventQuick stats is not up-to-dateinfoQuick stats on {host.name} in {computeResource.name} is not up-to-dateQuick stats on {host.name} is not up-to-dateQuick stats on {host.name} is not up-to-datecom.vmware.vc.stats.HostQuickStatesNotUpToDateEvent|Quick stats on {host.name} in {computeResource.name} in {datacenter.name} is not up-to-date <EventLongDescription id="com.vmware.vc.stats.HostQuickStatesNotUpToDateEvent"> <description> Quick stats on the host is not up-to-date. </description> <cause> <description> Quickstats on the host are not up-to-date. This is expected if the host was recently added or reconnected or VC just started up. </description> <action> No specific action needs to be taken. </action> </cause> </EventLongDescription> EventExODBC errorerrorcom.vmware.vc.stats.StatsInsertErrorEvent|Stats insertion failed for entity {entity} due to ODBC error. <EventLongDescription id="com.vmware.vc.stats.StatsInsertErrorEvent"> <description> If a set of performance statistics data insertion fails due to database related issues, this event is logged. </description> <cause> <description>Usually an attempt to insert duplicate entries causes this event</description> <action>Usually it is transient and self-healing. If not then probably the database contains rogue entries. Manually deleting the data for the particular stat provider might fix the issue</action> </cause> </EventLongDescription> EventExRoot user password expired.errorcom.vmware.vc.system.RootPasswordExpiredEvent|Root user password has expired. Log in to https://{pnid}:5480 to update the root password.EventExRoot user password is about to expire.warningcom.vmware.vc.system.RootPasswordExpiryEvent|Root user password expires in {days} days. 
Log in to https://{pnid}:5480 to update the root password.ExtendedEventFT Disabled VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection will protect this virtual machine as non-FT virtual machine because the FT state is disabledcom.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because the FT state is disabledExtendedEventFailover FT VM due to component failureinfoFT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} on host {host.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} is going to fail over to Secondary VM due to component failureFT Primary VM is going to fail over to Secondary VM due to component failurecom.vmware.vc.vcp.FtFailoverEvent|FT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is going to fail over to Secondary VM due to component failure ExtendedEventFT VM failover failederrorFT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} failed to failover to secondaryFT virtual machine {vm.name} on host {host.name} failed to failover to secondaryFT virtual machine {vm.name} failed to failover to secondaryFT virtual machine failed to failover to secondarycom.vmware.vc.vcp.FtFailoverFailedEvent|FT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to failover to secondaryExtendedEventRestarting FT secondary due to component failureinfoHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine due to component failurecom.vmware.vc.vcp.FtSecondaryRestartEvent|HA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to component failureExtendedEventFT secondary VM restart failederrorFT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} failed to restartFT Secondary VM {vm.name} on host {host.name} failed to restartFT Secondary VM {vm.name} failed to restartFT Secondary VM failed to restartcom.vmware.vc.vcp.FtSecondaryRestartFailedEvent|FT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartExtendedEventNeed secondary VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine since it has been in the needSecondary 
state too longHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects this virtual machine as non-FT virtual machine because it has been in the needSecondary state too longcom.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because it has been in the needSecondary state too longEventExVM Component Protection test endsinfoVM Component Protection test ends on host {host.name} in cluster {computeResource.name}VM Component Protection test ends on host {host.name}VM Component Protection test endscom.vmware.vc.vcp.TestEndEvent|VM Component Protection test ends on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}EventExVM Component Protection test startsinfoVM Component Protection test starts on host {host.name} in cluster {computeResource.name}VM Component Protection test starts on host {host.name}VM Component Protection test startscom.vmware.vc.vcp.TestStartEvent|VM Component Protection test starts on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventNo action on VMinfoHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} due to the feature configuration settingHA VM Component Protection did not take action due to the feature configuration settingcom.vmware.vc.vcp.VcpNoActionEvent|HA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to the feature configuration settingEventExVirtual machine lost datastore accesserrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {datastore}Virtual machine {vm.name} on host {host.name} lost access to {datastore}Virtual machine {vm.name} lost access to {datastore}Virtual machine lost access to {datastore}com.vmware.vc.vcp.VmDatastoreFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {datastore}EventExVirtual machine lost VM network accessibilityerrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {network}Virtual machine {vm.name} on host {host.name} lost access to {network}Virtual machine {vm.name} lost access to {network}Virtual machine lost access to {network}com.vmware.vc.vcp.VmNetworkFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {network}EventExVM power off hangerrorHA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} on 
host {host.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine successfully after trying {numTimes} times and will keep tryingcom.vmware.vc.vcp.VmPowerOffHangEvent|HA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} successfully after trying {numTimes} times and will keep tryingExtendedEventRestarting VM due to component failureinfoHA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failureHA VM Component Protection is restarting virtual machine due to component failurecom.vmware.vc.vcp.VmRestartEvent|HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventVirtual machine affected by component failure failed to restarterrorVirtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} failed to restartVirtual machine {vm.name} affected by component failure on host {host.name} failed to restartVirtual machine {vm.name} affected by component failure failed to restartVirtual machine affected by component failure failed to restartcom.vmware.vc.vcp.VmRestartFailedEvent|Virtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartEventExNo candidate host to restarterrorHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for this virtual machine after waiting {numSecWait} seconds and will keep tryingcom.vmware.vc.vcp.VmWaitForCandidateHostEvent|HA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} after waiting {numSecWait} seconds and will keep tryingEventExCertificate will expire soon.warningcom.vmware.vc.vecs.CertExpirationEvent|Certificate '{subject}' from '{store}' expires on {expiryDate}EventExKMS Client Certificate will expire soon.warningcom.vmware.vc.vecs.KMSClientCertExpirationEvent|KMS Client Certificate '{subject}' expires on {expiryDate}EventExKMS Server Certificate will expire soon.warningcom.vmware.vc.vecs.KMSServerCertExpirationEvent|KMS Server Certificate '{subject}' expires on {expiryDate}EventExOperation on the SSD device failederrorConfiguration on disk {disk.path} failed. 
Reason : {fault.msg}com.vmware.vc.vflash.SsdConfigurationFailedEvent|Configuration on disk {disk.path} failed. Reason : {fault.msg}EventExVirtual machine is locked because an error occurred on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KMSClusterError|Virtual machine {vmName} is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the host.errorVirtual machine is locked because keys were missing on the host {host}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnHost|Virtual machine {vmName} is locked because keys were missing on the host {host}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnKMS|Virtual machine {vmName} is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because the required Trusted Key Provider(s) is unavailable.errorVirtual machine is locked. Before unlocking, check the status of Trusted Key Provider(s) {kmsCluster} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).com.vmware.vc.vm.Crypto.VMLocked.TAKMSClusterUnavaliable|Virtual machine {vmName} is locked. 
Before unlocking, check the status of Trusted Key Provider(s) {keyProviderId} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).EventExVirtual machine is locked because Trust Authority managed key(s) are missing on the required host.errorVirtual machine is locked because Trust Authority managed key(s) are missing on host {host}.com.vmware.vc.vm.Crypto.VMLocked.TAKeyMissingOnHost|Virtual machine {vmName} is locked because Trust Authority managed key(s) {missedkeys} are missing on the required host {host}.EventExVirtual machine is unlocked.infoVirtual machine is unlocked.com.vmware.vc.vm.Crypto.VMUnlocked|Virtual machine {vmName} is unlocked.EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}com.vmware.vc.vm.DstVmClonedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was cloned from {oldMoRef}EventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}com.vmware.vc.vm.DstVmMigratedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was migrated from {oldMoRef}ExtendedEventVirtual machine PMem bandwidth usage is normalinfoVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalThe virtual machine's PMem bandwidth usage is normalcom.vmware.vc.vm.PMemBandwidthGreen|Virtual machine {vm.name}'s PMem bandwidth usage is normalExtendedEventVirtual machine PMem bandwidth usage is highwarningVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highThe virtual machine's PMem bandwidth usage is highcom.vmware.vc.vm.PMemBandwidthYellow|Virtual machine {vm.name}'s PMem bandwidth usage is highExtendedEventVirtual machine failed to power on after cloning.errorVirtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after performing cloning operation on this host.Virtual machine failed to power on after cloning.com.vmware.vc.vm.PowerOnAfterCloneErrorEvent|Virtual machine {vm.name} failed to power on after cloning on host {host.name} in datacenter {datacenter.name}EventExVirtual machine clone failederrorVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in 
{destDatacenter.name}com.vmware.vc.vm.SrcVmCloneFailedEvent|Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {oldMoRef} in {computeResource.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} on host {host.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}com.vmware.vc.vm.SrcVmClonedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} was cloned to {newMoRef}ExtendedEventVirtual machine failed to create instant clone childerrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childcom.vmware.vc.vm.SrcVmForkFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to create instant clone childEventExVirtual machine migration failederrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to migrateVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migratecom.vmware.vc.vm.SrcVmMigrateFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to migrateEventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} on {host.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}com.vmware.vc.vm.SrcVmMigratedEvent|Virtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} in {datacenter.name} was migrated to {newMoRef}ExtendedEventTemplate converted to VMinfoTemplate {vm.name} converted to VM on {host.name}Template {vm.name} converted to VM on {host.name}Template {vm.name} converted to VMConverted to VM on {host.name}com.vmware.vc.vm.TemplateConvertedToVmEvent|Template {vm.name} converted to VM on {host.name} in {datacenter.name}ExtendedEventVirtual machine tier 1 bandwidth usage is normalinfoVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalThe virtual machine's tier 1 bandwidth usage is normalcom.vmware.vc.vm.Tier1BandwidthGreen|Virtual machine {vm.name}'s tier 1 bandwidth usage is normalExtendedEventVirtual machine tier 1 bandwidth usage is highwarningVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highThe virtual machine's tier 1 bandwidth usage is highcom.vmware.vc.vm.Tier1BandwidthYellow|Virtual machine {vm.name}'s tier 1 bandwidth usage is highExtendedEventThe network adapter of VM successfully activate UPTinfoUPT on network adapter is activatedcom.vmware.vc.vm.Uptv2Active|The UPT is successfully activated on the network adapterEventExThe network adapter of VM fails to 
activate UPTwarningUPT on network adapter is not activatedcom.vmware.vc.vm.Uptv2Inactive|The UPT failed to activate on the network adapter.{details}EventExVirtual NIC reservation is not satisfiederrorReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is not satisfiedReservation of Virtual NIC {deviceLabel} is not satisfiedcom.vmware.vc.vm.VmAdapterResvNotSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is not satisfiedEventExVirtual NIC reservation is satisfiedinfoReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is satisfiedReservation of Virtual NIC {deviceLabel} is satisfiedcom.vmware.vc.vm.VmAdapterResvSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is satisfiedExtendedEventVM marked as templateinfoVM {vm.name} marked as template on {host.name}VM {vm.name} marked as template on {host.name}VM {vm.name} marked as templateMarked as template on {host.name}com.vmware.vc.vm.VmConvertedToTemplateEvent|VM {vm.name} marked as template on {host.name} in {datacenter.name}ExtendedEventPromoted disks of virtual machine successfullyinfoPromoted disks of virtual machine {vm.name} in {computeResource.name}Promoted disks of virtual machine {vm.name} on host {host.name}Promoted disks of virtual machine {vm.name}Promoted disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotedEvent|Promoted disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventPromoting disks of virtual machineinfoPromoting disks of virtual machine {vm.name} in {computeResource.name}Promoting disks of virtual machine {vm.name} on host {host.name}Promoting disks of virtual machine {vm.name}Promoting disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotingEvent|Promoting disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}EventExHot migrating virtual machine with encryptioninfoHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating from {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptioncom.vmware.vc.vm.VmHotMigratingWithEncryptionEvent|Hot migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost}, {destDatastore} in {destDatacenter} with encryptionEventExcom.vmware.vc.vm.VmMigratingWithEncryptionEvent|ExtendedEventFailed to promote disks of virtual machineinfoFailed to promote disks of virtual machine {vm.name} in {computeResource.name}Failed to promote disks of virtual machine {vm.name} on host {host.name}Failed to promote disks of virtual machine {vm.name}Failed to promote disks of virtual machine {vm.name}com.vmware.vc.vm.VmPromoteDisksFailedEvent|Failed to promote disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventReconfigure VM failed 
for {VM} on shared diskwarningReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskcom.vmware.vc.vm.VmReconfigureFailedonSharedDiskEvent|Reconfigure VM failed for {VM} on shared diskExtendedEventVirtual machine register failederrorVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on this host failedVirtual machine registration failedcom.vmware.vc.vm.VmRegisterFailedEvent|Virtual machine {vm.name} registration on {host.name} in datacenter {datacenter.name} failedEventExFailed to revert the virtual machine state to a snapshoterrorFailed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} on host {host.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine to snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateFailedToRevertToSnapshot|Failed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}EventExThe virtual machine state has been reverted to a snapshotinfoThe execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} on host {host.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateRevertedToSnapshot|The execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}EventExFault Tolerance virtual machine syncing to secondary with encryptioninfoFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM syncing to secondary on {dstHost} with encryptioncom.vmware.vc.vm.VmSyncingWithEncryptionEvent|Fault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionExtendedEventVirtual machine termination requestedinfoVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine termination requestedcom.vmware.vc.vm.VmTerminateEvent|Virtual machine {vm.name} termination requestedExtendedEventVirtual machine termination failederrorVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine termination failedcom.vmware.vc.vm.VmTerminateFailedEvent|Virtual 
machine {vm.name} termination failedEventExThe disk device is encrypted with mixed keys.warningThe disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.com.vmware.vc.vm.crypto.DiskchainUsingMixedKeys|The disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.EventExCryptographic operation failed due to insufficient disk space on datastoreerrorCryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.com.vmware.vc.vm.crypto.NoDiskSpace|Cryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.EventExcom.vmware.vc.vm.crypto.RekeyFail|ExtendedEventApplication Monitoring Is Not SupportedwarningApplication monitoring is not supported on {host.name} in cluster {computeResource.name}Application monitoring is not supported on {host.name}Application monitoring is not supportedcom.vmware.vc.vmam.AppMonitoringNotSupported|Application monitoring is not supported on {host.name} in cluster {computeResource.name} in {datacenter.name}EventExvSphere HA detected application heartbeat status changewarningvSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent|vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent"> <description> Application monitoring state changes indicate a change in the health of the application being monitored or in the application-monitoring process. A transition from gray to green occurs when application heartbeat is being enabled from within the guest. A transition to red occurs after vSphere HA didn't receive any heartbeats within 30 seconds. A transition from red to green occurs if heartbeats begin again before vSphere HA can react. A transition to gray occurs after application heartbeating is disabled from within the guest. </description> <cause> <description> Either the user initiated action from inside the guest or vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> If the state transitions to red, investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. Frequent state transitions to or from gray may indicate a problem with the application-monitoring agent. If they occur, investigate whether the enabling/disabling of monitoring is expected. 
</action> </cause> </EventLongDescription> EventExvSphere HA detected application state changewarningvSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthStateChangedEvent|vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthStateChangedEvent"> <description> Application state changes indicate that an in-guest application has posted one of the two allowed values - appStateOk or appStateNeedReset. The former indicates that the monitored application is fine, the latter causes an immediate reset if Application Monitoring is enabled for this virtual machine. </description> <cause> <description> This is an in-guest initiated action. </description> <action> If vSphere HA and Application Monitoring are enabled for this virtual machine, it is reset if the state is appStateNeedReset. If the virtual machine is being migrated using vMotion the reset will be delayed until the virtual machine has reached its destination. Also, the reset will be delayed until the datastore connectivity issues are resolved. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected application heartbeat failurewarningvSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected application heartbeat failure for {vm.name} on {host.name}vSphere HA detected application heartbeat failure for {vm.name}vSphere HA detected application heartbeat failure for this virtual machinecom.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent|vSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent"> <description> vSphere HA has detected a heartbeat failure from the application-monitoring agent inside the guest. If application monitoring is enabled in vSphere the virtual machine will be reset. </description> <cause> <description> vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> Investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. </action> </cause> </EventLongDescription> EventExvCenter server replication status has changed.infocom.vmware.vc.vmdir.ReplicationStatusChangeEvent|vCenter Server Replication Status : {replicationStatus} . 
{message}EventExvCenter server replication state has changedinfocom.vmware.vc.vmdir.StateChangeEvent|vCenter Server Replication State changed to '{newState}' from '{oldState}' cause: {reason}EventExvSAN datastore {datastoreName} does not have capacityerrorvSAN datastore {datastoreName} in cluster {computeResource.name} does not have capacityvSAN datastore {datastoreName} does not have capacitycom.vmware.vc.vsan.DatastoreNoCapacityEvent|vSAN datastore {datastoreName} in cluster {computeResource.name} in datacenter {datacenter.name} does not have capacity <EventLongDescription id="com.vmware.vc.vsan.DatastoreNoCapacityEvent"> <description> vSAN datastore does not have capacity. </description> <cause> <description> This might be because no disk is configured for vSAN, local disks configured for vSAN service become inaccessible or flash disks configured for vSAN service become inaccessible. </description> <action> Check if vSAN storage configuration is correct and if the local disks and flash disks configured for vSAN service are accessible. </action> </cause> </EventLongDescription> EventExHost cannot communicate with one or more other nodes in the vSAN enabled clustererrorHost {host.name} in cluster {computeResource.name} cannot communicate with all other nodes in the vSAN enabled clusterHost {host.name} cannot communicate with all other nodes in the vSAN enabled clusterHost cannot communicate with one or more other nodes in the vSAN enabled clustercom.vmware.vc.vsan.HostCommunicationErrorEvent|Host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} cannot communicate with all other nodes in the vSAN enabled cluster <EventLongDescription id="com.vmware.vc.vsan.HostCommunicationErrorEvent"> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. </description> <cause> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. This might be caused by network partition or misconfiguration. Each host needs at least one vmnic with vSAN enabled. Those vmnics need to be on the same physical network. The host should have the vSAN service enabled. </description> <action> Check the host for vSAN service configuration, vSAN network configuration and network connection. </action> </cause> </EventLongDescription> ExtendedEventHost with vSAN service enabled is not in the vCenter clustererror{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name}{host.name} with vSAN service enabled is not in the vCenter clusterHost with vSAN service enabled is not in the vCenter clustercom.vmware.vc.vsan.HostNotInClusterEvent|{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.HostNotInClusterEvent"> <description> Host with the vSAN service enabled is not in the vCenter cluster. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN service is not enabled with the recommended interface. </description> <action> Add the host into the cluster or disable vSAN on the host. </action> </cause> </EventLongDescription> ExtendedEventHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. 
Please check recent tasks for more detailserror{host.name} is in a vSAN cluster {computeResource.name} but does not have vSAN service enabled{host.name} is in a vSAN cluster but does not have vSAN service enabledHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. Please check recent tasks for more detailscom.vmware.vc.vsan.HostNotInVsanClusterEvent|{host.name} is in a vSAN enabled cluster {computeResource.name} in datacenter {datacenter.name} but does not have vSAN service enabled <EventLongDescription id="com.vmware.vc.vsan.HostNotInVsanClusterEvent"> <description> Host is in a vSAN enabled cluster but does not have vSAN service enabled. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN is not enabled with the recommended interface or the vSAN configuration is not set up appropriately. </description> <action> Re-enable vSAN or check the vSAN configuration. </action> </cause> </EventLongDescription> EventExvSAN host vendor provider deregistration has failed.errorvSAN vendor provider {host.name} deregistration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} deregistration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} deregistration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent|vSAN vendor provider {host.name} deregistration has failed. Reason : {fault.msg}. <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent"> <description> Cannot deregister host vendor provider in Storage management service </description> <cause> <description>Host vendor provider deregistration failed</description> <action>Check if Storage management service is running</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider has been successfully unregisteredinfovSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredcom.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully unregistered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent"> <description> Deregistered host vendor provider from Storage management service </description> </EventLongDescription> EventExvSAN host vendor provider registration failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. 
<EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent"> <description> Cannot register host vendor provider in Storage management service </description> <cause> <description>Host vendor provider registration failed</description> <action>Check if Storage management service is running</action> <action>Check if the vendor provider on host is running</action> <action>Check if there are network connectivity issues between host and VC</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider registration succeededinfovSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredcom.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully registered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent"> <description> Registered host vendor provider in Storage management service </description> </EventLongDescription> ExtendedEventvSAN network is not configurederrorvSAN network is not configured on {host.name} in cluster {computeResource.name}vSAN network is not configured on {host.name}vSAN network is not configuredcom.vmware.vc.vsan.NetworkMisConfiguredEvent|vSAN network is not configured on {host.name}, in cluster {computeResource.name}, and in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.NetworkMisConfiguredEvent"> <description> vSAN network is not configured. </description> <cause> <description> vSAN network is not set up appropriately. vSAN datastore will not be formed as expected. </description> <action> Create at least one vmnic with vSAN enabled on the host. </action> </cause> </EventLongDescription> EventExFound another host participating in the vSAN service which is not a member of this host's vCenter clustererrorFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter cluster {computeResource.name}Found host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clusterFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clustercom.vmware.vc.vsan.RogueHostFoundEvent|Found host(s) {hostString} participating in the vSAN service in cluster {computeResource.name} in datacenter {datacenter.name} is not a member of this host's vCenter cluster <EventLongDescription id="com.vmware.vc.vsan.RogueHostFoundEvent"> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. </description> <cause> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. This might be caused by misconfiguration. </description> <action> Add the rogue host into the cluster or disable vSAN on the rogue host. </action> </cause> </EventLongDescription> EventExFailed to turn off the disk locator LEDerrorFailed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOffFailedEvent|Failed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}EventExFailed to turn on the disk locator LEDerrorFailed to turn on the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOnFailedEvent|Failed to turn on the locator LED of disk {disk.path}. 
Reason : {fault.msg}EventExvSAN cluster needs disk format upgradewarningvSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationvSAN cluster has one or more hosts for which disk format upgrade is recommended: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationcom.vmware.vc.vsan.VsanHostNeedsUpgradeEvent|vSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationEventExUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}errorUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}com.vmware.vc.vtpm.FailedProcessingVTpmCertsEvent|Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}ExtendedEventA compute policy has been createdinfocom.vmware.vcenter.compute.policies.createEvent|Compute policy {policyName} has been createdExtendedEventA compute policy has been deletedinfocom.vmware.vcenter.compute.policies.deleteEvent|Compute policy {policyName} has been deletedEventExDatabase replication state changed: sync, async or no replicationinfocom.vmware.vcha.DB.replication.state.changed|Database replication mode changed to {newState}EventExThe management interface (NIC0) IP address you specified as for the Passive node is different than the original IP address used to configure vCenter HA. You must use the same IP address.errorcom.vmware.vcha.cluster.differentFailoverIp|The management interface (NIC0) IP address you specified as {given} for the Passive node is different than the original IP address {original} used to configure vCenter HA. You must use the same IP address.EventExvCenter HA cluster mode changedinfocom.vmware.vcha.cluster.mode.changed|vCenter HA cluster mode changed to {clusterMode}ExtendedEventUnable to enable mode.errorcom.vmware.vcha.cluster.modeEnableFailed|Unable to enable mode.EventExThe hostname for a node does not map to the vCenter Server PNID.errorcom.vmware.vcha.cluster.pnidHostnameMismatch|The hostname for {nodeIp} does not map to the vCenter Server PNID. 
Review the hostname you specified during the VM clone customization step.ExtendedEventVerify if the Passive and the Witness nodes are up and reachable.errorcom.vmware.vcha.cluster.quorumNotCloned|Verify if the Passive and the Witness nodes are up and reachable.EventExUnable to SSH to a node.errorcom.vmware.vcha.cluster.sshConnectFailed|Unable to SSH to {nodeIp}.ExtendedEventvCenter HA cluster state is currently degradedwarningcom.vmware.vcha.cluster.state.degraded|vCenter HA cluster state is currently degradedExtendedEventvCenter HA cluster is destroyedinfocom.vmware.vcha.cluster.state.destroyed|vCenter HA cluster is destroyedExtendedEventvCenter HA cluster state is currently healthyinfocom.vmware.vcha.cluster.state.healthy|vCenter HA cluster state is currently healthyExtendedEventvCenter HA cluster state is currently isolatederrorcom.vmware.vcha.cluster.state.isolated|vCenter HA cluster state is currently isolatedExtendedEventUnable to get vpxd hostname.errorcom.vmware.vcha.cluster.vcFqdnUnavailable|Unable to get vpxd hostname.ExtendedEventFailover cannot proceed when cluster is in disabled modewarningcom.vmware.vcha.failover.failed.disabled.mode|Failover cannot proceed when cluster is in disabled modeExtendedEventFailover cannot proceed when cluster does not have all three nodes connectedwarningcom.vmware.vcha.failover.failed.node.lost|Failover cannot proceed when cluster does not have all three nodes connectedExtendedEventFailover cannot proceed when Passive node is not ready to takeoverwarningcom.vmware.vcha.failover.failed.passive.not.ready|Failover cannot proceed when vPostgres on Passive node is not ready to takeoverExtendedEventFailover did not succeed. Failed to flush the data to the Passive nodewarningcom.vmware.vcha.failover.flush.failed.degraded|Failover did not succeed. 
Failed to flush the data to the Passive nodeExtendedEventFailover failure is acknowledgedinfocom.vmware.vcha.failover.flush.failed.healthy|Failover failure is acknowledgedExtendedEventFailover status is unknowninfocom.vmware.vcha.failover.flush.failed.unknown|Failover status is unknownExtendedEventFailover succeededinfocom.vmware.vcha.failover.succeeded|Failover succeededEventExAppliance File replication state changedinfocom.vmware.vcha.file.replication.state.changed|Appliance {fileProviderType} is {state}EventExThis node was forcefully converted to the Active nodeinfocom.vmware.vcha.force.reset.active|Node {nodename} was forcefully converted to the Active nodeEventExOne node joined back to the clusterinfocom.vmware.vcha.node.joined|Node {nodeName} joined back to the clusterEventExOne node left the clusterwarningcom.vmware.vcha.node.left|Node {nodeName} left the clusterExtendedEventPSC HA state is currently degradedinfocom.vmware.vcha.psc.ha.health.degraded|PSC HA state is currently degradedExtendedEventPSC HA state is currently healthyinfocom.vmware.vcha.psc.ha.health.healthy|PSC HA state is currently healthyExtendedEventPSC HA state is not being monitoredinfocom.vmware.vcha.psc.ha.health.unknown|PSC HA is not monitored after vCenter HA cluster is destroyedExtendedEventVMware Directory Service health is currently degradedwarningcom.vmware.vcha.vmdir.health.degraded|VMware Directory Service health is currently degradedExtendedEventVMware Directory Service is currently healthyinfocom.vmware.vcha.vmdir.health.healthy|VMware Directory Service is currently healthyExtendedEventVMware Directory Service health is not being monitoredinfocom.vmware.vcha.vmdir.health.unknown|VMware Directory Service health is not being monitoredExtendedEventvSphere Cluster Services mode is system managed on cluster.infocom.vmware.vcls.cluster.DeploymentModeSystemManagedEvent|vSphere Cluster Services mode is system managed on cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.infocom.vmware.vcls.cluster.DrsDisabledHaDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-enabled cluster.errorcom.vmware.vcls.cluster.DrsEnabledDeployModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-enabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.DrsEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.ExtendedEventvSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.warningcom.vmware.vcls.cluster.HaEnabledDrsDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.HaEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. 
HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.ExtendedEventVSAN VASA provider became available.infocom.vmware.vcls.cluster.VsanProviderAvailableEvent|VSAN VASA provider became available.ExtendedEventTimed out waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.VsanProviderTimedoutEvent|Timed out waiting for VSAN VASA provider availability.EventExA Data Processing Unit is down.infoA Data Processing Unit is down.com.vmware.vim.dpu.down|The Data Processing Unit with id '{dpuId}' is down.EventExA Data Processing Unit has been removed from the system.infoA Data Processing Unit has been removed from the system.com.vmware.vim.dpu.removed|The Data Processing Unit with id '{dpuId}' has been removed from the system.EventExThe management state for a Data Processing Unit has changed.infoThe management state for a Data Processing Unit has changed.com.vmware.vim.dpu.state.changed|The management state for the Data Processing Unit with id '{dpuId}' has changed to '{state}'.EventExThe dpu failover ended on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.com.vmware.vim.dpuFailover.end|DPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.EventExThe dpu failover started on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.com.vmware.vim.dpuFailover.start|DPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.ExtendedEventInvalid UTF-8 string encountered.warningInvalid UTF-8 string encountered.com.vmware.vim.utf8filter.badvalue|Invalid UTF-8 string encountered.ExtendedEventSome of the disks of the virtual machine failed to load. The information present for them in the virtual machine configuration may be incompletewarningSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} failed to load. The information present for them in the virtual machine configuration may be incompletecom.vmware.vim.vm.DisksNotLoaded|Some of the disks of the virtual machine {vm.name} on host {host.name} failed to load. 
The information present for them in the virtual machine configuration may be incompleteExtendedEventSnapshot operations are not allowed due to some of the snapshot related objects failed to load.warningSnapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.com.vmware.vim.vm.SnapshotNotAllowed|Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.ExtendedEventVirtual machine reboot converted to power off because the rebootPowerOff option is enabledinfoReboot converted to power off on virtual machine {vm.name}.Reboot converted to power off.com.vmware.vim.vm.reboot.powerOff|Reboot converted to power off on virtual machine {vm.name} on {host.name} because the rebootPowerOff option is enabled.EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' bound to provider '{providerName}'EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vm|vService dependency '{dependencyName}' on '{vm.name}' bound to provider '{providerName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vApp|Created vService dependency '{dependencyName}' with type '{dependencyType}' on vApp '{targetName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vm|Created vService dependency '{dependencyName}' with type '{dependencyType}' on '{vm.name}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vApp|Destroyed vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vm|Destroyed vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vApp|Reconfigured vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vm|Reconfigured vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' unbound from provider '{providerName}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vm|vService dependency '{dependencyName}' on '{vm.name}' unbound from provider '{providerName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vApp|Updated vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vm|Updated vService dependency '{dependencyName}' on '{vm.name}'EventExvService provider registeredinfocom.vmware.vim.vsm.provider.register|vService provider '{providerName}' with type '{providerType}' registered for extension '{extensionKey}'EventExvService provider unregisteredinfocom.vmware.vim.vsm.provider.unregister|vService provider '{providerName}' with type '{providerType}' unregistered for extension '{extensionKey}'EventExvService provider 
updatedinfocom.vmware.vim.vsm.provider.update|Updating vService provider '{providerName}' registered for extension '{extensionKey}'EventExDeleting stale vdisks generated by FCD migration failed.errorcom.vmware.vslm.DeleteStaleDiskFailureEvent|Deleting stale vdisk {diskPath} and related files generated as part of FCD migration failed. Try to delete them manually.EventExRegistering vdisk as FCD at source failed during rollback of unsuccessful migration.errorcom.vmware.vslm.RegisterDiskFailed|Registering {fcdPath} with name {fcdName} as FCD at source failed during rollback of unsuccessful migration. Try to register it manually using RegisterDisk API.EventExUnregistering of vdisk at destination failed during rollback of unsuccessful migration.errorcom.vmware.vslm.UnRegisterDiskFailed|Unregistering of FCD {fcdId} failed at destination during rollback of unsuccessful migration. Reconcile of datastore {datastore} should fix inconsistencies if any.EventExConnectivity check completedinfocom.vmware.vsphere.client.security.ConnectivityCheckEvent|Connectivity check completed. Operation: {Operation}. Subscription status: {SubscriptionCheckResult}. Connectivity status: {ConnectivityCheckResult}. Access type: {AccessType}. User: {Username}ExtendedEventDatastore is accessible to all hosts under the cluster.infocom.vmware.wcp.Datastore.accessible|Datastore is accessible to all hosts under the clusterExtendedEventDatastore not accessible to all hosts under the cluster.warningcom.vmware.wcp.Datastore.inaccessible|Datastore not accessible to all hosts under the cluster.EventExRemote access for an ESXi local user account has been locked temporarilly due to multiple failed login attempts.warningesx.audit.account.locked|Remote access for ESXi local user account '{1}' has been locked for {2} seconds after {3} failed login attempts.EventExMultiple remote login failures detected for an ESXi local user account.warningesx.audit.account.loginfailures|Multiple remote login failures detected for ESXi local user account '{1}'.ExtendedEventRestoring factory defaults through DCUI.warningesx.audit.dcui.defaults.factoryrestore|The host has been restored to default factory settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventThe DCUI has been disabled.infoesx.audit.dcui.disabled|The DCUI has been disabled.ExtendedEventThe DCUI has been enabled.infoesx.audit.dcui.enabled|The DCUI has been enabled.ExtendedEventRebooting host through DCUI.warningesx.audit.dcui.host.reboot|The host is being rebooted through the Direct Console User Interface (DCUI).ExtendedEventShutting down host through DCUI.warningesx.audit.dcui.host.shutdown|The host is being shut down through the Direct Console User Interface (DCUI).ExtendedEventRestarting host agents through DCUI.infoesx.audit.dcui.hostagents.restart|The management agents on the host are being restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExLogin authentication on DCUI failederroresx.audit.dcui.login.failed|Authentication of user {1} has failed. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExDCUI login password changed.infoesx.audit.dcui.login.passwd.changed|Login password for user {1} has been changed. 
Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventFactory network settings restored through DCUI.warningesx.audit.dcui.network.factoryrestore|The host has been restored to factory network settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExRestarting network through DCUI.infoesx.audit.dcui.network.restart|A management interface {1} has been restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.esx.audit.entropy.available.low|Host is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.esx.audit.entropy.external.source.disconnected|Host is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.EventExPowering off host through esxcliwarningesx.audit.esxcli.host.poweroff.reason|The host is being powered off through esxcli. Reason for powering off: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.reboot.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.restart.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExHost acceptance level changedinfoesx.audit.esximage.hostacceptance.changed|Host acceptance level changed from {1} to {2}ExtendedEventUEFI Secure Boot enabled: Cannot skip signature checks.warningesx.audit.esximage.install.nobypasssigcheck|UEFI Secure Boot enabled: Cannot skip signature checks. Installing unsigned VIBs will prevent the system from booting. So the vib signature check will be enforced.ExtendedEventAttempting to install an image profile bypassing signing and acceptance level verification.warningesx.audit.esximage.install.nosigcheck|Attempting to install an image profile bypassing signing and acceptance level verification. This may pose a large security risk.ExtendedEventAttempting to install an image profile with validation disabled.warningesx.audit.esximage.install.novalidation|Attempting to install an image profile with validation disabled. This may result in an image with unsatisfied dependencies, file or package conflicts, and potential security violations.EventExSECURITY ALERT: Installing image profile.warningesx.audit.esximage.install.securityalert|SECURITY ALERT: Installing image profile '{1}' with {2}.EventExSuccessfully installed image profile.infoesx.audit.esximage.profile.install.successful|Successfully installed image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). 
Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully updated host to new image profile.infoesx.audit.esximage.profile.update.successful|Successfully updated host to image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully changed software on host.infoesx.audit.esximage.software.apply.succeeded|Successfully installed {1} component(s) and removed {2} component(s) on host. To see more details about the transaction, run 'esxcli software profile get'.EventExSuccessfully installed VIBs.infoesx.audit.esximage.vib.install.successful|Successfully installed {1} VIB(s), removed {2} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully removed VIBsinfoesx.audit.esximage.vib.remove.successful|Successfully removed {1} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExDPU trust validation failederroresx.audit.esxtokend.dputrust.failed|DPU: {1} trust validation failedEventExDPU was removedwarningesx.audit.esxtokend.dputrust.removed|DPU:{1} was removed.EventExDPU trust validation succeededinfoesx.audit.esxtokend.dputrust.succeeded|DPU: {1} trust validation succeeded.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Warning tripped.EventExNVDIMM: Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.lifetime.warning|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Warning tripped.EventExNVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.spareblocks|NVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.temperature|NVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).warningesx.audit.hardware.nvd.health.life.pctused|NVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).EventExNVDIMM Count of DRAM correctable ECC errors above threshold.infoesx.audit.hardware.nvd.health.module.ce|NVDIMM (handle {1}, idString {2}): Count of DRAM correctable ECC errors above threshold.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Warning tripped.EventExNVDIMM: Module Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.lifetime.warning|NVDIMM (handle {1}, idString {2}): Module Lifetime Warning tripped.EventExNVDIMM: Module Temperature Warning 
tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.temperature.warning|NVDIMM (handle {1}, idString {2}): Module Temperature Warning tripped.EventExNVDIMM: Maintenance needed.warningesx.audit.hardware.nvd.health.vmw.statusflags.maintNeeded|NVDIMM (handle {1}, idString {2}): Maintenance needed.EventExA physical disk has been inserted.infoA physical disk has been insertedesx.audit.hcm.event.disk.insertion|A physical disk has been inserted ({1}).EventExA physical disk has been removed.infoA physical disk has been removed.esx.audit.hcm.event.disk.removal|A physical disk has been removed ({1}).ExtendedEventHost has booted.infoesx.audit.host.boot|Host has booted.EventExHost experienced a crashinfoesx.audit.host.crash.reason|The crash at {1} occurred due to: {2}. More details will be available in the generated vmkernel-zdump.EventExThe host experienced a crashinfoesx.audit.host.crash.reason.available|The host experienced a crash. Reason: {1}.ExtendedEventHost experienced a crashinfoesx.audit.host.crash.reason.unavailable|Host experienced a crash. More details will be available in the generated vmkernel-zdump.EventExThe number of virtual machines registered on the host exceeded limit.warningThe number of virtual machines registered on host {host.name} in cluster {computeResource.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered on host {host.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered exceeded limit: {current} registered, {limit} is the maximum supported.esx.audit.host.maxRegisteredVMsExceeded|The number of virtual machines registered on host {host.name} in cluster {computeResource.name} in {datacenter.name} exceeded limit: {current} registered, {limit} is the maximum supported.EventExThe host has been powered offinfoesx.audit.host.poweroff.reason.available|The host has been powered off. Reason for powering off: {1}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.management|The power off at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.subsystem|The power off at {1} was requested by {2} due to: {3}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.timestamp|The power off at {1} was requested due to: {2}.ExtendedEventHost had been powered offinfoesx.audit.host.poweroff.reason.unavailable|Host had been powered off. The poweroff was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.user|The power off at {1} was requested by user/entity {2} due to: {3}.EventExThe host experienced Quick Bootinfoesx.audit.host.quickboot.reason.available|The host experienced Quick Boot. 
Reason for reboot: {1}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.management|The Quick Boot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.subsystem|The Quick Boot at {1} was requested by {2} due to: {3}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.timestamp|The Quick Boot at {1} was requested due to: {2}.ExtendedEventHost experienced Quick Bootinfoesx.audit.host.quickboot.reason.unavailable|Host experienced Quick Boot. The Quick Boot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.user|The Quick Boot at {1} was requested by user/entity {2} due to: {3}.EventExThe host has been rebootedinfoesx.audit.host.reboot.reason.available|The host has been rebooted. Reason for reboot: {1}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.management|The reboot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.subsystem|The reboot at {1} was requested by {2} due to: {3}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.timestamp|The reboot at {1} was requested due to: {2}.ExtendedEventHost had been rebootedinfoesx.audit.host.reboot.reason.unavailable|Host had been rebooted. The reboot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.user|The reboot at {1} was requested by user/entity {2} due to: {3}.ExtendedEventHost is rebooting.infoesx.audit.host.stop.reboot|Host is rebooting.ExtendedEventHost is shutting down.infoesx.audit.host.stop.shutdown|Host is shutting down.EventExPowering off host through hostdwarningesx.audit.hostd.host.poweroff.reason|The host is being powered off through hostd. Reason for powering off: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.reboot.reason|The host is being rebooted through hostd. Reason for reboot: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.restart.reason|The host is being rebooted through hostd. 
Reason for reboot: {1}, User: {2}.ExtendedEventAdministrator access to the host has been enabled.infoesx.audit.lockdownmode.disabled|Administrator access to the host has been enabled.ExtendedEventAdministrator access to the host has been disabled.infoesx.audit.lockdownmode.enabled|Administrator access to the host has been disabled.ExtendedEventList of lockdown exception users has been changed.infoesx.audit.lockdownmode.exceptions.changed|List of lockdown exception users has been changed.ExtendedEventThe host has canceled entering maintenance mode.infoesx.audit.maintenancemode.canceled|The host has canceled entering maintenance mode.ExtendedEventThe host has entered maintenance mode.infoesx.audit.maintenancemode.entered|The host has entered maintenance mode.ExtendedEventThe host has begun entering maintenance mode.infoesx.audit.maintenancemode.entering|The host has begun entering maintenance mode.ExtendedEventThe host has exited maintenance mode.infoesx.audit.maintenancemode.exited|The host has exited maintenance mode.ExtendedEventThe host has failed entering maintenance mode.erroresx.audit.maintenancemode.failed|The host has failed entering maintenance mode.EventExFirewall configuration has changed.infoesx.audit.net.firewall.config.changed|Firewall configuration has changed. Operation '{1}' for rule set {2} succeeded.ExtendedEventFirewall has been disabled.warningesx.audit.net.firewall.disabled|Firewall has been disabled.EventExFirewall has been enabled for port.infoesx.audit.net.firewall.enabled|Firewall has been enabled for port {1}.EventExPort is now protected by Firewall.infoesx.audit.net.firewall.port.hooked|Port {1} is now protected by Firewall.EventExPort is no longer protected with Firewall.warningesx.audit.net.firewall.port.removed|Port {1} is no longer protected with Firewall.EventExLACP disabledinfoesx.audit.net.lacp.disable|LACP for VDS {1} is disabled.EventExLACP enabledinfoesx.audit.net.lacp.enable|LACP for VDS {1} is enabled.EventExuplink is connectedinfoesx.audit.net.lacp.uplink.connected|LACP info: uplink {1} on VDS {2} got connected.EventExThe host has canceled entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.canceled|The host has canceled entering '{1}'.EventExThe host has entered a partial maintenance mode.infoesx.audit.partialmaintenancemode.entered|The host has entered '{1}'.EventExThe host has begun entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.entering|The host has begun entering '{1}'.EventExThe host has exited a partial maintenance mode.infoesx.audit.partialmaintenancemode.exited|The host has exited '{1}'.EventExThe host has failed entering a partial maintenance mode.erroresx.audit.partialmaintenancemode.failed|The host has failed entering '{1}'.ExtendedEventThe ESXi command line shell has been disabled.infoesx.audit.shell.disabled|The ESXi command line shell has been disabled.ExtendedEventThe ESXi command line shell has been enabled.infoesx.audit.shell.enabled|The ESXi command line shell has been enabled.ExtendedEventSSH access has been disabled.infoesx.audit.ssh.disabled|SSH access has been disabled.ExtendedEventSSH access has been enabled.infoesx.audit.ssh.enabled|SSH access has been enabled.EventExSSH session was closed.infoesx.audit.ssh.session.closed|SSH session was closed for '{1}@{2}'.EventExSSH login has failed.infoesx.audit.ssh.session.failed|SSH login has failed for '{1}@{2}'.EventExSSH session was opened.infoesx.audit.ssh.session.opened|SSH session was opened for '{1}@{2}'.EventExPowering off 
hostwarningesx.audit.subsystem.host.poweroff.reason|The host is being powered off. Reason for powering off: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.reboot.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.restart.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.ExtendedEventSupershell session has been started by a user.warningSupershell session has been started by a user.esx.audit.supershell.access|Supershell session has been started by a user.EventExTest with an int argumenterroresx.audit.test.test1d|Test with {1}EventExTest with a string argumenterroresx.audit.test.test1s|Test with {1}ExtendedEventUSB configuration has changed.infoUSB configuration has changed on host {host.name} in cluster {computeResource.name}.USB configuration has changed on host {host.name}.USB configuration has changed.esx.audit.usb.config.changed|USB configuration has changed on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExEnforcement level changed for all security domains.warningesx.audit.uw.secpolicy.alldomains.level.changed|The enforcement level for all security domains has been changed to {1}. The enforcement level must always be set to enforcing.EventExEnforcement level changed for security domain.warningesx.audit.uw.secpolicy.domain.level.changed|The enforcement level for security domain {1} has been changed to {2}. The enforcement level must always be set to enforcing.ExtendedEventExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.warningesx.audit.uw.security.User.ExecInstalledOnly.disabled|ExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.infoesx.audit.uw.security.User.ExecInstalledOnly.enabled|ExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.EventExExecution of non-installed file prevented.warningesx.audit.uw.security.execInstalledOnly.violation|Execution of unknown (non VIB installed) binary '{1}' prevented. Unknown content can cause malware attacks similar to Ransomware.EventExExecution of non-installed file detected.warningesx.audit.uw.security.execInstalledOnly.warning|Execution of unknown (non VIB installed) binary '{1}'. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventLVM device discovered.infoesx.audit.vmfs.lvm.device.discovered|One or more LVM devices have been discovered on this host.EventExRead IO performance may be impacted for diskinfoRead IO performance may be impacted for disk {1}: {2}Read IO performance may be impacted for disk {1}: {2}esx.audit.vmfs.sesparse.bloomfilter.disabled|Read IO performance may be impacted for disk {1}: {2}EventExFile system mounted.infoesx.audit.vmfs.volume.mounted|File system {1} on volume {2} has been mounted in {3} mode on this host.EventExLVM volume un-mounted.infoesx.audit.vmfs.volume.umounted|The volume {1} has been safely un-mounted. The datastore is no longer accessible on this host.EventExvSAN device is added back successfully after MEDIUM error.infovSAN device {1} is added back successfully after MEDIUM error. 
Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.devicerebuild|vSAN device {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExvSAN diskgroup is rebuilt successfully after MEDIUM error.infovSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.diskgrouprebuild|vSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExFound components with invalid metadatawarning{1} components found with invalid metadata on disk {2} {3}esx.audit.vob.vsan.lsom.foundInvalidMetadataComp|{1} components found with invalid metadata on disk {2} {3}EventExvSAN storagepool is added back successfully after MEDIUM error.infovSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.storagepoolrebuild|vSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExTest with both int and string arguments.infoesx.audit.vobdtestcorrelator.test|Test with both string: {2} and int: {1}.ExtendedEventvSAN clustering services have been enabled.infovSAN clustering and directory services have been enabled.esx.audit.vsan.clustering.enabled|vSAN clustering and directory services have been enabled.ExtendedEventvSAN virtual NIC has been added.infovSAN virtual NIC has been added.esx.audit.vsan.net.vnic.added|vSAN virtual NIC has been added.ExtendedEventvSAN network configuration has been removed.errorvSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.esx.audit.vsan.net.vnic.deleted|vSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.EventExvSAN RDMA changed for vmknic.infovSAN RDMA changed for vmknic {1}.esx.audit.vsan.rdma.changed|vSAN RDMA changed for vmknic {1}.ExtendedEventHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445warningHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445esx.audit.weak.ssl.protocol|Weak SSL protocols found and disabled. Please refer to KB article: KB 2151445ExtendedEventA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.infoA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.esx.clear.coredump.configured|A vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.ExtendedEventAt least one coredump target has been configured. Host core dumps will be saved.infoAt least one coredump target has been configured. Host core dumps will be saved.esx.clear.coredump.configured2|At least one coredump target has been configured. Host core dumps will be saved.EventExNVDIMM Energy Source is sufficiently charged.infoesx.clear.hardware.nvd.health.module.es.charged|NVDIMM (handle {1}, idString {2}): Energy Source is sufficiently charged.EventExRestored network connectivity to portgroupsinfoesx.clear.net.connectivity.restored|Network connectivity restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExRestored Network Connectivity to DVPortsinfoesx.clear.net.dvport.connectivity.restored|Network connectivity restored on DVPorts: {1}. 
Physical NIC {2} is up.EventExRestored Network Redundancy to DVPortsinfoesx.clear.net.dvport.redundancy.restored|Uplink redundancy restored on DVPorts: {1}. Physical NIC {2} is up recently.EventExlag transition upinfoesx.clear.net.lacp.lag.transition.up|LACP info: LAG {1} on VDS {2} is up.EventExuplink transition upinfoesx.clear.net.lacp.uplink.transition.up|LACP info: uplink {1} on VDS {2} is moved into link aggregation group.EventExuplink is unblockedinfoesx.clear.net.lacp.uplink.unblocked|LACP info: uplink {1} on VDS {2} is unblocked.EventExRestored uplink redundancy to portgroupsinfoesx.clear.net.redundancy.restored|Uplink redundancy restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExLink state upinfoesx.clear.net.vmnic.linkstate.up|Physical NIC {1} linkstate is up.EventExStorage Device I/O Latency has improvedinfoesx.clear.psastor.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.psastor.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.psastor.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExScsi Device I/O Latency has improvedinfoesx.clear.scsi.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.scsi.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.scsi.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExExited the All Paths Down stateinfoesx.clear.storage.apd.exit|Device or filesystem with identifier {1} has exited the All Paths Down state.EventExRestored connectivity to storage deviceinfoesx.clear.storage.connectivity.restored|Connectivity to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored path redundancy to storage deviceinfoesx.clear.storage.redundancy.restored|Path redundancy to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored connection to NFS serverinfoesx.clear.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExNFS volume I/O Latency has improvedinfoesx.clear.vmfs.nfs.volume.io.latency.improved|NFS volume {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExvSAN device has come online.infovSAN device {1} has come online.esx.clear.vob.vsan.pdl.online|vSAN device {1} has come online.EventExTest with both int and string arguments.infoesx.clear.vobdtestcorrelator.test|Test with both string: {1} {3} and int: {2}.ExtendedEventvSAN clustering services have now been enabled.infovSAN clustering and directory services have now been enabled.esx.clear.vsan.clustering.enabled|vSAN clustering and directory services have now been enabled.ExtendedEventvSAN now has at least one active network configuration.infovSAN now has a usable network configuration. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.network.available|vSAN now has a usable network configuration. 
Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExA previously reported vmknic now has a valid IP.infovmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.vmknic.ready|vmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExVVol container has come online.infoesx.clear.vvol.container.online|VVol container {1} has come online.EventExA 3rd party component on ESXi has reported an error.erroresx.problem.3rdParty.error|A 3rd party component, {1}, running on ESXi has reported an error. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.info|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.information|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported a warning.warningesx.problem.3rdParty.warning|A 3rd party component, {1}, running on ESXi has reported a warning related to a problem. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA corrected memory error occurrederroresx.problem.apei.bert.memory.error.corrected|A corrected memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA fatal memory error occurrederroresx.problem.apei.bert.memory.error.fatal|A fatal memory error occurred in the last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA recoverable memory error occurrederroresx.problem.apei.bert.memory.error.recoverable|A recoverable memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA corrected PCIe error occurrederroresx.problem.apei.bert.pcie.error.corrected|A corrected PCIe error occurred in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA fatal PCIe error occurrederroresx.problem.apei.bert.pcie.error.fatal|Platform encountered a fatal PCIe error in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA recoverable PCIe error occurrederroresx.problem.apei.bert.pcie.error.recoverable|A recoverable PCIe error occurred in last boot. 
The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExAn application running on ESXi host has crashed and core file creation failed.warningesx.problem.application.core.dumpFailed|An application ({1}) running on ESXi host has crashed ({2} time(s) so far), but core dump creation failed.EventExAn application running on ESXi host has crashed and a core file was created.warningesx.problem.application.core.dumped|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). A core file might have been created at {3}.EventExAn application running on ESXi host has crashed and an encrypted core file was created.warningesx.problem.application.core.dumped.encrypted|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). An encrypted core file using keyId {3} might have been created at {4}.ExtendedEventCritical failure detected during boot, please refer to KB 93107.errorA critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.esx.problem.boot.failure.detected|A critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.ExtendedEventSystem clock no longer synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.lostsync|system clock no longer synchronized to upstream time serversExtendedEventSystem clock synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.sync|system clock synchronized to upstream time serversExtendedEventSystem clock lost synchronization to upstream time serverswarningesx.problem.clock.correction.adjtime.unsync|system clock lost synchronization to upstream time serversEventExApplication system changed clock, synchronization lostwarningesx.problem.clock.correction.changed|{1} stepped system clock to {2}.{3}, synchronization lostEventExAllowed system clock update with large time changewarningesx.problem.clock.correction.delta.allowed|Clock stepped to {1}.{2}, but delta {3} > {4} secondsEventExFailed system clock update with large time changeerroresx.problem.clock.correction.delta.failed|Clock step to {1}.{2} failed, delta {3} > {4} seconds, number of large corrections > {5}EventExAllowed system clock update with large time change, but number of future updates limitedwarningesx.problem.clock.correction.delta.warning|Clock stepped to {1}.{2}, but delta {3} > {4} seconds, {5}/{6} large correctionsEventExSystem clock stepped, lost synchronizationwarningesx.problem.clock.correction.step.unsync|system clock stepped to {1}.{2}, lost synchronizationEventExSystem clock maximum number of large corrections changedwarningesx.problem.clock.parameter.set.maxLargeCorrections|system clock max number of correction set to {1}EventExSystem clock maximum negative phase correction changedwarningesx.problem.clock.parameter.set.maxNegPhaseCorrection|system clock max negative phase correction set to {1}EventExSystem clock maximum positive phase correction changedwarningesx.problem.clock.parameter.set.maxPosPhaseCorrection|system clock max positive phase correction set to {1}EventExSystem clock count of number of large corrections changedwarningesx.problem.clock.parameter.set.numLargeCorrections|system clock number of large correction set to {1}EventExSystem clock VOB report interval 
changedwarningesx.problem.clock.parameter.set.vobReportInterval|system clock max number of correction set to {1}ExtendedEventSystem clock state has been resetwarningesx.problem.clock.state.reset|system clock state has been resetEventExThe storage capacity of the coredump targets is insufficient to capture a complete coredump.warningThe storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.esx.problem.coredump.capacity.insufficient|The storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.EventExThe free space available in default coredump copy location is insufficient to copy new coredumps.warningThe free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.esx.problem.coredump.copyspace|The free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.EventExThe given partition has insufficient amount of free space to extract the coredump.warningThe given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.esx.problem.coredump.extraction.failed.nospace|The given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.ExtendedEventNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.warningNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured|No vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.ExtendedEventNo coredump target has been configured. Host core dumps cannot be saved.warningNo coredump target has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured2|No coredump target has been configured. Host core dumps cannot be saved.ExtendedEventDRAM ECC not enabled. Please enable it in BIOS.erroresx.problem.cpu.amd.mce.dram.disabled|DRAM ECC not enabled. Please enable it in BIOS.ExtendedEventNot all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. erroresx.problem.cpu.intel.ioapic.listing.error|Not all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. ExtendedEventMCE monitoring will be disabled as an unsupported CPU was detected. Please consult the ESX HCL for information on supported hardware.erroresx.problem.cpu.mce.invalid|MCE monitoring will be disabled as an unsupported CPU was detected. 
Please consult the ESX HCL for information on supported hardware.EventExHigh number of corrected errors on a page.infoesx.problem.cpu.page.correctederrors.high|High number of corrected errors on host physical page number {1}EventExDisabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.erroresx.problem.cpu.smp.ht.invalid|Disabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.EventExFound {1} PCPUs, but only using {2} of them due to specified limit.erroresx.problem.cpu.smp.ht.numpcpus.max|Found {1} PCPUs, but only using {2} of them due to specified limit.EventExDisabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.erroresx.problem.cpu.smp.ht.partner.missing|Disabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.EventExError copying ConfigStore from backup.errorError copying ConfigStore from backup.esx.problem.cs.createstore.copy.backup.error|Error copying ConfigStore from backup {1}.ExtendedEventFailed an operation on the ConfigStore database.errorFailed an operation on the ConfigStore database.esx.problem.cs.db.operation.error|Failed an operation on the ConfigStore database.ExtendedEventFailed to setup desired configuration.errorFailed to setup desired configuration.esx.problem.cs.desired.config.error|Failed to setup desired configuration.ExtendedEventError cleaning up Datafile store.errorError cleaning up Datafile store.esx.problem.cs.dfs.cleanup.error|Error cleaning up Datafile store.ExtendedEventDataFile store cannot be restored.errorDataFile store cannot be restored.esx.problem.cs.dfs.restore.error|DataFile store cannot be restored.EventExError processing schema file.errorError processing schema file.esx.problem.cs.schema.file.error|Error processing schema file {1}.EventExInvalid metadata in schema file.errorInvalid metadata in schema file.esx.problem.cs.schema.metadata.error|Invalid metadata in schema file {1}.EventExVibId validation failed for schema file.errorVibId validation failed for schema file.esx.problem.cs.schema.validation.error|VibId validation failed for schema file {1}.EventExError in upgrading config.errorError in upgrading config.esx.problem.cs.upgrade.config.error|Error in upgrading config {1}.EventExUnable to obtain a DHCP lease.erroresx.problem.dhclient.lease.none|Unable to obtain a DHCP lease on interface {1}.EventExNo expiry time on offered DHCP lease.erroresx.problem.dhclient.lease.offered.noexpiry|No expiry time on offered DHCP lease from {1}.EventExThe maintenance mode state for some Data Processing Units may be out of sync with the host.warningThe maintenance mode state for some Data Processing Units may be out of sync with the host.esx.problem.dpu.maintenance.sync.failed|The maintenance mode state for Data Processing Units with ids '{dpus}' may be out of sync with the host.EventExSome drivers need special notice.warningDriver for device {1} is {2}. Please refer to KB article: {3}.esx.problem.driver.abnormal|Driver for device {1} is {2}. Please refer to KB article: {3}.EventExHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.esx.problem.entropy.config.error|Host is configured with external entropy source. 
Entropy daemon has become non functional because of an {1} change. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.empty|Host is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.inmemory.empty|Host is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.EventExCould not install image profile.erroresx.problem.esximage.install.error|Could not install image profile: {1}EventExHost doesn't meet image profile hardware requirements.erroresx.problem.esximage.install.invalidhardware|Host doesn't meet image profile '{1}' hardware requirements: {2}EventExCould not stage image profile.erroresx.problem.esximage.install.stage.error|Could not stage image profile '{1}': {2}ExtendedEventThe host can not support the applied EVC mode.warningesx.problem.evc.incompatible|The host can not support the applied EVC mode.EventExSkipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.device.invalid|Skipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.EventExSkipping interrupt routing entry with bad device pin: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.pin.invalid|Skipping interrupt routing entry with bad device pin: {1}. 
This is a BIOS bug.EventExFPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.clear|FPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.creditstall|FPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.devicespecific|FPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.lostcredit|FPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.oversubscription|FPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.devicespecific|FPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.timeout|FPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.unabletoroute|FPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.unknown|FPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.linkintegrity.devicespecific|FPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidCRC|FPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidtransmissionword|FPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link failure: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.linkfailure|FPIN FC link failure: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsignal|FPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsynchronization|FPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.primitivesequenceprotocolerror|FPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.uncorrectableFECerror|FPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN 
{2}.infoesx.problem.hardware.fpin.fc.linkintegrity.unknown|FPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.clear|FPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.creditstall|FPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.devicespecific|FPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.lostcredit|FPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.oversubscription|FPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.EventExIOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.erroresx.problem.hardware.ioapic.missing|IOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.ExtendedEventFailed to communicate with the BMC. IPMI functionality will be unavailable on this system.erroresx.problem.hardware.ipmi.bmc.bad|Failed to communicate with the BMC. IPMI functionality will be unavailable on this system.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Error tripped.EventExNVDIMM: Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.lifetime.error|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Error tripped.EventExNVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.erroresx.problem.hardware.nvd.health.lastshutdownstatus|NVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.EventExNVDIMM Configuration error detected.erroresx.problem.hardware.nvd.health.module.config.error|NVDIMM (handle {1}, idString {2}): Configuration error detected.EventExNVDIMM Controller failure detected.erroresx.problem.hardware.nvd.health.module.ctlr.fail|NVDIMM (handle {1}, idString {2}): Controller failure detected. Access to the device and its capabilities are lost.EventExNVDIMM Controller firmware error detected.erroresx.problem.hardware.nvd.health.module.ctlr.fw.error|NVDIMM (handle {1}, idString {2}): Controller firmware error detected.EventExNVDIMM Energy Source still charging.warningesx.problem.hardware.nvd.health.module.es.charging|NVDIMM (handle {1}, idString {2}): Energy Source still charging but does not have sufficient charge to support a backup. 
Persistency is temporarily lost for the device.EventExNVDIMM Energy Source failure detected.erroresx.problem.hardware.nvd.health.module.es.fail|NVDIMM (handle {1}, idString {2}): Energy Source failure detected. Persistency is lost for the device.EventExNVDIMM Previous ARM operation failed.warningesx.problem.hardware.nvd.health.module.ops.arm.fail|NVDIMM (handle {1}, idString {2}): Previous ARM operation failed.EventExNVDIMM Previous ERASE operation failed.warningesx.problem.hardware.nvd.health.module.ops.erase.fail|NVDIMM (handle {1}, idString {2}): Previous ERASE operation failed.EventExThe Platform flush failed. The restored data may be inconsistent.erroresx.problem.hardware.nvd.health.module.ops.flush.fail|NVDIMM (handle {1}, idString {2}): The Platform flush failed. The restored data may be inconsistent.EventExNVDIMM Last RESTORE operation failed.erroresx.problem.hardware.nvd.health.module.ops.restore.fail|NVDIMM (handle {1}, idString {2}): Last RESTORE operation failed.EventExNVDIMM Previous SAVE operation failed.erroresx.problem.hardware.nvd.health.module.ops.save.fail|NVDIMM (handle {1}, idString {2}): Previous SAVE operation failed.EventExNVDIMM Count of DRAM uncorrectable ECC errors above threshold.warningesx.problem.hardware.nvd.health.module.uce|NVDIMM (handle {1}, idString {2}): Count of DRAM uncorrectable ECC errors above threshold.EventExNVDIMM Vendor specific error.erroresx.problem.hardware.nvd.health.module.vendor.error|NVDIMM (handle {1}, idString {2}): Vendor specific error.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Error tripped.EventExNVDIMM: Module Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.lifetime.error|NVDIMM (handle {1}, idString {2}): Module Lifetime Error tripped.EventExNVDIMM: Module Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.temperature.error|NVDIMM (handle {1}, idString {2}): Module Temperature Error tripped.EventExNVDIMM: All data may be lost in the event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInPowerLoss|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of power loss.EventExNVDIMM: All data may be lost in the event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInShutdown|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of shutdown.EventExNVDIMM: Subsequent reads may fail or return invalid data and subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossNow|NVDIMM (handle {1}, idString {2}): Subsequent reads may fail or return invalid data and subsequent writes may not persist.EventExNVDIMM: Performance degraded.erroresx.problem.hardware.nvd.health.vmw.statusflags.perfDegraded|NVDIMM (handle {1}, idString {2}): Performance degraded.EventExNVDIMM: Write persistency loss may happen in event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInPowerLoss|NVDIMM (handle {1}, idString {2}): Write persistency loss may happen in event of power loss.EventExNVDIMM: Write persistency loss may happen in event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInShutdown|NVDIMM (handle 
{1}, idString {2}): Write persistency loss may happen in event of shutdown.EventExNVDIMM: Subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossNow|NVDIMM (handle {1}, idString {2}): Subsequent writes may not persist.ExtendedEventTPM 2.0 device detected but a connection cannot be established.warningesx.problem.hardware.tpm2.connection|TPM 2.0 device detected but a connection cannot be established.ExtendedEventTPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.nosha256|TPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.ExtendedEventTPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.notis|TPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.ExtendedEventUnable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.warningesx.problem.hardware.tpm2.ownership|Unable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.ExtendedEventesx.problem.hardware.tpm2.provisioning|EventExA physical disk has a predictive failure.warningA physical disk has a predictive failure.esx.problem.hcm.event.disk.predictive.failure|A physical disk has a predictive failure ({1}).ExtendedEventAn unread host kernel core dump has been found.warningesx.problem.host.coredump|An unread host kernel core dump has been found.EventExHostd crashed and a core file was created.warningesx.problem.hostd.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. This might have caused connections to the host to be dropped.EventExHostd crashed and an encrypted core file was created.warningesx.problem.hostd.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventThis host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.infoesx.problem.hyperthreading.unmitigated|This host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.ExtendedEventSome of the config entries in the VM inventory were skipped because they are invalid.warningesx.problem.inventory.invalidConfigEntries|Some of the config entries in the VM inventory were skipped because they are invalid.EventExAn iofilter installed on the host has stopped functioning.errorIOFilter {1} has stopped functioning due to an unrecoverable error. Reason: {2}esx.problem.iofilter.disabled|IOFilter {1} has stopped functioning due to an unrecoverable error. 
Reason: {2}EventExStorage I/O Control version mismatchinfoesx.problem.iorm.badversion|Host {1} cannot participate in Storage I/O Control(SIOC) on datastore {2} because the version number {3} of the SIOC agent on this host is incompatible with number {4} of its counterparts on other hosts connected to this datastore.EventExUnmanaged workload detected on SIOC-enabled datastoreinfoesx.problem.iorm.nonviworkload|An unmanaged I/O workload is detected on a SIOC-enabled datastore: {1}.EventExThe metadata store has degraded on one of the hosts in the cluster.errorThe metadata store has degraded on host {1}.esx.problem.metadatastore.degraded|The metadata store has degraded on host {1}.ExtendedEventThe metadata store is healthy.infoThe metadata store is healthy.esx.problem.metadatastore.healthy|The metadata store is healthy.ExtendedEventFailed to create default migration heapwarningesx.problem.migrate.vmotion.default.heap.create.failed|Failed to create default migration heap. This might be the result of severe host memory pressure or virtual address space exhaustion. Migration might still be possible, but will be unreliable in cases of extreme host memory pressure.EventExError with migration listen socketerroresx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown|The ESXi host's vMotion network server encountered an error while monitoring incoming network connections. Shutting down listener socket. vMotion might not be possible with this host until vMotion is manually re-enabled. Failure status: {1}EventExThe max_vfs module option has been set for at least one module.warningSetting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.esx.problem.module.maxvfs.set|Setting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.EventExLost Network Connectivityerroresx.problem.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Connectivity to DVPortserroresx.problem.net.dvport.connectivity.lost|Lost network connectivity on DVPorts: {1}. Physical NIC {2} is down.EventExNetwork Redundancy Degraded on DVPortswarningesx.problem.net.dvport.redundancy.degraded|Uplink redundancy degraded on DVPorts: {1}. Physical NIC {2} is down.EventExLost Network Redundancy on DVPortswarningesx.problem.net.dvport.redundancy.lost|Lost uplink redundancy on DVPorts: {1}. Physical NIC {2} is down.EventExNo IPv6 TSO supporterroresx.problem.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid fenceId configuration on dvPorterroresx.problem.net.fence.port.badfenceid|VMkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: invalid fenceId.EventExMaximum number of fence networks or portserroresx.problem.net.fence.resource.limited|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: maximum number of fence networks or ports have been reached.EventExSwitch fence property is not seterroresx.problem.net.fence.switch.unavailable|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: dvSwitch fence property is not set.EventExFirewall configuration operation failed. 
The changes were not applied.erroresx.problem.net.firewall.config.failed|Firewall configuration operation '{1}' failed. The changes were not applied to rule set {2}.EventExAdding port to Firewall failed.erroresx.problem.net.firewall.port.hookfailed|Adding port {1} to Firewall failed.EventExFailed to set gatewayerroresx.problem.net.gateway.set.failed|Cannot connect to the specified gateway {1}. Failed to set it.EventExNetwork memory pool thresholdwarningesx.problem.net.heap.belowthreshold|{1} free size dropped below {2} percent.EventExlag transition downwarningesx.problem.net.lacp.lag.transition.down|LACP warning: LAG {1} on VDS {2} is down.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse|LACP error: No peer response on uplink {1} for VDS {2}.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse.2|LACP error: No peer response on VDS {1}.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.incompatible|LACP error: Current teaming policy on VDS {1} is incompatible, supported is IP hash only.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.linkstatus|LACP error: Current teaming policy on VDS {1} is incompatible, supported link failover detection is link status only.EventExuplink is blockedwarningesx.problem.net.lacp.uplink.blocked|LACP warning: uplink {1} on VDS {2} is blocked.EventExuplink is disconnectedwarningesx.problem.net.lacp.uplink.disconnected|LACP warning: uplink {1} on VDS {2} got disconnected.EventExuplink duplex mode is differenterroresx.problem.net.lacp.uplink.fail.duplex|LACP error: Duplex mode across all uplink ports must be full, VDS {1} uplink {2} has different mode.EventExuplink speed is differenterroresx.problem.net.lacp.uplink.fail.speed|LACP error: Speed across all uplink ports must be same, VDS {1} uplink {2} has different speed.EventExAll uplinks must be activeerroresx.problem.net.lacp.uplink.inactive|LACP error: All uplinks on VDS {1} must be active.EventExuplink transition downwarningesx.problem.net.lacp.uplink.transition.down|LACP warning: uplink {1} on VDS {2} is moved out of link aggregation group.EventExInvalid vmknic specified in /Migrate/Vmknicwarningesx.problem.net.migrate.bindtovmk|The ESX advanced configuration option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Update the configuration option with a valid vmknic. Alternatively, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExUnsupported vMotion network latency detectedwarningesx.problem.net.migrate.unsupported.latency|ESXi has detected {1}ms round-trip vMotion network latency between host {2} and {3}. High latency vMotion networks are supported only if both ESXi hosts have been configured for vMotion latency tolerance.EventExFailed to apply for free portserroresx.problem.net.portset.port.full|Portset {1} has reached the maximum number of ports ({2}). Cannot apply for any more free ports.EventExVlan ID of the port is invaliderroresx.problem.net.portset.port.vlan.invalidid|{1} VLANID {2} is invalid. 
VLAN ID must be between 0 and 4095.EventExTry to register an unsupported portset classwarningesx.problem.net.portset.unsupported.psclass|{1} is not a VMware supported portset class, the relevant module must be unloaded.EventExVirtual NIC connection to switch failedwarningesx.problem.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningesx.problem.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Redundancywarningesx.problem.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExRSPAN src session conflict with teamingerroresx.problem.net.rspan.teaming.uplink.io.conflict|Failed to set RSPAN src session {1} on portset {2} because it disallows uplink I/O, which conflicts with {3} teaming policy {4}.EventExThe teaming policy has an invalid uplinkerroresx.problem.net.teaming.policy.invalid.uplink|Failed to update teaming policy {1} on portset {2} due to an invalid uplink {3} which disallows normal I/O.EventExFailed to set MTU on an uplinkwarningesx.problem.net.uplink.mtu.failed|VMkernel failed to set the MTU value {1} on the uplink {2}.EventExA duplicate IP address was detected on a vmknic interfacewarningesx.problem.net.vmknic.ip.duplicate|A duplicate IP address was detected for {1} on the interface {2}. The current owner is {3}.EventExLink state downwarningesx.problem.net.vmnic.linkstate.down|Physical NIC {1} linkstate is down.EventExLink state unstablewarningesx.problem.net.vmnic.linkstate.flapping|Taking down physical NIC {1} because the link is unstable.EventExNic Watchdog Resetwarningesx.problem.net.vmnic.watchdog.reset|Uplink {1} has recovered from a transient failure due to watchdog timeoutEventExNTP daemon stopped. Time correction out of bounds.erroresx.problem.ntpd.clock.correction.error|NTP daemon stopped. Time correction {1} > {2} seconds. Manually set the time and restart ntpd.EventExOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.osdata.partition.full|OSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212ExtendedEventConfigured OSData cannot be found. Please refer to KB article: KB 87212.warningConfigured OSData cannot be found. Please refer to KB article: KB 87212.esx.problem.osdata.path.notfound|Configured OSData cannot be found. 
Please refer to KB article: KB 87212.EventExVirtual machine killed as it kept using a corrupted memory page.erroresx.problem.pageretire.mce.injected|Killing virtual machine with config path {1} because at least {2} uncorrectable memory error machine check exceptions were injected for guest physical page {3} but the virtual machine's operating system kept using the page.EventExA virtual machine was killed as it kept using a corrupted memory page.errorThe virtual machine was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.esx.problem.pageretire.mce.injected.2|{1} was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.EventExMemory page retirement requested by platform firmware.infoesx.problem.pageretire.platform.retire.request|Memory page retirement requested by platform firmware. FRU ID: {1}. Refer to System Hardware Log: {2}EventExNumber of host physical memory pages that have been selected for retirement but could not yet be retired is high.warningesx.problem.pageretire.selectedbutnotretired.high|Number of host physical memory pages that have been selected for retirement but could not yet be retired is high: ({1})EventExNumber of host physical memory pages selected for retirement exceeds threshold.warningesx.problem.pageretire.selectedmpnthreshold.host.exceeded|Number of host physical memory pages that have been selected for retirement ({1}) exceeds threshold ({2}).ExtendedEventNo memory to allocate APD Eventwarningesx.problem.psastor.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExStorage Device close failed.warningesx.problem.psastor.device.close.failed|Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.psastor.device.detach.failed|Detach failed for device: {1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.psastor.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExStorage Device I/O Latency going highwarningesx.problem.psastor.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExPlugin's isLocal entry point failedwarningesx.problem.psastor.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.psastor.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.psastor.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.psastor.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExDevice has been turned off administratively.infoesx.problem.psastor.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. 
Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.psastor.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.psastor.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExMaximum number of storage pathserroresx.problem.psastor.psastorpath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.psastor.unsupported.plugin.type|Storage Device Allocation not supported for plugin type {1}EventExFailed to delete resource group.warningFailed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.esx.problem.resourcegroup.delete.failed|Failed to delete resource groups with names '{rgnames}'.EventExFailed to Set the Virtual Machine's Latency Sensitivitywarningesx.problem.sched.latency.abort|Unable to apply latency-sensitivity setting to virtual machine {1}. No valid placement on the host.EventExNo Cache Allocation Resourcewarningesx.problem.sched.qos.cat.noresource|Unable to support cache allocation for virtual machine {1}. Out of resources.EventExNo Cache Allocation Supportwarningesx.problem.sched.qos.cat.notsupported|Unable to support L3 cache allocation for virtual machine {1}. No processor capabilities.EventExNo Cache Monitoring Resourcewarningesx.problem.sched.qos.cmt.noresource|Unable to support cache monitoring for virtual machine {1}. Out of resources.EventExNo Cache Monitoring Supportwarningesx.problem.sched.qos.cmt.notsupported|Unable to support L3 cache monitoring for virtual machine {1}. No processor capabilities.ExtendedEventScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.warningScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.esx.problem.scratch.on.usb|Scratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.EventExScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.scratch.partition.full|Scratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212EventExSize of scratch partition is too small.warningSize of scratch partition {1} is too small. 
Recommended scratch partition size is {2} MiB.esx.problem.scratch.partition.size.small|Size of scratch partition {1} is too small. Recommended scratch partition size is {2} MiB.EventExNo scratch partition has been configured.warningNo scratch partition has been configured. Recommended scratch partition size is {} MiB.esx.problem.scratch.partition.unconfigured|No scratch partition has been configured. Recommended scratch partition size is {} MiB.ExtendedEventNo memory to allocate APD Eventwarningesx.problem.scsi.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExScsi Device close failed.warningesx.problem.scsi.device.close.failed|Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.scsi.device.detach.failed|Detach failed for device: {1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExFailed to attach filter to device.warningesx.problem.scsi.device.filter.attach.failed|Failed to attach filters to device '%s' during registration. Plugin load failed or the filter rules are incorrect.EventExInvalid XCOPY request for devicewarningesx.problem.scsi.device.invalid.xcopy.request|Invalid XCOPY request for device {1}. Host {2}, Device {3}, Plugin {4}, {5} sense, sense.key = {6}, sense.asc = {7}, sense.ascq = {8}: {9}EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.scsi.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExFailed to obtain INQUIRY data from the devicewarningesx.problem.scsi.device.io.inquiry.failed|Failed to get standard inquiry for device {1} from Plugin {2}.ExtendedEventScsi device queue parameters incorrectly set.warningesx.problem.scsi.device.io.invalid.disk.qfull.value|QFullSampleSize should be bigger than QFullThreshold. LUN queue depth throttling algorithm will not function as expected. Please set the QFullSampleSize and QFullThreshold disk configuration values in ESX correctly.EventExScsi Device I/O Latency going highwarningesx.problem.scsi.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExQErr cannot be changed on device. Please change it manually on the device if possible.warningesx.problem.scsi.device.io.qerr.change.config|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. The system is not configured to change the QErr setting of device. The QErr value supported by system is 0x{3}. Please check the SCSI ChangeQErrSetting configuration value for ESX.EventExScsi Device QErr setting changedwarningesx.problem.scsi.device.io.qerr.changed|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. 
The device was originally configured to the supported QErr setting of 0x{3}, but this has been changed and could not be changed back.EventExPlugin's isLocal entry point failedwarningesx.problem.scsi.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.scsi.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.scsi.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.scsi.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExFailed to apply NMP SATP option during device discovery.warningesx.problem.scsi.device.nmp.satp.option.failed|Invalid config parameter: \"{1}\" provided in the nmp satp claimrule, this setting was not applied while claiming the path {2}EventExDevice has been turned off administratively.infoesx.problem.scsi.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.scsi.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExThin Provisioned Device Nearing Capacitywarningesx.problem.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.scsi.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExvVol PE path going out of vVol-incapable adaptererroresx.problem.scsi.scsipath.badpath.unreachpe|Sanity check failed for path {1}. The path is to a vVol PE, but it goes out of adapter {2} which is not PE capable. Path dropped.EventExCannot safely determine vVol PEerroresx.problem.scsi.scsipath.badpath.unsafepe|Sanity check failed for path {1}. Could not safely determine if the path is to a vVol PE. Path dropped.EventExMaximum number of storage pathserroresx.problem.scsi.scsipath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.scsi.unsupported.plugin.type|Scsi Device Allocation not supported for plugin type {1}ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. 
Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.infoesx.problem.sgx.addpackage|Support for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.infoesx.problem.sgx.htenabled|Support for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.esx.problem.slp.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.EventExAll paths are downwarningesx.problem.storage.apd.start|Device or filesystem with identifier {1} has entered the All Paths Down state.EventExAll Paths Down timed out, I/Os will be fast failedwarningesx.problem.storage.apd.timeout|Device or filesystem with identifier {1} has entered the All Paths Down Timeout state after being in the All Paths Down state for {2} seconds. I/Os will now be fast failed.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.devicepor|Frequent PowerOn Reset Unit Attentions are occurring on device {1}. This might indicate a storage problem. Affected datastores: {2}EventExLost Storage Connectivityerroresx.problem.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.pathpor|Frequent PowerOn Reset Unit Attentions are occurring on path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExFrequent State Changes of Storage Pathinfoesx.problem.storage.connectivity.pathstatechanges|Frequent path state changes are occurring for path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExiSCSI discovery target login connection problemerroresx.problem.storage.iscsi.discovery.connect.error|iSCSI discovery to {1} on {2} failed. The iSCSI Initiator could not establish a network connection to the discovery address.EventExiSCSI Discovery target login errorerroresx.problem.storage.iscsi.discovery.login.error|iSCSI discovery to {1} on {2} failed. The Discovery target returned a login error of: {3}.EventExiSCSI iSns Discovery errorerroresx.problem.storage.iscsi.isns.discovery.error|iSCSI iSns discovery to {1} on {2} failed. 
({3} : {4}).EventExiSCSI Target login connection problemerroresx.problem.storage.iscsi.target.connect.error|Login to iSCSI target {1} on {2} failed. The iSCSI initiator could not establish a network connection to the target.EventExiSCSI Target login errorerroresx.problem.storage.iscsi.target.login.error|Login to iSCSI target {1} on {2} failed. Target returned login error of: {3}.EventExiSCSI target permanently removederroresx.problem.storage.iscsi.target.permanently.lost|The iSCSI target {2} was permanently removed from {1}.EventExiSCSI target was permanently removederroresx.problem.storage.iscsi.target.permanently.removed|The iSCSI target {1} was permanently removed from {2}.EventExDegraded Storage Path Redundancywarningesx.problem.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. Affected datastores: {3}.EventExLost Storage Path Redundancywarningesx.problem.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.esx.problem.swap.systemSwap.isPDL.cannot.remove|System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.EventExSystem swap was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningesx.problem.swap.systemSwap.isPDL.cannot.remove.2|System swap was affected by the PDL of {1} and was removed. System swap has been reconfigured.EventExSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure|System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.EventExSystem swap was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningesx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2|System swap was affected by the PDL of {1}. It was removed but the subsequent reconfiguration failed.ExtendedEventSystem logging is not configured.warningSystem logging is not configured on host {host.name}.esx.problem.syslog.config|System logging is not configured on host {host.name}. Please check Syslog options for the host under Configuration -> Software -> Advanced Settings in vSphere client.ExtendedEventSystem logs are stored on non-persistent storage.warningSystem logs on host {host.name} are stored on non-persistent storage.esx.problem.syslog.nonpersistent|System logs on host {host.name} are stored on non-persistent storage. 
Consult product documentation to configure a syslog server or a scratch partition.ExtendedEventTest with no argumentserroresx.problem.test.test0|Test with no argumentsEventExTest with both int and string argumentserroresx.problem.test.test2|Test with both {1} and {2}ExtendedEventUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.warningUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.esx.problem.unsupported.tls.protocols|Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.EventExA VFAT filesystem is full.erroresx.problem.vfat.filesystem.full.other|The VFAT filesystem {1} (UUID {2}) is full.EventExA VFAT filesystem, being used as the host's scratch partition, is full.erroresx.problem.vfat.filesystem.full.scratch|The host's scratch partition, which is the VFAT filesystem {1} (UUID {2}), is full.EventExConfigstore is reaching its critical size limit. Please refer to the KB 93362 for more details.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.EventExA ramdisk has a very high usage. Please refer to the KB 93362 for more details.warningRamdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.ExtendedEventAn operation on the root filesystem has failed.erroresx.problem.visorfs.failure|An operation on the root filesystem has failed.EventExThe root filesystem's file table is full.erroresx.problem.visorfs.inodetable.full|The root filesystem's file table is full. As a result, the file {1} could not be created by the application '{2}'.EventExA ramdisk is full.erroresx.problem.visorfs.ramdisk.full|The ramdisk '{1}' is full. 
As a result, the file {2} could not be written.EventExA ramdisk's file table is full.erroresx.problem.visorfs.ramdisk.inodetable.full|The file table of the ramdisk '{1}' is full. As a result, the file {2} could not be created by the application '{3}'.EventExConfig store is reaching its critical size limit.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.EventExA ramdisk has a very high usage.warningRamdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left.EventExA VM could not fault in a page. The VM is terminated as further progress is impossible.erroresx.problem.vm.kill.unexpected.fault.failure|The VM using the config file {1} could not fault in a guest physical page from the hypervisor level swap file at {2}. The VM is terminated as further progress is impossible.EventExA virtual machine could not fault in a page. It is terminated as further progress is impossible.errorThe virtual machine could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleesx.problem.vm.kill.unexpected.fault.failure.2|{1} could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleEventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire.64|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA virtual machine contained a host physical page that was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.errorThe virtual machine contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2|{1} contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.noSwapResponse|The VM using the config file {1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA virtual machine did not respond to swap actions. 
It is terminated as further progress is impossible.errorThe virtual machine did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.esx.problem.vm.kill.unexpected.noSwapResponse.2|{1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA VM is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.erroresx.problem.vm.kill.unexpected.vmtrack|The VM using the config file {1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.errorThe virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.esx.problem.vm.kill.unexpected.vmtrack.2|{1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA user world daemon of a virtual machine could not fault in a page. The VM is terminated as further progress is impossible.errorThe user world daemon of this virtual machine could not fault in a page. The virtual machine is terminated as further progress is impossible.esx.problem.vm.kill.unexpected.vmx.fault.failure.2|The user world daemon of {1} could not fault in a page. The virtual machine is terminated as further progress is impossible.EventExMulti-extent ATS-only VMFS Volume unable to use ATSerroresx.problem.vmfs.ats.incompatibility.detected|Multi-extent ATS-only volume '{1}' ({2}) is unable to use ATS because HardwareAcceleratedLocking is disabled on this host: potential for introducing filesystem corruption. Volume should not be used from other hosts.EventExDevice Backing VMFS has lost ATS Supporterroresx.problem.vmfs.ats.support.lost|ATS-Only VMFS volume '{1}' not mounted. Host does not support ATS or ATS initialization has failed.EventExVMFS Locked By Remote Hosterroresx.problem.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.erroresx.problem.vmfs.extent.offline|An attached device {1} may be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system came onlineinfoesx.problem.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Heartbeat Corruption Detected.erroresx.problem.vmfs.heartbeat.corruptondisk|At least one corrupt on-disk heartbeat region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Volume Connectivity Restoredinfoesx.problem.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfoesx.problem.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. 
Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterroresx.problem.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerroresx.problem.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Volume will remain in read-only metadata mode with limited write support until journal can be created.EventExTrying to acquire lock on an already locked file. - File descriptionerror{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}esx.problem.vmfs.lock.busy.filedesc|{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}EventExTrying to acquire lock on an already locked file. FilenameerrorLock(s) held on file {1} by other host(s).esx.problem.vmfs.lock.busy.filename|Lock(s) held on file {1} by other host(s).EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk.v2|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected.errorInconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.lockmode.inconsistency.detected|Inconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExFailed to mount NFS volumeerroresx.problem.vmfs.nfs.mount.failed|NFS mount failed for {1}:{2} volume {3}. Status: {4}EventExLost connection to NFS servererroresx.problem.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExvmknic configured for NFS has been removedwarningesx.problem.vmfs.nfs.vmknic.removed|vmknic {1} removed, NFS{2} datastore {3} configured with the vmknic will be inaccessible.EventExNFS volume average I/O Latency has exceeded configured threshold for the current configured periodwarningesx.problem.vmfs.nfs.volume.io.latency.exceed.threshold.period|NFS volume {1} average I/O latency {2}(us) has exceeded threshold {3}(us) for last {4} minutesEventExNFS volume I/O Latency going highwarningesx.problem.vmfs.nfs.volume.io.latency.high|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExNFS volume I/O Latency exceeding thresholdwarningesx.problem.vmfs.nfs.volume.io.latency.high.exceed.threshold|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds. 
Exceeded threshold {4} microsecondsEventExNo space on NFS volume.warningesx.problem.vmfs.nfs.volume.no.space|{1}: No space on NFS volume.EventExVMFS Resource Corruption Detectederroresx.problem.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected on spanned volume.errorInconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanned.lockmode.inconsistency.detected|Inconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.EventExIncompatible VMFS span state detected.errorIncompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanstate.incompatibility.detected|Incompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExRemote logging host has become unreachable.erroresx.problem.vmsyslogd.remote.failure|The host "{1}" has become unreachable. Remote logging to this host has stopped.ExtendedEventLogging to storage has failed.erroresx.problem.vmsyslogd.storage.failure|Logging to storage has failed. Logs are no longer being stored locally on this host.EventExThe configured log directory cannot be used. The default directory will be used instead.erroresx.problem.vmsyslogd.storage.logdir.invalid|The configured log directory {1} cannot be used. The default directory {2} will be used instead.EventExLog daemon has failed for an unexpected reason.erroresx.problem.vmsyslogd.unexpected|Log daemon has failed for an unexpected reason: {1}EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.errorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.EventExvSAN detected LSN mismatch in mirrorswarningvSAN detected LSN mismatch in mirrors for object {1}.esx.problem.vob.vsan.dom.lsnmismatcherror|vSAN detected LSN mismatch in mirrors for object {1}.EventExResync encountered no space errorwarningResync encountered no space error for component {1} on disk {2}.esx.problem.vob.vsan.dom.nospaceduringresync|Resync encountered no space error for component {1} on disk {2}. Resync will resume once space is freed up on this disk. 
Need around {3}MB to resync the component on this diskEventExResync is delayed.warningResync is delayed for component {1} on disk {2} for object {3}.esx.problem.vob.vsan.dom.resyncdecisiondelayed|Resync is delayed for component {1} on disk {2} until data availability is regained for object {3} on the remote site.EventExResync timed outwarningResync timed out for component {2} on disk {3}.esx.problem.vob.vsan.dom.resynctimeout|Resync timed out as no progress was made in {1} minute(s) for component {2} on disk {3}. Resync will be tried again for this component. The remaining resync is around {4}MB.EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskerrorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskunrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.unrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.EventExNVMe critical health warning for disk. The disk's backup device has failed.errorNVMe critical health warning for disk {1}. The disk's backup device has failed.esx.problem.vob.vsan.lsom.backupfailednvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's backup device has failed.EventExOffline event on component.warningOffline event issued for component: {1}, flag: {2}, reason: {3}.esx.problem.vob.vsan.lsom.componentoffline|Offline event issued for component: {1}, flag: {2}, reason: {3}.EventExvSAN Node: Near node component count limit.warningvSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).esx.problem.vob.vsan.lsom.componentthreshold|vSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).EventExEvacuation has failed for device and it will be retried by DDH.errorEvacuation has failed for device {1} and it will be retried by DDH.esx.problem.vob.vsan.lsom.ddhEvacFailed|Evacuation has failed for device {1} and it will be retried by DDH.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.devicerepair|vSAN device {1} is being repaired due to I/O failures, and will be out of service until the repair is complete. If the device is part of a dedup disk group, the entire disk group will be out of service until the repair is complete.EventExvSAN device has high latency. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithhighlatency|vSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.EventExvSAN device smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} smart health status is impending failure. 
It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithsmartfailure|vSAN device {1} smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.EventExvSAN device is under permanent failure.errorvSAN device {1} is under permanent failure.esx.problem.vob.vsan.lsom.diskerror|vSAN device {1} is under permanent failure.EventExFailed to create a new disk group.errorFailed to create new disk group {1}. The system has reached the maximum amount of disks groups allowed {2} for the current amount of memory {3}. Add more memory.esx.problem.vob.vsan.lsom.diskgrouplimit|Failed to create new disk group {1}. The system has reached the maximum amount of disks groups allowed {2} for the current amount of memory {3}. Add more memory.EventExvSAN diskgroup log is congested.errorvSAN diskgroup {1} log is congestedesx.problem.vob.vsan.lsom.diskgrouplogcongested|vSAN diskgroup {1} log is congested.EventExvSAN disk group is under congestion. It will be remediated. No action is needed.warningvSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.esx.problem.vob.vsan.lsom.diskgroupundercongestion|vSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.EventExFailed to add disk to disk group.errorFailed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.esx.problem.vob.vsan.lsom.disklimit2|Failed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.EventExvSAN device is under propagated error.errorvSAN device {1} is under propagated erroresx.problem.vob.vsan.lsom.diskpropagatederror|vSAN device {1} is under propagated error.EventExvSAN device is under propagated permanent error.errorvSAN device {1} is under propagated permanent erroresx.problem.vob.vsan.lsom.diskpropagatedpermerror|vSAN device {1} is under propagated permanent error.EventExvSAN device is unhealthy.errorvSAN device {1} is unhealthyesx.problem.vob.vsan.lsom.diskunhealthy|vSAN device {1} is unhealthy.EventExEvacuation failed for device due to insufficient resources and it will be retried.errorEvacuation failed for device {1} due to insufficient resources and it will be retried.esx.problem.vob.vsan.lsom.evacFailedInsufficientResources|Evacuation failed for device {1} due to insufficient resources and it will be retried. Please make resources available for evacuation.EventExDeleted invalid metadata component.warningDeleted invalid metadata component: {1}.esx.problem.vob.vsan.lsom.invalidMetadataComponent|Deleted invalid metadata component: {1}.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.metadataURE|vSAN device {1} encountered an unrecoverable read error. This disk will be evacuated and rebuilt. If the device is part of a dedup disk group, the entire disk group will be evacuated and rebuilt.EventExNVMe disk critical health warning for disk. Disk is now read only.errorNVMe critical health warning for disk {1}. Disk is now read only.esx.problem.vob.vsan.lsom.readonlynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1} is: The NVMe disk has become read only.EventExNVMe critical health warning for disk. 
The disk has become unreliable.errorNVMe critical health warning for disk {1}. The disk has become unreliable.esx.problem.vob.vsan.lsom.reliabilitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk has become unreliable.EventExNVMe critical health warning for disk. The disk's spare capacity is below threshold.errorNVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.esx.problem.vob.vsan.lsom.sparecapacitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.storagepoolURE|vSAN device {1} encountered an unrecoverable read error. This disk will be rebuilt.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.storagepoolrepair|vSAN device {1} is being repaired due to I/O failures and will be out of service until the repair is complete.EventExNo response for I/O on vSAN device.errorNo response for I/O on vSAN device {1}.esx.problem.vob.vsan.lsom.storagepoolstuckio|No response for I/O on vSAN device {1}.EventExvSAN device detected suspended I/Os.errorvSAN device {1} detected suspended I/Os.esx.problem.vob.vsan.lsom.stuckio|vSAN device {1} detected suspended I/Os. Taking the host out of service to avoid affecting the vSAN cluster.EventExvSAN device detected stuck I/O error.errorvSAN device {1} detected stuck I/O error.esx.problem.vob.vsan.lsom.stuckiooffline|vSAN device {1} detected stuck I/O error. Marking the device as offline.EventExvSAN device is under propagated stuck I/O error.errorvSAN device {1} is under propagated stuck I/O error.esx.problem.vob.vsan.lsom.stuckiopropagated|vSAN device {1} is under propagated stuck I/O error. Marking the device as offline.EventExvSAN device detected I/O timeout error.errorvSAN device {1} detected I/O timeout error.esx.problem.vob.vsan.lsom.stuckiotimeout|vSAN device {1} detected I/O timeout error. This may lead to stuck I/O.EventExNVMe critical health warning for disk. The disk's temperature is beyond threshold.errorNVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.esx.problem.vob.vsan.lsom.temperaturenvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.EventExvSAN device has gone offline.errorvSAN device {1} has gone offline.esx.problem.vob.vsan.pdl.offline|vSAN device {1} has gone offline.EventExA ZDOM object is paused due to continuous fail-stops.warningZDOM object {1} is paused on host {2}, numFailStops={3}.esx.problem.vob.vsan.zdom.failstoppaused|ZDOM object {1} is paused on host {2}, numFailStops={3}.ExtendedEventTest with no arguments.infoesx.problem.vobdtestcorrelator.test.0|Test with no argumentsEventExTest with int argument.infoesx.problem.vobdtestcorrelator.test.1d|Test with int argument: {1}EventExTest with string argument.infoesx.problem.vobdtestcorrelator.test.1s|Test with string argument: {1}EventExTest with huge string argument.infoesx.problem.vobdtestcorrelator.test.hugestr|Test with huge string argument: {1}EventExVpxa crashed and a core file was created.warningesx.problem.vpxa.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. 
This might have caused connections to the host to be dropped.EventExVpxa crashed and an encrypted core file was created.warningesx.problem.vpxa.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventvSAN clustering services have been disabled.warningvSAN clustering and directory services have been disabled thus will be no longer available.esx.problem.vsan.clustering.disabled|vSAN clustering and directory services have been disabled thus will be no longer available.EventExData component found on witness host.warningData component {1} found on witness host is ignored.esx.problem.vsan.dom.component.datacomponent.on.witness.host|Data component {1} found on witness host is ignored.EventExvSAN Distributed Object Manager failed to initializewarningvSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.esx.problem.vsan.dom.init.failed.status|vSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.EventExOne or more disks exceed its/their warning usage of estimated endurance threshold.infoOne or more disks exceed its/their warning usage of estimated endurance threshold.esx.problem.vsan.health.ssd.endurance|Disks {Disk Name} in Cluster {Cluster Name} have exceeded warning usage of their estimated endurance threshold {Disk Percentage Threshold}, currently at {Disk Percentage Used} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds the estimated endurance threshold.errorOne of the disks exceeds the estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.error|Disks {1} have exceeded their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds 90% of its estimated endurance threshold.warningOne of the disks exceeds 90% of its estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.warning|Disks {1} have exceeded 90 percent usage of their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.errorOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.esx.problem.vsan.health.vsanesa.pdl|Disk {1} is detected with PDL in vSAN ESA Cluster. Please check the host for further details.EventExvSAN device Memory/SSD congestion has changed.infoLSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.esx.problem.vsan.lsom.congestionthreshold|LSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.EventExA vmknic added to vSAN network configuration doesn't have valid IP. 
Network is not ready.errorvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configurations and therefore the vSAN node doesn't have network connectivity.esx.problem.vsan.net.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configurations and therefore the vSAN node doesn't have network connectivity.ExtendedEventvSAN doesn't have any redundancy in its network configuration.warningvSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.lost|vSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.ExtendedEventvSAN is operating on reduced network redundancy.warningvSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.reduced|vSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.ExtendedEventvSAN doesn't have any network configuration for use.errorvSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.esx.problem.vsan.no.network.connectivity|vSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.EventExA vmknic added to vSAN network configuration doesn't have valid IP. It will not be in use.warningvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed that may cause problems.esx.problem.vsan.vmknic.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed that may cause problems.EventEx Failed to add shared virtual disk. Maximum count reachederroresx.problem.vscsi.shared.vmdk.add.failure.max.count|Failed to add shared virtual disk. Maximum number of shared vmdks supported per ESX host is {1}EventExNo free slots availableerroresx.problem.vscsi.shared.vmdk.no.free.slot.available|No Free slot available. Maximum number of virtual machines supported in MSCS cluster is {1}EventExFailed to power on virtual machines on shared VMDK with running virtual machineerroresx.problem.vscsi.shared.vmdk.virtual.machine.power.on.failed|Two or more virtual machines (\"{1}\" and \"{2}\") sharing same virtual disk are not allowed to be Powered-On on same host.EventExVVol container has gone offline.erroresx.problem.vvol.container.offline|VVol container {1} has gone offline: isPEAccessible {2}, isVPAccessible {3}.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. 
Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.EventExCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated.thirdPartyProv|CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. 
Please refer to KB 95798 for more details.EventExApplication consistent sync completed.infoApplication consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Application consistent sync completed for virtual machine {vm.name} on host {host.name}.Application consistent sync completed for virtual machine {vm.name}.Application consistent sync completed.hbr.primary.AppQuiescedDeltaCompletedEvent|Application consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)ExtendedEventConnection to VR Server restored.infoConnection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Connection to VR Server restored for virtual machine {vm.name} on host {host.name}.Connection to VR Server restored for virtual machine {vm.name}.Connection to VR Server restored.hbr.primary.ConnectionRestoredToHbrServerEvent|Connection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExSync stopped.warningSync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped: {reason.@enum.hbr.primary.ReasonForDeltaAbort}hbr.primary.DeltaAbortedEvent|Sync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}EventExSync completed.infoSync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync completed for virtual machine {vm.name} on host {host.name}.Sync completed for virtual machine {vm.name}.Sync completed.hbr.primary.DeltaCompletedEvent|Sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventSync started.infoSync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync started by {userName} for virtual machine {vm.name} on host {host.name}.Sync started by {userName} for virtual machine {vm.name}.Sync started by {userName}.hbr.primary.DeltaStartedEvent|Sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFile system consistent sync completed.infoFile system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.File system consistent sync completed for virtual machine {vm.name} on host {host.name}.File system consistent sync completed for virtual machine {vm.name}.File system consistent sync completed.hbr.primary.FSQuiescedDeltaCompletedEvent|File system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)EventExFailed to start sync.errorFailed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name} on host {host.name}: 
{reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartDeltaEvent|Failed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExFailed to start full sync.errorFailed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name} on host {host.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartSyncEvent|Failed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExDisk replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}hbr.primary.InvalidDiskReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}EventExVirtual machine replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}hbr.primary.InvalidVmReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}ExtendedEventVR Server does not support network compression.warningVR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support network compression for virtual machine {vm.name} on host {host.name}.VR Server does not support network compression for virtual machine {vm.name}.VR Server does not support network 
compression.hbr.primary.NetCompressionNotOkForServerEvent|VR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server supports network compression.infoVR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server supports network compression for virtual machine {vm.name} on host {host.name}.VR Server supports network compression for virtual machine {vm.name}.VR Server supports network compression.hbr.primary.NetCompressionOkForServerEvent|VR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExNo connection to VR Server.warningNo connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server: {reason.@enum.hbr.primary.ReasonForNoServerConnection}hbr.primary.NoConnectionToHbrServerEvent|No connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}EventExVR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}errorVR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}hbr.primary.NoProgressWithHbrServerEvent|VR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}ExtendedEventPrepare Delta Time exceeds configured RPO.warningPrepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name}.Prepare Delta Time exceeds configured RPO.hbr.primary.PrepareDeltaTimeExceedsRpoEvent|Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventQuiescing is not supported for this virtual machine.warningQuiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing is not supported for virtual machine {vm.name} on host {host.name}.Quiescing is not supported for virtual machine {vm.name}.Quiescing is not supported for this virtual machine.hbr.primary.QuiesceNotSupported|Quiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server is compatible with the configured RPO.infoVR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in 
cluster {computeResource.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name}.VR Server is compatible with the configured RPO.hbr.primary.RpoOkForServerEvent|VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server does not support the configured RPO.warningVR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server does not support the configured RPO for virtual machine {vm.name}.VR Server does not support the configured RPO.hbr.primary.RpoTooLowForServerEvent|VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFull sync completed.infoFull sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync completed for virtual machine {vm.name} on host {host.name}.Full sync completed for virtual machine {vm.name}.Full sync completed.hbr.primary.SyncCompletedEvent|Full sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventFull sync started.infoFull sync started for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync started for virtual machine {vm.name} on host {host.name}.Full sync started for virtual machine {vm.name}.Full sync started.hbr.primary.SyncStartedEvent|Full sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventReplication paused.infoReplication paused for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Replication paused for virtual machine {vm.name} on host {host.name}.Replication paused for virtual machine {vm.name}.Replication paused.hbr.primary.SystemPausedReplication|Replication paused by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.warningQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.hbr.primary.UnquiescedDeltaCompletedEvent|Quiescing failed or the virtual machine is powered off. 
Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).EventExReplication configuration changed.infoReplication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} on host {host.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).hbr.primary.VmReplicationConfigurationChangedEvent|Replication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).AccountCreatedEventAccount createdinfoAn account was createdAccount {spec.id} was created on host {host.name} <EventLongDescription id="vim.event.AccountCreatedEvent"> <description> An account has been created on the host </description> </EventLongDescription> AccountRemovedEventAccount removedinfoAccount {account} was removedAccount {account} was removed on host {host.name} <EventLongDescription id="vim.event.AccountRemovedEvent"> <description> An account has been removed from the host </description> </EventLongDescription> AccountUpdatedEventAccount updatedinfoAccount {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}' <EventLongDescription id="vim.event.AccountUpdatedEvent"> <description> An account has been updated on the host </description> </EventLongDescription> AdminPasswordNotChangedEventAdministrator password not changedinfoThe default password for the root user has not been changedThe default password for the root user on the host {host.name} has not been changed <EventLongDescription id="vim.event.AdminPasswordNotChangedEvent"> <description> The default password for the Administrator user on the host has not been changed </description> <cause> <description> You have not changed the password for the Administrator user on the host so the default password is still active </description> <action> Change the password for the Administrator user on the host </action> </cause> </EventLongDescription> AlarmAcknowledgedEventAlarm acknowledgedinfoAcknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}'Acknowledged alarm '{alarm.name}' on {entity.name}AlarmActionTriggeredEventAlarm action triggeredinfoAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarmClearedEventAlarm clearedinfoManually cleared alarm 
'{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}AlarmCreatedEventAlarm createdinfoCreated alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}'Created alarm '{alarm.name}' on {entity.name}AlarmEmailCompletedEventAlarm email sentinfoAlarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}AlarmEmailFailedEventCannot send alarm emailerrorAlarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to} <EventLongDescription id="vim.event.AlarmEmailFailedEvent"> <description> An error occurred while sending email notification of a triggered alarm </description> <cause> <description>Failed to send email for a triggered alarm</description> <action>Check the vCenter Server SMTP settings for sending email notifications</action> </cause> </EventLongDescription> AlarmEvent<Alarm Event>info<internal>AlarmReconfiguredEventAlarm reconfiguredinfoReconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}'Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}. <EventLongDescription id="vim.event.AlarmReconfiguredEvent"> <description> An alarm has been reconfigured </description> <cause> <description>A user has reconfigured an alarm</description> </cause> </EventLongDescription> AlarmRemovedEventAlarm removedinfoRemoved alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}'Removed alarm '{alarm.name}' on {entity.name}AlarmScriptCompleteEventAlarm script completedinfoAlarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}AlarmScriptFailedEventAlarm script not completederrorAlarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg} <EventLongDescription id="vim.event.AlarmScriptFailedEvent"> <description> The vCenter Server logs this event if an error occurs while running a script after an alarm triggers. </description> <cause> <description>There was an error running the script</description> <action>Fix the script or failure condition</action> </cause> </EventLongDescription> AlarmSnmpCompletedEventAlarm SNMP trap sentinfoAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarmSnmpFailedEventAlarm SNMP trap not senterrorAlarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg} <EventLongDescription id="vim.event.AlarmSnmpFailedEvent"> <description> The vCenter Server logs this event if an error occurs while sending an SNMP trap when an alarm triggers. </description> <cause> <description>An SNMP trap could not be sent for a triggered alarm</description> <action>Check the vCenter Server SNMP settings. 
Make sure that the vCenter Server network can handle SNMP packets.</action> </cause> </EventLongDescription> AlarmStatusChangedEventAlarm status changedinfoAlarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}AllVirtualMachinesLicensedEventAll virtual machines are licensedinfoAll running virtual machines are licensedAlreadyAuthenticatedSessionEventAlready authenticatedinfoUser cannot logon since the user is already logged onAuthorizationEvent<Authorization Event>info<internal>BadUsernameSessionEventInvalid user nameerrorCannot login {userName}@{ipAddress} <EventLongDescription id="vim.event.BadUsernameSessionEvent"> <description> A user attempted to log in with an unknown or invalid username </description> <cause> <description> The username is unknown to the system </description> <action> Use a username that is included in the system user directory </action> <action> On Linux, verify that the user directory is correctly configured </action> <action> If you are using Active Directory, check the health of the domain controller </action> </cause> <cause> <description> The user provided an invalid password </description> <action> Supply the correct password </action> </cause> </EventLongDescription> CanceledHostOperationEventCanceled host operationinfoThe operation performed on host {host.name} was canceledThe operation performed on host {host.name} was canceledThe operation was canceledThe operation performed on host {host.name} in {datacenter.name} was canceled <EventLongDescription id="vim.event.CanceledHostOperationEvent"> <description> An operation performed on the host was canceled </description> <cause> <description> A previous event in the sequence of events will provide more information about the cause of this cancellation </description> </cause> </EventLongDescription> ClusterComplianceCheckedEventChecked cluster for complianceinfoChecked cluster {computeResource.name} for complianceCluster was checked for compliance with profile {profile.name}Checked cluster for compliance <EventLongDescription id="vim.event.ClusterComplianceCheckedEvent"> <description> The cluster was checked for compliance with a cluster profile </description> <cause> <description> The user initiated a compliance check on the cluster against a cluster profile </description> </cause> <cause> <description> A scheduled has initiated a compliance check for the cluster against a cluster profile </description> </cause> </EventLongDescription> ClusterCreatedEventCluster createdinfoCreated cluster {computeResource.name}Created in folder {parent.name}Created cluster {computeResource.name} in {datacenter.name}ClusterDestroyedEventCluster deletedinfoRemoved cluster {computeResource.name}Removed clusterRemoved cluster {computeResource.name} in datacenter {datacenter.name}ClusterEvent<Cluster Event>info<internal>ClusterOvercommittedEventCluster overcommittederrorInsufficient capacity in cluster {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource 
configurationInsufficient capacity in cluster {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.ClusterOvercommittedEvent"> <description> The cumulative CPU and/or memory resources of all hosts in the cluster are not adequate to satisfy the resource reservations of all virtual machines in the cluster </description> <cause> <description>You attempted to power on a virtual machine bypassing vCenter Server. This condition occurs when you attempt the power on using the vSphere Client directly connected to the host.</description> <action>In a DRS cluster, do not power on virtual machines bypassing vCenter Server</action> </cause> <cause> <description>A host was placed in Maintenance, Standby, or Disconnected Mode</description> <action>Bring any host in Maintenance, Standby, or Disconnected mode out of these modes</action> </cause> </EventLongDescription> ClusterReconfiguredEventCluster reconfiguredinfoReconfigured cluster {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Cluster reconfiguredReconfigured cluster {computeResource.name} in datacenter {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ClusterReconfiguredEvent"> <description> The cluster configuration was changed. The cluster configuration includes information about the DRS, DPM, EVC and vSphere HA settings of the cluster. All DRS rules are also stored in the cluster configuration. Editing the cluster configuration may trigger an invocation of DRS and/or enabling/disabling of vSphere HA on each host in the cluster. </description> </EventLongDescription> ClusterStatusChangedEventCluster status changedinfoConfiguration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.ClusterStatusChangedEvent"> <description> The cluster status has changed. This status is the status of the root resource pool that encompasses the entire cluster. A cluster status change may be accompanied by the removal of a configuration issue if one was previously detected. A cluster status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The cluster status changed to yellow</description> <action>Add more resources (more hosts), or reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The cluster status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> CustomFieldDefAddedEventCustom field definition addedinfoCreated new custom field definition {name}CustomFieldDefEvent<Custom Field Definition Event>info<internal>CustomFieldDefRemovedEventCustom field definition removedinfoRemoved field definition {name}CustomFieldDefRenamedEventCustom field definition renamedinfoRenamed field definition from {name} to {newName}CustomFieldEvent<Custom Field Event>info<internal>CustomFieldValueChangedEventCustom field value changedinfoChanged custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} in {datacenter.name} from '{prevState}' to '{value}'CustomizationEvent<Customization Event>info<internal>CustomizationFailed<An error occurred during customization>infoAn error occurred during customization, Reason: {reason.@enum.CustomizationFailed.ReasonCode}An error occurred during customization on VM {vm.name}, Reason: {reason.@enum.CustomizationFailed.ReasonCode}. 
See customization log at {logLocation} on the guest OS for details.CustomizationLinuxIdentityFailedCustomization Linux Identity FailederrorAn error occurred while setting up Linux identity. See log file '{logLocation}' on guest OS for details. <EventLongDescription id="vim.event.CustomizationLinuxIdentityFailed"> <description> The guest operating system Linux distribution is not supported by the customization scripts. Please refer to the VMware vSphere Compatibility Matrix for the list of the supported Linux distributions. </description> <cause> <description> Customization of the target guest operating system Linux distribution is not supported. </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> CustomizationNetworkSetupFailedCannot complete customization network setuperrorAn error occurred while setting up network properties of the guest OS. See the log file {logLocation} in the guest OS for details. <EventLongDescription id="vim.event.CustomizationNetworkSetupFailed"> <description> The customization scripts failed to set the parameters in the corresponding configuration files for Linux or in the Windows registry </description> <cause> <description> The Customization Specification contains an invalid host name or domain name </description> <action> Review the guest operating system log files for this event for more details </action> <action> Provide a valid host name for the target guest operating system. The name must comply with the host name and domain name definitions in RFC 952, 1035, 1123, 2181. </action> </cause> <cause> <description> Could not find a NIC with the MAC address specified in the Customization Package </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that there was no change in the virtual NIC MAC address between the creation of the Customization Package and its deployment. Deployment occurs during the first boot of the virtual machine after customization has been scheduled. </action> </cause> <cause> <description> The customization code needs read/write permissions for certain configuration files. These permissions were not granted to the 'root' account on Linux or to the account used by the VMware Tools Service on the Windows guest operating system. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Grant read/write permissions to the 'root' account for Linux or to the account used by the VMware Tools Service on the Windows guest operating system and the registry keys that need to be modified by the customization code </action> </cause> </EventLongDescription> CustomizationStartedEventStarted customizationinfoStarted customization of VM {vm.name}. Customization log located at {logLocation} in the guest OS.CustomizationSucceededCustomization succeededinfoCustomization of VM {vm.name} succeeded. Customization log located at {logLocation} in the guest OS.CustomizationSysprepFailedCannot complete customization SyspreperrorThe version of Sysprep {sysprepVersion} provided for customizing VM {vm.name} does not match the version of guest OS {systemVersion}. See the log file {logLocation} in the guest OS for more information. 
<EventLongDescription id="vim.event.CustomizationSysprepFailed"> <description> The sysprep files in the folder corresponding to the selected target guest operating system are not compatible with the actual version of the guest operation system </description> <cause> <description> The sysprep files in the folder corresponding to the target guest operating system (for example Windows XP) are for a different guest operating system (for example Windows 2003) </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> <cause> <description> The sysprep files in the folder corresponding to the guest operating system are for a different Service Pack, for example the guest operating system is Windows XP SP2 and but the sysprep files are for Windows XP SP1. </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> </EventLongDescription> CustomizationUnknownFailureUnknown customization errorerrorAn error occurred while customizing VM {vm.name}. For details reference the log file {logLocation} in the guest OS. <EventLongDescription id="vim.event.CustomizationUnknownFailure"> <description> The customization component failed to set the required parameters inside the guest operating system </description> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the registry keys used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on registry keys used by the customization code </action> </cause> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the files and folders used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on the files and folders used by the customization code </action> </cause> <cause> <description> On Linux, an invalid or unsupported time zone is passed to the customization scripts and the time zone configuration failed as a result </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that a supported time zone is passed in Customization Specification. 
</action> </cause> <cause> <description> On Linux, the guest operating system 'root' account does not have read/write permissions for the configuration files that the customization scripts need to modify ('/etc/hosts') </description> <action> Grant read/write permissions for the configuration files to the guest operating system 'root' account </action> </cause> <cause> <description> To enable guest customization on Linux, in case open-vm-tools are used, you must also install the deployPkg plug-in. </description> <action> Follow kb.vmware.com/kb/2075048 to install the open-vm-tools deployPkg plug-in. </action> </cause> <cause> <description> Customization of the target guest operating system is not supported </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> DVPortgroupCreatedEventdvPort group createdinfodvPort group {net.name} was added to switch {dvs}.dvPort group {net.name} in {datacenter.name} was added to switch {dvs.name}.DVPortgroupDestroyedEventdvPort group deletedinfodvPort group {net.name} was deleted.dvPort group {net.name} in {datacenter.name} was deleted.DVPortgroupEventdvPort group eventinfodvPort group eventdvPort group eventDVPortgroupReconfiguredEventdvPort group reconfiguredinfodvPort group {net.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}dvPort group {net.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DVPortgroupRenamedEventdvPort group renamedinfodvPort group {oldName} was renamed to {newName}.dvPort group {oldName} in {datacenter.name} was renamed to {newName}DasAdmissionControlDisabledEventvSphere HA admission control disabledinfovSphere HA admission control disabled for cluster {computeResource.name}vSphere HA admission control disabledvSphere HA admission control disabled for cluster {computeResource.name} in {datacenter.name}DasAdmissionControlEnabledEventvSphere HA admission control enabledinfovSphere HA admission control enabled for cluster {computeResource.name}vSphere HA admission control enabledvSphere HA admission control enabled for cluster {computeResource.name} in {datacenter.name}DasAgentFoundEventvSphere HA agent foundinfoRe-established contact with a primary host in this vSphere HA clusterDasAgentUnavailableEventvSphere HA agent unavailableerrorUnable to contact a primary vSphere HA agent in cluster {computeResource.name}Unable to contact a primary vSphere HA agentUnable to contact a primary vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasAgentUnavailableEvent"> <description> vCenter Server is not able to contact any good primary hosts in the vSphere HA cluster. vSphere HA protection may not be available for virtual machines running in the cluster. In addition, you cannot enable or reconfigure vSphere HA on hosts in the cluster until contact between vCenter Server and a good primary host is restored. </description> <cause> <description> There was a network outage, and all hosts show up in the inventory as "not responding" </description> <action>Restore the network</action> </cause> <cause> <description>All the primary hosts in the cluster failed</description> <action> If the failed primary hosts cannot be restored, disable vSphere HA on the cluster, wait for the Unconfigure vSphere HA tasks to complete on all hosts, and re-enable vSphere HA on the cluster </action> </cause> </EventLongDescription> DasClusterIsolatedEventAll vSphere HA hosts isolatederrorAll hosts in the vSphere HA cluster {computeResource.name} were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster {computeResource.name} in {datacenter.name} were isolated from the network. Check the network configuration for proper network redundancy in the management network.DasDisabledEventvSphere HA disabled for clusterinfovSphere HA disabled for cluster {computeResource.name}vSphere HA disabled for this clustervSphere HA disabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasDisabledEvent"> <description> vSphere HA has been disabled on this host due to a user action. vSphere HA is disabled when a host is disconnected from vCenter Server or placed into maintenance or standby mode. Virtual machines on other hosts in the cluster will not be failed over to this host in the event of a host failure. In addition, if the host is disconnected, any virtual machines running on this host will not be failed if the host fails. 
Further, no attempt will be made by vSphere HA VM and Application Monitoring to reset VMs. </description> </EventLongDescription> DasEnabledEventvSphere HA enabled for clusterinfovSphere HA enabled for cluster {computeResource.name}vSphere HA enabled for this clustervSphere HA enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasEnabledEvent"> <description> vSphere HA has been enabled on this host due to a user action. vSphere HA is enabled when a host is added to or moved into a vSphere HA cluster or when vSphere HA is enabled on a cluster. If the host was already in a vSphere HA cluster, vSphere HA will be enabled when the host is reconnected to vCenter Server or brought out of maintenance or standby mode. vSphere HA will attempt to protect any VMs that are running on the host at the time that HA is enabled on it. </description> </EventLongDescription> DasHostFailedEventvSphere HA host failederrorA possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name} in cluster {computeResource.name} in {datacenter.name}DasHostIsolatedEventvSphere HA host isolatedwarningHost {isolatedHost.name} has been isolated from cluster {computeResource.name}Host {isolatedHost.name} has been isolatedHost has been isolated from clusterHost {isolatedHost.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasHostIsolatedEvent"> <description> vSphere HA detected that the host is network isolated. When a host is in this state, vSphere HA applies the power-off or shutdown host isolation response to virtual machines running on the host, and continues to monitor the virtual machines that are left powered on. While a host is in this state, vSphere HA's ability to restart virtual machines after a failure is impacted. vSphere HA only powers off or shuts down a virtual machine if the agent on the host determines that a master agent is responsible for the virtual machine. </description> <cause> <description> A host is network isolated if both of the following conditions are met: (1) isolation addresses have been configured and the host is unable to ping them; (2) the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <action> Resolve the networking problem that is preventing the host from pinging its isolation addresses and communicating with other hosts. Ensure that there is redundancy in the management networks used by vSphere HA. With redundancy, vSphere HA is able to communicate over more than one path thus reducing the chance of a host becoming isolated. 
</action> </cause> </EventLongDescription> DatacenterCreatedEventDatacenter createdinfoCreated in folder {parent.name}Created datacenter {datacenter.name}Created datacenter {datacenter.name} in folder {parent.name}DatacenterEvent<Datacenter Event>info<internal>DatacenterRenamedEventDatacenter renamedinfoRenamed datacenterRenamed datacenter from {oldName} to {newName}Renamed datacenter from {oldName} to {newName}DatastoreCapacityIncreasedEventDatastore capacity increasedinfoDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytesDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytes in {datacenter.name}DatastoreDestroyedEventDatastore deletedinfoRemoved unconfigured datastore {datastore.name}Removed unconfigured datastore {datastore.name}DatastoreDiscoveredEventDatastore discoveredinfoDiscovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name}Discovered datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreDiscoveredEvent"> <description> A datastore was discovered on a host </description> <cause> <description> A host that has access to this datastore was added to the datacenter </description> </cause> <cause> <description> The storage backing this datastore was unmasked to a host in the datacenter </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host and the datastore was visible on at least one other host in the datacenter prior to this operation. </description> </cause> </EventLongDescription> DatastoreDuplicatedEventDatastore duplicatederrorMultiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detectedMultiple datastores named {datastore} detected on host {host.name} in {datacenter.name}DatastoreEvent<Datastore Event>info<internal>DatastoreFileCopiedEventFile or directory copied to datastoreinfoCopy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Copy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileDeletedEventFile or directory deletedinfoDeletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Deletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileEvent<Datastore File Event>info<internal>DatastoreFileMovedEventFile or directory moved to datastoreinfoMove of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Move of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from 
'{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreIORMReconfiguredEventReconfigured Storage I/O Control on datastoreinfoReconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}DatastorePrincipalConfiguredDatastore principal configuredinfoConfigured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal}Configured datastore principal {datastorePrincipal} on host {host.name} in {datacenter.name}DatastoreRemovedOnHostEventDatastore removed from hostinfoRemoved datastore {datastore.name} from {host.name}Removed datastore {datastore.name}Removed datastore {datastore.name} from {host.name} in {datacenter.name}DatastoreRenamedEventDatastore renamedinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name}DatastoreRenamedOnHostEventDatastore renamed from hostinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreRenamedOnHostEvent"> <description> A datastore was renamed on a host managed by vCenter Server </description> <cause> <description> vCenter Server discovered datastore on a host and renamed the datastore because it already exists in the vCenter Server inventory under a different name. vCenter Server might also have renamed the datastore because the name conflicts with another datastore in the same datacenter. </description> </cause> </EventLongDescription> DrsDisabledEventDRS disabledinfoDisabled DRS on cluster {computeResource.name}Disabled DRSDisabled DRS on cluster {computeResource.name} in datacenter {datacenter.name}DrsEnabledEventDRS enabledinfoEnabled DRS on cluster {computeResource.name} with automation level {behavior}Enabled DRS with automation level {behavior}Enabled DRS on {computeResource.name} with automation level {behavior} in {datacenter.name}DrsEnteredStandbyModeEventDRS entered standby modeinfoDRS put {host.name} into standby modeDRS put {host.name} into standby modeDRS put the host into standby modeDRS put {host.name} into standby modeDrsEnteringStandbyModeEventDRS entering standby modeinfoDRS is putting {host.name} into standby modeDRS is putting {host.name} into standby modeDRS is putting the host into standby modeDRS is putting {host.name} into standby modeDrsExitStandbyModeFailedEventDRS cannot exit the host out of standby modeerrorDRS cannot move {host.name} out of standby modeDRS cannot move {host.name} out of standby modeDRS cannot move the host out of standby modeDRS cannot move {host.name} out of standby mode <EventLongDescription id="vim.event.DrsExitStandbyModeFailedEvent"> <description> DPM failed to power on a host in standby mode. DPM tried to power on a host using IPMI, iLO or Wake-on-LAN protocol, but the host did not power on. 
</description> <cause> <description>DPM could not communicate with the BMC on the host</description> <action>Verify the IPMI/iLO credentials entered in vCenter Server</action> <action>Verify that LAN access is enabled in the BMC</action> </cause> <cause> <description>The vMotion NIC on the host does not support Wake-on-LAN</description> <action>Select a vMotion NIC that supports Wake-on-LAN</action> </cause> </EventLongDescription> DrsExitedStandbyModeEventDRS exited standby modeinfoDRS moved {host.name} out of standby modeDRS moved {host.name} out of standby modeDRS moved the host out of standby modeDRS moved {host.name} out of standby modeDrsExitingStandbyModeEventDRS exiting standby modeinfoDRS is moving {host.name} out of standby modeDRS is moving {host.name} out of standby modeDRS is moving the host out of standby modeDRS is moving {host.name} out of standby modeDrsInvocationFailedEventDRS invocation not completederrorDRS invocation not completedDRS invocation not completedDRS invocation not completed <EventLongDescription id="vim.event.DrsInvocationFailedEvent"> <description> A DRS invocation failed to complete successfully. This condition can occur for a variety of reasons, some of which may be transient. </description> <cause> <description>An error was encountered during a DRS invocation</description> <action>Disable and re-enable DRS</action> </cause> </EventLongDescription> DrsRecoveredFromFailureEventDRS has recovered from the failureinfoDRS has recovered from the failureDRS has recovered from the failureDRS has recovered from the failureDrsResourceConfigureFailedEventCannot complete DRS resource configurationerrorUnable to apply DRS resource settings on host. {reason.msg}. This can significantly reduce the effectiveness of DRS.Unable to apply DRS resource settings on host {host.name} in {datacenter.name}. {reason.msg}. This can significantly reduce the effectiveness of DRS. <EventLongDescription id="vim.event.DrsResourceConfigureFailedEvent"> <description> The DRS resource settings could not be successfully applied to a host in the cluster. This condition is typically transient. </description> <cause> <description>DRS resource settings could not be applied to a host.</description> <action>DRS generates resource settings that map the cluster values to the host. However, in this case, the values could not be successfully applied to the host. This is typically a transient error caused by delayed synchronization from DRS to the host. If this condition persists, enable debug logging in vpxa and contact VMware Support. 
</action> </cause> </EventLongDescription> DrsResourceConfigureSyncedEventDRS resource configuration synchronizedinfoResource configuration specification returns to synchronization from previous failureResource configuration specification returns to synchronization from previous failure on host '{host.name}' in {datacenter.name}DrsRuleComplianceEventVM is now compliant with DRS VM-Host affinity rulesinfo{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} is now compliant with DRS VM-Host affinity rulesvirtual machine on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} in {datacenter.name} is now compliant with DRS VM-Host affinity rulesDrsRuleViolationEventVM is violating a DRS VM-Host affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} is violating a DRS VM-Host affinity rulevirtual machine on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host affinity ruleDrsSoftRuleViolationEventThe VM is violating a DRS VM-Host soft affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} is violating a DRS VM-Host soft affinity rulevirtual machine on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host soft affinity ruleDrsVmMigratedEventDRS VM migratedinfoDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name}DRS migrated {vm.name} from {sourceHost.name} to {host.name}DRS migrated {vm.name} from {sourceHost.name}Migrated from {sourceHost.name} to {host.name} by DRSDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmMigratedEvent"> <description> A virtual machine was migrated based on a DRS recommendation. The recommendation might have been made to achieve better load balancing in the cluster or to evacuate a host in the cluster that is being put into Standby or Maintenance Mode. 
</description> <cause> <description>DRS recommended the migration of a virtual machine</description> </cause> </EventLongDescription> DrsVmPoweredOnEventDRS VM powered oninfoDRS powered on {vm.name} on {host.name}DRS powered on {vm.name} on {host.name}DRS powered on {vm.name}DRS powered on the virtual machine on {host.name}DRS powered on {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmPoweredOnEvent"> <description> A virtual machine was powered on by the user and DRS chose a host for the virtual machine based on the current cluster load distribution combined with the virtual machine's resource requirements </description> <cause> <description>DRS chose a host for a virtual machine that was being powered on</description> </cause> </EventLongDescription> DuplicateIpDetectedEventDuplicate IP detectedinfoVirtual machine {macAddress} has a duplicate IP {duplicateIP}Virtual machine {macAddress} on host {host.name} has a duplicate IP {duplicateIP}DvpgImportEventImport Operation eventinfoImport operation with type {importType} was performed on {net.name}Import operation with type {importType} was performed on {net.name}DvpgRestoreEventRestore Operation eventinfoRestore operation was performed on {net.name}Restore operation was performed on {net.name}DvsCreatedEventvSphere Distributed Switch createdinfoA vSphere Distributed Switch {dvs.name} was createdA vSphere Distributed Switch {dvs.name} was created in {datacenter.name}.DvsDestroyedEventvSphere Distributed Switch deletedinfovSphere Distributed Switch {dvs.name} was deleted.vSphere Distributed Switch {dvs.name} in {datacenter.name} was deleted.DvsEventvSphere Distributed Switch eventinfovSphere Distributed Switch eventvSphere Distributed Switch eventDvsHealthStatusChangeEventHealth check status of the switch changed.infoHealth check status changed in vSphere Distributed Switch {dvs.name} on host {host.name}Health check status changed in vSphere Distributed Switch {dvs.name}Health check status was changed in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}DvsHostBackInSyncEventThe vSphere Distributed Switch configuration on the host was synchronized with that of the vCenter Server.infoThe vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.DvsHostJoinedEventHost joined the vSphere Distributed SwitchinfoThe host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name}.The host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostLeftEvent Host left vSphere Distributed SwitchinfoThe host {hostLeft.name} left the vSphere Distributed Switch {dvs.name}.The host {hostLeft.name} left the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostStatusUpdatedHost status changed on the vSphere Distributed SwitchinfoThe host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name}.The host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostWentOutOfSyncEventThe vSphere Distributed Switch configuration on the host differed from that of the vCenter Server.warningThe vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server. 
<EventLongDescription id="vim.event.DvsHostWentOutOfSyncEvent"> <description> The vSphere Distributed Switch configuration on the host differed from that of the vCenter Server </description> <cause> <description> The host was not connected to the vCenter Server when updates were sent </description> </cause> <cause> <description> vCenter Server failed to push the vSphere Distributed Switch configuration to the host in the past</description> </cause> </EventLongDescription> DvsImportEventImport Operation eventinfoImport operation with type {importType} was performed on {dvs.name}Import operation with type {importType} was performed on {dvs.name}DvsMergedEventvSphere Distributed Switch mergedinfovSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name}.vSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name} in {datacenter.name}.DvsPortBlockedEventdvPort blockedinfoThe dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortConnectedEventdvPort connectedinfoThe dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortCreatedEventdvPort createdinfoNew ports were created in the vSphere Distributed Switch {dvs.name}.New ports were created in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDeletedEventdvPort deletedinfoPorts were deleted in the vSphere Distributed Switch {dvs.name}.Deleted ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDisconnectedEventdvPort disconnectedinfoThe dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortEnteredPassthruEventdvPort in passthrough modeinfoThe dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortExitedPassthruEventdvPort not in passthrough modeinfoThe dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortJoinPortgroupEventA dvPort was moved into the dvPort group.infoThe dvPort {portKey} was moved into the dvPort group {portgroupName}.The dvPort {portKey} was moved into the dvPort group {portgroupName} in {datacenter.name}.DvsPortLeavePortgroupEventA dvPort was moved out of the dvPort group.infoThe dvPort {portKey} was moved out of the dvPort group {portgroupName}.The dvPort {portKey} was moved out of the dvPort group {portgroupName} in {datacenter.name}.DvsPortLinkDownEventdvPort link was downinfoThe dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortLinkUpEventdvPort link was upinfoThe dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortReconfiguredEventdvPort reconfiguredinfoPorts were reconfigured in the vSphere Distributed Switch {dvs.name}.
Ports changed {portKey}.
Changes are {configChanges}Reconfigured ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.
Ports changed {portKey}.
Changes are {configChanges}DvsPortRuntimeChangeEventdvPort runtime information changed.infoThe dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortUnblockedEventdvPort unblockedinfoThe dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortVendorSpecificStateChangeEventdvPort vendor specific state changed.infoThe dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsReconfiguredEventvSphere Distributed Switch reconfiguredinfoThe vSphere Distributed Switch {dvs.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}The vSphere Distributed Switch {dvs.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DvsRenamedEventvSphere Distributed Switch renamedinfoThe vSphere Distributed Switch {oldName} was renamed to {newName}.The vSphere Distributed Switch {oldName} in {datacenter.name} was renamed to {newName}.DvsRestoreEventRestore Operation eventinfoRestore operation was performed on {dvs.name}Restore operation was performed on {dvs.name}DvsUpgradeAvailableEventAn upgrade for the vSphere Distributed Switch is available.infoAn upgrade for vSphere Distributed Switch {dvs.name} is available. An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is available.DvsUpgradeInProgressEventAn upgrade for the vSphere Distributed Switch is in progress.infoAn upgrade for vSphere Distributed Switch {dvs.name} is in progress.An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is in progress.DvsUpgradeRejectedEventCannot complete the upgrade for the vSphere Distributed SwitchinfoAn upgrade for vSphere Distributed Switch {dvs.name} was rejected.Cannot complete an upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name}DvsUpgradedEventThe vSphere Distributed Switch was upgraded.infovSphere Distributed Switch {dvs.name} was upgraded.vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} was upgraded.EnteredMaintenanceModeEventEntered maintenance modeinfoHost {host.name} in {datacenter.name} has entered maintenance modeHost {host.name} in {datacenter.name} has entered maintenance modeEnter maintenance mode completed. All virtual machine operations are disabledHost {host.name} in {datacenter.name} has entered maintenance modeEnteredStandbyModeEventEntered standby modeinfoEntered standby modeThe host {host.name} is in standby modeEnteringMaintenanceModeEventEntering maintenance modeinfoHost {host.name} has started to enter maintenance modeHost {host.name} has started to enter maintenance modeStarted to enter maintenance mode. 
Waiting for virtual machines to shut down, suspend, or migrateHost {host.name} in {datacenter.name} has started to enter maintenance modeEnteringStandbyModeEventEntering standby modeinfoEntering standby modeThe host {host.name} is entering standby modeErrorUpgradeEventUpgrade errorerror{message} <EventLongDescription id="vim.event.ErrorUpgradeEvent"> <description> An error occurred during agent upgrade </description> </EventLongDescription> Event<Event>info<internal>ExitMaintenanceModeEventExit maintenance modeinfoHost {host.name} has exited maintenance modeHost {host.name} has exited maintenance modeExited maintenance modeHost {host.name} in {datacenter.name} has exited maintenance modeExitStandbyModeFailedEventCannot exit standby modeerrorCould not exit standby modeThe host {host.name} could not exit standby modeExitedStandbyModeEventExited standby modeinfoExited standby modeThe host {host.name} is no longer in standby modeExitingStandbyModeEventExiting standby modeinfoExiting standby modeThe host {host.name} is exiting standby modeFailoverLevelRestoredvSphere HA failover resources are sufficientinfoSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name}Sufficient resources are available to satisfy vSphere HA failover levelSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name} in {datacenter.name}GeneralEventGeneral eventinfoGeneral event: {message}GeneralHostErrorEventHost errorerrorError detected on {host.name}: {message}Error detected on {host.name}: {message}{message}Error detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostErrorEvent"> <description> An error occurred on the host </description> <cause> <description> The agent cannot send heartbeats because of a networking related failure on host </description> </cause> <cause> <description> The agent failed to update the configuration file on host </description> </cause> <cause> <description> The agent failed to save the configuration file to disk on host </description> </cause> <cause> <description> The provisioning module failed to load. As a result, all provisioning operations will fail on host. 
</description> </cause> </EventLongDescription> GeneralHostInfoEventHost informationinfoIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostInfoEvent"> <description> A general information event occurred on the host </description> </EventLongDescription> GeneralHostWarningEventHost warningwarningIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostWarningEvent"> <description> A general warning event occurred on the host </description> <cause> <description> Virtual machine creation might fail because the agent was unable to retrieve virtual machine creation options from the host </description> </cause> </EventLongDescription> GeneralUserEventUser eventuserUser logged event: {message} <EventLongDescription id="vim.event.GeneralUserEvent"> <description> A general user event occurred on the host </description> <cause> <description> A user initiated an action on the host </description> </cause> </EventLongDescription> GeneralVmErrorEventVM errorerrorError detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name}: {message}{message} on {host.name}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmErrorEvent"> <description> An error occurred on the virtual machine </description> </EventLongDescription> GeneralVmInfoEventVM informationinfoIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmInfoEvent"> <description> A general information event occurred on the virtual machine </description> </EventLongDescription> GeneralVmWarningEventVM warningwarningIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmWarningEvent"> <description> A general warning event occurred on the virtual machine </description> </EventLongDescription> GhostDvsProxySwitchDetectedEventThe vSphere Distributed Switch corresponding to the proxy switches on the host does not exist in vCenter Server or does not contain this host.infoThe vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host does not exist in vCenter Server or does not contain this host.The vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host {host.name} does not exist in vCenter Server or does not contain this host. 
<EventLongDescription id="vim.event.GhostDvsProxySwitchDetectedEvent"> <description> vCenter Server found a vSphere Distributed Switch proxy switch on the host that does not match any vSphere Distributed Switch in vCenter Server </description> <cause> <description> The vSphere Distributed Switch corresponding to the vSphere Distributed Switch proxy switch on the host was deleted while host was disconnected from the vCenter Server </description> </cause> <cause> <description> The host is no longer a member of the vSphere Distributed Switch that the proxy switch in the host corresponds to </description> </cause> </EventLongDescription> GhostDvsProxySwitchRemovedEventA ghost proxy switch on the host was resolved.infoA ghost proxy switch {switchUuid} on the host was resolved.A ghost proxy switch {switchUuid} on the host {host.name} was resolved.GlobalMessageChangedEventMessage changedinfoThe message changed: from '{prevMessage}' to '{message}'HealthStatusChangedEventStatus changeinfo{componentName} status changed from {oldStatus} to {newStatus}HostAddFailedEventCannot add hosterrorCannot add host {hostname}Cannot add host {hostname}Cannot add host {hostname} to datacenter {datacenter.name} <EventLongDescription id="vim.event.HostAddFailedEvent"> <description> Adding a host failed </description> </EventLongDescription> HostAddedEventHost AddedinfoAdded host {host.name}Added host {host.name}Added host {host.name} to datacenter {datacenter.name}HostAdminDisableEventHost administrator access disabledwarningAdministrator access to the host is disabledAdministrator access to the host {host.name} is disabled <EventLongDescription id="vim.event.HostAdminDisableEvent"> <description> Host permissions have been changed so that only the account used for vCenter Server operations has Administrator permissions </description> <cause> <description> This condition occurs when vCenter Server removes all other Administrator access to the host because the host has been placed in Lockdown Mode. The host can be managed by vCenter Server only and Only vCenter Server can re-enable Administrator access for other accounts. 
</description> </cause> </EventLongDescription> HostAdminEnableEventHost administrator access enabledwarningAdministrator access to the host has been restoredAdministrator access to the host {host.name} has been restored <EventLongDescription id="vim.event.HostAdminEnableEvent"> <description> vCenter Server has restored Administrator permissions for host user accounts whose permissions were disabled by Lockdown Mode </description> <cause> <description> This condition occurs when vCenter Server restores Administrator access to host user accounts that lost their Administrator permissions when the host was placed in Lockdown Mode </description> </cause> </EventLongDescription> HostCnxFailedAccountFailedEventCannot connect host and configure management accounterrorCannot connect {host.name}: cannot configure management accountCannot connect {host.name}: cannot configure management accountCannot connect: cannot configure management accountCannot connect {host.name} in {datacenter.name}: cannot configure management account <EventLongDescription id="vim.event.HostCnxFailedAccountFailedEvent"> <description> Could not connect to the host because setting up a management account failed </description> <cause> <description> The account used by vCenter Server to manage the host could not be configured </description> </cause> </EventLongDescription> HostCnxFailedAlreadyManagedEventCannot connect host - already managederrorCannot connect {host.name}: already managed by {serverName}Cannot connect {host.name}: already managed by {serverName}Cannot connect: already managed by {serverName}Cannot connect {host.name} in {datacenter.name}: already managed by {serverName} <EventLongDescription id="vim.event.HostCnxFailedAlreadyManagedEvent"> <description> Could not connect to the host because it is already being managed by a different vCenter Server instance. 
</description> <cause> <description> The host is already being managed by a different vCenter Server instance </description> <action> Remove the host from the inventory for the other vCenter Server instance </action> <action> Force the addition of the host to the current vCenter Server instance </action> </cause> </EventLongDescription> HostCnxFailedBadCcagentEventCannot connect host - incorrect CcagenterrorCannot connect {host.name} : server agent is not respondingCannot connect {host.name} : server agent is not respondingCannot connect: server agent is not respondingCannot connect host {host.name} in {datacenter.name} : server agent is not responding <EventLongDescription id="vim.event.HostCnxFailedBadCcagentEvent"> <description> Could not connect to the host because the host agent did not respond </description> <cause> <description> No response was received from the host agent </description> <action> Restart the host agent on the ESX/ESXi host </action> </cause> </EventLongDescription> HostCnxFailedBadUsernameEventCannot connect host - incorrect user nameerrorCannot connect {host.name}: incorrect user name or passwordCannot connect {host.name}: incorrect user name or passwordCannot connect: incorrect user name or passwordCannot connect {host.name} in {datacenter.name}: incorrect user name or password <EventLongDescription id="vim.event.HostCnxFailedBadUsernameEvent"> <description> Could not connect to the host due to an invalid username and password combination </description> <cause> <description> Invalid username and password combination </description> <action> Use the correct username and password </action> </cause> </EventLongDescription> HostCnxFailedBadVersionEventCannot connect host - incompatible versionerrorCannot connect {host.name}: incompatible versionCannot connect {host.name}: incompatible versionCannot connect: incompatible versionCannot connect {host.name} in {datacenter.name}: incompatible version <EventLongDescription id="vim.event.HostCnxFailedBadVersionEvent"> <description> Could not connect to the host due to an incompatible vSphere Client version </description> <cause> <description> The version of the vSphere Client is incompatible with the ESX/ESXi host so the connection attempt failed </description> <action> Download and use a compatible vSphere Client version to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedCcagentUpgradeEventCannot connect host - Ccagent upgradeerrorCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect: did not install or upgrade vCenter agent serviceCannot connect host {host.name} in {datacenter.name}. Did not install or upgrade vCenter agent service. 
<EventLongDescription id="vim.event.HostCnxFailedCcagentUpgradeEvent"> <description> Could not connect to the host because a host agent upgrade or installation is in process </description> <cause> <description> The host agent is being upgraded or installed on the host </description> <action> Wait for the host agent upgrade or installation to complete </action> </cause> </EventLongDescription> HostCnxFailedEventCannot connect hosterrorCannot connect host {host.name}: error connecting to hostCannot connect host {host.name}: error connecting to hostCannot connect: error connecting to hostCannot connect {host.name} in {datacenter.name}: error connecting to host <EventLongDescription id="vim.event.HostCnxFailedEvent"> <description> Could not connect to the host due to an unspecified condition </description> <cause> <description> Unknown cause of failure </description> </cause> </EventLongDescription> HostCnxFailedNetworkErrorEventCannot connect host - network errorerrorCannot connect {host.name}: network errorCannot connect {host.name}: network errorCannot connect: network errorCannot connect {host.name} in {datacenter.name}: network error <EventLongDescription id="vim.event.HostCnxFailedNetworkErrorEvent"> <description> Could not connect to the host due to a network error </description> <cause> <description> A Network error occurred while connecting to the host </description> <action> Verify that host networking is configured correctly </action> </cause> </EventLongDescription> HostCnxFailedNoAccessEventCannot connect host - no accesserrorCannot connect {host.name}: account has insufficient privilegesCannot connect {host.name}: account has insufficient privilegesCannot connect: account has insufficient privilegesCannot connect host {host.name} in {datacenter.name}: account has insufficient privileges <EventLongDescription id="vim.event.HostCnxFailedNoAccessEvent"> <description> Could not connect to the host due to insufficient account privileges </description> <cause> <description> The account used to connect to host does not have host access privileges </description> <action> Use an account that has sufficient privileges to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedNoConnectionEventCannot connect host - no connectionerrorCannot connect {host.name}Cannot connect {host.name}Cannot connect to hostCannot connect host {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostCnxFailedNoConnectionEvent"> <description> Could not connect to the host because the host is not in the network </description> <cause> <description> The host that you are attempting to connect to is not present in the network </description> <action> Verify that host networking is configured correctly and the host is connected to the same network as vCenter Server </action> </cause> </EventLongDescription> HostCnxFailedNoLicenseEventCannot connect host - no licenseerrorCannot connect {host.name}: not enough CPU licensesCannot connect {host.name}: not enough CPU licensesCannot connect: not enough CPU licensesCannot connect {host.name} in {datacenter.name}: not enough CPU licenses <EventLongDescription id="vim.event.HostCnxFailedNoLicenseEvent"> <description> Could not connect to the host due to a licensing issue </description> <cause> <description> There are not enough licenses to add the host to the vCenter Server inventory. This event is accompanied by a fault that specifies the missing licenses required to add the host successfully. 
</description> <action> Add the necessary licenses to vCenter Server and try adding the host again </action> </cause> </EventLongDescription> HostCnxFailedNotFoundEventCannot connect host - host not founderrorCannot connect {host.name}: incorrect host nameCannot connect {host.name}: incorrect host nameCannot connect: incorrect host nameCannot connect {host.name} in {datacenter.name}: incorrect host name <EventLongDescription id="vim.event.HostCnxFailedNotFoundEvent"> <description> Could not connect to the host because vCenter Server could not resolve the host name </description> <cause> <description> Unable to resolve the host name of the host </description> <action> Verify that the correct host name has been supplied for the host </action> <action> Configure the host to use a known-good (resolvable) host name </action> <action> Add the host name to the DNS server </action> </cause> </EventLongDescription> HostCnxFailedTimeoutEventCannot connect host - time-outerrorCannot connect {host.name}: time-out waiting for host responseCannot connect {host.name}: time-out waiting for host responseCannot connect: time-out waiting for host responseCannot connect {host.name} in {datacenter.name}: time-out waiting for host response <EventLongDescription id="vim.event.HostCnxFailedTimeoutEvent"> <description> Could not connect to the host because the connection attempt timed out </description> <cause> <description> A timeout occurred while attempting to connect to the host </description> </cause> </EventLongDescription> HostComplianceCheckedEventChecked host for complianceinfoHost {host.name} checked for compliance with profile {profile.name}Host {host.name} checked for compliance with profile {profile.name}Checked host for compliance with profile {profile.name}Host {host.name} checked for compliance. 
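The host profile compliance entries in this catalog (HostComplianceCheckedEvent, HostCompliantEvent, HostNonCompliantEvent) are emitted when a compliance check runs against an attached host profile. The following pyVmomi sketch shows one way such a check might be triggered for a single host; the vCenter address, credentials and host name are placeholders, not values from this catalog, and error handling is omitted.

# Sketch: trigger a host profile compliance check for one host
# (placeholder vCenter address, credentials and host name).
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVim.task import WaitForTask
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="secret", sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

# Locate the host object by name.
view = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True)
host = next(h for h in view.view if h.name == "esx01.example.com")
view.DestroyView()

# Run the compliance check against whichever profile is attached to the host.
for profile in content.hostProfileManager.profile:
    if host in profile.entity:
        task = profile.CheckProfileCompliance_Task(entity=[host])
        WaitForTask(task)
        for result in task.info.result:
            print(result.entity.name, result.complianceStatus)

Disconnect(si)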
<EventLongDescription id="vim.event.HostComplianceCheckedEvent"> <description> The host was checked for compliance with a host profile </description> <cause> <description> The user initiated a compliance check on the host against a host profile </description> </cause> <cause> <description> A scheduled task initiated a compliance check for the host against a host profile </description> </cause> </EventLongDescription> HostCompliantEventHost compliant with profileinfoHost is in compliance with the attached profile.Host {host.name} is in compliance with the attached profileHostConfigAppliedEventHost configuration changes applied to hostinfoHost configuration changes applied to {host.name}Host configuration changes applied to {host.name}Host configuration changes applied.Host configuration changes applied.HostConnectedEventHost connectedinfoConnected to {host.name}Connected to {host.name}Established a connectionConnected to {host.name} in {datacenter.name}HostConnectionLostEventHost connection losterrorHost {host.name} is not respondingHost {host.name} is not respondingHost is not respondingHost {host.name} in {datacenter.name} is not responding <EventLongDescription id="vim.event.HostConnectionLostEvent"> <description> Connection to the host has been lost </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostDasDisabledEventvSphere HA agent disabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is disabledvSphere HA agent on {host.name} is disabledvSphere HA agent on this host is disabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is disabledHostDasDisablingEventDisabling vSphere HAinfovSphere HA is being disabled on {host.name}vSphere HA is being disabled on {host.name}Disabling vSphere HAvSphere HA is being disabled on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}HostDasEnabledEventvSphere HA agent enabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is enabledvSphere HA agent on {host.name} is enabledvSphere HA agent on this host is enabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is enabledHostDasEnablingEventEnabling host vSphere HA agentwarningEnabling vSphere HA agent on {host.name}Enabling vSphere HA agent on {host.name}Enabling vSphere HA agentEnabling vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.HostDasEnablingEvent"> <description> vSphere HA is being enabled on this host. 
</description> </EventLongDescription> HostDasErrorEventvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error {message}: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}HostDasEvent<Host vSphere HA Event>info<internal>HostDasOkEventvSphere HA agent configuredinfovSphere HA agent on host {host.name} is configured correctlyvSphere HA agent on host {host.name} is configured correctlyvSphere HA agent is configured correctlyvSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} is configured correctlyHostDisconnectedEventHost disconnectedinfoDisconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from host. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name} in {datacenter.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}HostEnableAdminFailedEventCannot restore administrator permissions to hosterrorCannot restore some administrator permissions to the hostCannot restore some administrator permissions to the host {host.name}HostEvent<Host Event>info<internal>HostExtraNetworksEventHost has extra vSphere HA networkserrorHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostExtraNetworksEvent"> <description> The host being added to the vSphere HA cluster has more management networks than existing hosts in the cluster. When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, be on the same subnets. This helps facilitate the pairing of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnets of the joining member that are not present on the existing member. </description> <cause> <description> The host has extra networks that are missing on an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. 
For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for the cluster. </action> </cause> </EventLongDescription> HostGetShortNameFailedEventCannot get short host nameerrorCannot complete command 'hostname -s' or returned incorrect name formatCannot complete command 'hostname -s' on host {host.name} or returned incorrect name format <EventLongDescription id="vim.event.HostGetShortNameFailedEvent"> <description> The hostname -s command has failed on the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostInAuditModeEventHost is in audit mode.infoHost is running in audit mode.Host {host.name} is running in audit mode. The host's configuration will not be persistent across reboots.HostInventoryFullEventHost inventory fullerrorMaximum ({capacity}) number of hosts allowed for this edition of vCenter Server has been reached <EventLongDescription id="vim.event.HostInventoryFullEvent"> <description> The vCenter Server Foundation license key currently allows only three hosts to be added to the inventory. Adding extra hosts results in errors and the logging of this event. </description> <cause> <description>Attempting to add more hosts than the number allowed by the license key assigned to vCenter Server</description> <action>Assign vCenter Server a license key that allows more hosts or has no host limit</action> </cause> </EventLongDescription> HostInventoryUnreadableEventHost Inventory UnreadableinfoThe virtual machine inventory file is damaged or unreadable.The virtual machine inventory file on host {host.name} is damaged or unreadable.HostIpChangedEventHost IP changedinfoIP address changed from {oldIP} to {newIP}IP address of the host {host.name} changed from {oldIP} to {newIP} <EventLongDescription id="vim.event.HostIpChangedEvent"> <description> The IP address of the host was changed </description> <cause> <description> The IP address of the host was changed through vCenter Server </description> </cause> <cause> <description> The IP address of the host was changed through the host </description> </cause> </EventLongDescription> HostIpInconsistentEventHost IP inconsistenterrorConfiguration of host IP address is inconsistent: address resolved to {ipAddress} and {ipAddress2}Configuration of host IP address is inconsistent on host {host.name}: address resolved to {ipAddress} and {ipAddress2}HostIpToShortNameFailedEventHost IP to short name not completederrorCannot resolve IP address to short nameCannot resolve IP address to short name on host {host.name} <EventLongDescription id="vim.event.HostIpToShortNameFailedEvent"> <description> The host's IP address could not be resolved to a short name </description> <cause> <description>The host or DNS records are improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostIsolationIpPingFailedEventvSphere HA isolation address unreachableerrorvSphere HA agent on host {host.name} in cluster {computeResource.name} could not reach isolation address: 
{isolationIp}vSphere HA agent on host {host.name} could not reach isolation address: {isolationIp}vSphere HA agent on this host could not reach isolation address: {isolationIp}vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} could not reach isolation address: {isolationIp} <EventLongDescription id="vim.event.HostIsolationIpPingFailedEvent"> <description> vSphere HA was unable to ping one or more of the isolation IP addresses. The inability to ping the addresses may cause HA to incorrectly declare the host as network isolated. A host is declared as isolated if it cannot ping the configured isolation addresses and the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <cause> <description>Could not ping the isolation address</description> <action>Correct the cause of the failure to ping the address</action> <action> Use advanced options to change the addresses used by vSphere HA for determining if a host is network isolated. By default, the isolation address is the default gateway of the management network. You can override the default using advanced options, or specify additional addresses to use for determining if a host is network isolated. Set the das.useDefaultIsolationAddress advanced option to "false" if you prefer that vSphere HA not use the default gateway as the isolation address. Specify the das.isolationAddress[X] advanced option for each isolation address that you want to specify. The new values take effect when vSphere HA is reconfigured for each host. </action> </cause> </EventLongDescription> HostLicenseExpiredEventHost license expirederrorA host license for {host.name} has expired <EventLongDescription id="vim.event.HostLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of host licenses on the license server and uses this event to notify you of any host licenses that are about to expire </description> <cause> <description>Host licenses on the license server are about to expire</description> <action>Update the license server to get a new version of the host license</action> </cause> </EventLongDescription> HostLocalPortCreatedEventA host local port is created to recover from management network connectivity loss.infoA host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic}.A host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic} on the host {host.name}.HostMissingNetworksEventHost is missing vSphere HA networkserrorHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostMissingNetworksEvent"> <description> The host being added to the vSphere HA cluster has fewer management networks than existing hosts in the cluster. 
When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, have common subnets. This helps facilitate the pairing of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnets of the existing member that are not present on the joining member. </description> <cause> <description> The host does not have networks compatible with an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. After you change the host's network configuration, reconfigure vSphere HA for this host. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for this cluster. </action> </cause> </EventLongDescription> HostMonitoringStateChangedEventvSphere HA host monitoring state changedinfovSphere HA host monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'HostNoAvailableNetworksEventHost has no available networks for vSphere HA communicationerrorHost {host.name} in cluster {computeResource.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}This host currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips} <EventLongDescription id="vim.event.HostNoAvailableNetworksEvent"> <description> The host being added to the vSphere HA cluster has no management networks available for vSphere HA cluster communication. The advanced option das.allowNetwork[X] is set, but no port group names match the advanced option for this host. 
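Several of the actions above (for HostIsolationIpPingFailedEvent, HostExtraNetworksEvent, HostMissingNetworksEvent and HostNoAvailableNetworksEvent) come down to setting vSphere HA advanced options such as das.allowNetwork[X], das.isolationAddress[X] and das.useDefaultIsolationAddress on the cluster. A minimal pyVmomi sketch of pushing such options is shown below; the vCenter address, credentials, cluster name, port group name and isolation address are illustrative placeholders, not values taken from this catalog.

# Sketch: set vSphere HA advanced options on a cluster (placeholder names/values).
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVim.task import WaitForTask
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="secret", sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

view = content.viewManager.CreateContainerView(content.rootFolder,
                                               [vim.ClusterComputeResource], True)
cluster = next(c for c in view.view if c.name == "Cluster01")
view.DestroyView()

options = [
    vim.option.OptionValue(key="das.allowNetwork0", value="Management Network"),
    vim.option.OptionValue(key="das.isolationAddress0", value="192.0.2.1"),
    vim.option.OptionValue(key="das.useDefaultIsolationAddress", value="false"),
    # vim.option.OptionValue(key="das.ignoreRedundantNetWarning", value="true"),
]
spec = vim.cluster.ConfigSpecEx(dasConfig=vim.cluster.DasConfigInfo(option=options))
WaitForTask(cluster.ReconfigureComputeResource_Task(spec, modify=True))
Disconnect(si)

As the descriptions note, the new values only take effect once vSphere HA is reconfigured on the affected hosts.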
</description> <cause> <description> The host has no port groups that match the names used by the advanced options to control which port groups vSphere HA uses </description> <action> Delete the advanced options das.allowNetwork[X] to allow vSphere HA to select the default management port groups </action> <action> Correct the names of the port groups specified in the advanced options to match those to be used by vSphere HA for this host </action> <action> Specify additional das.allowNetwork[X] advanced options to match the port group names for this host </action> </cause> </EventLongDescription> HostNoHAEnabledPortGroupsEventHost has no port groups enabled for vSphere HAerrorHost {host.name} in cluster {computeResource.name} has no port groups enabled for vSphere HA communication.Host {host.name} has no port groups enabled for vSphere HA communication.This host has no port groups enabled for vSphere HA communication.Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no port groups enabled for vSphere HA communication. <EventLongDescription id="vim.event.HostNoHAEnabledPortGroupsEvent"> <description> vSphere HA has determined that there are no management networks available on the host for vSphere HA inter-agent communication. </description> <cause> <description> The host has no vSphere HA management networks available </description> <action> If this event is observed when the host is being added to a vSphere HA cluster, change the host's network configuration to enable vSphere HA traffic on one or more port groups. By default, vSphere HA will use the Service Console port groups on ESX and ESXi hosts, the port groups with the Management Traffic checkbox selected. If vSphere HA was already configured on the host, it is possible that the host's network settings have changed and invalidated the management network configuration. Review the settings to make sure the port groups configured for management network still exist on the host and for ESXi the Management Traffic option is enabled. Reconfigure vSphere HA on the host after fixing any configuration issues. </action> </cause> </EventLongDescription> HostNoRedundantManagementNetworkEventNo redundant management network for hostwarningHost {host.name} in cluster {computeResource.name} currently has no management network redundancyHost {host.name} currently has no management network redundancyThis host currently has no management network redundancyHost {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no management network redundancy <EventLongDescription id="vim.event.HostNoRedundantManagementNetworkEvent"> <description> vSphere HA has determined that there is only one path for vSphere HA management traffic, resulting in a single point of failure. Best practices require more than one path for vSphere HA to use for heartbeats and cluster communication. A host with a single path is more likely to be declared dead, network partitioned or isolated after a network failure. If declared dead, vSphere HA will not respond if the host subsequently actually fails, while if declared isolated, vSphere HA may apply the isolation response thus impacting the uptime of the virtual machines running on it. 
</description> <cause> <description>There is only one port group available for vSphere HA communication</description> <action>Configure another Service Console port group on the ESX host</action> <action> Configure another port group on the ESXi host by selecting the "Management Traffic" check box </action> <action> Use NIC teaming on the management port group to allow ESX or ESXi to direct management traffic out of more than one physical NIC in case of a path failure </action> <action> If you accept the risk of not having redundancy for vSphere HA communication, you can eliminate the configuration issue by setting the das.ignoreRedundantNetWarning advanced option to "true" </action> </cause> </EventLongDescription> HostNonCompliantEventHost non-compliant with profileerrorHost is not in compliance with the attached profile.Host {host.name} is not in compliance with the attached profile <EventLongDescription id="vim.event.HostNonCompliantEvent"> <description> The host does not comply with the host profile </description> <cause> <description> The host is not in compliance with the attached profile </description> <action> Check the Summary tab for the host in the vSphere Client to determine the possible cause(s) of noncompliance </action> </cause></EventLongDescription> HostNotInClusterEventHost not in clustererrorNot a cluster member in {datacenter.name}Host {host.name} is not a cluster member in {datacenter.name}HostOvercommittedEventHost resource overcommittederrorInsufficient capacity in host {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource configurationInsufficient capacity in host {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.HostOvercommittedEvent"> <description> A host does not have sufficient CPU and/or memory capacity to satisfy its resource configuration. The host has its own admission control, so this condition should never occur. 
</description> <cause> <description>A host has insufficient capacity for its resource configuration</description> <action>If you encounter this condition, contact VMware Support </action> </cause> </EventLongDescription> HostPrimaryAgentNotShortNameEventHost primary agent not specified as short nameerrorPrimary agent {primaryAgent} was not specified as a short namePrimary agent {primaryAgent} was not specified as a short name to host {host.name} <EventLongDescription id="vim.event.HostPrimaryAgentNotShortNameEvent"> <description> The primary agent is not specified in short name format </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostProfileAppliedEventHost profile appliedinfoProfile configuration applied to the hostProfile is applied on the host {host.name}HostReconnectionFailedEventCannot reconnect hosterrorCannot reconnect to {host.name}Cannot reconnect to {host.name}Cannot reconnectCannot reconnect to {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostReconnectionFailedEvent"> <description> Could not reestablish a connection to the host </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostRemovedEventHost removedinfoRemoved host {host.name}Removed host {host.name}Removed from inventoryRemoved host {host.name} in {datacenter.name}HostShortNameInconsistentEventHost short name inconsistenterrorHost names {shortName} and {shortName2} both resolved to the same IP address. Check the host's network configuration and DNS entries <EventLongDescription id="vim.event.HostShortNameInconsistentEvent"> <description> The name resolution check on the host returns different names for the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShortNameToIpFailedEventHost short name to IP not completederrorCannot resolve short name {shortName} to IP addressCannot resolve short name {shortName} to IP address on host {host.name} <EventLongDescription id="vim.event.HostShortNameToIpFailedEvent"> <description> The short name of the host can not be resolved to an IP address </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShutdownEventHost shut downinfoShut down of {host.name}: {reason}Shut down of {host.name}: {reason}Shut down of host: {reason}Shut down of {host.name} in {datacenter.name}: {reason}HostSpecificationChangedEventHost specification is changed on vCenterinfoHost specification of host {host.name} is changed on vCenter.Host specification of host {host.name} is changed on vCenter.Host specification is changed.Host specification of host {host.name} is changed on vCenter.HostSpecificationRequireEventPull host specification from host to vCenterinfoPull host specification of host {host.name} to vCenter.Pull host specification of host {host.name} to vCenter.Pull host specification to vCenter.Pull host specification of host {host.name} to vCenter.HostSpecificationUpdateEventHost specification is changed on hostinfoHost specification is changed on host {host.name}.Host specification is changed on host {host.name}.Host 
specification is changed.Host specification is changed on host {host.name}.HostStatusChangedEventHost status changedinfoConfiguration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.HostStatusChangedEvent"> <description> The host status has changed. This status is the status of the root resource pool that encompasses the entire host. A host status change may be accompanied by the removal of a configuration issue if one was previously detected. A host status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The host status changed to yellow</description> <action>Reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The host status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> HostSubSpecificationDeleteEventDelete host sub specification {subSpecName}infoDelete host sub specification {subSpecName} of host {host.name}.Delete host sub specification {subSpecName} of host {host.name}.Delete host sub specification.Delete host sub specification {subSpecName} of host {host.name}.HostSubSpecificationUpdateEventHost sub specification {hostSubSpec.name} is changed on hostinfoHost sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed.Host sub specification {hostSubSpec.name} is changed on host {host.name}.HostSyncFailedEventCannot synchronize hosterrorCannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. 
{reason.msg} <EventLongDescription id="vim.event.HostSyncFailedEvent"> <description> Failed to sync with the vCenter Agent on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> HostUpgradeFailedEventHost upgrade failederrorCannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostUpgradeFailedEvent"> <description> Failed to connect to the host due to an installation or upgrade issue </description> </EventLongDescription> HostUserWorldSwapNotEnabledEventThe userworld swap is not enabled on the hostwarningThe userworld swap is not enabled on the hostThe userworld swap is not enabled on the host {host.name}HostVnicConnectedToCustomizedDVPortEventSome host vNICs were reconfigured to use dvPorts with port level configuration, which might be different from the dvPort group.infoHost vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.Host {host.name} vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.HostWwnChangedEventHost WWN changedwarningWWNs are changedWWNs are changed for {host.name}HostWwnConflictEventHost WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {host.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.HostWwnConflictEvent"> <description> The WWN (World Wide Name) of this host conflicts with the WWN of another host or virtual machine </description> <cause> <description> The WWN of this host conflicts with WWN of another host </description> </cause> <cause> <description> The WWN of this host conflicts with WWN of another virtual machine</description> </cause> </EventLongDescription> IncorrectHostInformationEventIncorrect host informationerrorInformation needed to acquire the correct set of licenses not providedHost {host.name} did not provide the information needed to acquire the correct set of licenses <EventLongDescription id="vim.event.IncorrectHostInformationEvent"> <description> The host did not provide the information needed to acquire the correct set of licenses </description> <cause> <description> The cpuCores, cpuPackages or hostType information on the host is not valid </description> </cause> <cause> <description> The host information is not available because host was added as disconnected </description> </cause> </EventLongDescription> InfoUpgradeEventInformation upgradeinfo{message}InsufficientFailoverResourcesEventvSphere HA failover resources are insufficienterrorInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name}Insufficient resources to satisfy vSphere HA failover levelInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.InsufficientFailoverResourcesEvent"> <description> The cluster does not have enough unreserved capacity to satisfy the level configured for vSphere HA admission control. 
Failovers may still be performed by vSphere HA but will be on a best effort basis. </description> <cause> <description> If the "number of host failures to tolerate" policy is configured and a few virtual machines have a much higher CPU or memory reservation than the other virtual machines, vSphere HA admission control can be excessively conservative to ensure that there are enough unfragmented resources if a host fails. </description> <action> Use similar CPU and memory reservations for all virtual machines in the cluster. If this is not possible, consider using a different vSphere HA admission control policy, such as reserving a percentage of cluster resources for failover. Alternatively, you can use advanced options to specify a cap for the slot size. See the vSphere Availability Guide for details. </action> </cause> <cause> <description> Hosts with vSphere HA agent errors are not good candidates for providing failover capacity in the cluster, and their resources are not considered for vSphere HA admission control purposes. If many hosts have a vSphere HA agent error, vCenter Server generates this event. </description> <action> Check the event log of the hosts to determine the cause of the vSphere HA agent errors. After addressing any configuration issues, reconfigure vSphere HA on the affected hosts or on the cluster. </action> </cause> </EventLongDescription> InvalidEditionEventInvalid editionerrorThe license edition '{feature}' is invalid <EventLongDescription id="vim.event.InvalidEditionEvent"> <description> vCenter Server attempted to acquire an undefined feature from the license server </description> <cause> <description>Any operation that requires a feature license such as vMotion, DRS, vSphere HA might result in this event if that feature is not defined on the license server</description> <action>Verify that the feature in question is present on the license server</action> </cause> </EventLongDescription> EventExLicense downgradewarningLicense downgradeLicense downgradeLicense downgradevim.event.LicenseDowngradedEvent|License downgrade: {licenseKey} removes the following features: {lostFeatures} <EventLongDescription id="vim.event.LicenseDowngradedEvent"> <description> The installed license reduces the set of available features. Some features that were previously available will not be accessible with the new license. </description> <cause> <description>The license has been replaced.</description> <action>Revert to the license previously installed if it is not already expired.</action> <action>Contact VMware in order to obtain a new license with the required features.</action> </cause> </EventLongDescription> LicenseEvent<License Event>info<internal>LicenseExpiredEventLicense expirederrorLicense {feature.featureName} has expiredLicenseNonComplianceEventInsufficient licenses.errorLicense inventory is not compliant. Licenses are overused <EventLongDescription id="vim.event.LicenseNonComplianceEvent"> <description> vCenter Server does not strictly enforce license usage. Instead, it checks for license overuse periodically. If vCenter Server detects overuse, it logs this event and triggers an alarm. 
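For the InsufficientFailoverResourcesEvent guidance above, one of the suggested remedies is to switch from the "host failures to tolerate" policy to percentage-based admission control. The pyVmomi sketch below illustrates that change; the cluster name, credentials and the 25% CPU/memory reservations are assumptions for illustration, not recommendations from this catalog.

# Sketch: switch a cluster to percentage-based vSphere HA admission control
# (placeholder names and percentages).
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVim.task import WaitForTask
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="secret", sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()
view = content.viewManager.CreateContainerView(content.rootFolder,
                                               [vim.ClusterComputeResource], True)
cluster = next(c for c in view.view if c.name == "Cluster01")
view.DestroyView()

policy = vim.cluster.FailoverResourcesAdmissionControlPolicy(
    cpuFailoverResourcesPercent=25,       # reserve 25% of CPU capacity for failover
    memoryFailoverResourcesPercent=25)    # reserve 25% of memory capacity for failover
spec = vim.cluster.ConfigSpecEx(
    dasConfig=vim.cluster.DasConfigInfo(admissionControlEnabled=True,
                                        admissionControlPolicy=policy))
WaitForTask(cluster.ReconfigureComputeResource_Task(spec, modify=True))
Disconnect(si)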
</description> <cause> <description>Overuse of licenses</description> <action>Check the license reports through the vSphere Client and reduce the number of entities using the license key or add a new license key with a greater capacity</action> </cause> </EventLongDescription> LicenseRestrictedEventUnable to acquire licenses due to a restriction on the license servererrorUnable to acquire licenses due to a restriction in the option file on the license server. <EventLongDescription id="vim.event.LicenseRestrictedEvent"> <description> vCenter Server logs this event if it is unable to check out a license from the license server due to restrictions in the license file </description> <cause> <description>License file in the license server has restrictions that prevent check out</description> <action>Check the license file and remove any restrictions that you can</action> </cause> </EventLongDescription> LicenseServerAvailableEventLicense server availableinfoLicense server {licenseServer} is availableLicenseServerUnavailableEventLicense server unavailableerrorLicense server {licenseServer} is unavailable <EventLongDescription id="vim.event.LicenseServerUnavailableEvent"> <description> vCenter Server tracks the license server state and logs this event if the license server has stopped responding. </description> <cause> <description>License server is not responding and not available to vCenter Server</description> <action>Verify that the license server is running. If it is, check the connectivity between vCenter Server and the license server.</action> </cause> </EventLongDescription> LocalDatastoreCreatedEventLocal datastore createdinfoCreated local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl})Created local datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}LocalTSMEnabledEventESXi Shell is enabledinfoESXi Shell for the host has been enabledESXi Shell for the host {host.name} has been enabledLockerMisconfiguredEventLocker misconfiguredwarningDatastore {datastore} which is configured to back the locker does not existLockerReconfiguredEventLocker reconfiguredinfoLocker was reconfigured from {oldDatastore} to {newDatastore} datastoreMigrationErrorEventMigration errorerrorUnable to migrate {vm.name} from {host.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate from {host.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine from the source host failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationEvent<Migration Event>info<internal>MigrationHostErrorEventMigration host errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to host {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} to 
{dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine to the destination host or datastore failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationHostWarningEventMigration host warningwarningMigration of {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination host or datastore is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceErrorEventMigration resource errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Cannot migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceErrorEvent"> <description> A virtual machine failed to migrate due to incompatibilities with target resource pool </description> <cause> <description>Migrating a virtual machine to the destination host or datastore is not possible due to incompatibilities with the target resource pool. 
</description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceWarningEventMigration resource warningwarningMigration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination resource pool is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationWarningEventMigration warningwarningMigration of {vm.name} from {host.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration from {host.name}: {fault.msg}Migration of {vm.name} from {host.name} in {datacenter.name}: {fault.msg}MtuMatchEventThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to the physical NIC.infoThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}MtuMismatchEventThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to the physical NIC.errorThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}NASDatastoreCreatedEventNAS datastore createdinfoCreated NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl})Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}NetworkRollbackEventNetwork configuration on the host {host.name} is rolled back as it disconnects the 
host from vCenter server.errorNetwork configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.NoAccessUserEventNo access for usererrorCannot login user {userName}@{ipAddress}: no permission <EventLongDescription id="vim.event.NoAccessUserEvent"> <description> A user could not log in due to insufficient access permission </description> <cause> <description> The user account has insufficient access permission </description> <action> Log in with a user account that has the necessary access permissions or grant additional access permissions to the current user </action> </cause> </EventLongDescription> NoDatastoresConfiguredEventNo datastores configuredinfoNo datastores have been configuredNo datastores have been configured on the host {host.name}NoLicenseEventNo licenseerrorA required license {feature.featureName} is not reserved <EventLongDescription id="vim.event.NoLicenseEvent"> <description> vCenter Server logs this event if it fails to acquire a feature from the license server for an unknown reason. </description> <cause> <description>Acquiring a feature license fails for an unknown reason</description> <action>Verify that the license server has the license for the feature</action> </cause> </EventLongDescription> NoMaintenanceModeDrsRecommendationForVMNo maintenance mode DRS recommendation for the VMinfoUnable to automatically migrate {vm.name}Unable to automatically migrate from {host.name}Unable to automatically migrate {vm.name} from {host.name} <EventLongDescription id="vim.event.NoMaintenanceModeDrsRecommendationForVM"> <description> DRS failed to generate a vMotion recommendation for a virtual machine on a host entering Maintenance Mode. This condition typically occurs because no other host in the DRS cluster is compatible with the virtual machine. Unless you manually migrate or power off this virtual machine, the host will be unable to enter Maintenance Mode. </description> <cause> <description>DRS failed to evacuate a powered on virtual machine</description> <action>Manually migrate the virtual machine to another host in the cluster</action> <action>Power off the virtual machine</action> <action>Bring any hosts in Maintenance Mode out of that mode</action> <action>Cancel the task that is making the host enter Maintenance Mode </action> </cause> </EventLongDescription> NonVIWorkloadDetectedOnDatastoreEventUnmanaged workload detected on SIOC-enabled datastoreinfoAn unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.NotEnoughResourcesToStartVmEventInsufficient resources for vSphere HA to start the VM. Reason: {reason.@enum.fdm.placementFault}warningInsufficient resources to fail over {vm.name} in {computeResource.name}. vSphere HA will retry the fail over when enough resources are available. 
Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over this virtual machine. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name} in {computeResource.name} that resides in {datacenter.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault} <EventLongDescription id="vim.event.NotEnoughResourcesToStartVmEvent"> <description> This event is issued by vSphere HA when the master agent was not able to fail over a virtual machine to one of its compatible hosts. This condition is due to one or more of the causes listed below. When this condition occurs, vSphere HA will report a cause for it in the event summary, but note that additional causes might exist. It is more likely to occur if vSphere HA admission control is disabled or more hosts fail than were provisioned for. When a virtual machine cannot be placed, vSphere HA will retry placing it when the cluster state changes. Also, if vSphere DRS is enabled, it will be invoked to try to defragment the cluster or bring hosts out of Standby Mode. </description> <cause> <description> A virtual machine has bandwidth reservations for CPU, memory, vFlash cache, or virtual NICs. There was no compatible host that had enough resources to satisfy the reservations. </description> <action>Decrease the virtual machine resource reservation</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition if one exists</action> <action>If DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> <cause> <description> The cluster has vSAN enabled, and one or more hosts that contribute storage to the cluster is inaccessible, preventing vSphere HA from powering on the virtual machine. This applies to virtual machines that have one or more files on a vSAN datastore. </description> <action>Bring online any failed hosts or resolve a network partition if one exists that involves hosts that contribute storage to the vSAN cluster</action> </cause> <cause> <description>One or more datastores that are associated with a virtual machine are inaccessible by any compatible host in the cluster.</description> <action>Bring online any non-responding host that mounts the virtual machine datastores</action> <action>Fix the all-paths-down (APD) or permanent-device-loss (PDL) issues.</action> </cause> <cause> <description>vSphere HA is enforcing virtual machine to virtual machine anti-affinity rules, and the rule cannot be satisfied. </description> <action>Add more hosts to cluster</action> <action>Bring online any non-responding host or resolve a network partition if one exists</action> <action>Remove any anti-affinity rules that are restricting the placement</action> </cause> <cause> <description>The number of VMs that can run on each host is limited. 
There is no host that can power on the VM without exceeding the limit.</description> <action>Increase the limit if you have set the limitVmsPerESXHost HA advanced option.</action> <action>Bring online any non-responding host or add new hosts to the cluster</action> </cause> </EventLongDescription> OutOfSyncDvsHostThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.warningThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.The vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.PermissionAddedEventPermission addedinfoPermission created for {principal} on {entity.name}, role is {role.name}, propagation is {propagate.@enum.auth.Permission.propagate}PermissionEvent<Permission Event>info<internal>PermissionRemovedEventPermission removedinfoPermission rule removed for {principal} on {entity.name}PermissionUpdatedEventPermission updatedinfoPermission changed for '{principal}' on '{entity.name}'.
Role changed from '{prevRole.name}' to role '{role.name}'. Propagate changed from '{prevPropagate.@enum.auth.Permission.propagate}' to '{propagate.@enum.auth.Permission.propagate}'.ProfileAssociatedEventProfile attached to hostinfoProfile {profile.name} has been attached.Profile {profile.name} has been attached.Profile {profile.name} has been attached with the host.Profile {profile.name} attached.ProfileChangedEventProfile was changedinfoProfile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.ProfileCreatedEventProfile createdinfoProfile is created.ProfileDissociatedEventProfile detached from hostinfoProfile {profile.name} has been detached.Profile {profile.name} has been detached. Profile {profile.name} has been detached from the host.Profile {profile.name} detached.ProfileEventinfo<internal>ProfileReferenceHostChangedEventThe profile reference host was changedinfoProfile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.ProfileRemovedEventProfile removedinfoProfile {profile.name} was removed.Profile {profile.name} was removed.Profile was removed.RecoveryEventRecovery completed on the host.infoThe host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the management virtual NIC {vnic} by connecting to a new port {portKey} on the vSphere Distributed Switch {dvsUuid}.RemoteTSMEnabledEventSSH is enabledinfoSSH for the host has been enabledSSH for the host {host.name} has been enabledResourcePoolCreatedEventResource pool createdinfoCreated resource pool {resourcePool.name} in compute-resource {computeResource.name}Created resource pool {resourcePool.name}Created resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}ResourcePoolDestroyedEventResource pool deletedinfoRemoved resource pool {resourcePool.name} on {computeResource.name}Removed resource pool {resourcePool.name}Removed resource pool {resourcePool.name} on {computeResource.name} in {datacenter.name}ResourcePoolEvent<Resource Pool Event>info<internal>ResourcePoolMovedEventResource pool movedinfoMoved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name} in {datacenter.name}ResourcePoolReconfiguredEventResource pool reconfiguredinfoUpdated configuration for {resourcePool.name} in compute-resource {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration on {resourcePool.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration for {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ResourcePoolReconfiguredEvent"> <description> The resource pool configuration changed. The resource pool configuration includes information about the resource reservations of the resource pool and the resource reservations of its children. </description> </EventLongDescription> ResourceViolatedEventResource usage exceeds configurationerrorResource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name}'Resource usage exceeds configuration on resource pool {resourcePool.name}Resource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.ResourceViolatedEvent"> <description> The cumulative CPU and/or memory resource consumption of all virtual machines in the resource pool exceeds the resource pool configuration </description> <cause> <description>You attempted to move a virtual machine from one resource pool into another bypassing vCenter Server. This condition occurs when you attempt the move using the vSphere Client directly connected to the host. </description> <action>In a DRS cluster, do not move and power on a virtual machine bypassing vCenter Server</action> </cause> </EventLongDescription> RoleAddedEventRole addedinfoNew role {role.name} createdRoleEvent<Role Event>info<internal>RoleRemovedEventRole removedinfoRole {role.name} removedRoleUpdatedEventRole updatedinfoRole modified.
Previous name: {prevRoleName}, new name: {role.name}.
Added privileges: {privilegesAdded}.
Removed privileges: {privilegesRemoved}.RollbackEventHost Network operation rolled backinfoThe Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.The operation {methodName} on the host {hostName} disconnected the host and was rolled back .The Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.ScheduledTaskCompletedEventScheduled task completedinfoTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} completed successfullyTask {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} completed successfullyScheduledTaskCreatedEventScheduled task createdinfoCreated task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name}Created task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskEmailCompletedEventSent scheduled task emailinfoTask {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} sent email to {to}ScheduledTaskEmailFailedEventScheduled task email not senterrorTask {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot send email to {to}: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskEmailFailedEvent"> <description> An error occurred while sending email notification that a scheduled task is running </description> <cause> <description>Failed to send email for the scheduled task</description> <action>Check the vCenter Server SMTP settings for sending emails</action> </cause> </EventLongDescription> ScheduledTaskEvent<Scheduled Task Event>info<internal>ScheduledTaskFailedEventCannot complete scheduled taskerrorTask {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot be completed: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskFailedEvent"> <description> An error occurred while running a scheduled task </description> <cause> <description>Failed to run a scheduled task</description> <action>Correct the failure condition</action> </cause> </EventLongDescription> ScheduledTaskReconfiguredEventScheduled task reconfiguredinfoReconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name}Reconfigured task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.ScheduledTaskRemovedEventScheduled task removedinfoRemoved task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name}Removed task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskStartedEventScheduled task startedinfoRunning task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name}Running task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ServerLicenseExpiredEventServer license expirederrorA vCenter Server license has expiredServerStartedSessionEventServer started sessioninfovCenter startedSessionEvent<Session Event>info<internal>SessionTerminatedEventSession stoppedinfoA session for user '{terminatedUsername}' has stopped <EventLongDescription id="vim.event.SessionTerminatedEvent"> <description> A session has been terminated </description> </EventLongDescription> ExtendedEventThe time-limited license on the host has expired.warningThe time-limited license on host {host.name} has expired.The time-limited license on host {host.name} has expired.The time-limited license on the host has expired.vim.event.SubscriptionLicenseExpiredEvent|The time-limited license on host {host.name} has expired. To comply with the EULA, renew the license at http://my.vmware.comTaskEventTask eventinfoTask: {info.descriptionId}TaskTimeoutEventTask time-outinfoTask: {info.descriptionId} time-out <EventLongDescription id="vim.event.TaskTimeoutEvent"> <description> A task has been cleaned up because it timed out </description> </EventLongDescription> TeamingMatchEventTeaming configuration in the vSphere Distributed Switch matches the physical switch configurationinfoTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration in {datacenter.name}. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TeamingMisMatchEventTeaming configuration in the vSphere Distributed Switch does not match the physical switch configurationerrorTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration in {datacenter.name}. 
Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TemplateBeingUpgradedEventUpgrading templateinfoUpgrading template {legacyTemplate}TemplateUpgradeEvent<Template Upgrade Event>info<internal>TemplateUpgradeFailedEventCannot upgrade templateinfoCannot upgrade template {legacyTemplate} due to: {reason.msg}TemplateUpgradedEventTemplate upgradedinfoTemplate {legacyTemplate} upgrade completedTimedOutHostOperationEventHost operation timed outwarningThe operation performed on host {host.name} timed outThe operation performed on host {host.name} timed outThe operation timed outThe operation performed on {host.name} in {datacenter.name} timed out <EventLongDescription id="vim.event.TimedOutHostOperationEvent"> <description> An operation performed on the host has timed out </description> <cause> <description> A previous event in the sequence of events will provide information on the reason for the timeout </description> </cause> </EventLongDescription> UnlicensedVirtualMachinesEventUnlicensed virtual machinesinfoThere are {unlicensed} unlicensed virtual machines on host {host} - there are only {available} licenses availableUnlicensedVirtualMachinesFoundEventUnlicensed virtual machines foundinfo{unlicensed} unlicensed virtual machines found on host {host}UpdatedAgentBeingRestartedEventRestarting updated agentinfoThe agent is updated and will soon restartThe agent on host {host.name} is updated and will soon restartUpgradeEvent<Upgrade Event>info<internal>UplinkPortMtuNotSupportEventNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.errorNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortMtuSupportEventAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.infoAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.All VLAN MTU setting on the external physical switch allows the vSphere Distributed Switch max MTU size packets passing on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}All VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanTrunkedEventThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch.infoThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.The 
configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.The configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanUntrunkedEventNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch.errorNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UserAssignedToGroupUser assigned to groupinfoUser {userLogin} was added to group {group}UserLoginSessionEventUser logininfoUser {userName}@{ipAddress} logged in as {userAgent}UserLogoutSessionEventUser logoutinfoUser {userName}@{ipAddress} logged out (login time: {loginTime}, number of API invocations: {callCount}, user agent: {userAgent})UserPasswordChangedUser password changedinfoPassword was changed for account {userLogin}Password was changed for account {userLogin} on host {host.name}UserUnassignedFromGroupUser removed from groupinfoUser {userLogin} removed from group {group}UserUpgradeEventUser upgradeuser{message} <EventLongDescription id="vim.event.UserUpgradeEvent"> <description> A general user event occurred due to an upgrade </description> </EventLongDescription> VMFSDatastoreCreatedEventVMFS datastore createdinfoCreated VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl})Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}VMFSDatastoreExpandedEventVMFS datastore expandedinfoExpanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name}Expanded VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExpandedEvent"> <description> An existing extent in a VMFS volume was grown to increase its capacity </description> <cause> <description> A user or system action caused an extent of an existing VMFS datastore to be grown. Only extents with free space immediately after them are expandable. As a result, the action filled the available adjacent capacity on the LUN. 
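The session events above (UserLoginSessionEvent, UserLogoutSessionEvent) can be pulled from vCenter through the EventManager. A short pyVmomi sketch, assuming pyVmomi is installed; the host name and credentials are placeholders, and certificate verification is disabled only for brevity.

import ssl
from datetime import datetime, timedelta
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

# Placeholder connection details.
si = SmartConnect(host='vcenter.example.com', user='monitor@vsphere.local',
                  pwd='secret', sslContext=ssl._create_unverified_context())
try:
    event_manager = si.RetrieveContent().eventManager
    # Ask for the last hour of session events; the type id is the event
    # name as listed in this catalog.
    filter_spec = vim.event.EventFilterSpec(
        time=vim.event.EventFilterSpec.ByTime(
            beginTime=datetime.utcnow() - timedelta(hours=1)),
        eventTypeId=['UserLoginSessionEvent', 'UserLogoutSessionEvent'])
    for event in event_manager.QueryEvents(filter_spec):
        # fullFormattedMessage is the corresponding template with the
        # placeholders already filled in by vCenter.
        print(event.createdTime, event.fullFormattedMessage)
finally:
    Disconnect(si)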
</description> </cause> </EventLongDescription> VMFSDatastoreExtendedEventVMFS datastore extendedinfoExtended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name}Extended VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExtendedEvent"> <description> An existing VMFS volume was extended to increase its capacity </description> <cause> <description> A user or system action caused the datastore to be extended with a partition on a LUN to increase its capacity. </description> </cause> </EventLongDescription> VMotionLicenseExpiredEventvMotion license expirederrorA vMotion license for {host.name} has expired <EventLongDescription id="vim.event.VMotionLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of vMotion licenses on the license server and uses this event to notify you of any vMotion licenses that are about to expire </description> <cause> <description>vMotion licenses on the license server are about to expire</description> <action>Update the license server to get a fresher version of the vMotion license</action> </cause> </EventLongDescription> VcAgentUninstallFailedEventCannot uninstall vCenter agenterrorCannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name} in {datacenter.name}. {reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUninstallFailedEvent"> <description> An attempt to uninstall the vCenter Agent failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUninstalledEventvCenter agent uninstalledinfovCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalledvCenter agent has been uninstalled from {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUninstalledEvent"> <description> The vCenter Agent has been uninstalled from host </description> </EventLongDescription> VcAgentUpgradeFailedEventCannot complete vCenter agent upgradeerrorCannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name} in {datacenter.name}. 
{reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUpgradeFailedEvent"> <description> A vCenter Agent upgrade attempt failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUpgradedEventvCenter agent upgradedinfovCenter agent has been upgraded on {host.name}vCenter agent has been upgraded on {host.name}vCenter agent has been upgradedvCenter agent has been upgraded on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUpgradedEvent"> <description> The vCenter Agent has been upgraded on the host </description> </EventLongDescription> VimAccountPasswordChangedEventVIM account password changedinfoVIM account password changedVIM account password was changed on host {host.name} <EventLongDescription id="vim.event.VimAccountPasswordChangedEvent"> <description> The password for the Vim account user on the host has been changed. This account is created by vCenter Server and used to manage the host. </description> <cause> <description> vCenter Server periodically changes the password of the Vim account that it uses to manage the host </description> </cause> </EventLongDescription> VmAcquiredMksTicketEventVM acquired MKS ticketinfoRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} has been openedRemote console has been opened for this virtual machine on {host.name}Remote console to {vm.name} on {host.name} in {datacenter.name} has been opened <EventLongDescription id="vim.event.VmAcquiredMksTicketEvent"> <description> Successfully acquired MKS Ticket for the virtual machine </description> <cause> <description> The MKS Ticket used to connect to the virtual machine remote console has been successfully acquired. </description> </cause> </EventLongDescription> VmAcquiredTicketEventVM acquired ticketinfoA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket of type {ticketType.@enum.VirtualMachine.TicketType} has been acquired.A ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} on {host.name} in {datacenter.name} has been acquiredVmAutoRenameEventVM auto renameinfoInvalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name}. Renamed from {oldName} to {newName}Conflicting or invalid virtual machine name detected. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name} in {datacenter.name}. Renamed from {oldName} to {newName} <EventLongDescription id="vim.event.VmAutoRenameEvent"> <description> The virtual machine was renamed because of possible name conflicts with another virtual machine </description> <cause> <description>The virtual machine might have been added to the vCenter Server inventory while scanning the datastores of hosts added to the inventory. During such an action, the newly-added virtual machine's name might have been found to be in conflict with a virtual machine name already in the inventory. To resolve this, vCenter Server renames the newly-added virtual machine. 
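VmAcquiredTicketEvent and VmAcquiredMksTicketEvent above are recorded when a console ticket is requested for a virtual machine. A hedged pyVmomi sketch, assuming 'vm' is an already resolved vim.VirtualMachine; the URL layout shown is the commonly used WebMKS form and may differ in your deployment.

from pyVmomi import vim

def open_console_ticket(vm):
    """Request a browser console ticket for an already resolved
    vim.VirtualMachine; vCenter records a VmAcquiredTicketEvent
    of type 'webmks' for this call."""
    ticket = vm.AcquireTicket(ticketType='webmks')
    # host, port and ticket identify the console endpoint; the URL form
    # below is the commonly used WebMKS layout.
    return 'wss://{}:{}/ticket/{}'.format(ticket.host, ticket.port, ticket.ticket)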
</description> </cause> </EventLongDescription> VmBeingClonedEventVM being clonedinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}VmBeingClonedNoFolderEventVM being cloned to a vAppinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on host {host.name}, {ds.name} in {datacenter.name} to {destName} on host {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}VmBeingCreatedEventCreating VMinfoCreating {vm.name} on {host.name}, {ds.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {ds.name} in {datacenter.name}Creating VM on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmBeingDeployedEventDeploying VMinfoDeploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} from template {srcTemplate.name}Deploying VM on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} in {datacenter.name} from template {srcTemplate.name} <EventLongDescription id="vim.event.VmBeingDeployedEvent"> <description> A virtual machine is being created from a template </description> <cause> <description> A user action prompted a virtual machine to be created from this template. 
</description> </cause> </EventLongDescription> VmBeingHotMigratedEventVM is hot migratinginfoMigrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingHotMigratedEvent"> <description> A powered-on virtual machine is being migrated with vMotion </description> <cause> <description> A user action might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> <cause> <description> A DRS recommendation might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> </EventLongDescription> VmBeingMigratedEventVM migratinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingMigratedEvent"> <description> Changing the host on which the virtual machine is executing </description> <cause> <description> A user action caused the virtual machine to be migrated to a different host </description> </cause> </EventLongDescription> VmBeingRelocatedEventVM relocatinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingRelocatedEvent"> <description> The virtual machine execution and/or storage is being relocated </description> <cause> <description> A user action might have caused the virtual machine's execution and/or storage to be changed </description> </cause> </EventLongDescription> VmCloneEvent<VM Clone Event>info<internal><internal><internal><internal><internal>VmCloneFailedEventCannot complete VM cloneerrorFailed to clone {vm.name} on {host.name}, {ds.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in 
{destFolder.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmCloneFailedEvent"> <description> Cloning a virtual machine failed </description> <cause> <description> An error prevented the virtual machine from being cloned </description> </cause> </EventLongDescription> VmClonedEventVM clonedinfo{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {ds.name} in {datacenter.name}{sourceVm.name} cloned to {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmConfigMissingEventVM configuration missinginfoConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} cannot be foundConfiguration file cannot be foundConfiguration file for {vm.name} on {host.name} in {datacenter.name} cannot be found <EventLongDescription id="vim.event.VmConfigMissingEvent"> <description> One or more configuration files for the virtual machine cannot be found </description> <cause> <description> The datastore on which this virtual machine resides may be inaccessible </description> <action> Check the connectivity of the datastore on which this virtual machine resides. If the datastore has a backing LUN, check to see if there are any transient disk failures. </action> </cause> </EventLongDescription> VmConnectedEventVM connectedinfoHost is connectedVirtual machine {vm.name} is connected <EventLongDescription id="vim.event.VmConnectedEvent"> <description> The virtual machine is in a connected state in the inventory and vCenter Server can access it </description> <cause> <description> A user or system action that resulted in operations such as creating, registering, cloning or deploying a virtual machine gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> A user or system action that resulted in operations such as adding or reconnecting a host gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> The state of the virtual machine's host changed from Not Responding to Connected and the host gave vCenter Server access to the virtual machine </description> </cause> </EventLongDescription> VmCreatedEventVM createdinfoNew virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {ds.name} in {datacenter.name}Virtual machine created on {host.name}, {ds.name} in {datacenter.name}Created virtual machine {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmDasBeingResetEventvSphere HA is resetting VMinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. 
Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode} <EventLongDescription id="vim.event.VmDasBeingResetEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA has been configured, the virtual machine might be reset because the VMware Tools heartbeat or application heartbeat status turned red. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasBeingResetWithScreenshotEventvSphere HA enabled VM reset with screenshotinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}{vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}. <EventLongDescription id="vim.event.VmDasBeingResetWithScreenshotEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA is configured, this condition can occur because the VMware Tools heartbeat or the application heartbeat status turned red. The event contains the location of the screenshot taken of the guest console before it was reset. You can use this information to determine the cause of the heartbeat failure. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> Check the screenshot image to see if the cause was a guest operating system failure. If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. 
You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasResetFailedEventvSphere HA cannot reset VMwarningvSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA cannot reset {vm.name} on {host.name}vSphere HA cannot reset {vm.name}vSphere HA cannot reset this virtual machinevSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmDasResetFailedEvent"> <description> vSphere HA attempted to reset the virtual machine because of a heartbeat failure from VMware Tools or a guest application, depending on how vSphere HA was configured. However, the reset operation failed. </description> <cause> <description> The most likely reason for the reset failure is that the virtual machine was running another task at the time the reset was initiated. </description> <action>Check to see whether the virtual machine requires attention and reset it manually if necessary.</action> </cause> </EventLongDescription> VmDasUpdateErrorEventVM vSphere HA update errorerrorUnable to update vSphere HA agents given the state of {vm.name}VmDasUpdateOkEventCompleted VM DAS updateinfovSphere HA agents have been updated with the current state of the virtual machineVmDateRolledBackEventVM date rolled backerrorDisconnecting all hosts as the date of virtual machine {vm.name} has been rolled backVmDeployFailedEventCannot deploy VM from templateerrorFailed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmDeployFailedEvent"> <description> Failed to deploy a virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine failed to deploy. This condition can occur if there is not enough disk space, the host or virtual machine loses its network connection, the host is disconnected, and so on. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. 
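The vSphere HA descriptions above (VmDasBeingResetEvent, VmDasBeingResetWithScreenshotEvent) suggest lowering VM monitoring sensitivity so that transient heartbeat loss does not trigger resets. One possible way to do that programmatically is sketched below with pyVmomi, assuming 'cluster' is an existing vim.ClusterComputeResource; the interval values are illustrative only, not recommendations.

from pyVmomi import vim

def relax_vm_monitoring(cluster):
    """Reconfigure the cluster's default VM monitoring settings so
    vSphere HA waits longer after heartbeats stop before resetting
    a virtual machine. Values below are illustrative."""
    tools = vim.cluster.VmToolsMonitoringSettings(
        enabled=True,
        vmMonitoring='vmMonitoringOnly',
        failureInterval=120,       # seconds without heartbeats before acting
        minUpTime=480,
        maxFailures=3,
        maxFailureWindow=86400)
    das_config = vim.cluster.DasConfigInfo(
        defaultVmSettings=vim.cluster.DasVmSettings(
            vmToolsMonitoringSettings=tools))
    spec = vim.cluster.ConfigSpecEx(dasConfig=das_config)
    return cluster.ReconfigureComputeResource_Task(spec, modify=True)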
</action> </cause> </EventLongDescription> VmDeployedEventVM deployedinfoTemplate {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name} <EventLongDescription id="vim.event.VmDeployedEvent"> <description> A virtual machine has been created from the specified template </description> <cause> <description> A user action caused a virtual machine to be created from the template </description> </cause> <cause> <description> A scheduled task caused a virtual machine to be created from the template </description> </cause> </EventLongDescription> VmDisconnectedEventVM disconnectedinfo{vm.name} on host {host.name} is disconnected{vm.name} on host {host.name} is disconnected{vm.name} is disconnected{host.name} is disconnected{vm.name} on host {host.name} in {datacenter.name} is disconnectedVmDiscoveredEventVM discoveredinfoDiscovered {vm.name} on {host.name}Discovered {vm.name} on {host.name}Discovered {vm.name}Discovered on {host.name}Discovered {vm.name} on {host.name} in {datacenter.name}VmDiskFailedEventCannot create VM diskerrorCannot create virtual disk {disk} <EventLongDescription id="vim.event.VmDiskFailedEvent"> <description> Failed to create a virtual disk for the virtual machine for reasons described in the event message </description> <cause> <description> A virtual disk was not created for the virtual machine. This condition can occur if the operation failed to access the disk, the disk did not have enough space, you do not have permission for the operation, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that disk is accessible, has enough space, and that the permission settings allow the operation. </action> </cause> </EventLongDescription> VmEmigratingEventVM emigratinginfoMigrating {vm.name} off host {host.name}Migrating {vm.name} off host {host.name}Migrating {vm.name} off hostMigrating off host {host.name}Migrating {vm.name} off host {host.name} in {datacenter.name}VmEndRecordingEventEnd a recording sessioninfoEnd a recording sessionEnd a recording session on {vm.name}VmEndReplayingEventEnd a replay sessioninfoEnd a replay sessionEnd a replay session on {vm.name}VmEvent<VM Event>info<internal>VmFailedMigrateEventCannot migrate VMerrorCannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} to {destHost.name}, {destDatastore.name}Cannot migrate to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmFailedMigrateEvent"> <description> Failed to migrate the virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine did not migrate. This condition can occur if vMotion IPs are not configured, the source and destination hosts are not accessible, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that the vMotion IPs are configured on source and destination hosts, the hosts are accessible, and so on. 
</action> </cause> </EventLongDescription> VmFailedRelayoutEventCannot complete VM relayout.errorCannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name}: {reason.msg}Cannot complete relayout for this virtual machine on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedRelayoutEvent"> <description> Failed to lay out a virtual machine </description> <cause> <description> An attempt to lay out a virtual machine on disk failed for reasons described in the event message. This condition can occur for any of several reasons, for example inability to access the disk. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. </action> </cause> </EventLongDescription> VmFailedRelayoutOnVmfs2DatastoreEventCannot complete VM relayout on Vmfs2 datastoreerrorCannot complete relayout due to disks on a VMFS2 volumeCannot complete relayout for virtual machine {vm.name} which has disks on a VMFS2 volume. <EventLongDescription id="vim.event.VmFailedRelayoutOnVmfs2DatastoreEvent"> <description> Failed to migrate a virtual machine on VMFS2 datastore </description> <cause> <description> An attempt to migrate a virtual machine failed because the virtual machine still has disk(s) on a VMFS2 datastore. VMFS2 datastores are read-only for ESX 3.0 and later hosts. </description> <action> Upgrade the datastore(s) from VMFS2 to VMFS3 </action> </cause> </EventLongDescription> VmFailedStartingSecondaryEventvCenter cannot start the Fault Tolerance secondary VMerrorvCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason} <EventLongDescription id="vim.event.VmFailedStartingSecondaryEvent"> <description> vCenter Server could not start the Secondary VM because of an error </description> <cause> <description> The remote host is incompatible for Secondary VM. For instance, this condition can occur when the host does not have access to the virtual machine's network or datastore. </description> <action>Ensure that the hosts in the cluster are compatible for FT</action> </cause> <cause> <description>Login to a remote host failed. If the host has been newly added to the inventory or just rebooted, it might take some time for SSL thumbprints to be propagated to the hosts. 
</description> <action>If the problem persists, disconnect and re-connect the host.</action> </cause> <cause> <description>Registration of the Secondary VM on the remote host failed</description> <action>Determine whether the remote host has access to the datastore that the FT virtual machine resides on</action> </cause> <cause> <description>An error occurred while starting the Secondary VM</description> <action>Determine the cause of the migration error. vCenter Server will try to restart the Secondary VM if it can.</action> </cause> </EventLongDescription> VmFailedToPowerOffEventCannot power off the VM.errorCannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name}. {reason.msg}Cannot power off: {reason.msg}Cannot power off {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOffEvent"> <description>The virtual machine failed to power off</description> <cause> <description> The virtual machine might be performing concurrent operations </description> <action>Complete the concurrent operations and retry the power-off operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility. </description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToPowerOnEventCannot power on the VM.errorCannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOnEvent"> <description> The virtual machine failed to power on </description> <cause> <description> Virtual machine power-on attempts can fail because the virtual machine is already in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the power-on failure and fix the problem. </action> </cause> </EventLongDescription> VmFailedToRebootGuestEventVM cannot reboot the guest OS.errorCannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot the guest OS for {vm.name} on {host.name} in {datacenter.name}. {reason.msg} <EventLongDescription id="vim.event.VmFailedToRebootGuestEvent"> <description> The guest operating system on the virtual machine failed to reboot. </description> <cause> <description> Guest operating system reboot failures can occur because the virtual machine is not in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the reboot failure and fix the problem. 
</action> </cause> </EventLongDescription> VmFailedToResetEventCannot reset VMerrorCannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name}: {reason.msg}Cannot suspend: {reason.msg}Cannot suspend {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToResetEvent"> <description>The virtual machine failed to reset</description> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere client and respond to the question or prompt </action> </cause> <cause> <description>There might not be enough available licenses to perform this operation.</description> <action> Obtain the required licenses and retry the reset operation </action> </cause> <cause> <description> Concurrent operations might be executing on the virtual machine </description> <action>Complete the concurrent operations and retry the reset operation</action> </cause> <cause> <description> The host on which the virtual machine is running is entering maintenance mode </description> <action> Wait until the host exits maintenance mode and retry the operation </action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToShutdownGuestEventCannot shut down the guest OSerrorCannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}{vm.name} cannot shut down the guest OS on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToShutdownGuestEvent"> <description> Guest operating system shutdown failed for the virtual machine </description> <cause> <description> Guest operating system shutdown can fail if VMware Tools is not installed in the virtual machine. </description> <action>Install VMware Tools.</action> </cause> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere Client and respond to the question or prompt </action> </cause> <cause> <description> Concurrent operations might be running on the virtual machine </description> <action>Complete the concurrent operations and retry the shutdown operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToStandbyGuestEventVM cannot standby the guest OSerrorCannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. 
{reason.msg}{vm.name} cannot standby the guest OS on {host.name} in {datacenter.name}: {reason.msg}VmFailedToSuspendEventCannot suspend VMerrorCannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name}: {reason.msg}Cannot suspend: {reason.msg}Cannot suspend {vm.name} on {host.name} in {datacenter.name}: {reason.msg}VmFailedUpdatingSecondaryConfigvCenter cannot update the Fault Tolerance secondary VM configurationerrorvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name}vCenter cannot update the Fault Tolerance secondary VM configurationvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFailedUpdatingSecondaryConfig"> <description> After a failover, the new Primary VM failed to update the configuration of the Secondary VM </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFailoverFailedvSphere HA virtual machine failover unsuccessfulwarningvSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over this virtual machine. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg} <EventLongDescription id="vim.event.VmFailoverFailed"> <description> vSphere HA did not failover this virtual machine. The event includes the details of the fault that was generated when vSphere HA attempted the failover. vSphere HA will retry the failover on another host unless the maximum number of failover attempts have been exceeded. In many cases, the retry will succeed. </description> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It may also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to failover the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or trying powering on the virtual machine and investigate any returned errors. 
</action> <action> If the error reports that a file is locked, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. If any hosts have been declared dead, investigate whether a networking/storage issue may be the cause. </action> <action> If, however, the error reports that the virtual machine is in an invalid state, there may be an in-progress operation that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation that is taking a long time to complete. </action> </cause> </EventLongDescription> VmFaultToleranceStateChangedEventVM Fault Tolerance state changedinfoFault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state on {vm.name} on host {host.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState} <EventLongDescription id="vim.event.VmFaultToleranceStateChangedEvent"> <description> The Fault Tolerance state of the virtual machine changed </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceTurnedOffEventVM Fault Tolerance turned offinfoFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name}Fault Tolerance protection has been turned off for {vm.name} on host {host.name}Fault Tolerance protection has been turned off for {vm.name}Fault Tolerance protection has been turned off for this virtual machineFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFaultToleranceTurnedOffEvent"> <description> All Secondary VMs have been removed and Fault Tolerance protection is turned off for this virtual machine. </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceVmTerminatedEventFault Tolerance VM terminatedinfoThe Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} has been terminated. 
{reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason} <EventLongDescription id="vim.event.VmFaultToleranceVmTerminatedEvent"> <description> A Primary VM or Secondary VM became inactive </description> <cause> <description> The Secondary VM became inactive because its operations are no longer synchronized with those of the Primary VM</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Secondary VM became inactive because a hardware or network failure caused the Primary VM to lose the Primary-to-Secondary connection</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Fault Tolerant VM became inactive due to a partial hardware failure on the physical host</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> A user stopped the Fault Tolerant VM</description> <action>The remaining Fault Tolerant VM takes over as the Primary VM. vSphere HA will attempt to restart the Secondary VM.</action> </cause> </EventLongDescription> VmGuestOSCrashedEventGuest operating system crashederror{vm.name} on {host.name}: Guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.{vm.name}: Guest operating system has crashed.This virtual machine's guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.VmGuestRebootEventGuest rebootinfoGuest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name}Guest OS rebootGuest OS reboot for {vm.name} on {host.name} in {datacenter.name}VmGuestShutdownEventGuest OS shut downinfoGuest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name}Guest OS shut downGuest OS shut down for {vm.name} on {host.name} in {datacenter.name}VmGuestStandbyEventGuest standbyinfoGuest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name}Guest OS standbyGuest OS standby for {vm.name} on {host.name} in {datacenter.name}VmHealthMonitoringStateChangedEventvSphere HA VM monitoring state changedinfovSphere HA VM monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'VmInstanceUuidAssignedEventAssign a new instance UUIDinfoAssign a new instance UUID ({instanceUuid})Assign a new instance UUID ({instanceUuid}) to {vm.name} <EventLongDescription id="vim.event.VmInstanceUuidAssignedEvent"> <description>The virtual machine was assigned a new vCenter Server-specific instance UUID </description> <cause> <description> The user who created the virtual machine did not specify a vCenter Server-specific instance UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmInstanceUuidChangedEventInstance UUID ChangedinfoThe instance UUID has been changed from ({oldInstanceUuid}) to ({newInstanceUuid})The instance UUID of {vm.name} has been changed from ({oldInstanceUuid}) to ({newInstanceUuid}) <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine has changed </description> <cause> <description> A user action resulted in a change to the vCenter Server-specific instance UUID of the virtual machine </description> </cause> <cause> <description> vCenter Server changed the instance UUID of the virtual machine because it detected a conflict </description> </cause> </EventLongDescription> VmInstanceUuidConflictEventInstance UUIDs conflicterrorThe instance UUID ({instanceUuid}) conflicts with the instance UUID assigned to {conflictedVm.name}The instance UUID ({instanceUuid}) of {vm.name} conflicts with the instance UUID assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine conflicted with that of another virtual machine. </description> <cause> <description> Virtual machine instance UUID conflicts can occur if you copy virtual machine files manually without using vCenter Server. </description> </cause> </EventLongDescription> VmMacAssignedEventVM MAC assignedinfoNew MAC address ({mac}) assigned to adapter {adapter}New MAC address ({mac}) assigned to adapter {adapter} for {vm.name}VmMacChangedEventVM MAC changedwarningChanged MAC address from {oldMac} to {newMac} for adapter {adapter}Changed MAC address from {oldMac} to {newMac} for adapter {adapter} for {vm.name} <EventLongDescription id="vim.event.VmMacChangedEvent"> <description> The virtual machine MAC address has changed </description> <cause> <description> A user action changed the virtual machine MAC address </description> </cause> <cause> <description> vCenter changed the virtual machine MAC address because it detected a MAC address conflict </description> </cause> </EventLongDescription> VmMacConflictEventVM MAC conflicterrorThe MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmMacConflictEvent"> <description> The virtual machine MAC address conflicts with that of another virtual machine </description> <cause> <description> This virtual machine's MAC address is the same as that of another virtual machine. Refer to the event details for more information on the virtual machine that caused the conflict. 
</description> </cause> </EventLongDescription> VmMaxFTRestartCountReachedvSphere HA reached maximum Secondary VM restart count.warningvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxFTRestartCountReached"> <description> The system reached the maximum restart limit in its attempt to restart a Secondary VM </description> <cause> <description>The system exceeded the number of allowed restart attempts for the Secondary VM when it tried to reestablish Fault Tolerance</description> <action>Check the causes for the restart failures and fix them. Then disable and re-enable Fault Tolerance protection.</action> </cause> </EventLongDescription> VmMaxRestartCountReachedvSphere HA reached maximum VM restart countwarningvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart this VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxRestartCountReached"> <description> vSphere HA has reached the maximum number of failover attempts for this virtual machine and has not been able to restart it. No further failover attempts will be made. By default vSphere HA attempts to failover a virtual machine 5 times. </description> <cause> <description> Failover can fail for a number of reasons including that the configuration file of the virtual machine is corrupt or one or more of the virtual machine's datastores are not accessible by any host in the cluster due to an all paths down condition. In addition, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. </description> <action> To determine why previous failover attempts failed, search the events that are logged for the VM for occurrences of the event vSphere HA reports when a failover fails. These events will report the reason for the failed failover. vSphere HA events can be located by searching for the phrase 'vSphere HA'. To determine whether any issues still exist, try to manually power on the virtual machine. If power-on fails, investigate the error that is returned. But, if the power-on remains pending for a long time, investigate whether an all paths down condition exists. Also, if any hosts have been declared dead, investigate whether a networking or storage issue may be the cause. 
</action> </cause> </EventLongDescription> VmMessageErrorEventVM error messageerrorError message on {vm.name} on {host.name}: {message}Error message on {vm.name} on {host.name}: {message}Error message on {vm.name}: {message}Error message from {host.name}: {message}Error message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageErrorEvent"> <description> An error message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this error occurred </description> </cause> </EventLongDescription> VmMessageEventVM information messageinfoMessage on {vm.name} on {host.name}: {message}Message on {vm.name} on {host.name}: {message}Message on {vm.name}: {message}Message from {host.name}: {message}Message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageEvent"> <description> An information message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on the messages from the virtual machine </description> </cause> </EventLongDescription> VmMessageWarningEventVM warning messagewarningWarning message on {vm.name} on {host.name}: {message}Warning message on {vm.name} on {host.name}: {message}Warning message on {vm.name}: {message}Warning message from {host.name}: {message}Warning message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageWarningEvent"> <description> A warning message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this warning was issued </description> </cause> </EventLongDescription> VmMigratedEventVM migratedinfoVirtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmMigratedEvent"> <description> The virtual machine's host was changed successfully </description> <cause> <description> A user action caused the virtual machine to be successfully migrated to a different host </description> </cause> </EventLongDescription> VmNoCompatibleHostForSecondaryEventNo compatible host for the Fault Tolerance secondary VMerrorNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name}No compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name}No compatible host for the Fault Tolerance secondary VM {vm.name}No compatible host for the Fault Tolerance secondary VMNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription 
id="vim.event.VmNoCompatibleHostForSecondaryEvent"> <description> No compatible host was found when trying to place a Secondary VM </description> <cause> <description>There was no compatible host available to place a Secondary VM</description> <action>Resolve the incompatibilities and retry the operation</action> </cause> </EventLongDescription> VmNoNetworkAccessEventVM No Network AccesswarningNot all networks are accessible by {destHost.name}Not all networks for {vm.name} are accessible by {destHost.name}VmOrphanedEventVM orphanedwarning{vm.name} does not exist on {host.name}{vm.name} does not exist on {host.name}{vm.name} does not existVirtual machine does not exist on {host.name}{vm.name} does not exist on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmOrphanedEvent"> <description> The virtual machine does not exist on the host with which it is associated </description> <cause> <description> The virtual machine was deleted while its host was disconnected from vCenter Server. </description> </cause> </EventLongDescription> VmPowerOffOnIsolationEventvSphere HA powered off VM on isolated hostinfovSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off this virtual machine on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmPowerOffOnIsolationEvent"> <description> vSphere HA powered off this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmPoweredOffEventVM powered offinfo{vm.name} on {host.name} is powered off{vm.name} on {host.name} is powered off{vm.name} is powered offVirtual machine on {host.name} is powered off{vm.name} on {host.name} in {datacenter.name} is powered offVmPoweredOnEventVM powered oninfo{vm.name} on {host.name} has powered on{vm.name} on {host.name} has powered on{vm.name} has powered onVirtual machine on {host.name} has powered on{vm.name} on {host.name} in {datacenter.name} has powered onVmPoweringOnWithCustomizedDVPortEventVirtual machine powered on with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.infoVirtual machine powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.Virtual machine {vm.name} powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.VmPrimaryFailoverEventFault Tolerance VM failovererrorFault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name} in {datacenter.name}. 
{reason.@enum.VirtualMachine.NeedSecondaryReason}VmReconfiguredEventVM reconfiguredinfoReconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured virtual machine.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name} in {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}VmRegisteredEventVM registeredinfoRegistered {vm.name} on {host.name}Registered {vm.name} on {host.name} in {datacenter.name}Registered {vm.name}Registered VM on {host.name} in {datacenter.name}Registered {vm.name} on {host.name} in {datacenter.name}VmRelayoutSuccessfulEventVM relayout completedinfoRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} completedRelayout of the virtual machine completedRelayout of {vm.name} on {host.name} in {datacenter.name} completedVmRelayoutUpToDateEventVM relayout up-to-dateinfo{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} is in the correct format and relayout is not necessaryIn the correct format and relayout is not necessary{vm.name} on {host.name} in {datacenter.name} is in the correct format and relayout is not necessaryVmReloadFromPathEventVirtual machine reloaded from pathinfo{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} reloaded from new configuration {configPath}.Virtual machine on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.VmReloadFromPathFailedEventVirtual machine not reloaded from patherror{vm.name} on {host.name} could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from path {configPath}.{vm.name} could not be reloaded from {configPath}.This virtual machine could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from {configPath}. <EventLongDescription id="vim.event.VmReloadFromPathFailedEvent"> <description> Reloading the virtual machine from a new datastore path failed </description> <cause> <description>The destination datastore path was inaccessible or invalid </description> <action>Use a valid destination datastore path </action> </cause> <cause> <description>The virtual machine is in an invalid state </description> <action>Check the virtual machine's power state. 
If the virtual machine is powered on, power it off </action> </cause> <cause> <description>The virtual machine is enabled for Fault Tolerance </description> <action>Disable Fault Tolerance for the virtual machine and retry the operation </action> </cause> </EventLongDescription> VmRelocateFailedEventFailed to relocate VMerrorFailed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmRelocateFailedEvent"> <description> Virtual machine relocation to a different host or datastore failed </description> <cause> <description> Virtual machine relocation can fail for a number of reasons, including network outages, insufficient disk space, and so on </description> <action> Consider the task related to this event, evaluate the failure reason, and take action accordingly </action> </cause> </EventLongDescription> VmRelocateSpecEvent<VM Relocate Spec Event>info<internal><internal><internal><internal><internal>VmRelocatedEventVM relocatedinfoVirtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmRelocatedEvent"> <description> The virtual machine execution and/or storage was successfully relocated </description> <cause> <description> A user action caused the virtual machine's execution and/or storage to be successfully changed </description> </cause> </EventLongDescription> VmRemoteConsoleConnectedEventVM remote console connectedinfoRemote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name}Remote console connectedRemote console connected to {vm.name} on host {host.name}VmRemoteConsoleDisconnectedEventVM remote console disconnectedinfoRemote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name}Remote console connectedRemote console disconnected from {vm.name} on host {host.name}VmRemovedEventVM removedinfoRemoved {vm.name} on {host.name}Removed {vm.name} on {host.name}Removed {vm.name}RemovedRemoved {vm.name} on {host.name} from {datacenter.name}VmRenamedEventVM renamedwarningRenamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName} in 
{datacenter.name}VmRequirementsExceedCurrentEVCModeEventVirtual machine is using features that exceed the capabilities of the host's current EVC mode.warningFeature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of this host's current EVC mode.Feature requirements of this virtual machine exceed capabilities of this host's current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.VmResettingEventVM resettinginfo{vm.name} on {host.name} is reset{vm.name} on {host.name} is reset{vm.name} is resetVirtual machine on {host.name} is reset{vm.name} on {host.name} in {datacenter.name} is resetVmResourcePoolMovedEventVM resource pool movedinfoMoved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name} in {datacenter.name}VmResourceReallocatedEventVM resource reallocatedinfoResource allocation changed
Modified:
{configChanges.modified}Changed resource allocation for {vm.name}
Modified:
{configChanges.modified}VmRestartedOnAlternateHostEventVM restarted on alternate hostinfoVirtual machine {vm.name} was restarted on this host since {sourceHost.name} failedVirtual machine was restarted on {host.name} since {sourceHost.name} failedVirtual machine {vm.name} was restarted on {host.name} since {sourceHost.name} failedVmResumingEventVM resuminginfo{vm.name} on {host.name} is resuming{vm.name} on {host.name} is resuming{vm.name} is resumingVirtual machine on {host.name} is resuming{vm.name} on {host.name} in {datacenter.name} is resumingVmSecondaryAddedEventFault Tolerance secondary VM addedinfoA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name}A Fault Tolerance secondary VM has been added for {vm.name} on host {host.name}A Fault Tolerance secondary VM has been added for {vm.name}A Fault Tolerance secondary VM has been added for this VMA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryDisabledBySystemEventvCenter disabled Fault ToleranceerrorvCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the Secondary VM could not be powered On. <EventLongDescription id="vim.event.VmSecondaryDisabledBySystemEvent"> <description> vCenter Server disabled a Secondary VM because it could not power on the Secondary VM </description> <cause> <description>vCenter Server failed to power on the Secondary VM </description> <action>Check the reason in the event message for more details, fix the failure, and re-enable Fault Tolerance protection to power on the Secondary VM.</action> </cause> </EventLongDescription> VmSecondaryDisabledEventDisabled Fault Tolerance secondary VMinfoDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Disabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Disabled Fault Tolerance secondary VM for {vm.name}Disabled Fault Tolerance secondary VM for this virtual machineDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryEnabledEventEnabled Fault Tolerance secondary VMinfoEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Enabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Enabled Fault Tolerance secondary VM for {vm.name}Enabled Fault Tolerance secondary VM for this VMEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryStartedEventStarted Fault Tolerance secondary VMinfoStarted Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Started Fault Tolerance secondary VM for {vm.name} on host {host.name}Started Fault Tolerance secondary VM for {vm.name}Started Fault Tolerance secondary VM for this virtual machineStarted Fault Tolerance secondary VM for {vm.name} 
on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmShutdownOnIsolationEventvSphere HA shut down VM on isolated hostinfovSphere HA shut down {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down this virtual machine on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation} <EventLongDescription id="vim.event.VmShutdownOnIsolationEvent"> <description> vSphere HA shut down this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmStartRecordingEventStart a recording sessioninfoStart a recording sessionStart a recording session on {vm.name}VmStartReplayingEventStart a replay sessioninfoStart a replay sessionStart a replay session on {vm.name}VmStartingEventVM startinginfo{vm.name} on {host.name} is starting{vm.name} on {host.name} is starting{vm.name} is startingVirtual machine is starting{vm.name} on {host.name} in {datacenter.name} is startingVmStartingSecondaryEventStarting Fault Tolerance secondary VMinfoStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Starting Fault Tolerance secondary VM for {vm.name} on host {host.name} in clusterStarting Fault Tolerance secondary VM for {vm.name}Starting Fault Tolerance secondary VM for this virtual machineStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmStaticMacConflictEventVM static MAC conflicterrorThe static MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The static MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name}VmStoppingEventVM stoppinginfo{vm.name} on {host.name} is stopping{vm.name} on {host.name} is stopping{vm.name} is stoppingVirtual machine is stopping{vm.name} on {host.name} in {datacenter.name} is stoppingVmSuspendedEventVM suspendedinfo{vm.name} on {host.name} is suspended{vm.name} on {host.name} is suspended{vm.name} is suspendedVirtual machine is suspended{vm.name} on {host.name} in {datacenter.name} is suspendedVmSuspendingEventVM being suspendedinfo{vm.name} on {host.name} is being suspended{vm.name} on {host.name} is being suspended{vm.name} is being suspendedVirtual machine is being suspended{vm.name} on {host.name} in {datacenter.name} is being suspendedVmTimedoutStartingSecondaryEventStarting the Fault Tolerance secondary VM timed outerrorStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in 
{datacenter.name} timed out within {timeout} ms <EventLongDescription id="vim.event.VmTimedoutStartingSecondaryEvent"> <description> An attempt to start a Secondary VM timed out. </description> <cause> <description>A user attempted to turn on or enable Fault Tolerance, triggering the start of the Secondary VM. The start operation timed out and, as a result, vCenter Server disables Fault Tolerance. </description> <action>Fix any problems and re-enable Fault Tolerance protection</action> </cause> <cause> <description>The secondary VM was started in response to a failure, but the start attempt timed out</description> <action> vSphere HA will attempt to power on the Secondary VM</action> </cause> </EventLongDescription> VmUnsupportedStartingEventVM unsupported guest OS is startingwarningUnsupported guest OS {guestId} for {vm.name}Unsupported guest OS {guestId} for {vm.name} on {host.name}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name}Unsupported guest OS {guestId}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUnsupportedStartingEvent"> <description> Attempting to power on a virtual machine that has an unsupported guest operating system </description> <cause> <description> A user action initiated a virtual machine power-on operation, but the virtual machine has an unsupported guest operating system. </description> </cause> </EventLongDescription> VmUpgradeCompleteEventVM upgrade completeinfoVirtual machine compatibility upgraded to {version.@enum.vm.hwVersion}VmUpgradeFailedEventCannot upgrade VMerrorCannot upgrade virtual machine compatibility.VmUpgradingEventUpgrading VMinfoUpgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} in {datacenter.name} to {version.@enum.vm.hwVersion} <EventLongDescription id="vim.event.VmUpgradingEvent"> <description>The virtual hardware on this virtual machine is being upgraded</description> <cause> <description>A user-initiated action triggered an upgrade of the virtual machine hardware</description> </cause> <cause> <description>A scheduled task started an upgrade of the virtual machine hardware</description> </cause> </EventLongDescription> VmUuidAssignedEventVM UUID assignedinfoAssigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name}Assigned new BIOS UUID ({uuid})Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidAssignedEvent"> <description>The virtual machine was assigned a new BIOS UUID</description> <cause> <description>The user who created the virtual machine did not specify a BIOS UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmUuidChangedEventVM UUID ChangedwarningChanged BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name}BIOS UUID was changed from {oldUuid} to {newUuid}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidChangedEvent"> <description>The virtual machine BIOS UUID has changed</description> <cause> <description> A user changed the virtual machine BIOS UUID directly on the host </description> </cause> </EventLongDescription> VmUuidConflictEventVM UUID ConflicterrorBIOS ID ({uuid}) conflicts with that of {conflictedVm.name}BIOS ID ({uuid}) of {vm.name} conflicts with that of {conflictedVm.name}VmVnicPoolReservationViolationClearEventVirtual NIC Network Resource Pool Reservation Violation Clear eventinfoThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedVmVnicPoolReservationViolationRaiseEventVirtual NIC Network Resource Pool Reservation Violation eventinfoThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedVmWwnAssignedEventVM WWN assignedinfoNew WWNs assignedNew WWNs assigned to {vm.name} <EventLongDescription id="vim.event.VmWwnAssignedEvent"> <description> The virtual machine was assigned a WWN (World Wide Name) </description> <cause> <description>The virtual machine was assigned a WWN because it was created with an RDM (Raw Device Mappings) disk or was reconfigured to access an RDM disk </description> </cause> </EventLongDescription> VmWwnChangedEventVM WWN changedwarningWWNs are changedWWNs are changed for {vm.name} <EventLongDescription id="vim.event.VmWwnChangedEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine was changed </description> <cause> <description>The virtual machine was assigned a new WWN, possibly due to a conflict caused by another virtual machine being assigned the same WWN </description> </cause> </EventLongDescription> VmWwnConflictEventVM WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {vm.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.VmWwnConflictEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine has a conflict </description> <cause> <description>The WWN assigned to this virtual machine was the same as that of a different virtual machine. </description> <action> Check the event details for more information on the conflict and correct the problem. </action> </cause> </EventLongDescription> WarningUpgradeEventWarning upgradewarning{message}IScsiBootFailureEventBoot from iSCSI failed.warningBooting from iSCSI failed.Booting from iSCSI failed with an error. See the VMware Knowledge Base for information on configuring iBFT networking.EventExLost Network Connectivityerrorvprob.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. 
Affected portgroups:{3}.EventExNo IPv6 TSO supporterrorvprob.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid vmknic specified in /Migrate/Vmknicwarningvprob.net.migrate.bindtovmk|The ESX advanced config option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Please update the config option with a valid vmknic or, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExVirtual NIC connection to switch failedwarningvprob.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningvprob.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. {3} uplinks still up. Affected portgroups:{4}.EventExLost Network Redundancywarningvprob.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExThin Provisioned Device Nearing Capacitywarningvprob.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold.EventExLost Storage Connectivityerrorvprob.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExDegraded Storage Path Redundancywarningvprob.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. {3} remaining active paths. Affected datastores: {4}.EventExLost Storage Path Redundancywarningvprob.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExVMFS Locked By Remote Hosterrorvprob.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.errorvprob.vmfs.extent.offline|An attached device {1} might be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system is online.infovprob.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Volume Connectivity Restoredinfovprob.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfovprob.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterrorvprob.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerrorvprob.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Opening volume in read-only metadata mode with limited write support.EventExVMFS Lock Corruption Detectederrorvprob.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). 
Other regions of the volume may be damaged too.EventExLost connection to NFS servererrorvprob.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExRestored connection to NFS serverinfovprob.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExVMFS Resource Corruption Detectederrorvprob.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExCopied Library Iteminfocom.vmware.cl.CopyLibraryItemEvent|Copied Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}({sourceLibraryItemId}), source Library {sourceLibraryName}.EventExFailed to copy Library Itemerrorcom.vmware.cl.CopyLibraryItemFailEvent|Failed to copy Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}, source Library {sourceLibraryName}.EventExCreated Libraryinfocom.vmware.cl.CreateLibraryEvent|Created Library {libraryName}EventExFailed to create Libraryerrorcom.vmware.cl.CreateLibraryFailEvent|Failed to create Library {libraryName}EventExCreated Library Iteminfocom.vmware.cl.CreateLibraryItemEvent|Created Library Item {libraryItemName} in Library {libraryName}.EventExFailed to create Library Itemerrorcom.vmware.cl.CreateLibraryItemFailEvent|Failed to create Library Item {libraryItemName} in Library {libraryName}.EventExDeleted Libraryinfocom.vmware.cl.DeleteLibraryEvent|Deleted Library {libraryName}EventExFailed to delete Libraryerrorcom.vmware.cl.DeleteLibraryFailEvent|Failed to delete Library {libraryName}EventExDeleted Library Iteminfocom.vmware.cl.DeleteLibraryItemEvent|Deleted Library Item {libraryItemName} in Library {libraryName}.EventExFailed to delete Library Itemerrorcom.vmware.cl.DeleteLibraryItemFailEvent|Failed to delete Library Item {libraryItemName} in Library {libraryName}.EventExPublished Libraryinfocom.vmware.cl.PublishLibraryEvent|Published Library {libraryName}EventExFailed to publish Libraryerrorcom.vmware.cl.PublishLibraryFailEvent|Failed to publish Library {libraryName}EventExPublished Library Iteminfocom.vmware.cl.PublishLibraryItemEvent|Published Library Item {libraryItemName} in Library {libraryName}EventExFailed to publish Library Itemerrorcom.vmware.cl.PublishLibraryItemFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName}EventExPublished Library Item to Subscriptioninfocom.vmware.cl.PublishLibraryItemSubscriptionEvent|Published Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library Item to Subscriptionerrorcom.vmware.cl.PublishLibraryItemSubscriptionFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExPublished Library to Subscriptioninfocom.vmware.cl.PublishLibrarySubscriptionEvent|Published Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library to Subscriptionerrorcom.vmware.cl.PublishLibrarySubscriptionFailEvent|Failed to publish Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExCreated 
Subscriptioninfocom.vmware.cl.SubscriptionCreateEvent|Created subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to create Subscriptionerrorcom.vmware.cl.SubscriptionCreateFailEvent|Failed to create subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExDeleted Subscriptioninfocom.vmware.cl.SubscriptionDeleteEvent|Deleted subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to delete Subscriptionerrorcom.vmware.cl.SubscriptionDeleteFailEvent|Failed to delete subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExUpdated Subscriptioninfocom.vmware.cl.SubscriptionUpdateEvent|Updated subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to update Subscriptionerrorcom.vmware.cl.SubscriptionUpdateFailEvent|Failed to update subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExSynchronized Libraryinfocom.vmware.cl.SyncLibraryEvent|Synchronized Library {libraryName}EventExFailed to Synchronize Libraryerrorcom.vmware.cl.SyncLibraryFailEvent|Failed to Synchronize Library {libraryName}EventExSynchronized Library Iteminfocom.vmware.cl.SyncLibraryItemEvent|Synchronized Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Itemerrorcom.vmware.cl.SyncLibraryItemFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Iteminfocom.vmware.cl.SyncNfcFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}. 
Failure may be due to a network error or a host entering maintenance mode.EventExUpdated Libraryinfocom.vmware.cl.UpdateLibraryEvent|Updated Library {libraryName}EventExFailed to update Libraryerrorcom.vmware.cl.UpdateLibraryFailEvent|Failed to update Library {libraryName}EventExUpdated Library Iteminfocom.vmware.cl.UpdateLibraryItemEvent|Updated Library Item {libraryItemName} in Library {libraryName}.EventExFailed to update Library Itemerrorcom.vmware.cl.UpdateLibraryItemFailEvent|Failed to update Library Item {libraryItemName} in Library {libraryName}.EventExCould not locate Library Item file on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryItemFileOnRestoreEvent|File '{fileName}' in Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library Item folder on the storage backing after restorecom.vmware.cl.restore.DeletedLibraryItemOnRestoreEvent|Folder for Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library folder on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryOnRestoreEvent|Library '{libraryName}' folder could not be located on the storage backing after restoreEventExCould not locate Library Item content after restorecom.vmware.cl.restore.MissingLibraryItemContentOnRestoreEvent|The content of Library Item '{libraryItemName}' could not be located on storage after restoreEventExNew Library Item file found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemFileOnRestoreEvent|New Library Item file '{fileName}' found on the storage backing for Library Item '{libraryItemName}' after restore. Path to the file on storage: '{filePath}'EventExNew Library Item folder found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemOnRestoreEvent|New Library Item folder '{itemFolderName}' found on the storage backing for Library '{libraryName}' after restore. 
Path to the item folder on storage: '{itemFolderPath}'ExtendedEventCancel LWD snapshotinfoCancelling LWD snapshotcom.vmware.dp.events.cancelsnapshot|Cancelling LWD snapshotExtendedEventLWD snapshot is cancelledinfoLWD snapshot is cancelledcom.vmware.dp.events.cancelsnapshotdone|LWD snapshot is cancelledExtendedEventFailed to cancel LWD snapshoterrorFailed to cancel LWD snapshotcom.vmware.dp.events.cancelsnapshotfailed|Failed to cancel LWD snapshotExtendedEventPerform 'commit' phase of LWD-based restoreinfoPerforming 'commit' phase of LWD-based restorecom.vmware.dp.events.commitrestore|Performing 'commit' phase of LWD-based restoreExtendedEvent'commit' phase of LWD-based restore is completedinfo'commit' phase of LWD-based restore is completedcom.vmware.dp.events.commitrestoredone|'commit' phase of LWD-based restore is completedExtendedEvent'commit' phase of LWD-based restore failederror'commit' phase of LWD-based restore failedcom.vmware.dp.events.commitrestorefailed|'commit' phase of LWD-based restore failedExtendedEventEnabling protection services on hosts in the clusterinfoEnabling protection services on hosts in the clusterEnabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservices|Enabling protection services on hosts in the clusterExtendedEventFinished enabling protection services on hosts in the clusterinfoFinished enabling protection services on hosts in the clusterFinished enabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesdone|Finished enabling protection services on hosts in the clusterExtendedEventFailed to enable protection services on hosts in the clustererrorFailed to enable protection services on hosts in the clusterFailed to enable protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesfailed|Failed to enable protection services on hosts in the clusterExtendedEventPerform 'prepare' phase of LWD-based restoreinfoPerforming 'prepare' phase of LWD-based restorecom.vmware.dp.events.preparerestore|Perform 'prepare' phase of LWD restoreExtendedEvent'prepare' phase of LWD-based restore is completedinfo'prepare' phase of LWD-based restore is completedcom.vmware.dp.events.preparerestoredone|'prepare' phase of LWD-based restore is completedExtendedEvent'prepare' phase of LWD-based restore failederror'prepare' phase of LWD-based restore failedcom.vmware.dp.events.preparerestorefailed|'prepare' phase of LWD-based restore failedExtendedEventEnable LWD data protectioninfoEnabling LWD data protectioncom.vmware.dp.events.protect|Enabling LWD data protectionExtendedEventLWD data protection enabledinfoLWD data protection enabledcom.vmware.dp.events.protectdone|LWD data protection enabledExtendedEventFailed to enable LWD data protectionerrorFailed to enable LWD data protectioncom.vmware.dp.events.protectfailed|Failed to enable LWD data protectionExtendedEventQuerying entity for protection infoinfoQuerying entity for protection infocom.vmware.dp.events.queryprotectedentityinfo|Querying entity for protection infoExtendedEventFinished querying entity for protection infoinfoFinished querying entity for protection infocom.vmware.dp.events.queryprotectedentityinfodone|Finished querying entity for protection infoExtendedEventFailed to query entity for protection infoerrorFailed to query entity for protection infocom.vmware.dp.events.queryprotectedentityinfofailed|Failed to query entity for protection infoExtendedEventRetire LWD snapshotinfoRetiring LWD 
snapshotcom.vmware.dp.events.retiresnapshot|Retiring LWD snapshotExtendedEventLWD snapshot is retiredinfoLWD snapshot is retiredcom.vmware.dp.events.retiresnapshotdone|LWD snapshot is retiredExtendedEventFailed to retire LWD snapshoterrorFailed to retire LWD snapshotcom.vmware.dp.events.retiresnapshotfailed|Failed to retire LWD snapshotExtendedEventTake LWD application-consistent snapshotinfoTaking LWD application-consistent snapshotcom.vmware.dp.events.snapshot.applicationconsistent|Taking LWD application-consistent snapshotExtendedEventTake LWD crash-consistent snapshotinfoTaking LWD crash-consistent snapshotcom.vmware.dp.events.snapshot.crashconsistent|Taking LWD crash-consistent snapshotExtendedEventTake LWD metadata-only snapshotinfoTaking LWD metadata-only snapshotcom.vmware.dp.events.snapshot.metadataonly|Taking LWD metadata-only snapshotExtendedEventTake LWD VSS application-consistent snapshotinfoTaking LWD VSS application-consistent snapshotcom.vmware.dp.events.snapshot.vssappconsistent|Taking LWD VSS application-consistent snapshotExtendedEventLWD application-consistent snapshot takeninfoLWD application-consistent snapshot takencom.vmware.dp.events.snapshotdone.applicationconsistent|LWD application-consistent snapshot takenExtendedEventLWD crash-consistent snapshot takeninfoLWD crash-consistent snapshot takencom.vmware.dp.events.snapshotdone.crashconsistent|LWD crash-consistent snapshot takenExtendedEventLWD metadata-only snapshot takeninfoLWD metadata-only snapshot takencom.vmware.dp.events.snapshotdone.metadataonly|LWD metadata-only snapshot takenExtendedEventLWD VSS application-consistent snapshot takeninfoLWD VSS application-consistent snapshot takencom.vmware.dp.events.snapshotdone.vssappconsistent|LWD VSS application-consistent snapshot takenExtendedEventLWD application-consistent snapshot failederrorLWD application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.applicationconsistent|LWD application-consistent snapshot failedExtendedEventLWD crash-consistent snapshot failederrorLWD crash-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.crashconsistent|LWD crash-consistent snapshot failedExtendedEventLWD metadata-only snapshot failederrorLWD metadata-only snapshot failedcom.vmware.dp.events.snapshotfailed.metadataonly|LWD metadata-only snapshot failedExtendedEventLWD VSS application-consistent snapshot failederrorLWD VSS application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.vssappconsistent|LWD VSS application-consistent snapshot failedExtendedEventPerform LWD snapshot syncinfoPerforming LWD snapshot synccom.vmware.dp.events.sync|Performing LWD snapshot syncExtendedEventLWD snapshot sync is completedinfoLWD snapshot sync is completedcom.vmware.dp.events.syncdone|LWD snapshot sync is completedExtendedEventLWD snapshot sync failederrorLWD snapshot sync failedcom.vmware.dp.events.syncfailed|LWD snapshot sync failedExtendedEventDisable LWD data protectioninfoDisabling LWD data protectioncom.vmware.dp.events.unprotect|Disabling LWD data protectionExtendedEventLWD data protection disabledinfoLWD data protection disabledcom.vmware.dp.events.unprotectdone|LWD data protection disabledExtendedEventFailed to disable LWD data protectionerrorFailed to disable LWD data protectioncom.vmware.dp.events.unprotectfailed|Failed to disable LWD data protectionEventExDeployed entity from Content Libraryinfocom.vmware.ovfs.DeployEvent|Deployed entity from Library Item {libraryItemName} in Library {libraryName}EventExFailed to deploy entity from 
Content Libraryerrorcom.vmware.ovfs.DeployFailEvent|Failed to deploy entity from Library Item {libraryItemName} in Library {libraryName}EventExCloned entity to Content Libraryinfocom.vmware.ovfs.ExportEvent|Cloned entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExFailed to clone entity to Content Libraryerrorcom.vmware.ovfs.ExportFailEvent|Failed to clone entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExinfocom.vmware.rbd.activateRuleSet|Activate Rule SetEventExwarningcom.vmware.rbd.fdmPackageMissing|A host in a HA cluster does not have the 'vmware-fdm' package in its image profileEventExwarningcom.vmware.rbd.hostProfileRuleAssocEvent|A host profile associated with one or more active rules was deleted.EventExerrorcom.vmware.rbd.hostScriptFailure|An error encountered while running a user defined script: {scriptName} on the host: {ip}. Status: {status}EventExwarningcom.vmware.rbd.ignoreMachineIdentity|Ignoring the AutoDeploy.MachineIdentity event, since the host is already provisioned through Auto DeployEventExinfocom.vmware.rbd.pxeBootNoImageRule|Unable to PXE boot host since it does not match any rulesEventExinfocom.vmware.rbd.pxeBootUnknownHost|PXE Booting unknown hostEventExinfocom.vmware.rbd.pxeProfileAssoc|Attach PXE ProfileEventExinfocom.vmware.rbd.scriptBundleAssoc|Script Bundle Name: {name} attached to moref {moref}, entity-id {entity-id}EventExerrorcom.vmware.rbd.vmcaCertGenerationFailureEvent|Failed to generate host certificates using VMCAEventExCreated Harbor registryinfocom.vmware.registry.HarborCreateEvent|Created Harbor registry {registryName} on cluster {clusterId}.EventExFailed to create Harbor registryerrorcom.vmware.registry.HarborCreateFailEvent|Failed to create Harbor registry {registryName} on cluster {clusterId}.EventExDeleted Harbor registryinfocom.vmware.registry.HarborDeleteEvent|Deleted Harbor registry {registryName} on cluster {clusterId}.EventExFailed to delete Harbor registryerrorcom.vmware.registry.HarborDeleteFailEvent|Failed to delete Harbor registry {registryName} on cluster {clusterId}.EventExCreated Harbor projectinfocom.vmware.registry.HarborProjectCreateEvent|Created Harbor project {projectName} for registry {registryId}.EventExFailed to create Harbor projecterrorcom.vmware.registry.HarborProjectCreateFailEvent|Failed to create Harbor project {projectName} for registry {registryId}.EventExDeleted Harbor projectinfocom.vmware.registry.HarborProjectDeleteEvent|Deleted Harbor project {projectName} for registry {registryId}.EventExFailed to delete Harbor projecterrorcom.vmware.registry.HarborProjectDeleteFailEvent|Failed to delete Harbor project {projectName} for registry {registryId}.EventExCreated Harbor project memberinfocom.vmware.registry.HarborProjectMemberCreateEvent|Created Harbor project member {memberName} for project {projectName}.EventExFailed to create Harbor project membererrorcom.vmware.registry.HarborProjectMemberCreateFailEvent|Failed to create Harbor project member {memberName} for project {projectName}.EventExDeleted Harbor project memberinfocom.vmware.registry.HarborProjectMemberDeleteEvent|Deleted Harbor project member {memberName} from project {projectName}.EventExFailed to delete Harbor project membererrorcom.vmware.registry.HarborProjectMemberDeleteFailEvent|Failed to delete Harbor project member {memberName} from project {projectName}.EventExUpdated Harbor project memberinfocom.vmware.registry.HarborProjectMemberUpdateEvent|Updated Harbor project member 
{memberName} for project {projectName}.EventExFailed to update Harbor project membererrorcom.vmware.registry.HarborProjectMemberUpdateFailEvent|Failed to update Harbor project member {memberName} for project {projectName}.EventExPurged Harbor projectinfocom.vmware.registry.HarborProjectPurgeEvent|Purged Harbor project {projectName} for registry {registryId}.EventExFailed to purge Harbor projecterrorcom.vmware.registry.HarborProjectPurgeFailEvent|Failed to purge Harbor project {projectName} for registry {registryId}.EventExRestoring Harbor registryinfocom.vmware.registry.HarborRestoreEvent|Restoring Harbor registry {registryName} on cluster {clusterId}.EventExFailed to restore Harbor registryerrorcom.vmware.registry.HarborRestoreFailEvent|Failed to restore Harbor registry {registryName} on cluster {clusterId}.EventExRestored Harbor registryinfocom.vmware.registry.HarborRestoreSuccessEvent|Restored Harbor registry {registryName} on cluster {clusterId}.ExtendedEventProactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!errorcom.vmware.vc.proactivehdw.DbError|Proactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!EventExProactive hardware management: Host is disabled with proactive hardware management.warningcom.vmware.vc.proactivehdw.Disabled|Host is disabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: Host is enabled with proactive hardware management.infocom.vmware.vc.proactivehdw.Enabled|Host is enabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: received a failure health update from vendor.errorcom.vmware.vc.proactivehdw.Failure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: Polled health updates from HSM are discarded due to health update response content size limit being exceeded.warningcom.vmware.vc.proactivehdw.HealthUpdatesResponseLimitExceed|Proactive hardware management: Polled health updates from HSM {VendorDisplayName} are discarded due to health update response content size limit being exceeded. Refer to vSAN health logs for more details.EventExProactive hardware management: Some health updates from HSM are discarded due to validation failures.warningcom.vmware.vc.proactivehdw.HealthUpdatesValidationFail|Proactive hardware management: Some health updates from HSM {VendorDisplayName} are discarded due to validation failures. Refer to vSAN health logs for more details.EventExProactive hardware management: Error occurred when posting host-level event for unregistration of HSMerrorcom.vmware.vc.proactivehdw.HostEventPostFailed|Proactive hardware management: After HSM {VendorDisplayName} was unregistered an internal error prevented a host event from posting. 
The following hosts are affected: {AffectedHosts}.EventExProactive hardware management: Failed to contact an HSMerrorcom.vmware.vc.proactivehdw.HsmCommunicationError|Proactive hardware management: Failed to contact HSM with vendor: {VendorDisplayName}.EventExProactive hardware management: Error occured in poll HSM requesterrorcom.vmware.vc.proactivehdw.HsmRequestError|Proactive hardware management: Internal error occurred during polling HSM from vendor {VendorDisplayName}.EventExProactive hardware management: HSM is unregistered.infocom.vmware.vc.proactivehdw.HsmUnregistration|Proactive hardware management: HSM is unregistered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: received a predictive failure health update from vendor.warningcom.vmware.vc.proactivehdw.PredictiveFailure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: HSM is unregistered but with a failure in removing resource bundle.errorcom.vmware.vc.proactivehdw.ResourceBundleCleanupError|Proactive hardware management: HSM from {VendorDisplayName} is unregistered but with a failure in removing resource bundle - likely the resource bundle is currently in use. Please refer to vSAN health logs for the underlying cause and perform manual clean up on the resource bundle.EventExProactive hardware management: Failed to create/update subscription for HSM due to a communication error with HSMerrorcom.vmware.vc.proactivehdw.SubscriptionHsmCommError|Proactive hardware management: Failed to create/update subscription for HSM {VendorDisplayName} due to a communication error with HSM.EventExProactive hardware management: Failed to create/update subscription for HSM due to internal errorerrorcom.vmware.vc.proactivehdw.SubscriptionInternalError|Proactive hardware management: Failed to perform subscription create/update for HSM {VendorDisplayName} due to an internal error. Please refer to the vSAN health logs for more details.EventExProactive hardware management: A new HSM is registered.infocom.vmware.vc.proactivehdw.registration.NewRegistration|Proactive hardware management: A new HSM is registered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: HSM registration is updated.infocom.vmware.vc.proactivehdw.registration.UpdateSuccess|Proactive hardware management: The registration information on the following HSM: '{VendorDisplayName}' has been updated. Here are its supported health update infos: '{EnabledHealthUpdateInfos}'ExtendedEventinfocom.vmware.vcIntegrity.CancelTask|Canceling task on [data.name].ExtendedEventinfocom.vmware.vcIntegrity.CheckNotification|Successfully downloaded notifications. New notifications: [data.Notifications]ExtendedEventerrorcom.vmware.vcIntegrity.CheckNotificationFailed|Could not download notifications.ExtendedEventerrorcom.vmware.vcIntegrity.CheckPXEBootHostFailure|Cannot determine whether host {host.name} is PXE booted. 
The host will be excluded for the current operation.ExtendedEventwarningcom.vmware.vcIntegrity.ClusterConfigurationOutOfCompliance|Hosts in Cluster [data.resource] are out of compliance.ExtendedEventerrorcom.vmware.vcIntegrity.ClusterOperationCancelledDueToCertRefresh|In-flight VUM task on Cluster [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventwarningcom.vmware.vcIntegrity.CriticallyLowDiskSpace|VMware vSphere Lifecycle Manager is critically low on storage space! Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.DisableToolsRemediateOnReboot|Successfully disabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.DisableToolsRemediateOnRebootFailed|Could not disable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.DownloadAlert|VMware vSphere Lifecycle Manager download alert (critical/total): ESX [data.esxCritical]/[data.esxTotal]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadFailedPatchBinary|Could not download patch packages for following patches: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestPackage|Successfully downloaded guest patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestPackageFailed|Could not download guest patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixPackage|Successfully downloaded guest patch packages for UNIX. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixPackageFailed|Could not download guest patch packages for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixUpdate|Successfully downloaded guest patch definitions for UNIX. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixUpdateFailed|Could not download guest patch definitions for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUpdate|Successfully downloaded guest patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUpdateFailed|Could not download guest patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostPackage|Successfully downloaded host patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostPackageFailed|Could not download host patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostUpdate|Successfully downloaded host patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostUpdateFailed|Could not download host patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.EnableToolsRemediateOnReboot|Successfully enabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.EnableToolsRemediateOnRebootFailed|Could not enable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.FailToLock|There are running tasks for the entity [data.name] that cannot finish within a specific time. The operation will stop.ExtendedEventcom.vmware.vcIntegrity.FtFailedEvent|ExtendedEventerrorcom.vmware.vcIntegrity.GADvdMountError|VMware vSphere Lifecycle Manager Guest Agent could not access the DVD drive on {vm.name}. 
Verify that a DVD drive is available and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAError|An internal error occurred in communication with VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Verify that the VM is powered on and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAInstallFailed|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Make sure that the VM is powered on.ExtendedEventinfocom.vmware.vcIntegrity.GAInstalled|VMware vSphere Lifecycle Manager Guest Agent successfully installed on {vm.name}.ExtendedEventerrorcom.vmware.vcIntegrity.GARuntimeError|An unknown internal error occurred during the required operation on {vm.name}. Check the logs for more details and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GATimeout|VMware vSphere Lifecycle Manager Guest Agent could not respond in time on {vm.name}. Verify that the VM is powered on and that the Guest Agent is running.ExtendedEventwarningcom.vmware.vcIntegrity.HostConfigurationOutOfCompliance|Configuration of Host [data.resource] is out of compliance.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallClose|Close [data.name] firewall ports.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallOpen|Open [data.name] firewall ports.ExtendedEventerrorcom.vmware.vcIntegrity.HostOperationCancelledDueToCertRefresh|In-flight VUM task on Host [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportCancelled|Host patch offline bundle upload is canceled by user.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportSuccess|[data.numBulletins] new bulletins uploaded successfully through offline bundle.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchBundleImportUnknownError|Host patch offline bundle upload did not succeed.ExtendedEventcom.vmware.vcIntegrity.HostPatchInputRecalledFailure|ExtendedEventcom.vmware.vcIntegrity.HostPatchPrerequisiteRecalledFailure|ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be remediated. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry remediation operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be remediated. Remove either of the patch from the baseline and retry the remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be staged. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be staged. 
Remove either of the patch from the baseline and retry the stage operation.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmFtEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster and contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmScanEvent|Cannot scan host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmStageEvent|Cannot stage host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtDiffPatchesEvent|Host {host.name} has FT enabled VMs. If you apply different patches to hosts in a cluster, FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtEvent|Cannot remediate host {host.name} because it contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtPairEvent|Host {host.name} has FT enabled VMs. The host on which the Secondary VMs reside is not selected for remediation. As a result FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedHacEvent|Cannot remediate host {host.name} because it is a part of a HA admission control enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedPxeUpgradeUnsupported|Upgrade operations are not supported on host {host.name} because it is PXE booted.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedRemovableDeviceEvent|Cannot remediate host {host.name} because it has VMs with a connected removable device. Disconnect all removable devices before remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorEsxFileDownload|Host [data.name] cannot download files from the VMware vSphere Lifecycle Manager patch store. Check the network connectivity and firewall setup, and verify that the host can access the configured patch store.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorNotInstallable|The selected patches [data.arg1] cannot be installed on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateConflictDependencies|The patches selected for remediation on the host [data.name] depend on other patches that have conflicts.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDefault|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDeviceAttached|Remediation did not succeed for [data.name]. The host has virtual machines [data.arg1] with connected removable media devices. This prevents the host from entering maintenance mode. Disconnect the removable devices and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateEnterMmode|Remediation did not succeed for [data.name]. The host could not enter maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateExitMmode|Remediation did not succeed for [data.name]. The host could not exit maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReboot|Remediation did not succeed for [data.name]. The host did not reboot after remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRebootReconnect|Remediation did not succeed for [data.name]. 
VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect after a reboot.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReconnect|Remediation did not succeed for [data.name]. VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRestoreVm|Remediation did not succeed for [data.name]. Restoring the power state or device connection state for one or more virtual machines on the host did not succeed.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateMetadataCorrupt|Remediation did not succeed for [data.name]. The patch metadata is corrupted. This might be caused by an invalid format of metadata content. You can try to re-download the patches.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateVibDownload|Remediation did not succeed for [data.name]. There were errors while downloading one or more software packages. Check the VMware vSphere Lifecycle Manager network connectivity settings.ExtendedEventcom.vmware.vcIntegrity.HostUpdateErrorVsanHealthCheckFailed|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeAgentDeployFailure|Cannot deploy upgrade agent on host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailBootDiskSize|The boot disk has a size of [data.found] MiB, the minimum requirement of the upgrade image is [data.expected] MiB.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailConflictingVibs|The upgrade contains conflicting VIBs. Remove the conflicting VIBs or use Image Builder to create a custom upgrade ISO image that contains the newer versions of the conflicting VIBs, and try to upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreakageUnsure|Cannot determine whether the upgrade breaks Cisco Nexus 1000V virtual network switch feature on the host. If the host does not have the feature, you can ignore this warning.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreaks|Cisco Nexus 1000V virtual network switch feature installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailEESXInsufficientSpaceForImage|Cannot create a ramdisk of size [data.expected]MB to store the upgrade image. Check if the host has sufficient memory.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailESXInsufficientSpaceForImage|Upgrade requires at least [data.expected]MB free space on boot partition to store the upgrade image, only [data.found]MB found. Retry after freeing up sufficient space or perform a CD-based installation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailHostHardwareMismatch|The upgrade is not supported on the host hardware. The upgrade ISO image contains VIBs that failed the host hardware compatibility check.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPInImage|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V software package [data.expected] installed on the host. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPUSupportedHost|The host is managing a DPU(s) and is a part of vLCM baselines-managed cluster, which is not supported. 
Move the host to vLCM image-managed cluster and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleHostAcceptance|The upgrade package is not compatible with the host. Use an upgrade package that meets the host's acceptance level or change the host's acceptance level to match that of the upgrade package.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePartitionLayout|The host cannot be upgraded due to incompatible partition layout.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePasswords|The passwords cannot be migrated because the password encryption scheme is incompatible.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleSphereletVersion|Spherelet version [data.found] is not compatible with ESXi 8.0 and later version. Please upgrade your WCP cluster to install a compatible Spherelet version, or remove Spherelet if the host is not in a WCP cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleWithDvsCP|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientEntropyCache|Storage entropy cache is not full. A full entropy cache is required for upgrade. Refer to KB 89854 for steps on how to refill the cache.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientMemory|Insufficient memory found on the host: [data.expected]MB required, [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientSpaceForConfig|Upgrade requires at least [data.expected]MB free space on a local VMFS datastore, only [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailLockerSpaceAvail|The system has insufficient locker space for the image profile.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPBreaksDvsCP|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image that is compatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPInImage|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image [data.found]. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingGunzipChecksumVibs|These VIB(s) on the host do not have the required sha-256 gunzip checksum for their payloads: [data.found]. This will prevent VIB security verification and secure boot from functioning properly. Please remove these VIBs and check with your vendor for a replacement of these VIBs.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeBootBank|The system image on the attached iso lacks a storage driver for the installed bootbank.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeNic|The system image on the attached iso lacks a NIC driver for the management network traffic.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMD5RootPassword|The root password is not using MD5 hashing, causing it to be authenticated up to only 8 characters. 
For instructions on how to correct this, see VMware KB 1024500 at http://kb.vmware.com/kb/1024500.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMinCpuCores|New ESXi version requires a minimum of [data.expected] processor cores.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoVt|Processor does not support hardware virtualization or it is disabled in BIOS. Virtual machine performance may be slow.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNonVmwareSoftware|The software modules [data.found] found on the host are not part of the upgrade image. These modules will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNvdsToCvdsMigration|ESXi host is not ready for NSX-T vSphere Distributed Switch (VDS) migration included with this ESXi upgrade. Please run Upgrade Readiness Tool (URT) from the NSX-T Manager managing this host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNxbitEnabled|No eXecute (NX) bit is not enabled on the host. New ESXi version requires a CPU with NX/XD bit supported and enabled.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPendingReboot|Host software configuration requires a reboot. Reboot the host and try upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPowerPathBreaks|EMC PowerPath module [data.found] installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailRRFTVMsPresent|Legacy FT is not compatible with upgraded version. Disable legacy FT.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailScriptInitFailed|Host upgrade validity checks are not successful.ExtendedEventcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailTbootRequired|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnknown|The upgrade precheck script returned unknown error.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedConfig|Error in ESX configuration file (esx.conf).ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedDevices|Unsupported devices [data.found] found on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedHostVersion|Host version [data.found] is not supported for upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedLongMode|Host CPU is unsupported. New ESXi version requires a 64-bit CPU with support for LAHF/SAHF instructions in long mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedSHA1Cert|SHA-1 signature found in host certificate {data.cert} with subject {data.subject}. Support for certificates with weak signature algorithm SHA-1 has been removed in ESXi 8.0. To proceed with upgrade, replace it with a SHA-2 signature based certificate. Refer to release notes and KB 89424 for more details.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedTPMVersion|TPM 1.2 device detected. Support for TPM version 1.2 is discontinued. Installation may proceed, but may cause the system to behave unexpectedly.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailVFATCorruption|A problem with one or more vFAT bootbank partitions was detected. 
Please refer to KB 91136 and run dosfsck on bootbank partitions.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeProgressAborted|Host upgrade installer stopped.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressAuth|Host upgrade in progress: Configuring authentication.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressBootloader|Host upgrade in progress: Boot setup.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressClearpart|Host upgrade in progress: Clearing partitions.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressComplete|Host upgrade installer completed.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressKeyboard|Host upgrade in progress: Setting keyboard.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressLanguage|Host upgrade in progress: Setting language.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMigrating|Host upgrade in progress: Migrating ESX v3 configuration to ESX v4.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMount|Host upgrade in progress: Mounting file systems.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressNetworking|Host upgrade in progress: Installing network configuration.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPackages|Host upgrade in progress: Installing packages.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartphys|Host upgrade in progress: Partitioning physical hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartvirt|Host upgrade in progress: Partitioning virtual hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPostscript|Host upgrade in progress: Running postinstallation script.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressRootpass|Host upgrade in progress: Setting root passwordExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressTimezone|Host upgrade in progress: Setting timezone.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressUnknown|Host upgrade in progress.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeRunScriptFailure|Cannot run upgrade script on host.ExtendedEventerrorcom.vmware.vcIntegrity.ImageRecommendationGenerationError|The image recommendation generation failed.ExtendedEventinfocom.vmware.vcIntegrity.ImageRecommendationGenerationFinished|The image recommendation generation finished.ExtendedEventerrorcom.vmware.vcIntegrity.IncompatibleTools|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name} because VMware Tools is not installed or is of an incompatible version. 
The required version is [data.requiredVersion] and the installed version is [data.installedVersion].ExtendedEventinfocom.vmware.vcIntegrity.InstallAddOnUpdate|The following additional patches are included to resolve a conflict for installation on [data.entityName]: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestion|To resolve a conflict for installation on [data.entityName], the following additional patches might need to be included in the baseline: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestionNotFound|VMware vSphere Lifecycle Manager could not find patches to resolve the conflict for installation on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdate|Installation of patches [data.updateId] started on host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdateComplete|Installation of patches succeeded on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.InstallUpdateError|Could not install patches on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.LinuxOffLineScanNotSupported|Cannot scan [data.name] for patches. Scan of powered off or suspended Linux VMs is not supported.ExtendedEventwarningcom.vmware.vcIntegrity.LowDiskSpace|VMware vSphere Lifecycle Manager is running out of storage space. Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.MetadataCorrupted|Patch definition for [data.name] is corrupt. Check the logs for more details. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.MetadataNotFound|Patch definitions for [data.name] are missing. Download patch definitions first.ExtendedEventerrorcom.vmware.vcIntegrity.NoRequiredLicense|There is no VMware vSphere Lifecycle Manager license for [data.name] for the required operation.ExtendedEventinfocom.vmware.vcIntegrity.NotificationCriticalInfoAlert|VMware vSphere Lifecycle Manager informative notification (critical) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationDownloadAlert|VMware vSphere Lifecycle Manager notification download alertExtendedEventinfocom.vmware.vcIntegrity.NotificationImportantInfoAlert|VMware vSphere Lifecycle Manager informative notification (important) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationModerateInfoAlert|VMware vSphere Lifecycle Manager informative notification (moderate) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallAlert|VMware vSphere Lifecycle Manager recall alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallFixAlert|VMware vSphere Lifecycle Manager recall fix alertExtendedEventerrorcom.vmware.vcIntegrity.OperationCancelledDueToCertRefresh|In-flight VUM task on [data.name] is cancelled due to VC TLS certificate replacement. 
For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventcom.vmware.vcIntegrity.PXEBootedHostEvent|ExtendedEventinfocom.vmware.vcIntegrity.PackageImport|Package [data.name] is successfully imported.ExtendedEventerrorcom.vmware.vcIntegrity.PackageImportFailure|Import of package: [data.name] did not succeed.ExtendedEventinfocom.vmware.vcIntegrity.RebootHostComplete|Host [data.entityName] is successfully rebooted.ExtendedEventerrorcom.vmware.vcIntegrity.RebootHostError|Cannot reboot host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostStart|Start rebooting host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostWait|Waiting for host [data.entityName] to reboot.ExtendedEventerrorcom.vmware.vcIntegrity.ReconfigureClusterFailedEvent|VMware vSphere Lifecycle Manager could not restore HA admission control/DPM settings for cluster {computeResource.name} to their original values. These settings have been changed for patch installation. Check the cluster settings and restore them manually.ExtendedEventinfocom.vmware.vcIntegrity.Remediate|Remediation succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedHost|Could not remediate {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedVm|Could not remediate {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDpmDisableHost|Could not remediate host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.RemediateFailed|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostInvalidPowerState|Cannot remediate the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostOnUnsupportedHost|Could not remediate {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediateOrphanedVm|Could not remediate orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.RemediateStart|Remediating object [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateVmOnUnsupportedHost|Could not remediate {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediationStatusEvent|Current progress of remediation: [data.noOfSucceededHosts] hosts completed successfully, [data.noOfFailedHosts] hosts completed with errors, [data.noOfHostsBeingRemediatedCurrently] hosts are being remediated, [data.noOfWaitingHosts] hosts are waiting to start remediation, and [data.noOfRetryHosts] hosts could not enter maintenance mode and are waiting to retry.ExtendedEventinfocom.vmware.vcIntegrity.Scan|Successfully scanned [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanCancelled|Scanning of [data.name] is canceled by user.ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedHost|Could not scan {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedVm|Could not scan {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDpmDisableHost|Could not scan host {host.name} because its power state is invalid. 
The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.ScanFailed|Could not scan [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostInvalidPowerState|Cannot scan the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostOnUnsupportedHost|Could not scan {host.name} for patches because it is of unsupported version [data.version].ExtendedEventwarningcom.vmware.vcIntegrity.ScanMissingUpdate|Found a missing patch: [data.message] when scanning [data.name]. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.ScanOrphanedVm|Could not scan orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.ScanStart|Scanning object [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.ScanUnsupportedVolume|{vm.name} contains an unsupported volume [data.volumeLabel]. Scan results for this VM might be incomplete.ExtendedEventerrorcom.vmware.vcIntegrity.ScanVmOnUnsupportedHost|Could not scan {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventerrorcom.vmware.vcIntegrity.SequentialRemediateFailedEvent|An error occured during the sequential remediation of hosts in cluster {computeResource.name}. Check the related events for more details.ExtendedEventinfocom.vmware.vcIntegrity.SkipSuspendedVm|Suspended VM {vm.name} has been skipped.ExtendedEventinfocom.vmware.vcIntegrity.Stage|Staging succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.StageDisconnectedHost|Could not stage patches to {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageDpmDisableHost|Could not stage patches to host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.StageFailed|Staging did not succeed for [data.name][data.message].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostInvalidPowerState|Cannot stage patches to the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostOnUnsupportedHost|Could not stage patches to {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.StageStart|Staging patches to host [data.name].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdate|Started staging of patches [data.updateId] on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdateComplete|Staging of patch to [data.entityName] succeeded.ExtendedEventerrorcom.vmware.vcIntegrity.StageUpdateError|Cannot stage patch [data.updateId] to [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.SysprepDisabled|Sysprep is disabled during the remediation.ExtendedEventinfocom.vmware.vcIntegrity.SysprepEnabled|Sysprep settings are restored.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepHandleFailure|Cannot access the sysprep settings for VM {vm.name}. Retry the operation after disabling sysprep for the VM.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepNotFound|Cannot locate the sysprep settings for VM {vm.name}. For Windows 7 and Windows 2008 R2, offline VM remediation is supported only if the system volume is present in the primary disk partition. 
Retry the operation after disabling sysprep for the VM.ExtendedEventinfocom.vmware.vcIntegrity.ToolsRemediate|VMware Tools upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ToolsRemediateFailed|VMware Tools upgrade did not succeed for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.ToolsScan|Successfully scanned [data.name] for VMware Tools upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.ToolsScanFailed|Could not scan [data.name] for VMware Tools upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsScanInstallNotSupported|VMware Tools is not installed on [data.name]. VMware vSphere Lifecycle Manager supports upgrading only an existing VMware Tools installation.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeRemediateSkippedOnHost|VMware Tools upgrade was not performed on {vm.name}. VMware Tools upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade is not supported for virtual appliances.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeScanSkippedOnHost|VMware Tools upgrade scan was not performed on {vm.name}. VMware Tools upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedHostRemediateSpecialVMEvent|The host [data.name] has a VM [data.vm] with VMware vSphere Lifecycle Manager or VMware vCenter Server installed. The VM must be moved to another host for the remediation to proceed.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedLinuxAction|Action is not supported for Linux VM/VA {vm.name}. VMware Tools is not installed or the machine cannot start.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedOs|Scan or remediation is not supported on [data.name] because of unsupported OS [data.os].ExtendedEventinfocom.vmware.vcIntegrity.UnsupportedPXEBootHost|Scanning, remediation, and staging are not supported on PXE booted ESXi hosts.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedSpecialVMEvent|VM [data.name] has either VMware vSphere Lifecycle Manager or VMware vCenter Server installed. This VM will be ignored for scan and remediation.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedVaAction|Action is not supported for offline or suspended virtual appliance {vm.name}. ExtendedEventerrorcom.vmware.vcIntegrity.VAAutoUpdateOn|Auto update is set to ON for virtual appliance [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADiscovery|Successfully discovered virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADiscoveryFailed|Could not discover virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadGenericFailure|Could not download virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadInvalidUrl|[data.name] is not a valid virtual appliance download URL.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadMetadataFailure|Could not download virtual appliance upgrade metadata for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADownloadSuccess|Successfully downloaded virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VARepositoryAddressNotSet|No repository address is set for virtual appliance [data.name]. 
The appliance does not support updates by vCenter Server.ExtendedEventinfocom.vmware.vcIntegrity.VAScan|Successfully scanned [data.name] for VA upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VAScanFailed|Could not scan [data.name] for VA upgrades.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeRemediate|Virtual Hardware upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateFailed|Could not perform Virtual Hardware upgrade on [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateSkippedOnHost|Virtual Hardware upgrade was not performed for {vm.name}. Virtual Hardware upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade is not supported for virtual appliances.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeScan|Successfully scanned [data.name] for Virtual Hardware upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeScanFailed|Could not scan [data.name] for Virtual Hardware upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeScanSkippedOnHost|Virtual Hardware upgrade scan was not performed for {vm.name}. Virtual Hardware upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotInstalled|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not installed. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotLatest|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not the latest version supported by the host. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnknown|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is unknown. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnmanaged|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is not managed by VMware vSphere. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsAutoUpgradeUnsupported|The version of VMware Tools installed in {vm.name} does not support automatic upgrade. Upgrade VMware Tools manually.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsNotRunning|Error while waiting for VMware Tools to respond. Verify that VMware Tools is running in VM {vm.name}.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForInstall|Patch [data.inputBulletin] was excluded from the remediation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the remediation. You can also add the baselines to a baseline group for convenience and perform the remediation.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForStage|Patch [data.inputBulletin] was excluded from the stage operation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the stage operation. 
You can also add the baselines to a baseline group for convenience and perform the stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.VmDevicesRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original removable device connection settings for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the settings for the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmMigrationFailedEvent|Cannot migrate VM {vm.name} from [data.srcHost] to [data.destHost].ExtendedEventerrorcom.vmware.vcIntegrity.VmPowerRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original power state for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the original power state of the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmotionCompatibilityCheckFailedEvent|Cannot check compatibility of the VM {vm.name} for migration with vMotion to host [data.hostName].EventExAgency createdinfocom.vmware.vim.eam.agency.create|{agencyName} created by {ownerName}EventExAgency destroyedinfocom.vmware.vim.eam.agency.destroyed|{agencyName} removed from the vSphere ESX Agent ManagerEventExAgency state changedinfocom.vmware.vim.eam.agency.goalstate|{agencyName} changed goal state from {oldGoalState} to {newGoalState}EventExAgency status changedinfocom.vmware.vim.eam.agency.statusChanged|Agency status changed from {oldStatus} to {newStatus}EventExAgency reconfiguredinfocom.vmware.vim.eam.agency.updated|Configuration updated {agencyName}EventExCluster Agent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterPowerOn|Cluster Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterProvisioning|Cluster Agent VM {vm.name} has been provisioned. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM is about to be powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailablePrePowerOn|Cluster Agent VM {vm.name} is about to be powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent added to hostinfocom.vmware.vim.eam.agent.created|Agent added to host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyed|Agent removed from host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyedNoHost|Agent removed from host ({agencyName})EventExAgent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterPowerOn|Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterProvisioning|Agent VM {vm.name} has been provisioned. 
Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent status changedinfocom.vmware.vim.eam.agent.statusChanged|Agent status changed from {oldStatus} to {newStatus}EventExAgent VM is deletedinfocom.vmware.vim.eam.agent.task.deleteVm|Agent VM {vmName} is deleted on host {host.name} ({agencyName})EventExAgent VM is provisionedinfocom.vmware.vim.eam.agent.task.deployVm|Agent VM {vm.name} is provisioned on host {host.name} ({agencyName})EventExAgent VM powered offinfocom.vmware.vim.eam.agent.task.powerOffVm|Agent VM {vm.name} powered off, on host {host.name} ({agencyName})EventExAgent VM powered oninfocom.vmware.vim.eam.agent.task.powerOnVm|Agent VM {vm.name} powered on, on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibInstalled|Agent installed VIB {vib} on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibUninstalled|Agent uninstalled VIB {vib} on host {host.name} ({agencyName})EventExwarningcom.vmware.vim.eam.issue.agencyDisabled|Agency is disabledEventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentOVF|Unable to access agent OVF package at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentVib|Unable to access agent VIB module at {url} ({agencyName})EventExcom.vmware.vim.eam.issue.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInMaintenanceMode|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterResources|Cluster Agent VM cannot be powered on due to insufficient resources on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterSpace|Cluster Agent VM on cluster {computeResource.name} cannot be provisioned due to insufficient space on cluster datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.invalidConfig|Cluster Agent VM {vm.name} on cluster {computeResource.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmDatastore|Cluster Agent VM datastore(s) {customAgentVmDatastoreName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmNetwork|Cluster Agent VM network(s) {customAgentVmNetworkName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.ovfInvalidProperty|OVF environment used to provision cluster Agent VM on cluster {computeResource.name} has one or more invalid properties ({agencyName})EventExcom.vmware.vim.eam.issue.cluster.agent.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotDeployed|Cluster Agent VM is missing on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotRemoved|Cluster Agent VM {vm.name} is provisioned when it should be removed ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOff|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOn|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmSuspended|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be 
powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostInMaintenanceMode|Agent cannot complete an operation since the host {host.name} is in maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.hostInStandbyMode|Agent cannot complete an operation since the host {host.name} is in standby mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostNotReachable|Host {host.name} must be powered on and connected to complete agent operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.immediateHostRebootRequired|Host {host.name} must be rebooted immediately to unblock agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.incompatibleHostVersion|Agent is not deployed due to incompatible host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientResources|Agent cannot be provisioned due to insufficient resources on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientSpace|Agent on {host.name} cannot be provisioned due to insufficient space on datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotDeleteSoftware|Cannot remove the Baseline associated with agency {agencyName} from VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotStageSoftware|The software defined by agency {agencyName} cannot be staged in VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.vUMUnavailable|VMware Update Manager was unavailable during agency {agencyName} operationsEventExerrorcom.vmware.vim.eam.issue.invalidConfig|Agent VM {vm.name} on host {host.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmDatastore|No agent datastore configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmNetwork|No agent network configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmDatastore|Agent datastore(s) {customAgentVmDatastoreName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmNetwork|Agent network(s) {customAgentVmNetworkName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmDatastore|Agent datastore cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmNetwork|Agent network(s) cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidFormat|OVF used to provision agent on host {host.name} has invalid format ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidProperty|OVF environment used to provision agent on host {host.name} has one or more invalid properties ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotConfigureSolutions|The required solutions defined by agency {agencyName} cannot be configured in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotUploadDepot|Software defined by agency {agencyName} cannot be uploaded in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.inaccessibleDepot|Unable to access software defined by agency {agencyName}EventExerrorcom.vmware.vim.eam.issue.personality.agency.invalidDepot|Software defined by agency {agencyName} contains invalid vSphere Lifecycle Manager related 
metadataEventExerrorcom.vmware.vim.eam.issue.personality.agency.pMUnavailable|vSphere Lifecycle Manager was unavailable during agency {agencyName} operationsEventExinfocom.vmware.vim.eam.issue.personality.agent.awaitingPMRemediation|Agent requires application of configured solutions through vSphere Lifecycle Manager on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agent.blockedByAgencyOperation|Agency issues related to vSphere Lifecycle Manager require resolution to unblock host {host.name} ({agencyName})EventExinfocom.vmware.vim.eam.issue.resolved|Issue {type} resolved (key {key})EventExerrorcom.vmware.vim.eam.issue.vibCannotPutHostInMaintenanceMode|Cannot put host into maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.vibCannotPutHostOutOfMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.vibDependenciesNotMetByHost|VIB module dependencies for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibInvalidFormat|Invalid format for VIB module at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibNotInstalled|VIB module for agent is not installed/removed on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequirementsNotMetByHost|VIB system requirements for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostInMaintenanceMode|Host must be put into maintenance mode to complete agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostReboot|Host {host.name} must be reboot to complete agent VIB installation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualInstallation|VIB {vib} requires manual installation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualUninstallation|VIB {vib} requires manual uninstallation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmCorrupted|Agent VM {vm.name} on host {host.name} is corrupted ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmDeployed|Agent VM {vm.name} is provisioned on host {host.name} when it should be removed ({agencyName})EventExcom.vmware.vim.eam.issue.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.vmNotDeployed|Agent VM is missing on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmOrphaned|Orphaned agent VM {vm.name} on host {host.name} detected ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOff|Agent VM {vm.name} on host {host.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOn|Agent VM {vm.name} on host {host.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmRequiresHostOutOfMaintenanceMode|Agent cannot deploy Agent VM since the host {host.name} is in maintenance mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmSuspended|Agent VM {vm.name} on host {host.name} is expected to be powered on but is suspended ({agencyName})ExtendedEventInvalid loginwarningcom.vmware.vim.eam.login.invalid|Failed login to vSphere ESX Agent ManagerEventExSuccessful login to vSphere ESX Agent Managerinfocom.vmware.vim.eam.login.succeeded|Successful login by {user} into vSphere ESX Agent ManagerEventExUser logged out of vSphere ESX Agent Managerinfocom.vmware.vim.eam.logout|User {user} logged out of vSphere ESX Agent Manager by logging out of the vCenter serverEventExUnauthorized access in vSphere ESX Agent 
Managerwarningcom.vmware.vim.eam.unauthorized.access|Unauthorized access by {user} in vSphere ESX Agent ManagerEventExChecked in virtual machine into a virtual machine template iteminfocom.vmware.vmtx.LibraryItemCheckInEvent|Checked in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to check in virtual machine into a virtual machine template itemerrorcom.vmware.vmtx.LibraryItemCheckInFailEvent|Failed to check in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'EventExDeleted the virtual machine checked out from the VM template iteminfocom.vmware.vmtx.LibraryItemCheckOutDeleteEvent|Deleted the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExFailed to delete the virtual machine checked out from the VM template itemerrorcom.vmware.vmtx.LibraryItemCheckOutDeleteFailEvent|Failed to delete the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExChecked out virtual machine template item as a virtual machineinfocom.vmware.vmtx.LibraryItemCheckOutEvent|Checked out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExFailed to check out virtual machine template item as a virtual machineerrorcom.vmware.vmtx.LibraryItemCheckOutFailEvent|Failed to check out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExA virtual machine checked out from the VM template item was orphaned after restorewarningcom.vmware.vmtx.LibraryItemCheckoutOrphanedOnRestoreEvent|A virtual machine (ID: {vmId}) checked out from the VM template item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was orphaned after restoreEventExCloned virtual machine to Content Library as VM templateinfocom.vmware.vmtx.LibraryItemCreateEvent|Cloned virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExFailed to clone virtual machine to Content Library as VM templateerrorcom.vmware.vmtx.LibraryItemCreateFailEvent|Failed to clone virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExDeleted a version of the virtual machine template iteminfocom.vmware.vmtx.LibraryItemDeleteVersionEvent|Deleted VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to delete a version of the virtual machine template itemerrorcom.vmware.vmtx.LibraryItemDeleteVersionFailEvent|Failed to delete VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'EventExDeployed virtual machine from Content Libraryinfocom.vmware.vmtx.LibraryItemDeployEvent|Deployed virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExFailed to deploy virtual machine from Content Libraryerrorcom.vmware.vmtx.LibraryItemDeployFailEvent|Failed to deploy virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExRolled back virtual machine template item to a previous versioninfocom.vmware.vmtx.LibraryItemRollbackEvent|Rolled back library item '{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'ExtendedEventFailed to roll back virtual machine template item to a previous versionerrorcom.vmware.vmtx.LibraryItemRollbackFailEvent|Failed to roll back library item 
'{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'EventExA virtual machine template managed by Content Library was converted to a virtual machineerrorcom.vmware.vmtx.LibraryItemTemplateConvertedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was converted to a virtual machineEventExA virtual machine template managed by Content Library was converted to a virtual machine after restorewarningcom.vmware.vmtx.LibraryItemTemplateConvertedOnRestoreEvent|The virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was found converted to a virtual machine after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was deletedEventExCould not locate a virtual machine template managed by Content Library after restorewarningcom.vmware.vmtx.LibraryItemTemplateDeletedOnRestoreEvent|Could not locate the virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateLatestVersionDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) is rolled back to the previous version because the latest VM template (ID: {vmId}) was deletedEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplatePreviousVersionDeletedEvent|Previous VM template (ID: {vmId}) of the library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was deletedEventExA virtual machine template managed by Content Library was renamedwarningcom.vmware.vmtx.LibraryItemTemplateRenamedEvent|The name of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will change to '{newItemName}' because the virtual machine template (ID: {vmId}) that the item manages was renamedExtendedEventAdded witness host to the cluster.infoAdded witness host to the cluster.com.vmware.vsan.clusterconfig.events.witnessadditiondone|Added witness host to the cluster.ExtendedEventRemoved witness host from the cluster.infoRemoved witness host from the cluster.com.vmware.vsan.clusterconfig.events.witnessremovaldone|Removed witness host from the cluster.ExtendedEventAdd disk group back to the vSAN cluster.infoAdd disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.adddisks|Add disk group back to the vSAN cluster on host {host.name}.ExtendedEventFailed to add disk group back to the vSAN cluster.errorFailed to add disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.addfail|Failed to add disk group back to the vSAN cluster on host {host.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is done.com.vmware.vsan.diskconversion.events.formatdone|Disk format conversion is done on cluster {computeResource.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is 
done.com.vmware.vsan.diskconversion.events.formathostdone|Disk format conversion is done on host {host.name}.ExtendedEventFailed to migrate vsanSparse objects.errorFailed to migrate vsanSparse objects.com.vmware.vsan.diskconversion.events.migrationfail|Failed to migrate vsanSparse objects on cluster {computeResource.name}.ExtendedEventNo disk conversion performed, all mounted disk groups on host are compliantinfoNo disk conversion performed, all mounted disk groups on host are compliant.com.vmware.vsan.diskconversion.events.noneed|No disk conversion performed, all mounted disk groups on host {host.name} are already compliant.ExtendedEventCheck existing objects on the vSAN cluster.infoCheck existing objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objectcheck|Check existing objects on the vSAN cluster.ExtendedEventObject conversion is done.infoObject conversion is done.com.vmware.vsan.diskconversion.events.objectdone|Object conversion is done.ExtendedEventFailed to convert objects on the vSAN cluster.errorFailed to convert objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objecterror|Failed to convert objects on the vSAN cluster.ExtendedEventRemove disk group from the vSAN cluster.infoRemove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removedisks|Remove disk group from the vSAN cluster on host {host.name}.ExtendedEventFailed to remove disk group from the vSAN cluster.errorFailed to remove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removefail|Failed to remove disk group on host {host.name} from the vSAN cluster.ExtendedEventRestore disk group from last break point.infoRestore disk group from last break point.com.vmware.vsan.diskconversion.events.restore|Restore disk group from last break point.ExtendedEventNo disk conversion performed, host has no mounted disk groups.infoNo disk conversion performed, host has no mounted disk groups.com.vmware.vsan.diskconversion.events.skiphost|No disk conversion performed, host {host.name} has no mounted disk groups.ExtendedEventCheck cluster status for disk format conversion.infoCheck cluster status for disk format conversion.com.vmware.vsan.diskconversion.events.statuscheck|Check status of cluster {computeResource.name} for disk format conversion.ExtendedEventcom.vmware.vsan.diskconversion.events.syncingtimeout|ExtendedEventUpdate the vSAN cluster system settings.infoUpdate the vSAN cluster system settings.com.vmware.vsan.diskconversion.events.updatesetting|Update the vSAN cluster system settings on host {host.name}.ExtendedEventDisk format conversion failed in what if upgrade.infoDisk format conversion failed in what if upgrade check.com.vmware.vsan.diskconversion.events.whatifupgradefailed|Disk format conversion failed in what if upgrade check.EventExMark ssd(s) as capacity flash.infoMark {disks} as capacity flash.com.vmware.vsan.diskmgmt.events.tagcapacityflash|Mark {disks} as capacity flash.EventExMark ssd as hdd.infoMark ssd {disk} as hdd.com.vmware.vsan.diskmgmt.events.taghdd|Mark ssd {disk} as hdd.EventExMark remote disk as local disk.infoMark remote disk {disk} as local disk.com.vmware.vsan.diskmgmt.events.taglocal|Mark remote disk {disk} as local disk.EventExMark hdd as ssd.infoMark hdd {disk} as ssd.com.vmware.vsan.diskmgmt.events.tagssd|Mark hdd {disk} as ssd.EventExRemove capacity flash mark from ssd(s).infoRemove capacity flash mark from {disks}.com.vmware.vsan.diskmgmt.events.untagcapacityflash|Remove capacity flash mark from
{disks}.EventExAdvisorvSAN Health Test 'Advisor' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.advisor.event|vSAN Health Test 'Advisor' changed from '{prestatus}' to '{curstatus}'EventExAudit CEIP Collected DatavSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.auditceip.event|vSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'EventExCNS Critical Alert - Patch available with important fixesvSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.cnspatchalert.event|vSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.controllercacheconfig.event|vSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExCoredump partition size checkvSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.coredumpartitionsize.event|vSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'EventExUpgrade vSphere CSI driver with cautionvSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.csidriver.event|vSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.diskusage.event|vSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dualencryption.event|vSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExProper vSAN network traffic shaping policy is configuredvSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dvsportspeedlimit.event|vSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'EventExEnd of general support for lower vSphere versionvSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.eoscheck.event|vSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'EventExImportant patch available for vSAN issuevSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.fsvlcmpatchalert.event|vSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based 
controllervSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.h730.event|vSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExHPE SAS Solid State DrivevSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.hpesasssd.event|vSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration check for large scale clustervSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.largescalecluster.event|vSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lavenderalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExvSAN critical alert regarding a potential data inconsistencyvSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilacdeltacomponenttest.event|vSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Critical Alert - Patch available for critical vSAN issuevSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilypatchalert.event|vSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.marigoldalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmode.event|vSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmodeh730.event|vSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvSAN storage policy compliance up-to-datevSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.objspbm.event|vSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN online health test 'vSAN Hosts with new patch 
available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.patchalert.event|vSAN online health test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.pnicconsistent.event|vSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExVM storage policy is not-recommendedvSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.policyupdate.event|vSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'EventExMaximum host number in vSAN over RDMAvSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.rdmanodesalert.event|vSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExESXi system logs stored outside vSAN datastorevSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.scratchconfig.event|vSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.smalldiskstest.event|vSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExThick-provisioned VMs on vSANvSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.thickprovision.event|vSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExFix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabledvSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.unmaptest.event|vSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'EventExvSAN v1 disk in usevSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.v1diskcheck.event|vSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'EventExvCenter Server up to datevSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vcuptodate.event|vSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'EventExMultiple VMs share the same vSAN home namespacevSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vmns.event|vSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Support InsightvSAN Support Insight's 
status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanenablesupportinsight.event|vSAN Support Insight's status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpefwtest.event|vSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExCustomer advisory for HPE Smart ArrayvSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpesmartarraytest.event|vSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'EventExvSAN management service resource checkvSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanmgmtresource.event|vSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.witnesshw.event|vSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Advanced Configuration Check for Urgent vSAN ESA PatchvSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.zdomadvcfgenabled.event|vSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all of the hosts in a vSAN cluster have consistent advanced configuration options.vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.advcfgsync.event|vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN host daemon liveness.vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clomdliveness.event|vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSphere cluster members match vSAN cluster members.vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clustermembership.event|vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster configuration consistencyvSAN Health Test 'vSAN cluster configuration consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.consistentconfig.event|vSAN Health Test 'vSAN configuration consistency' status changed from '{prestatus}' to '{curstatus}'EventExESA prescriptive disk claimvSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.ddsconfig.event|vSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to 
'{curstatus}'EventExvSAN disk group layoutvSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.dglayout.event|vSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN disk balance statusvSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.diskbalance.event|vSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Conversion HealthvSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.esaconversionhealth.event|vSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN extended configuration in syncvSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.extendedconfig.event|vSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Managed disk claimvSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.hcldiskclaimcheck.event|vSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'EventExCheck host maintenance mode is in sync with vSAN node decommission state.vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.mmdecominsync.event|vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'EventExvSAN optimal datastore default policy configurationvSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.optimaldsdefaultpolicy.event|vSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN with RDMA supports up to 32 hosts.vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.rdmanodes.event|vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExResync operations throttlingvSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.resynclimit.event|vSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN Cluster time sync status among hosts and VCvSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.timedrift.event|vSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'EventExvSAN disk format statusvSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradelowerhosts.event|vSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'EventExSoftware version compatibilityvSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradesoftware.event|vSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to 
'{curstatus}'EventExVMware vCenter state is authoritativevSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vcauthoritative.event|vSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Direct homogeneous disk claimingvSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsandconfigconsistency.event|vSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'EventExvSphere Lifecycle Manager (vLCM) configurationvSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsanesavlcmcheck.event|vSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks the object format status of all vSAN objects.vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objectformat.event|vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health status of all vSAN objects.vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objecthealth.event|vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'EventExpNic RX/TX PauseRX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.pausecount.event|RX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX CRC ErrorRX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxcrcerr.event|RX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Generic ErrorRX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxerr.event|RX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX FIFO ErrorRX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxfifoerr.event|RX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Missed ErrorRX missed error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxmisserr.event|RX missed error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Buffer Overflow ErrorRX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxoverr.event|RX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Carrier ErrorTX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txcarerr.event|TX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Generic ErrorTX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txerr.event|TX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExRDT Checksum Mismatch ErrorRDT Checksum Mismatch count reaches {value}. (warning threshold: {yellowThreshold}, critical threshold: {redThreshold})vsan.health.test.diagnostics.rdt.checksummismatchcount.event|RDT Checksum Mismatch count reaches {value}. 
(warning threshold: {yellowThreshold}, critical threshold: {redThreshold})EventExData-in-transit encryption configuration checkvSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.ditencryption.ditconfig.event|vSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.dualencryption.event|vSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExChecks if CPU AES-NI is disabled on hostsvSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.hostcpuaesni.event|vSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VMware vCenter or any hosts are not connected to Key Management ServersvSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.kmsconnection.event|vSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Prescriptive Disk Claim ConfigurationsHost {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. Please add host with relevant disks or update disk claim specsvsan.health.test.esaprescriptivediskclaim.noeligibledisk|Host {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. 
Please add host with relevant disks or update disk claim specsEventExCheck vSAN File Service host file server agent vm state.vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.fileserver.event|vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'EventExInfrastructure HealthvSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.host.event|vSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'EventExFile Share HealthvSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.sharehealth.event|vSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'EventExVDS compliance check for hyperconverged cluster configurationvSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.dvshciconfig.event|vSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExHost compliance check for hyperconverged cluster configurationvSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.hosthciconfig.event|vSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN health alarm enablement statusvSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hciskip.event|vSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'EventExvSAN HCL DB Auto UpdatevSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.autohclupdate.event|vSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllercacheconfig.event|vSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN disk group type (All-Flash or Hybrid) is VMware certified for the used SCSI controllervSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdiskmode.event|vSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller driver is VMware certified.vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdriver.event|vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller firmware is VMware certified.vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerfirmware.event|vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller is compatible with the VMWARE Compatibility GuidevSAN Health Test 'SCSI 
controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controlleronhcl.event|vSAN Health Test 'SCSI controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExDisplays information about whether there is any driver supported for a given controller in the release of ESXi installed.vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerreleasesupport.event|vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based controllervSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.h730.event|vSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks the age of the VMware Hardware Compatibility Guide database.vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hcldbuptodate.event|vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any host failed to return its hardware information.vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hclhostbadstate.event|vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'EventExHost physical memory compliance checkvSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hostmemcheck.event|vSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmode.event|vSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmodeh730.event|vSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvsan.health.test.hcl.nvmeonhcl.event|EventExNetwork Interface Cards (NICs) used in vSAN hosts must meet certain requirements. These NIC requirements assume that the packet loss is not more than 0.0001% in the hyper-converged environments. It's recommended to use NIC which link speed can meet the minimum requirement.
Otherwise, there can be a drastic impact on the vSAN performance.vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.pniclinkspeed.event|vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NICs used in this RDMA enabled vSAN cluster are certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmaniciscertified.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NIC's driver and firmware combination is certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportdriverfirmware.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the current ESXi release is certified for the RDMA NIC by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportesxrelease.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.vsanhpefwtest.event|vSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExHome objectvSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsihomeobjectstatustest.event|vSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExLUN runtime healthvSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsilunruntimetest.event|vSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExNetwork configurationvSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicenetworktest.event|vSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExService runtime statusvSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicerunningtest.event|vSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster claimed capacity is more than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.error.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. 
Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExvSAN cluster claimed capacity is less than the entitled capacity.vSAN cluster claimed capacity is less than the entitled capacity.vsan.health.test.licensecapacityusage.green.event|vSAN cluster claimed capacity is less than the entitled capacity.EventExvSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.warn.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExChecks the vSAN cluster storage space utilizationvSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.diskspace.event|vSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN component limits, disk space and RC reservations assuming one host failure.vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.limit1hf.event|vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'EventExChecks the component utilization for the vSAN cluster and each host in the cluster.vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.nodecomponentlimit.event|vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN cluster read cache utilizationvSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.rcreservation.event|vSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN cluster is partitioned due to a network issue.vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.clusterpartition.event|vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExCheck if there are duplicate IP addresses configured for vmknic interfaces.vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.duplicateip.event|vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a connectivity check for vSAN Max Client Network by checking the heartbeats from each host to all other hosts in server clustervSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.externalconnectivity.event|vSAN Health Test 'vSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if API calls from VC to a host are failing while the host is in vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostconnectivity.event|vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VC has an active 
connection to all hosts in the cluster.vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostdisconnected.event|vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a network latency check via ping small packet size ping test from all hosts to all other hostsvSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostlatencycheck.event|vSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN API calls from each host can reach to other peer hosts in the clustervSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.interhostconnectivity.event|vSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExCheck if LACP is working properly.vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.lacpstatus.event|vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hostsvSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.largeping.event|vSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster receive the multicast heartbeat of the vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastdeepdive.event|vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have matching IP multicast configuration.vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsettings.event|vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any of the hosts in the vSAN cluster have IP multicast connectivity issue.vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsuspected.event|vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'EventExCheck if any host in remote vSAN client or server cluster has more than one vSAN vmknic configured.vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multiplevsanvmknic.event|vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.pnicconsistent.event|vSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExCheck if TSO is enabled for pNIC.vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.network.pnictso.event|vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to '{curstatus}'EventExCheck if the vSAN RDMA enabled physical NIC is configured for lossless traffic.vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.rdmaconfig.event|vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all hosts in client cluster have been in a single partition with all hosts in server vSAN cluster.vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.serverpartition.event|vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hostsvSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.smallping.event|vSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpinglarge.event|vSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpingsmall.event|vSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExCheck if all hosts in server cluster have a dedicated vSAN external vmknic configured.vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanexternalvmknic.event|vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have a configured vmknic with vSAN traffic enabled.vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanvmknic.event|vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'EventExCheck all remote VMware vCenter network connectivity.vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.xvcconnectivity.event|vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'EventExvSAN overall health statusvSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.overallsummary.event|vSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service data collectionvSAN Health Test 'Checks the statistics collection of the vSAN performance service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.collection.event|vSAN 
Health Test 'Checks statistics collection of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service network diagnostic mode statusvSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.diagmode.event|vSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'EventExNot all hosts are contributing stats to vSAN Performance ServicevSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.hostsmissing.event|vSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats primary electionvSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.masterexist.event|vSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service statusvSAN Health Test 'Checks status of vSAN Performance Service changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.perfsvcstatus.event|vSAN Health Test 'Checks status of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats DB object conflictsvSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.renameddirs.event|vSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the vSAN performance service statistics database objectvSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.statsdb.event|vSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service verbose mode statusvSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.verbosemode.event|vSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN has encountered an integrity issue of the metadata of a component on this disk.vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.componentmetadata.event|vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.diskusage.event|vSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on vital memory pools, needed for the correct operation of physical disks.vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.lsomheap.event|vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on the vital memory pool, needed for the operation of physical disks.vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.physicaldisks.lsomslab.event|vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to '{curstatus}'EventExStorage Vendor Reported Drive HealthvSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.phmhealth.event|vSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the free space on physical disks in the vSAN cluster.vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcapacity.event|vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the number of components on the physical disk reaches the maximum limitationvSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcomplimithealth.event|vSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN is using the disk with reduced performance.vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcongestion.event|vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'EventExChecks if there is an issue retrieving the physical disk information from hosts in the vSAN cluster.vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskhostissues.event|vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the physical disks for all hosts in the vSAN cluster.vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskoverall.event|vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.smalldiskstest.event|vSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExCluster Name is not found in ssd endurance alarmClusters - {clustername} is/are not found in alarm - vSAN Health Alarm for disk endurance check.vsan.health.test.ssdendurance.clusternotfound.event|Clusters - {clustername} is/are not found. Please edit alarm - 'vSAN Health Alarm for disk endurance check' and correct the cluster name.EventExThe stretched cluster contains multiple unicast agents. 
This means multiple unicast agents were set on non-witness hostsvSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithmultipleunicastagents.event|vSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain a valid witness hostvSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithoutonewitnesshost.event|vSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain two valid fault domainsvSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithouttwodatafaultdomains.event|vSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'EventExHost should setup unicast agent so that they are able to communicate with the witness nodevSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostunicastagentunset.event|vSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'EventExHost with an invalid unicast agentvsan.health.test.stretchedcluster.hostwithinvalidunicastagent.event|vSAN Health Test 'Invalid unicast agent' status changed from '{prestatus}' to '{curstatus}'EventExCluster contains hosts that do not support stretched clustervSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostwithnostretchedclustersupport.event|vSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'EventExUnexpected number of data hosts in shared witness cluster. 
This means more than 2 data hosts in one shared witness cluster.vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnessclusterdatahostnumexceed.event|vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'EventExPer cluster component limit scaled down for shared witness host because of insufficient memoryvSAN Health Test 'Shared witness per cluster component limit scaled down' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnesscomponentlimitscaleddown.event|vSAN Health Test 'Shared witness per-cluster component limit inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExChecks the network latency between the two fault domains and the witness hostvSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.siteconnectivity.event|vSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'EventExWitness node is managed by vSphere Lifecycle ManagervSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.vlcmwitnessconfig.event|vSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'EventExThe following witness node resides in one of the data fault domainsvSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessfaultdomaininvalid.event|vSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster incorporates a witness host inside VMware vCenter clustervSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessinsidevccluster.event|vSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'EventExThe following (witness) hosts have invalid preferred fault domainsvSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomaininvalid.event|vSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'EventExThe preferred fault domain does not exist in the cluster for the following witness hostvSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomainnotexist.event|vSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevsan.health.test.stretchedcluster.witnessupgissue.event|vSAN Health Test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExWitness appliance upgrade to vSphere 7.0 or higher with cautionvsan.health.test.stretchedcluster.witnessupgrade.event|vSAN Health Test 'Witness appliance upgrade to vSphere 7.0 or higher with caution' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster contains witness hosts with no disk claimedvSAN Health Test 'No disk 
claimed on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesswithnodiskmapping.event|vSAN Health Test 'No disk claimed on witness host' status changed from '{prestatus}' to '{curstatus}'EventExVMware Certified vSAN HardwarevSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vsanhardwarecert.event|vSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.patchalert.event|vSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExvSAN release catalog up-to-datevSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.releasecataloguptodate.event|vSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'EventExCheck configuration issues for vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumconfig.event|vSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'EventExESXi build recommended by vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumrecommendation.event|vSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'EventExThis object has the risk of PSOD issue due to improper DOM object flag leakThis object has the risk of PSOD issue due to improper DOM object flag leakvsan.health.test.zdom.leak|Objects {1} have the risk of PSOD issue due to improper DOM object flag leak. Please refer KB https://kb.vmware.com/s/article/89564VirtualMachineFaultToleranceStateFault Tolerance has not been configured for this virtual machinenotConfiguredFault Tolerance is disableddisabledFault Tolerance is enabledenabledFault Tolerant Secondary VM is not runningneedSecondaryFault Tolerance is startingstartingFault Tolerance is runningrunning
12857:20241101:185643.942 End of vmware_service_get_evt_severity() evt_severities:1989
12857:20241101:185643.943 In vmware_service_get_hv_ds_dc_dvs_list()
12857:20241101:185643.947 vmware_service_get_hv_ds_dc_dvs_list() SOAP response:
group-d1triggeredAlarmState365.1group-d1alarm-365yellowfalse39701datacenter-3nameNTK-corptriggeredAlarmStategroup-n7triggeredAlarmStategroup-h5triggeredAlarmStatedatastore-4041datastore-4050datastore-4046datastore-2007datastore-2006datastore-2005group-v4triggeredAlarmStategroup-n4029triggeredAlarmStategroup-v11triggeredAlarmStategroup-v4027triggeredAlarmStatedvs-21nameNTK-DSwitchuuid50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbgroup-v4056triggeredAlarmStatehost-4047host-4043host-4038
12857:20241101:185643.948 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_datacenters_list'
12857:20241101:185643.948 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_datacenters_list' found:0 total:0
12857:20241101:185643.948 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_hv_ds_dc_dvs_list'
12857:20241101:185643.948 In vmware_service_alarm_details_update() alarm:alarm-365
12857:20241101:185643.950 vmware_service_alarm_details_update() SOAP response:
alarm-365info.descriptionThis alarm is fired when vSphere Health detects new issues in your environment. This alarm will be retriggered even if acknowledged when new issues are detected. Go to Monitor -> Health for a detailed description of the issues.info.enabledtrueinfo.nameSkyline Health has detected issues in your vSphere environmentinfo.systemNameSkyline Health has detected issues in your vSphere environment
12857:20241101:185643.950 End of vmware_service_alarm_details_update() index:0
12857:20241101:185643.950 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_hv_ds_dc_dvs_list' found:1 total:1
12857:20241101:185643.950 End of vmware_service_get_hv_ds_dc_dvs_list():SUCCEED found hv:3 ds:6 dc:1
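The hv:3 ds:6 dc:1 counters above are derived from the PropertyCollector response the collector just logged. A minimal sketch of counting managed-object references with libxml2, assuming a hypothetical node layout (the XML tags are not visible in the logged response, so the element and attribute names below are illustrative only, not the real schema):

    /* build: gcc count.c $(xml2-config --cflags --libs) */
    #include <stdio.h>
    #include <string.h>
    #include <libxml/parser.h>
    #include <libxml/xpath.h>

    /* count nodes matching an XPath expression in an in-memory XML document */
    static int count_nodes(const char *xml, const char *xpath)
    {
        int                     count = -1;
        xmlDocPtr               doc;
        xmlXPathContextPtr      ctx = NULL;
        xmlXPathObjectPtr       obj = NULL;

        if (NULL == (doc = xmlReadMemory(xml, (int)strlen(xml), "soap.xml", NULL, 0)))
            return -1;

        if (NULL != (ctx = xmlXPathNewContext(doc)) &&
                NULL != (obj = xmlXPathEvalExpression((const xmlChar *)xpath, ctx)))
        {
            count = (NULL == obj->nodesetval ? 0 : xmlXPathNodeSetGetLength(obj->nodesetval));
        }

        if (NULL != obj)
            xmlXPathFreeObject(obj);
        if (NULL != ctx)
            xmlXPathFreeContext(ctx);
        xmlFreeDoc(doc);

        return count;
    }

    int main(void)
    {
        /* hypothetical fragment with three HostSystem references, as in "found hv:3" above */
        const char *xml = "<r><obj type=\"HostSystem\">host-4047</obj>"
                "<obj type=\"HostSystem\">host-4043</obj>"
                "<obj type=\"HostSystem\">host-4038</obj></r>";

        printf("hv:%d\n", count_nodes(xml, "//obj[@type='HostSystem']"));
        return 0;
    }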
12857:20241101:185643.950 In vmware_service_create_datastore() datastore:'datastore-4041'
12857:20241101:185643.952 vmware_service_create_datastore() SOAP response:
datastore-4041infoLocal_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/34100425523270368744177664703687441776642024-10-24T08:57:27.792Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0334252364185616396313666.8267155e10-d4545cb2-5b01-3cecef012e78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682111______8falsetruesummarydatastore-4041Local_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185643.952 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185643.952 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185643.952 End of vmware_service_create_datastore()
12857:20241101:185643.952 In vmware_service_create_datastore() datastore:'datastore-4050'
12857:20241101:185643.954 vmware_service_create_datastore() SOAP response:
datastore-4050infoLocal_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/3410042552327036874417766468169720922112703687441776642024-11-01T13:06:44.907432Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0134252364185616396313666.8267155cc9-bea5e318-19fd-ac1f6bb14c78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______8falsetruetruesummarydatastore-4050Local_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185643.954 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185643.954 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185643.954 End of vmware_service_create_datastore()
12857:20241101:185643.954 In vmware_service_create_datastore() datastore:'datastore-4046'
12857:20241101:185643.956 vmware_service_create_datastore() SOAP response:
datastore-4046infoLocal_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/34100425523270368744177664703687441776642024-11-01T11:53:36.643999Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0234252364185616396313666.8267155ba7-5e9d16d6-0733-3cecef02b6e0t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682100______8falsetruesummarydatastore-4046Local_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12857:20241101:185643.956 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185643.956 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185643.956 End of vmware_service_create_datastore()
12857:20241101:185643.956 In vmware_service_create_datastore() datastore:'datastore-2007'
12857:20241101:185643.959 vmware_service_create_datastore() SOAP response:
datastore-2007info3PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/5031560478727036874417766468169720922112703687441776642024-11-01T13:06:44.904493Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID53153660247654416396313666.826704dec9-75e6c68a-c19e-9440c9831520naa.60002ac00000000000000054000228a31falsefalsefalsesummarydatastore-20073PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/53660247654450315604787242237661184truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185643.959 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185643.959 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185643.959 End of vmware_service_create_datastore()
12857:20241101:185643.959 In vmware_service_create_datastore() datastore:'datastore-2006'
12857:20241101:185643.961 vmware_service_create_datastore() SOAP response:
datastore-2006info3PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/1592765972487036874417766468169720922112703687441776642024-11-01T13:06:44.898963Z7036874417766468169720922112VMFS3PAR_KARTOHA_SAS_NTK_ID53516079283814416396313666.826703d63f-3516ce66-4bee-9440c9831520naa.60002ac0000000000000042f000219831falsefalsefalsesummarydatastore-20063PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/160792838144159276597248truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185643.961 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185643.961 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185643.961 End of vmware_service_create_datastore()
12857:20241101:185643.961 In vmware_service_create_datastore() datastore:'datastore-2005'
12857:20241101:185643.963 vmware_service_create_datastore() SOAP response:
datastore-2005info3PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/8543356846087036874417766468169720922112703687441776642024-11-01T18:34:30.288888Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID530_mgmt107347338854416396313666.826703d517-82086a06-cec0-9440c9831520naa.60002ac0000000000000004a000228a31falsefalsefalsesummarydatastore-20053PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/10734733885448543356846080truetrueVMFSnormaltriggeredAlarmState
12857:20241101:185643.963 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12857:20241101:185643.963 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12857:20241101:185643.963 End of vmware_service_create_datastore()
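Each vmware_service_create_datastore() response above carries the datastore capacity and free space in bytes. Reading the datastore-2005 summary as capacity 1073473388544 and freeSpace 854335684608 (an assumed split, since the tags are stripped from the logged XML), the used share works out to roughly 20%. A minimal sketch of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* figures read from the datastore-2005 summary above; the split between
         * capacity and freeSpace is an assumption because the XML tags are not
         * visible in the logged response */
        unsigned long long capacity = 1073473388544ULL;     /* bytes */
        unsigned long long free_space = 854335684608ULL;    /* bytes */
        unsigned long long used = capacity - free_space;

        printf("used: %llu bytes (%.2f%%)\n", used, 100.0 * (double)used / (double)capacity);
        return 0;
    }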
12857:20241101:185643.963 In vmware_service_get_clusters_and_resourcepools()
12857:20241101:185643.966 vmware_service_get_clusters_and_resourcepools() SOAP response:
domain-c1002nameNTK-corptriggeredAlarmStateresgroup-1003nameResourcesparentdomain-c1002resourcePoolresgroup-4001resgroup-4026resgroup-4026nameNTKparentresgroup-1003resourcePoolresgroup-4001namemgmtparentresgroup-1003resourcePool
12857:20241101:185643.966 In vmware_service_process_cluster_data()
12857:20241101:185643.966 In vmware_service_get_alarms_data(), func_parent:'vmware_service_process_cluster_data'
12857:20241101:185643.966 End of vmware_service_get_alarms_data() func_parent:'vmware_service_process_cluster_data' found:0 total:1
12857:20241101:185643.966 End of vmware_service_process_cluster_data():SUCCEED cl:1 rp:3
12857:20241101:185643.966 In vmware_service_get_cluster_state() clusterid:'domain-c1002'
12857:20241101:185643.968 vmware_service_get_cluster_state() SOAP response:
domain-c1002datastoredatastore-2005datastore-2006datastore-2007datastore-4041datastore-4046datastore-4050summary.overallStatusgreen
12857:20241101:185643.968 End of vmware_service_get_cluster_state():SUCCEED
12857:20241101:185643.968 End of vmware_service_get_clusters_and_resourcepools():SUCCEED found cl:1 rp:2
12857:20241101:185643.968 In vmware_service_init_hv() hvid:'host-4047'
12857:20241101:185643.968 In vmware_service_get_hv_data() guesthvid:'host-4047'
12857:20241101:185643.968 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4047false
12857:20241101:185643.980 vmware_service_get_hv_data() SOAP response:
host-4047config.network.dnsConfigfalsentk-esxi-01m1.ntk-corp.ru10.50.242.78m1.ntk-corp.ruconfig.network.ipRouteConfig.defaultGateway10.50.242.1config.network.pnickey-vim.host.PhysicalNic-vmnic0vmnic00000:1c:00.0i40en1000truefalsetrueac:1f:6b:b1:4c:783ac:1f:6b:b1:4c:7800falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic1vmnic10000:1c:00.1i40en1000truefalsetrueac:1f:6b:b1:4c:793ac:1f:6b:b1:4c:7900falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic2vmnic20000:af:00.0icen25000true25000truefalsefalse50:7c:6f:20:55:a8350:7c:6f:20:55:a800falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic3vmnic30000:af:00.1icen25000true25000truefalsefalse50:7c:6f:20:55:a9350:7c:6f:20:55:a900falsefalsefalsefalsefalsetruetrueconfig.storageDevice.scsiTopologykey-vim.host.ScsiTopology.Interface-vmhba0key-vim.host.BlockHba-vmhba0key-vim.host.ScsiTopology.Interface-vmhba1key-vim.host.BlockHba-vmhba1key-vim.host.ScsiTopology.Target-vmhba1:0:00key-vim.host.ScsiTopology.Lun-0100000000533435504e43305236383139353420202020202053414d53554e0key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.ScsiTopology.Interface-vmhba2key-vim.host.FibreChannelHba-vmhba2key-vim.host.ScsiTopology.Target-vmhba2:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:22key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.ScsiTopology.Target-vmhba2:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371key-vim.host.ScsiTopology.Interface-vmhba3key-vim.host.FibreChannelHba-vmhba3key-vim.host.ScsiTopology.Target-vmhba3:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:22key-vim.hos
t.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.ScsiTopology.Target-vmhba3:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243key-vim.host.ScsiTopology.Interface-vmhba64key-vim.host.FibreChannelHba-vmhba64key-vim.host.ScsiTopology.Interface-vmhba65key-vim.host.FibreChannelHba-vmhba65config.virtualNicManagerInfo.netConfigfaultToleranceLoggingtruevmk0faultToleranceLogging.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackmanagementtruevmk0management.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackmanagement.key-vim.host.VirtualNic-vmk0nvmeRdmatruevmk0nvmeRdma.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStacknvmeTcptruevmk0nvmeTcp.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackptpfalsevmk0ptp.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereBackupNFCtruevmk0vSphereBackupNFC.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereProvisioningtruevmk0vSphereProvisioning.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereReplicationtruevmk0vSphereReplication.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereReplicationNFCtruevmk0vSphereReplicationNFC.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 
dbdvportgroup-23017870991381500truedefaultTcpipStackvmotiontruevmk0vmotion.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvmotion.key-vim.host.VirtualNic-vmk0vsantruevmk0vsan.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvsanWitnesstruevmk0vsanWitness.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackdatastoredatastore-2005datastore-2006datastore-2007datastore-4050overallStatusgreenparentdomain-c1002runtime.connectionStateconnectedruntime.healthSystemRuntime.hardwareStatusInfoMemory 0.32.2.178Physical element is functioning as expectedGreenMemory 0.32.6.182Physical element is functioning as expectedGreenMemory 0.32.26.218Physical element is functioning as expectedGreenMemory 0.8.39.55Physical element is functioning as expectedGreenMemory 0.8.41.57Physical element is functioning as expectedGreenMemory 0.8.40.56Physical element is functioning as expectedGreenMemory 0.32.24.216Physical element is functioning as expectedGreenMemory 0.32.0.176Physical element is functioning as expectedGreenMemory 0.32.20.212Physical element is functioning as expectedGreenMemory 0.32.22.214Physical element is functioning as expectedGreenMemory 0.32.18.210Physical element is functioning as expectedGreenMemory 0.8.38.54Physical element is functioning as expectedGreenMemory 0.32.8.184Physical element is functioning as expectedGreenMemory 0.32.16.208Physical element is functioning as expectedGreenProc 0.3.1.1Physical element is functioning as expectedGreenProc 0.3.2.2Physical element is functioning as expectedGreenProc 0.3.21.53Physical element is functioning as expectedGreenProc 0.3.20.52Physical element is functioning as expectedGreenruntime.inMaintenanceModefalsesummary.config.namentk-esxi-01.m1.ntk-corp.rusummary.config.product.fullNameVMware ESXi 8.0.3 build-24280767summary.config.product.version8.0.3summary.hardware.cpuMhz2800summary.hardware.cpuModelIntel(R) Xeon(R) Gold 6242 CPU @ 2.80GHzsummary.hardware.memorySize686832898048summary.hardware.modelSuper Serversummary.hardware.numCpuCores32summary.hardware.numCpuThreads64summary.hardware.uuid00000000-0000-0000-0000-ac1f6bb14c78summary.hardware.vendorSupermicrosummary.managementServerIp10.50.242.10summary.quickStats.overallCpuUsage209summary.quickStats.overallMemoryUsage16610summary.quickStats.uptime691130summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo[Device] Add-in Card 16 AOC_NIC TempThe sensor is operating under normal conditionsGreen6300-2degrees CnonetemperatureSystem Chassis 0 Chassis IntruThe sensor is operating under normal conditionsGreen00unspecifiednoneotherSystem Board 46 1.05V PCHThe sensor is operating under normal conditionsGreen107-2VoltsnonevoltageSystem Board 45 PVNN PCHThe sensor is operating under normal conditionsGreen103-2VoltsnonevoltageSystem Board 44 1.8V PCHThe sensor is operating under normal conditionsGreen184-2VoltsnonevoltageSystem Board 43 3.3VSBThe sensor is operating under normal conditionsGreen341-2VoltsnonevoltageSystem Board 42 5VSBThe sensor is operating under normal conditionsGreen516-2VoltsnonevoltageMemory Module 41 VDimmP2DEFThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 40 
VDimmP2ABCThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 39 VDimmP1DEFThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 38 VDimmP1ABCThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageProcessor 21 Vcpu2The sensor is operating under normal conditionsGreen183-2VoltsnonevoltageProcessor 20 Vcpu1The sensor is operating under normal conditionsGreen186-2VoltsnonevoltageBattery 0 VBATThe sensor is operating under normal conditionsGreen325160unspecifiednonebatterySystem Board 34 3.3VCCThe sensor is operating under normal conditionsGreen340-2VoltsnonevoltageSystem Board 33 5VCCThe sensor is operating under normal conditionsGreen510-2VoltsnonevoltageSystem Board 32 12VThe sensor is operating under normal conditionsGreen1170-2VoltsnonevoltageFan Device 6 FAN6The sensor is operating under normal conditionsGreen690000-2RPMnonefanFan Device 5 FAN5The sensor is operating under normal conditionsGreen680000-2RPMnonefanFan Device 4 FAN4The sensor is operating under normal conditionsGreen680000-2RPMnonefanFan Device 3 FAN3The sensor is operating under normal conditionsGreen650000-2RPMnonefanFan Device 1 FAN1The sensor is operating under normal conditionsGreen660000-2RPMnonefanMemory Device 26 P2-DIMMF1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 24 P2-DIMME1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 22 P2-DIMMD1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 20 P2-DIMMC1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 18 P2-DIMMB1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 16 P2-DIMMA1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 8 P1-DIMME1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 6 P1-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 2 P1-DIMMB1 TempThe sensor is operating under normal conditionsGreen3100-2degrees CnonetemperatureMemory Device 0 P1-DIMMA1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureSystem Board 21 VRMP2DEF TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 20 VRMP2ABC TempThe sensor is operating under normal conditionsGreen4800-2degrees CnonetemperatureSystem Board 19 VRMP1DEF TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 18 VRMP1ABC TempThe sensor is operating under normal conditionsGreen4300-2degrees CnonetemperatureSystem Board 17 VRMCpu2 TempThe sensor is operating under normal conditionsGreen4400-2degrees CnonetemperatureSystem Board 16 VRMCpu1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 3 Peripheral TempThe sensor is operating under normal conditionsGreen4200-2degrees CnonetemperatureSystem Board 2 System TempThe sensor is operating under normal conditionsGreen2900-2degrees CnonetemperatureSystem Board 1 PCH TempThe sensor is operating under normal conditionsGreen5100-2degrees CnonetemperatureProcessor 2 CPU2 TempThe sensor is operating under normal conditionsGreen5800-2degrees CnonetemperatureProcessor 1 CPU1 TempThe sensor is operating under normal 
conditionsGreen5300-2degrees CnonetemperaturePower Supply 87 PS2 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowerPower Supply 88 PS1 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowertriggeredAlarmStatevmvm-4060
12857:20241101:185643.980 End of vmware_service_get_hv_data():SUCCEED
12857:20241101:185643.981 In vmware_service_get_hv_pnics_data()
12857:20241101:185643.982 End of vmware_service_get_hv_pnics_data() found:4
12857:20241101:185643.982 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12857:20241101:185643.982 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12857:20241101:185643.982 In vmware_hv_ip_search()
12857:20241101:185643.982 End of vmware_hv_ip_search() ip:10.50.242.11
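vmware_hv_ip_search() settles on 10.50.242.11, the address of vmk0, which is the vmknic in the netConfig block above with the management service enabled. A minimal sketch of that selection rule; the struct layout and helper name are hypothetical, not the collector's actual code:

    #include <stdio.h>
    #include <string.h>

    /* hypothetical, simplified view of one virtual NIC service binding */
    typedef struct
    {
        const char *service;    /* e.g. "management", "vmotion", "vsan" */
        const char *device;     /* e.g. "vmk0" */
        const char *ip;
    }
    vnic_t;

    /* return the IP of the first vmknic bound to the "management" service, or NULL */
    static const char *hv_management_ip(const vnic_t *nics, size_t n)
    {
        size_t  i;

        for (i = 0; i < n; i++)
        {
            if (0 == strcmp(nics[i].service, "management"))
                return nics[i].ip;
        }

        return NULL;
    }

    int main(void)
    {
        /* sample data mirroring the host-4047 netConfig above */
        vnic_t  nics[] = {
            {"faultToleranceLogging", "vmk0", "10.50.242.11"},
            {"management", "vmk0", "10.50.242.11"},
            {"vmotion", "vmk0", "10.50.242.11"}
        };

        printf("ip:%s\n", hv_management_ip(nics, sizeof(nics) / sizeof(nics[0])));
        return 0;
    }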
12857:20241101:185643.982 In vmware_hv_get_parent_data() id:'host-4047'
12857:20241101:185643.984 vmware_hv_get_parent_data() SOAP response:
domain-c1002nameNTK-corpdatacenter-3nameNTK-corptriggeredAlarmState
12857:20241101:185643.984 End of vmware_hv_get_parent_data():SUCCEED
12857:20241101:185643.985 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4047"
12857:20241101:185643.985 In vmware_service_hv_disks_get_info() hvid:'host-4047'
12857:20241101:185643.985 vmware_service_hv_disks_get_info() count of scsiLun:21
12859:20241101:185643.989 In vmware_job_get() queue:2
12859:20241101:185643.989 End of vmware_job_get() queue:1 type:update_perf_counters
12859:20241101:185643.989 In vmware_job_exec() type:update_perf_counters
12859:20241101:185643.989 End of vmware_job_exec() type:update_perf_counters ret:FAIL
12859:20241101:185643.989 In vmware_job_schedule() queue:1 type:update_perf_counters
12859:20241101:185643.989 End of vmware_job_schedule() type:update_perf_counters nextcheck:18:57:43
12859:20241101:185643.989 In vmware_job_get() queue:2
12859:20241101:185643.989 End of vmware_job_get() queue:2 type:none
12861:20241101:185643.989 In vmware_job_get() queue:2
12861:20241101:185643.989 End of vmware_job_get() queue:2 type:none
12855:20241101:185643.989 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001019 sec]'
12855:20241101:185643.989 In vmware_job_get() queue:2
12855:20241101:185643.989 End of vmware_job_get() queue:2 type:none
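While process 12857 is still building the service object, the other collectors keep polling the shared job queue: vmware_job_get() pops a due job (the queue size apparently drops from 2 to 1 as update_perf_counters is taken), vmware_job_exec() returns FAIL (the trace does not show why), and vmware_job_schedule() re-queues it with a new nextcheck. A minimal sketch of that get/exec/schedule pattern, not the Zabbix implementation itself:

    #include <stdio.h>
    #include <time.h>

    typedef struct
    {
        const char  *type;
        time_t      nextcheck;
    }
    job_t;

    /* pop the first job whose nextcheck has elapsed, NULL otherwise */
    static job_t *job_get(job_t *queue, size_t n, time_t now)
    {
        size_t  i;

        for (i = 0; i < n; i++)
        {
            if (queue[i].nextcheck <= now)
                return &queue[i];
        }

        return NULL;
    }

    int main(void)
    {
        time_t  now = time(NULL);
        job_t   queue[] = {{"update_perf_counters", 0}, {"update_conf", 0}};
        job_t   *job;

        if (NULL != (job = job_get(queue, sizeof(queue) / sizeof(queue[0]), now)))
        {
            /* execution would fail at this point in the trace, so the job is
             * simply re-scheduled for a later nextcheck */
            job->nextcheck = now + 120;
            printf("type:%s nextcheck:%ld\n", job->type, (long)job->nextcheck);
        }

        return 0;
    }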
12857:20241101:185643.995 vmware_service_hv_disks_get_info() SOAP response:
host-4047config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].canonicalNamet10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].modelSAMSUNG MZ7LH480config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].queueDepth31config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].revision904Qconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].serialNumberS45PNC0R681954 config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].vendorATA config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].canonicalNamenaa.2ff70002ac021983config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].canonicalNamenaa.2ff70002ac0228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].canonicalNamenaa.60002ac0000000000000004a000228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].modelVV 
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].canonicalNamenaa.60002ac00000000000000054000228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].canonicalNamenaa.60002ac0000000000000042f00021983config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].vendor3PARdata
12857:20241101:185643.995 In vmware_service_hv_disks_parse_info()
12857:20241101:185643.996 End of vmware_service_hv_disks_parse_info() created:6
12857:20241101:185643.996 End of vmware_service_hv_disks_get_info():SUCCEED for 6(vsan:0) / 21
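Only 6 of the 21 scsiLun entries end up as disk records here. A plausible reading, not confirmed by the trace, is that entries are kept only when their lunType is "disk", as it is for the SAMSUNG SSD and the 3PAR LUNs in the response above. A minimal sketch of such a filter, with hypothetical sample data:

    #include <stdio.h>
    #include <string.h>

    /* hypothetical, simplified view of one scsiLun entry */
    typedef struct
    {
        const char  *key;
        const char  *lun_type;  /* "disk", "cdrom", "enclosure", ... */
    }
    scsi_lun_t;

    int main(void)
    {
        /* sample entries; only the first two mirror keys visible in the response above */
        scsi_lun_t  luns[] = {
            {"key-vim.host.ScsiDisk-0100000000533435504e4330", "disk"},
            {"key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020", "disk"},
            {"key-vim.host.ScsiLun-hypothetical-cdrom", "cdrom"}
        };
        size_t  i, created = 0, total = sizeof(luns) / sizeof(luns[0]);

        for (i = 0; i < total; i++)
        {
            if (0 == strcmp(luns[i].lun_type, "disk"))
                created++;
        }

        printf("created:%zu / %zu\n", created, total);
        return 0;
    }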
12857:20241101:185643.996 In vmware_service_hv_get_multipath_data() hvid:'host-4047'
12857:20241101:185644.005 vmware_service_hv_get_multipath_data() SOAP response:
host-4047config.storageDevice.multipathInfokey-vim.host.MultipathInfo.LogicalUnit-0100000000533435504e43305236383139353420202020202053414d53554e0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.MultipathInfo.Path-vmhba1:C0:T0:L0vmhba1:C0:T0:L0activeactivetruekey-vim.host.BlockHba-vmhba1key-vim.host.MultipathInfo.LogicalUnit-0100000000533435504e43305236383139353420202020202053414d53554eFIXEDkey-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a3565620202020020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L530vmhba2:C0:T0:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L530vmhba2:C0:T3:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L530vmhba3:C0:T1:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L530vmhba3:C0:T0:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a3565620202020020013020060002ac00000000000000054000228a3565620202020key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L531vmhba2:C0:T0:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L531vmhba2:C0:T3:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L531vmhba3:C0:T1:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L531vmhba3:C0:T0:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f00021983565620202020020017020060002ac0000000000000042f00021983565620202020key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020key-vim.host.MultipathInfo.Path-vmhba3:C0:T3:L535vmhba3:C0:T3:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.MultipathInfo.Path-vmhba3:C0:T2:L535vmhba3:C0:T2:L535activeactivetruekey-vim.host.FibreChanne
lHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T2:L535vmhba2:C0:T2:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T1:L535vmhba2:C0:T1:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202002000001002ff70002ac0228a3565620202020key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L256vmhba2:C0:T0:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L256vmhba2:C0:T3:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L256vmhba3:C0:T1:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L256vmhba3:C0:T0:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202002000001002ff70002ac021983565620202020key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.MultipathInfo.Path-vmhba3:C0:T3:L256vmhba3:C0:T3:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202023781820897040858913456231250505898371key-vim.host.MultipathInfo.Path-vmhba3:C0:T2:L256vmhba3:C0:T2:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202023061244956661579553456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T2:L256vmhba2:C0:T2:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202025222972777799417633456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T1:L256vmhba2:C0:T1:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202024502396837420138273456231250505898371VMW_PSP_RRVMW_SATP_ALUA
12857:20241101:185644.005 End of vmware_service_hv_get_multipath_data():SUCCEED
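The multipath response lists every runtime path per logical unit together with its state; the 3PAR LUN behind L530, for example, shows four paths spread over vmhba2 and vmhba3, all active. A minimal sketch of tallying active paths per LUN, with the path list copied from the response above and a simplified, hypothetical struct layout:

    #include <stdio.h>
    #include <string.h>

    /* hypothetical, flattened view of one multipath path entry */
    typedef struct
    {
        const char  *lun;    /* logical unit canonical name */
        const char  *path;   /* runtime path name */
        const char  *state;  /* "active", "standby", "dead", ... */
    }
    mp_path_t;

    int main(void)
    {
        /* the four paths of the LUN behind L530, as listed in the response above */
        mp_path_t   paths[] = {
            {"naa.60002ac0000000000000004a000228a3", "vmhba2:C0:T0:L530", "active"},
            {"naa.60002ac0000000000000004a000228a3", "vmhba2:C0:T3:L530", "active"},
            {"naa.60002ac0000000000000004a000228a3", "vmhba3:C0:T1:L530", "active"},
            {"naa.60002ac0000000000000004a000228a3", "vmhba3:C0:T0:L530", "active"}
        };
        size_t  i, active = 0;

        for (i = 0; i < sizeof(paths) / sizeof(paths[0]); i++)
        {
            if (0 == strcmp(paths[i].state, "active"))
                active++;
        }

        printf("%s: %zu active path(s)\n", paths[0].lun, active);
        return 0;
    }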
12857:20241101:185644.005 In vmware_hv_ds_access_update() hv id:host-4047 hv dss:4 dss:6
12857:20241101:185644.009 vmware_hv_ds_access_update() SOAP response:
datastore-2005host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-2006host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-2007host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-4050host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtrue
12857:20241101:185644.009 In vmware_hv_ds_access_parse()
12857:20241101:185644.009 In vmware_hv_get_ds_access() for DS:datastore-2005
12857:20241101:185644.009 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185644.010 In vmware_hv_get_ds_access() for DS:datastore-2006
12857:20241101:185644.010 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185644.010 In vmware_hv_get_ds_access() for DS:datastore-2007
12857:20241101:185644.010 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185644.010 In vmware_hv_get_ds_access() for DS:datastore-4050
12857:20241101:185644.010 End of vmware_hv_get_ds_access() mountinfo:15
12857:20241101:185644.010 End of vmware_hv_ds_access_parse() parsed:4
12857:20241101:185644.010 End of vmware_hv_ds_access_update():SUCCEED for 4 / 4
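vmware_hv_get_ds_access() reports mountinfo:15 for every datastore whose mountInfo above shows accessMode readWrite, accessible true and mounted true. A plausible decoding of that value as a bitmask; the flag names and bit positions below are assumptions chosen to be consistent with 15, not something this log confirms:

    #include <stdio.h>

    /* assumed flag layout; consistent with mountinfo:15 for a mounted, accessible,
     * read-write datastore, but not confirmed by this log */
    #define DS_MOUNTED      0x01
    #define DS_ACCESSIBLE   0x02
    #define DS_READABLE     0x04
    #define DS_WRITABLE     0x08

    int main(void)
    {
        unsigned int    mountinfo = 15;

        printf("mounted:%d accessible:%d read:%d write:%d\n",
                0 != (mountinfo & DS_MOUNTED), 0 != (mountinfo & DS_ACCESSIBLE),
                0 != (mountinfo & DS_READABLE), 0 != (mountinfo & DS_WRITABLE));
        return 0;
    }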
12857:20241101:185644.010 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID530_mgmt"
12857:20241101:185644.010 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_KARTOHA_SAS_NTK_ID535"
12857:20241101:185644.010 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID531"
12857:20241101:185644.010 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"Local_ntk-m1-esxi-01"
12857:20241101:185644.010 In vmware_service_create_vm() vmid:'vm-4060'
12857:20241101:185644.010 In vmware_service_get_vm_data() vmid:'vm-4060'
12857:20241101:185644.014 vmware_service_get_vm_data() SOAP response:
vm-4060availableFieldconfig.hardware218192falsefalse200IDE 00201IDE 11300PS2 controller 00600700100PCI controller 00500120001000150004000400SIO controller 00600Keyboard3000700Pointing device; Devicefalseautodetect3001500Video card100040961falsefalseautomatic26214412000Device on the virtual machine PCI bus that provides support for the virtual machine communication interface10017-1079927627falsetrue1000LSI Logic16100302000truenoSharing715000AHCI321002401600016000ISO [3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.iso[3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.isodatastore-2005truetruefalseok1500002000104,857,600 KB[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdkdatastore-2005persistentfalsefalsefalsefalse6000C29d-45c9-aa9f-3d54-a04187209ee5fa74bccac7959c5d95abe5bffffffffefalsesharingNone100001048576001073741824001000normal-11000normal05-20004000DVSwitch: 50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 db50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-400628630340996truefalsetrueok1601007assigned00:50:56:b0:80:48true050normal-1trueconfig.instanceUuid50304101-157a-f442-58f4-550f05de33feconfig.uuid42306756-2f64-b85a-a4fe-276cbfa19cb5customValuedatastoredatastore-2005guest.disk/5146047283240655228928/boot20403732481785856000guest.guestFamilylinuxGuestguest.guestFullNameUbuntu Linux (64-bit)guest.guestStaterunningguest.hostNamezabb-ntk-proxyguest.ipAddress10.50.242.76guest.netntk_dmz_vlan_112910.50.242.76fe80::250:56ff:feb0:804800:50:56:b0:80:48true400010.50.242.7628preferredfe80::250:56ff:feb0:804864unknownguest.toolsRunningStatusguestToolsRunningguest.toolsVersion12389layoutEx0[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmxconfig23822382true1[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmsdsnapshotList00true2[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdkdiskDescriptor458458true3[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk-flat.vmdkdiskExtent107374182400107374182400true4[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.nvramnvram86848684true5[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk-13083273.vswpswap85899345928589934592true6[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/vmx-zabbix-proxy-ntk-4e9138c7e268bf86a750769daba1b562730af6a5e74aa2ad704e8731824ba105-1.vswpuwswap8598323285983232true2000232024-11-01T18:26:30.170712Zparentgroup-v11resourcePoolresgroup-4001summary.config.memorySizeMB8192summary.config.namezabbix-proxy-ntksummary.config.numCpu2summary.quickStats.balloonedMemory0summary.quickStats.compressedMemory0summary.quickStats.guestMemoryUsage245summary.quickStats.hostMemoryUsage8222summary.quickStats.overallCpuUsage0summary.quickStats.privateMemory8165summary.quickStats.sharedMemory3summary.quickStats.swappedMemory0summary.quickStats.uptimeSeconds75907summary.runtime.consolidationNeededfalsesummary.runtime.powerStatepoweredOnsummary.storage.committed116050111748summary.storage.uncommitted0summary.storage.unshared107374182858triggeredAlarmStategroup-v11nameDiscovered virtual machineparentgroup-v4group-v4namevmparentdatacenter-3
12857:20241101:185644.015 End of vmware_service_get_vm_data():SUCCEED
12857:20241101:185644.016 In vmware_service_get_vm_folder() folder id:'group-v11'
12857:20241101:185644.016 End of vmware_service_get_vm_folder(): vm folder:Discovered virtual machine
12857:20241101:185644.016 In vmware_vm_get_nic_devices()
12857:20241101:185644.016 End of vmware_vm_get_nic_devices() found:1
12857:20241101:185644.016 In vmware_vm_get_disk_devices()
12857:20241101:185644.017 End of vmware_vm_get_disk_devices() found:1
12857:20241101:185644.017 In vmware_vm_get_file_systems()
12857:20241101:185644.017 End of vmware_vm_get_file_systems() found:2
12857:20241101:185644.017 In vmware_vm_get_custom_attrs()
12857:20241101:185644.017 End of vmware_vm_get_custom_attrs() attributes:0
12857:20241101:185644.017 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_vm'
12857:20241101:185644.017 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_vm' found:0 total:1
12857:20241101:185644.017 End of vmware_service_create_vm():SUCCEED
12857:20241101:185644.017 End of vmware_service_init_hv():SUCCEED
12857:20241101:185644.017 In vmware_service_init_hv() hvid:'host-4043'
12857:20241101:185644.017 In vmware_service_get_hv_data() guesthvid:'host-4043'
12857:20241101:185644.017 vmware_service_get_hv_data() SOAP request: property collector query for HostSystem 'host-4043' (XML markup lost in capture); requested properties: vm, parent, datastore, config.virtualNicManagerInfo.netConfig, config.network.pnic, config.network.ipRouteConfig.defaultGateway, summary.managementServerIp, config.storageDevice.scsiTopology, triggeredAlarmState, summary.quickStats.overallCpuUsage, summary.config.product.fullName, summary.hardware.numCpuCores, summary.hardware.cpuMhz, summary.hardware.cpuModel, summary.hardware.numCpuThreads, summary.hardware.memorySize, summary.hardware.model, summary.hardware.uuid, summary.hardware.vendor, summary.quickStats.overallMemoryUsage, summary.quickStats.uptime, summary.config.product.version, summary.config.name, overallStatus, runtime.inMaintenanceMode, summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo, config.network.dnsConfig, runtime.connectionState, hardware.systemInfo.serialNumber, runtime.healthSystemRuntime.hardwareStatusInfo
12857:20241101:185644.028 vmware_service_get_hv_data() SOAP response:
(Property collector response for host-4043; XML markup lost in capture. Recoverable values: dnsConfig host ntk-esxi-02, domain m1.ntk-corp.ru, DNS server 10.50.242.78; default gateway 10.50.242.1; pNICs vmnic0/vmnic1 (i40en, 1000 Mb, MAC 3c:ec:ef:02:b6:e0 / :e1) and vmnic2/vmnic3 (icen, 25000 Mb, MAC 50:7c:6f:3b:d8:c6 / :c7); SCSI topology for vmhba0-vmhba3, vmhba64 and vmhba65 with 3PAR FC LUNs; vmkernel interface vmk0 10.50.242.12/255.255.255.192 on dvportgroup-2311 of DVSwitch 50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 db, services enabled: management, faultToleranceLogging, nvmeRdma, nvmeTcp, vSphereBackupNFC, vSphereProvisioning, vSphereReplication, vSphereReplicationNFC, vmotion, vsan, vsanWitness (ptp disabled); datastores datastore-2005, datastore-2006, datastore-2007, datastore-4046; overallStatus green; parent domain-c1002; runtime.connectionState connected; hardwareStatusInfo memory and processor elements all Green; runtime.inMaintenanceMode false; summary.config.name ntk-esxi-02.m1.ntk-corp.ru; product VMware ESXi 8.0.3 build-24280767, version 8.0.3; CPU Intel(R) Xeon(R) Gold 6242 @ 2.80GHz, 2800 MHz, 32 cores / 64 threads; memorySize 686831919104 bytes; model Supermicro SYS-6019P-WTR; uuid 00000000-0000-0000-0000-3cecef02b6e0; managementServerIp 10.50.242.10; overallCpuUsage 446; overallMemoryUsage 8860; uptime 691253 s; numericSensorInfo temperature, voltage, fan and power-supply sensors all Green ("operating under normal conditions"); vm vm-4057.)
12857:20241101:185644.028 End of vmware_service_get_hv_data():SUCCEED
12857:20241101:185644.029 In vmware_service_get_hv_pnics_data()
12857:20241101:185644.030 End of vmware_service_get_hv_pnics_data() found:4
12857:20241101:185644.030 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12857:20241101:185644.030 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12857:20241101:185644.030 In vmware_hv_ip_search()
12857:20241101:185644.030 End of vmware_hv_ip_search() ip:10.50.242.12
12857:20241101:185644.030 In vmware_hv_get_parent_data() id:'host-4043'
12857:20241101:185644.033 vmware_hv_get_parent_data() SOAP response:
(Parent data response; XML markup lost in capture: domain-c1002 name:NTK-corp; datacenter-3 name:NTK-corp; triggeredAlarmState: empty.)
12857:20241101:185644.033 End of vmware_hv_get_parent_data():SUCCEED
12857:20241101:185644.033 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4043"
12857:20241101:185644.033 In vmware_service_hv_disks_get_info() hvid:'host-4043'
12857:20241101:185644.033 vmware_service_hv_disks_get_info() count of scsiLun:21
12859:20241101:185644.989 In vmware_job_get() queue:2
12859:20241101:185644.989 End of vmware_job_get() queue:2 type:none
12861:20241101:185644.989 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000840 sec]'
12861:20241101:185644.989 In vmware_job_get() queue:2
12861:20241101:185644.989 End of vmware_job_get() queue:2 type:none
12855:20241101:185644.989 In vmware_job_get() queue:2
12855:20241101:185644.989 End of vmware_job_get() queue:2 type:none
12859:20241101:185645.989 In vmware_job_get() queue:2
12859:20241101:185645.989 End of vmware_job_get() queue:2 type:none
12861:20241101:185645.989 In vmware_job_get() queue:2
12861:20241101:185645.989 End of vmware_job_get() queue:2 type:none
12855:20241101:185645.990 In vmware_job_get() queue:2
12855:20241101:185645.990 End of vmware_job_get() queue:2 type:none
12859:20241101:185646.989 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001233 sec]'
12859:20241101:185646.990 In vmware_job_get() queue:2
12859:20241101:185646.990 End of vmware_job_get() queue:2 type:none
12861:20241101:185646.990 In vmware_job_get() queue:2
12861:20241101:185646.990 End of vmware_job_get() queue:2 type:none
12855:20241101:185646.990 In vmware_job_get() queue:2
12855:20241101:185646.990 End of vmware_job_get() queue:2 type:none
12861:20241101:185647.990 In vmware_job_get() queue:2
12861:20241101:185647.990 End of vmware_job_get() queue:2 type:none
12855:20241101:185647.990 In vmware_job_get() queue:2
12855:20241101:185647.990 End of vmware_job_get() queue:2 type:none
12859:20241101:185647.990 In vmware_job_get() queue:2
12859:20241101:185647.990 End of vmware_job_get() queue:2 type:none
12837:20241101:185648.454 received configuration data from server at "10.50.242.78", datalen 437
12861:20241101:185648.990 In vmware_job_get() queue:2
12861:20241101:185648.990 End of vmware_job_get() queue:2 type:none
12855:20241101:185648.990 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001374 sec]'
12855:20241101:185648.991 In vmware_job_get() queue:2
12855:20241101:185648.991 End of vmware_job_get() queue:2 type:none
12859:20241101:185648.991 In vmware_job_get() queue:2
12859:20241101:185648.991 End of vmware_job_get() queue:2 type:none
12861:20241101:185649.991 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001515 sec]'
12861:20241101:185649.991 In vmware_job_get() queue:2
12861:20241101:185649.991 End of vmware_job_get() queue:2 type:none
12855:20241101:185649.991 In vmware_job_get() queue:2
12855:20241101:185649.991 End of vmware_job_get() queue:2 type:none
12859:20241101:185649.991 In vmware_job_get() queue:2
12859:20241101:185649.991 End of vmware_job_get() queue:2 type:none
12861:20241101:185650.991 In vmware_job_get() queue:2
12861:20241101:185650.991 End of vmware_job_get() queue:2 type:none
12855:20241101:185650.991 In vmware_job_get() queue:2
12855:20241101:185650.991 End of vmware_job_get() queue:2 type:none
12859:20241101:185650.991 In vmware_job_get() queue:2
12859:20241101:185650.991 End of vmware_job_get() queue:2 type:none
12855:20241101:185651.991 In vmware_job_get() queue:2
12855:20241101:185651.991 End of vmware_job_get() queue:2 type:none
12861:20241101:185651.991 In vmware_job_get() queue:2
12861:20241101:185651.991 End of vmware_job_get() queue:2 type:none
12859:20241101:185651.991 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001919 sec]'
12859:20241101:185651.991 In vmware_job_get() queue:2
12859:20241101:185651.991 End of vmware_job_get() queue:2 type:none
12855:20241101:185652.991 In vmware_job_get() queue:2
12855:20241101:185652.991 End of vmware_job_get() queue:2 type:none
12861:20241101:185652.992 In vmware_job_get() queue:2
12861:20241101:185652.992 End of vmware_job_get() queue:2 type:none
12859:20241101:185652.992 In vmware_job_get() queue:2
12859:20241101:185652.992 End of vmware_job_get() queue:2 type:none
12855:20241101:185653.992 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001275 sec]'
12855:20241101:185653.992 In vmware_job_get() queue:2
12855:20241101:185653.992 End of vmware_job_get() queue:2 type:none
12859:20241101:185653.992 In vmware_job_get() queue:2
12859:20241101:185653.992 End of vmware_job_get() queue:2 type:none
12861:20241101:185653.992 In vmware_job_get() queue:2
12861:20241101:185653.992 End of vmware_job_get() queue:2 type:none
12857:20241101:185654.035 End of vmware_service_hv_disks_get_info():FAIL for 0(vsan:0) / 21
12857:20241101:185654.035 End of vmware_service_init_hv():FAIL
12857:20241101:185654.035 Unable to initialize hv host-4043: Timeout was reached.
12857:20241101:185654.035 In vmware_service_init_hv() hvid:'host-4038'
12857:20241101:185654.035 In vmware_service_get_hv_data() guesthvid:'host-4038'
12857:20241101:185654.036 vmware_service_get_hv_data() SOAP request: property collector query for HostSystem 'host-4038' (XML markup lost in capture); requested properties: same set as for host-4043 above (vm, parent, datastore, config.* network and storage topology, summary.* hardware, product and quickStats, runtime.* state and health sensor info).
12855:20241101:185654.992 In vmware_job_get() queue:2
12855:20241101:185654.992 End of vmware_job_get() queue:2 type:none
12859:20241101:185654.992 In vmware_job_get() queue:2
12859:20241101:185654.992 End of vmware_job_get() queue:2 type:none
12861:20241101:185654.992 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001485 sec]'
12861:20241101:185654.992 In vmware_job_get() queue:2
12861:20241101:185654.992 End of vmware_job_get() queue:2 type:none
12855:20241101:185655.992 In vmware_job_get() queue:2
12855:20241101:185655.992 End of vmware_job_get() queue:2 type:none
12859:20241101:185655.992 In vmware_job_get() queue:2
12859:20241101:185655.993 End of vmware_job_get() queue:2 type:none
12861:20241101:185655.993 In vmware_job_get() queue:2
12861:20241101:185655.993 End of vmware_job_get() queue:2 type:none
12855:20241101:185656.992 In vmware_job_get() queue:2
12855:20241101:185656.993 End of vmware_job_get() queue:2 type:none
12861:20241101:185656.993 In vmware_job_get() queue:2
12861:20241101:185656.993 End of vmware_job_get() queue:2 type:none
12859:20241101:185656.993 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001277 sec]'
12859:20241101:185656.993 In vmware_job_get() queue:2
12859:20241101:185656.993 End of vmware_job_get() queue:2 type:none
12855:20241101:185657.993 In vmware_job_get() queue:2
12855:20241101:185657.993 End of vmware_job_get() queue:2 type:none
12861:20241101:185657.994 In vmware_job_get() queue:2
12861:20241101:185657.994 End of vmware_job_get() queue:2 type:none
12859:20241101:185657.994 In vmware_job_get() queue:2
12859:20241101:185657.994 End of vmware_job_get() queue:2 type:none
12837:20241101:185658.470 received configuration data from server at "10.50.242.78", datalen 437
12855:20241101:185658.994 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002447 sec]'
12855:20241101:185658.994 In vmware_job_get() queue:2
12861:20241101:185658.994 In vmware_job_get() queue:2
12859:20241101:185658.994 In vmware_job_get() queue:2
12855:20241101:185658.994 End of vmware_job_get() queue:2 type:none
12861:20241101:185658.995 End of vmware_job_get() queue:2 type:none
12859:20241101:185658.995 End of vmware_job_get() queue:2 type:none
12855:20241101:185659.995 In vmware_job_get() queue:2
12855:20241101:185659.995 End of vmware_job_get() queue:2 type:none
12861:20241101:185659.995 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002675 sec]'
12861:20241101:185659.995 In vmware_job_get() queue:2
12861:20241101:185659.995 End of vmware_job_get() queue:2 type:none
12859:20241101:185659.995 In vmware_job_get() queue:2
12859:20241101:185659.995 End of vmware_job_get() queue:2 type:none
12855:20241101:185700.995 In vmware_job_get() queue:2
12855:20241101:185700.995 End of vmware_job_get() queue:2 type:none
12859:20241101:185700.995 In vmware_job_get() queue:2
12859:20241101:185700.995 End of vmware_job_get() queue:2 type:none
12861:20241101:185700.996 In vmware_job_get() queue:2
12861:20241101:185700.996 End of vmware_job_get() queue:2 type:none
12855:20241101:185701.996 In vmware_job_get() queue:2
12855:20241101:185701.996 End of vmware_job_get() queue:2 type:none
12859:20241101:185701.996 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002943 sec]'
12859:20241101:185701.996 In vmware_job_get() queue:2
12859:20241101:185701.996 End of vmware_job_get() queue:2 type:none
12861:20241101:185701.996 In vmware_job_get() queue:2
12861:20241101:185701.996 End of vmware_job_get() queue:2 type:none
12855:20241101:185702.996 In vmware_job_get() queue:2
12855:20241101:185702.996 End of vmware_job_get() queue:2 type:none
12859:20241101:185702.996 In vmware_job_get() queue:2
12859:20241101:185702.996 End of vmware_job_get() queue:2 type:none
12861:20241101:185702.996 In vmware_job_get() queue:2
12861:20241101:185702.996 End of vmware_job_get() queue:2 type:none
12859:20241101:185703.996 In vmware_job_get() queue:2
12859:20241101:185703.996 End of vmware_job_get() queue:2 type:none
12855:20241101:185703.996 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001870 sec]'
12855:20241101:185703.996 In vmware_job_get() queue:2
12855:20241101:185703.996 End of vmware_job_get() queue:2 type:none
12861:20241101:185703.996 In vmware_job_get() queue:2
12861:20241101:185703.996 End of vmware_job_get() queue:2 type:none
12857:20241101:185704.037 End of vmware_service_get_hv_data():FAIL
12857:20241101:185704.037 End of vmware_service_init_hv():FAIL
12857:20241101:185704.037 Unable to initialize hv host-4038: Timeout was reached.
12857:20241101:185704.037 In vmware_service_dvswitch_load() dvs count:0
12857:20241101:185704.037 End of vmware_service_dvswitch_load() count: 0 / 0
12857:20241101:185704.037 In vmware_service_props_load() props total:0
12857:20241101:185704.037 End of vmware_service_props_load() count: 0 / 0
12857:20241101:185704.037 In vmware_service_get_maxquerymetrics()
12859:20241101:185704.996 In vmware_job_get() queue:2
12859:20241101:185704.996 End of vmware_job_get() queue:2 type:none
12855:20241101:185704.996 In vmware_job_get() queue:2
12855:20241101:185704.996 End of vmware_job_get() queue:2 type:none
12861:20241101:185704.996 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001518 sec]'
12861:20241101:185704.996 In vmware_job_get() queue:2
12861:20241101:185704.996 End of vmware_job_get() queue:2 type:none
12859:20241101:185705.996 In vmware_job_get() queue:2
12859:20241101:185705.996 End of vmware_job_get() queue:2 type:none
12855:20241101:185705.996 In vmware_job_get() queue:2
12855:20241101:185705.996 End of vmware_job_get() queue:2 type:none
12861:20241101:185705.997 In vmware_job_get() queue:2
12861:20241101:185705.997 End of vmware_job_get() queue:2 type:none
12859:20241101:185706.996 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000734 sec]'
12859:20241101:185706.996 In vmware_job_get() queue:2
12859:20241101:185706.996 End of vmware_job_get() queue:2 type:none
12855:20241101:185706.997 In vmware_job_get() queue:2
12855:20241101:185706.997 End of vmware_job_get() queue:2 type:none
12861:20241101:185706.997 In vmware_job_get() queue:2
12861:20241101:185706.997 End of vmware_job_get() queue:2 type:none
12859:20241101:185707.997 In vmware_job_get() queue:2
12859:20241101:185707.997 End of vmware_job_get() queue:2 type:none
12855:20241101:185707.997 In vmware_job_get() queue:2
12855:20241101:185707.997 End of vmware_job_get() queue:2 type:none
12861:20241101:185707.997 In vmware_job_get() queue:2
12861:20241101:185707.997 End of vmware_job_get() queue:2 type:none
12837:20241101:185708.486 received configuration data from server at "10.50.242.78", datalen 437
12859:20241101:185708.997 In vmware_job_get() queue:2
12859:20241101:185708.997 End of vmware_job_get() queue:2 type:none
12861:20241101:185708.997 In vmware_job_get() queue:2
12861:20241101:185708.997 End of vmware_job_get() queue:2 type:none
12855:20241101:185708.997 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001146 sec]'
12855:20241101:185708.997 In vmware_job_get() queue:2
12855:20241101:185708.997 End of vmware_job_get() queue:2 type:none
12859:20241101:185709.997 In vmware_job_get() queue:2
12859:20241101:185709.997 End of vmware_job_get() queue:2 type:none
12861:20241101:185709.997 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000949 sec]'
12861:20241101:185709.997 In vmware_job_get() queue:2
12861:20241101:185709.997 End of vmware_job_get() queue:2 type:none
12855:20241101:185709.997 In vmware_job_get() queue:2
12855:20241101:185709.998 End of vmware_job_get() queue:2 type:none
12859:20241101:185710.997 In vmware_job_get() queue:2
12859:20241101:185710.997 End of vmware_job_get() queue:2 type:none
12861:20241101:185710.998 In vmware_job_get() queue:2
12861:20241101:185710.998 End of vmware_job_get() queue:2 type:none
12855:20241101:185710.998 In vmware_job_get() queue:2
12855:20241101:185710.998 End of vmware_job_get() queue:2 type:none
12859:20241101:185711.998 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001198 sec]'
12859:20241101:185711.998 In vmware_job_get() queue:2
12859:20241101:185711.998 End of vmware_job_get() queue:2 type:none
12861:20241101:185711.998 In vmware_job_get() queue:2
12861:20241101:185711.998 End of vmware_job_get() queue:2 type:none
12855:20241101:185711.998 In vmware_job_get() queue:2
12855:20241101:185711.998 End of vmware_job_get() queue:2 type:none
12859:20241101:185712.998 In vmware_job_get() queue:2
12859:20241101:185712.998 End of vmware_job_get() queue:2 type:none
12861:20241101:185712.998 In vmware_job_get() queue:2
12861:20241101:185712.998 End of vmware_job_get() queue:2 type:none
12855:20241101:185712.998 In vmware_job_get() queue:2
12855:20241101:185712.998 End of vmware_job_get() queue:2 type:none
12861:20241101:185713.998 In vmware_job_get() queue:2
12861:20241101:185713.998 End of vmware_job_get() queue:2 type:none
12859:20241101:185713.998 In vmware_job_get() queue:2
12859:20241101:185713.998 End of vmware_job_get() queue:2 type:none
12855:20241101:185713.998 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001019 sec]'
12855:20241101:185713.998 In vmware_job_get() queue:2
12855:20241101:185713.998 End of vmware_job_get() queue:2 type:none
12857:20241101:185714.039 End of vmware_service_get_maxquerymetrics():FAIL
12857:20241101:185714.039 In vmware_service_update_perf_entities()
12857:20241101:185714.039 In vmware_service_add_perf_entity() type:HostSystem id:host-4047
12857:20241101:185714.039 In zbx_vmware_service_get_perf_entity() type:HostSystem id:host-4047
12857:20241101:185714.039 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:net/packetsRx[summation]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:153
12857:20241101:185714.039 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:net/packetsTx[summation]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:154
12857:20241101:185714.039 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:net/received[average]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:155
12857:20241101:185714.039 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:net/transmitted[average]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:156
12857:20241101:185714.039 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:datastore/totalReadLatency[average]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:189
12857:20241101:185714.039 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:datastore/totalWriteLatency[average]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:190
12857:20241101:185714.039 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:datastore/numberReadAveraged[average]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:185
12857:20241101:185714.039 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:datastore/numberWriteAveraged[average]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:186
12857:20241101:185714.039 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.039 In zbx_vmware_service_get_counterid() path:cpu/usage[average]
12857:20241101:185714.039 zbx_vmware_service_get_counterid() counterid:2
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:cpu/utilization[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:398
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:power/power[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:164
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:power/powerCap[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:165
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/droppedRx[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:605
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/droppedTx[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:606
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/errorsRx[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:613
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/errorsTx[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:614
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/broadcastRx[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:609
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/broadcastTx[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:610
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 End of vmware_service_add_perf_entity() perfcounters:18
12857:20241101:185714.040 In vmware_service_add_perf_entity() type:VirtualMachine id:vm-4060
12857:20241101:185714.040 In zbx_vmware_service_get_perf_entity() type:VirtualMachine id:vm-4060
12857:20241101:185714.040 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:virtualDisk/read[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:180
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:virtualDisk/write[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:181
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:virtualDisk/numberReadAveraged[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:178
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:virtualDisk/numberWriteAveraged[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:179
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/packetsRx[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:153
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/packetsTx[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:154
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/received[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:155
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/transmitted[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:156
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:cpu/ready[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:12
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/usage[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:150
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:cpu/usage[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:2
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:cpu/latency[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:540
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:cpu/readiness[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:548
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:cpu/swapwait[summation]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:531
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:sys/osUptime[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:643
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:mem/consumed[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:98
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:mem/usage[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:24
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:mem/swapped[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:70
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:net/usage[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:150
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:virtualDisk/readOIO[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:349
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:virtualDisk/writeOIO[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:350
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:virtualDisk/totalWriteLatency[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:183
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:virtualDisk/totalReadLatency[average]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:182
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 End of vmware_service_add_perf_entity() perfcounters:23
12857:20241101:185714.040 vmware_service_update_perf_entities() for type: VirtualMachine hv id: host-4047 hv uuid: 00000000-0000-0000-0000-ac1f6bb14c78 linked vm id: vm-4060 vm uuid: 50304101-157a-f442-58f4-550f05de33fe
12857:20241101:185714.040 In vmware_service_add_perf_entity() type:Datastore id:datastore-2005
12857:20241101:185714.040 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2005
12857:20241101:185714.040 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185714.040 vmware_service_update_perf_entities() for type: Datastore id: datastore-2005 name: 3PAR_GOROH_SSD_NTK_ID530_mgmt uuid: 6703d517-82086a06-cec0-9440c9831520
12857:20241101:185714.040 In vmware_service_add_perf_entity() type:Datastore id:datastore-2006
12857:20241101:185714.040 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2006
12857:20241101:185714.040 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185714.040 vmware_service_update_perf_entities() for type: Datastore id: datastore-2006 name: 3PAR_KARTOHA_SAS_NTK_ID535 uuid: 6703d63f-3516ce66-4bee-9440c9831520
12857:20241101:185714.040 In vmware_service_add_perf_entity() type:Datastore id:datastore-2007
12857:20241101:185714.040 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2007
12857:20241101:185714.040 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185714.040 vmware_service_update_perf_entities() for type: Datastore id: datastore-2007 name: 3PAR_GOROH_SSD_NTK_ID531 uuid: 6704dec9-75e6c68a-c19e-9440c9831520
12857:20241101:185714.040 In vmware_service_add_perf_entity() type:Datastore id:datastore-4046
12857:20241101:185714.040 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4046
12857:20241101:185714.040 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185714.040 vmware_service_update_perf_entities() for type: Datastore id: datastore-4046 name: Local_ntk-m1-esxi-02 uuid: 67155ba7-5e9d16d6-0733-3cecef02b6e0
12857:20241101:185714.040 In vmware_service_add_perf_entity() type:Datastore id:datastore-4050
12857:20241101:185714.040 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4050
12857:20241101:185714.040 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185714.040 vmware_service_update_perf_entities() for type: Datastore id: datastore-4050 name: Local_ntk-m1-esxi-01 uuid: 67155cc9-bea5e318-19fd-ac1f6bb14c78
12857:20241101:185714.040 In vmware_service_add_perf_entity() type:Datastore id:datastore-4041
12857:20241101:185714.040 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4041
12857:20241101:185714.040 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:285
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:286
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12857:20241101:185714.040 zbx_vmware_service_get_counterid() counterid:287
12857:20241101:185714.040 End of zbx_vmware_service_get_counterid():SUCCEED
12857:20241101:185714.040 End of vmware_service_add_perf_entity() perfcounters:3
12857:20241101:185714.040 vmware_service_update_perf_entities() for type: Datastore id: datastore-4041 name: Local_ntk-m1-esxi-03 uuid: 67155e10-d4545cb2-5b01-3cecef012e78
12857:20241101:185714.040 End of vmware_service_update_perf_entities() entities:8
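The block above pairs each requested performance counter path (for example cpu/usage[average]) with the numeric counterid that the collector later uses in its perf queries. A minimal sketch, assuming only the line format seen in this trace, of pulling those pairs out of a saved copy of the log; the file name is illustrative, not something Zabbix produces:

    # Extract "counter path -> counterid" pairs from a saved collector trace.
    import re
    from collections import OrderedDict

    pairs = OrderedDict()
    pending_path = None
    with open("zabbix_proxy_trace.log", encoding="utf-8") as log:   # hypothetical file name
        for line in log:
            m = re.search(r"zbx_vmware_service_get_counterid\(\) path:(\S+)", line)
            if m:
                pending_path = m.group(1)
                continue
            m = re.search(r"zbx_vmware_service_get_counterid\(\) counterid:(\d+)", line)
            if m and pending_path:
                pairs[pending_path] = int(m.group(1))
                pending_path = None

    for path, counterid in pairs.items():
        print(f"{counterid:>4}  {path}")

Run against the lines above, this would print, among others, "153  net/packetsRx[summation]" and "285  disk/used[latest]".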
12857:20241101:185714.041 === memory statistics for vmware cache size ===
12857:20241101:185714.041 free chunks of size >= 256 bytes: 4
12857:20241101:185714.041 min chunk size: 760 bytes
12857:20241101:185714.041 max chunk size: 1073164312 bytes
12857:20241101:185714.041 memory of total size 1073625776 bytes fragmented into 7203 chunks
12857:20241101:185714.041 of those, 1073166912 bytes are in 4 free chunks
12857:20241101:185714.041 of those, 458864 bytes are in 7199 used chunks
12857:20241101:185714.041 of those, 115232 bytes are used by allocation overhead
12857:20241101:185714.041 ================================
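Read as ratios, the cache statistics above say the shared VMware cache is still almost entirely free at this point. A quick worked check with the exact figures printed above (plain arithmetic, not part of Zabbix):

    # Worked check of the vmware cache statistics logged above.
    total = 1073625776      # total memory size, bytes, fragmented into 7203 chunks
    free = 1073166912       # bytes sitting in the 4 free chunks
    used = 458864           # bytes sitting in the 7199 used chunks
    overhead = 115232       # allocation overhead included in the used bytes

    assert free + used == total
    print(f"free : {free / total:.3%}")                            # ~99.957% of the cache is free
    print(f"used : {used / total:.3%}")                            # ~0.043%
    print(f"alloc overhead within used: {overhead / used:.1%}")    # ~25.1%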
12857:20241101:185714.041 End of zbx_vmware_service_update():FAIL processed:1638400 bytes of data. Events:0 DC:1 DS:6 CL:1 HV:1 VM:1 DVS:1 Alarms:1 VMwareCache memory usage (free/strpool/total): 1073166912 / 3110400 / 1073741008
12857:20241101:185714.041 End of vmware_job_exec() type:update_conf ret:FAIL
12857:20241101:185714.041 In vmware_job_schedule() queue:2 type:update_conf
12857:20241101:185714.041 End of vmware_job_schedule() type:update_conf nextcheck:18:57:43
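The initializations of host-4043 and host-4038 in this cycle failed the same way: roughly ten seconds after each SOAP call started, "Timeout was reached" was logged, the whole zbx_vmware_service_update() run ended in FAIL, and the next update_conf attempt was scheduled for 18:57:43. A hedged sketch (not part of Zabbix; the file name and patterns are assumptions based on the lines in this trace) of scanning such a saved log to see which hypervisors failed and how long each init attempt ran:

    # List per-hypervisor init failures and their durations from a saved trace log.
    import re
    from datetime import datetime

    LINE = re.compile(r"^\d+:(\d{8}):(\d{6}\.\d{3}) (.*)")

    def parse(raw):
        m = LINE.match(raw)
        if not m:
            return None, None
        ts = datetime.strptime(m.group(1) + m.group(2), "%Y%m%d%H%M%S.%f")
        return ts, m.group(3)

    started = {}
    with open("zabbix_proxy_trace.log", encoding="utf-8") as log:   # hypothetical file name
        for raw in log:
            ts, msg = parse(raw)
            if msg is None:
                continue
            m = re.search(r"In vmware_service_init_hv\(\) hvid:'([^']+)'", msg)
            if m:
                started[m.group(1)] = ts
            m = re.search(r"Unable (?:to )?initialize hv (\S+?):", msg)
            if m and m.group(1) in started:
                hv = m.group(1)
                print(f"{hv}: init failed after {(ts - started[hv]).total_seconds():.1f} s")

    # For this trace the output would be roughly:
    #   host-4043: init failed after 10.0 s
    #   host-4038: init failed after 10.0 s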
12857:20241101:185714.041 In vmware_job_get() queue:3
12857:20241101:185714.041 End of vmware_job_get() queue:3 type:none
12857:20241101:185714.041 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 0.000000 sec during 30.361558 sec]'
12857:20241101:185714.041 In vmware_job_get() queue:3
12857:20241101:185714.041 End of vmware_job_get() queue:3 type:none
12861:20241101:185714.998 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000934 sec]'
12861:20241101:185714.998 In vmware_job_get() queue:3
12861:20241101:185714.998 End of vmware_job_get() queue:3 type:none
12859:20241101:185714.998 In vmware_job_get() queue:3
12859:20241101:185714.998 End of vmware_job_get() queue:3 type:none
12855:20241101:185714.998 In vmware_job_get() queue:3
12855:20241101:185714.998 End of vmware_job_get() queue:3 type:none
12857:20241101:185715.041 In vmware_job_get() queue:3
12857:20241101:185715.041 End of vmware_job_get() queue:3 type:none
12861:20241101:185715.998 In vmware_job_get() queue:3
12861:20241101:185715.998 End of vmware_job_get() queue:3 type:none
12859:20241101:185715.998 In vmware_job_get() queue:3
12859:20241101:185715.998 End of vmware_job_get() queue:3 type:none
12855:20241101:185715.998 In vmware_job_get() queue:3
12855:20241101:185715.998 End of vmware_job_get() queue:3 type:none
12857:20241101:185716.041 In vmware_job_get() queue:3
12857:20241101:185716.041 End of vmware_job_get() queue:3 type:none
12859:20241101:185716.998 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000933 sec]'
12859:20241101:185716.999 In vmware_job_get() queue:3
12859:20241101:185716.999 End of vmware_job_get() queue:3 type:none
12855:20241101:185716.999 In vmware_job_get() queue:3
12855:20241101:185716.999 End of vmware_job_get() queue:3 type:none
12861:20241101:185716.999 In vmware_job_get() queue:3
12861:20241101:185716.999 End of vmware_job_get() queue:3 type:none
12857:20241101:185717.041 In vmware_job_get() queue:3
12857:20241101:185717.041 End of vmware_job_get() queue:3 type:none
12855:20241101:185717.999 In vmware_job_get() queue:3
12855:20241101:185717.999 End of vmware_job_get() queue:3 type:none
12859:20241101:185717.999 In vmware_job_get() queue:3
12859:20241101:185717.999 End of vmware_job_get() queue:3 type:none
12861:20241101:185717.999 In vmware_job_get() queue:3
12861:20241101:185717.999 End of vmware_job_get() queue:3 type:none
12857:20241101:185718.041 In vmware_job_get() queue:3
12857:20241101:185718.041 End of vmware_job_get() queue:3 type:none
12837:20241101:185718.503 received configuration data from server at "10.50.242.78", datalen 437
12855:20241101:185718.999 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001168 sec]'
12855:20241101:185718.999 In vmware_job_get() queue:3
12855:20241101:185718.999 End of vmware_job_get() queue:3 type:none
12859:20241101:185718.999 In vmware_job_get() queue:3
12859:20241101:185718.999 End of vmware_job_get() queue:3 type:none
12861:20241101:185718.999 In vmware_job_get() queue:3
12861:20241101:185718.999 End of vmware_job_get() queue:3 type:none
12857:20241101:185719.041 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000519 sec]'
12857:20241101:185719.041 In vmware_job_get() queue:3
12857:20241101:185719.041 End of vmware_job_get() queue:3 type:none
12861:20241101:185719.999 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001290 sec]'
12861:20241101:185719.999 In vmware_job_get() queue:3
12861:20241101:185719.999 End of vmware_job_get() queue:3 type:none
12859:20241101:185720.000 In vmware_job_get() queue:3
12859:20241101:185720.000 End of vmware_job_get() queue:3 type:none
12855:20241101:185720.000 In vmware_job_get() queue:3
12855:20241101:185720.000 End of vmware_job_get() queue:3 type:none
12857:20241101:185720.043 In vmware_job_get() queue:3
12857:20241101:185720.043 End of vmware_job_get() queue:3 type:none
12859:20241101:185721.000 In vmware_job_get() queue:3
12859:20241101:185721.000 End of vmware_job_get() queue:3 type:none
12855:20241101:185721.000 In vmware_job_get() queue:3
12855:20241101:185721.000 End of vmware_job_get() queue:3 type:none
12861:20241101:185721.000 In vmware_job_get() queue:3
12861:20241101:185721.000 End of vmware_job_get() queue:3 type:none
12857:20241101:185721.043 In vmware_job_get() queue:3
12857:20241101:185721.043 End of vmware_job_get() queue:3 type:none
12855:20241101:185722.000 In vmware_job_get() queue:3
12855:20241101:185722.000 End of vmware_job_get() queue:3 type:none
12859:20241101:185722.000 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001357 sec]'
12859:20241101:185722.000 In vmware_job_get() queue:3
12859:20241101:185722.000 End of vmware_job_get() queue:3 type:none
12861:20241101:185722.000 In vmware_job_get() queue:3
12861:20241101:185722.000 End of vmware_job_get() queue:3 type:none
12857:20241101:185722.043 In vmware_job_get() queue:3
12857:20241101:185722.043 End of vmware_job_get() queue:3 type:none
12861:20241101:185723.000 In vmware_job_get() queue:3
12861:20241101:185723.000 End of vmware_job_get() queue:3 type:none
12855:20241101:185723.000 In vmware_job_get() queue:3
12855:20241101:185723.000 End of vmware_job_get() queue:3 type:none
12859:20241101:185723.000 In vmware_job_get() queue:3
12859:20241101:185723.000 End of vmware_job_get() queue:3 type:none
12857:20241101:185723.043 In vmware_job_get() queue:3
12857:20241101:185723.043 End of vmware_job_get() queue:3 type:none
12855:20241101:185724.000 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000778 sec]'
12855:20241101:185724.000 In vmware_job_get() queue:3
12855:20241101:185724.000 End of vmware_job_get() queue:3 type:none
12861:20241101:185724.000 In vmware_job_get() queue:3
12861:20241101:185724.000 End of vmware_job_get() queue:3 type:none
12859:20241101:185724.000 In vmware_job_get() queue:3
12859:20241101:185724.000 End of vmware_job_get() queue:3 type:none
12857:20241101:185724.044 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002437 sec]'
12857:20241101:185724.044 In vmware_job_get() queue:3
12857:20241101:185724.044 End of vmware_job_get() queue:3 type:none
12861:20241101:185725.000 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000863 sec]'
12861:20241101:185725.000 In vmware_job_get() queue:3
12861:20241101:185725.000 End of vmware_job_get() queue:3 type:none
12855:20241101:185725.000 In vmware_job_get() queue:3
12855:20241101:185725.000 End of vmware_job_get() queue:3 type:none
12859:20241101:185725.000 In vmware_job_get() queue:3
12859:20241101:185725.000 End of vmware_job_get() queue:3 type:none
12857:20241101:185725.044 In vmware_job_get() queue:3
12857:20241101:185725.044 End of vmware_job_get() queue:3 type:none
12861:20241101:185726.000 In vmware_job_get() queue:3
12861:20241101:185726.000 End of vmware_job_get() queue:3 type:none
12859:20241101:185726.000 In vmware_job_get() queue:3
12859:20241101:185726.000 End of vmware_job_get() queue:3 type:none
12855:20241101:185726.000 In vmware_job_get() queue:3
12855:20241101:185726.000 End of vmware_job_get() queue:3 type:none
12857:20241101:185726.044 In vmware_job_get() queue:3
12857:20241101:185726.044 End of vmware_job_get() queue:3 type:none
12859:20241101:185727.001 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000721 sec]'
12859:20241101:185727.001 In vmware_job_get() queue:3
12859:20241101:185727.001 End of vmware_job_get() queue:3 type:none
12861:20241101:185727.001 In vmware_job_get() queue:3
12861:20241101:185727.001 End of vmware_job_get() queue:3 type:none
12855:20241101:185727.001 In vmware_job_get() queue:3
12855:20241101:185727.001 End of vmware_job_get() queue:3 type:none
12857:20241101:185727.044 In vmware_job_get() queue:3
12857:20241101:185727.044 End of vmware_job_get() queue:3 type:none
12859:20241101:185728.001 In vmware_job_get() queue:3
12859:20241101:185728.001 End of vmware_job_get() queue:3 type:none
12855:20241101:185728.001 In vmware_job_get() queue:3
12855:20241101:185728.001 End of vmware_job_get() queue:3 type:none
12861:20241101:185728.001 In vmware_job_get() queue:3
12861:20241101:185728.001 End of vmware_job_get() queue:3 type:none
12857:20241101:185728.044 In vmware_job_get() queue:3
12857:20241101:185728.044 End of vmware_job_get() queue:3 type:none
12837:20241101:185728.519 received configuration data from server at "10.50.242.78", datalen 437
12859:20241101:185729.001 In vmware_job_get() queue:3
12859:20241101:185729.001 End of vmware_job_get() queue:3 type:none
12861:20241101:185729.001 In vmware_job_get() queue:3
12861:20241101:185729.001 End of vmware_job_get() queue:3 type:none
12855:20241101:185729.001 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000944 sec]'
12855:20241101:185729.001 In vmware_job_get() queue:3
12855:20241101:185729.001 End of vmware_job_get() queue:3 type:none
12857:20241101:185729.044 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000782 sec]'
12857:20241101:185729.044 In vmware_job_get() queue:3
12857:20241101:185729.044 End of vmware_job_get() queue:3 type:none
12859:20241101:185730.001 In vmware_job_get() queue:3
12859:20241101:185730.001 End of vmware_job_get() queue:3 type:none
12861:20241101:185730.001 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000797 sec]'
12861:20241101:185730.001 In vmware_job_get() queue:3
12861:20241101:185730.001 End of vmware_job_get() queue:3 type:none
12855:20241101:185730.001 In vmware_job_get() queue:3
12855:20241101:185730.001 End of vmware_job_get() queue:3 type:none
12857:20241101:185730.045 In vmware_job_get() queue:3
12857:20241101:185730.045 End of vmware_job_get() queue:3 type:none
12859:20241101:185731.001 In vmware_job_get() queue:3
12861:20241101:185731.001 In vmware_job_get() queue:3
12861:20241101:185731.001 End of vmware_job_get() queue:3 type:none
12855:20241101:185731.001 In vmware_job_get() queue:3
12855:20241101:185731.001 End of vmware_job_get() queue:3 type:none
12859:20241101:185731.001 End of vmware_job_get() queue:3 type:none
12857:20241101:185731.045 In vmware_job_get() queue:3
12857:20241101:185731.045 End of vmware_job_get() queue:3 type:none
12855:20241101:185732.001 In vmware_job_get() queue:3
12855:20241101:185732.001 End of vmware_job_get() queue:3 type:none
12859:20241101:185732.001 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000844 sec]'
12859:20241101:185732.002 In vmware_job_get() queue:3
12859:20241101:185732.002 End of vmware_job_get() queue:3 type:none
12861:20241101:185732.002 In vmware_job_get() queue:3
12861:20241101:185732.002 End of vmware_job_get() queue:3 type:none
12857:20241101:185732.045 In vmware_job_get() queue:3
12857:20241101:185732.045 End of vmware_job_get() queue:3 type:none
12855:20241101:185733.002 In vmware_job_get() queue:3
12855:20241101:185733.003 End of vmware_job_get() queue:3 type:none
12859:20241101:185733.003 In vmware_job_get() queue:3
12859:20241101:185733.003 End of vmware_job_get() queue:3 type:none
12861:20241101:185733.003 In vmware_job_get() queue:3
12861:20241101:185733.003 End of vmware_job_get() queue:3 type:none
12857:20241101:185733.045 In vmware_job_get() queue:3
12857:20241101:185733.045 End of vmware_job_get() queue:3 type:none
12855:20241101:185734.003 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001811 sec]'
12859:20241101:185734.003 In vmware_job_get() queue:3
12859:20241101:185734.003 End of vmware_job_get() queue:3 type:none
12855:20241101:185734.003 In vmware_job_get() queue:3
12855:20241101:185734.003 End of vmware_job_get() queue:3 type:none
12861:20241101:185734.003 In vmware_job_get() queue:3
12861:20241101:185734.003 End of vmware_job_get() queue:3 type:none
12857:20241101:185734.045 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000788 sec]'
12857:20241101:185734.045 In vmware_job_get() queue:3
12857:20241101:185734.045 End of vmware_job_get() queue:3 type:none
12861:20241101:185735.003 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001938 sec]'
12861:20241101:185735.003 In vmware_job_get() queue:3
12861:20241101:185735.003 End of vmware_job_get() queue:3 type:none
12855:20241101:185735.003 In vmware_job_get() queue:3
12855:20241101:185735.003 End of vmware_job_get() queue:3 type:none
12859:20241101:185735.003 In vmware_job_get() queue:3
12859:20241101:185735.003 End of vmware_job_get() queue:3 type:none
12857:20241101:185735.045 In vmware_job_get() queue:3
12857:20241101:185735.045 End of vmware_job_get() queue:3 type:none
12861:20241101:185736.003 In vmware_job_get() queue:3
12861:20241101:185736.003 End of vmware_job_get() queue:3 type:none
12855:20241101:185736.003 In vmware_job_get() queue:3
12855:20241101:185736.003 End of vmware_job_get() queue:3 type:none
12859:20241101:185736.003 In vmware_job_get() queue:3
12859:20241101:185736.003 End of vmware_job_get() queue:3 type:none
12857:20241101:185736.045 In vmware_job_get() queue:3
12857:20241101:185736.045 End of vmware_job_get() queue:3 type:none
12861:20241101:185737.003 In vmware_job_get() queue:3
12861:20241101:185737.003 End of vmware_job_get() queue:3 type:none
12855:20241101:185737.003 In vmware_job_get() queue:3
12855:20241101:185737.004 End of vmware_job_get() queue:3 type:none
12859:20241101:185737.004 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002156 sec]'
12859:20241101:185737.004 In vmware_job_get() queue:3
12859:20241101:185737.004 End of vmware_job_get() queue:3 type:none
12857:20241101:185737.046 In vmware_job_get() queue:3
12857:20241101:185737.046 End of vmware_job_get() queue:3 type:none
12861:20241101:185738.004 In vmware_job_get() queue:3
12861:20241101:185738.004 End of vmware_job_get() queue:3 type:none
12859:20241101:185738.004 In vmware_job_get() queue:3
12859:20241101:185738.004 End of vmware_job_get() queue:3 type:none
12855:20241101:185738.004 In vmware_job_get() queue:3
12855:20241101:185738.004 End of vmware_job_get() queue:3 type:none
12857:20241101:185738.046 In vmware_job_get() queue:3
12857:20241101:185738.046 End of vmware_job_get() queue:3 type:none
12837:20241101:185738.536 received configuration data from server at "10.50.242.78", datalen 437
12859:20241101:185739.004 In vmware_job_get() queue:3
12859:20241101:185739.004 End of vmware_job_get() queue:3 type:none
12861:20241101:185739.004 In vmware_job_get() queue:3
12861:20241101:185739.004 End of vmware_job_get() queue:3 type:none
12855:20241101:185739.004 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001018 sec]'
12855:20241101:185739.004 In vmware_job_get() queue:3
12855:20241101:185739.004 End of vmware_job_get() queue:3 type:none
12857:20241101:185739.046 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000746 sec]'
12857:20241101:185739.046 In vmware_job_get() queue:3
12857:20241101:185739.046 End of vmware_job_get() queue:3 type:none
12859:20241101:185740.004 In vmware_job_get() queue:3
12859:20241101:185740.004 End of vmware_job_get() queue:3 type:none
12861:20241101:185740.004 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001016 sec]'
12861:20241101:185740.004 In vmware_job_get() queue:3
12861:20241101:185740.004 End of vmware_job_get() queue:3 type:none
12855:20241101:185740.004 In vmware_job_get() queue:3
12855:20241101:185740.004 End of vmware_job_get() queue:3 type:none
12857:20241101:185740.046 In vmware_job_get() queue:3
12857:20241101:185740.046 End of vmware_job_get() queue:3 type:none
12859:20241101:185741.004 In vmware_job_get() queue:3
12859:20241101:185741.004 End of vmware_job_get() queue:3 type:none
12861:20241101:185741.004 In vmware_job_get() queue:3
12861:20241101:185741.004 End of vmware_job_get() queue:3 type:none
12855:20241101:185741.004 In vmware_job_get() queue:3
12855:20241101:185741.004 End of vmware_job_get() queue:3 type:none
12857:20241101:185741.046 In vmware_job_get() queue:3
12857:20241101:185741.046 End of vmware_job_get() queue:3 type:none
12861:20241101:185742.004 In vmware_job_get() queue:3
12861:20241101:185742.004 End of vmware_job_get() queue:3 type:none
12859:20241101:185742.004 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000802 sec]'
12859:20241101:185742.004 In vmware_job_get() queue:3
12859:20241101:185742.004 End of vmware_job_get() queue:3 type:none
12855:20241101:185742.004 In vmware_job_get() queue:3
12855:20241101:185742.004 End of vmware_job_get() queue:3 type:none
12857:20241101:185742.046 In vmware_job_get() queue:3
12857:20241101:185742.046 End of vmware_job_get() queue:3 type:none
12855:20241101:185742.860 cannot increase log level: maximum level has been already set
12855:20241101:185742.860 In vmware_job_get() queue:3
12855:20241101:185742.860 End of vmware_job_get() queue:3 type:none
12857:20241101:185742.860 cannot increase log level: maximum level has been already set
12857:20241101:185742.860 In vmware_job_get() queue:3
12857:20241101:185742.860 End of vmware_job_get() queue:3 type:none
12861:20241101:185742.860 cannot increase log level: maximum level has been already set
12861:20241101:185742.860 In vmware_job_get() queue:3
12861:20241101:185742.860 End of vmware_job_get() queue:3 type:none
12859:20241101:185742.860 cannot increase log level: maximum level has been already set
12859:20241101:185742.860 In vmware_job_get() queue:3
12859:20241101:185742.860 End of vmware_job_get() queue:3 type:none
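The four "cannot increase log level" lines above show each collector handling another runtime log-level-increase request while it is already running at the maximum (trace) level, so the request is refused. A trivial sketch of that bounded behaviour (illustrative names, not the Zabbix source):

#include <stdio.h>

#define LOG_LEVEL_TRACE	5	/* maximum supported level in this sketch */

static int	log_level = LOG_LEVEL_TRACE;

/* conceptual handler for a runtime log-level-increase request */
static void	log_level_increase(void)
{
	if (LOG_LEVEL_TRACE == log_level)
		printf("cannot increase log level: maximum level has been already set\n");
	else
		printf("log level has been increased to %d\n", ++log_level);
}

int	main(void)
{
	log_level_increase();
	return 0;
}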
12855:20241101:185743.860 In vmware_job_get() queue:3
12855:20241101:185743.861 End of vmware_job_get() queue:2 type:update_tags
12855:20241101:185743.861 In vmware_job_exec() type:update_tags
12855:20241101:185743.861 End of vmware_job_exec() type:update_tags ret:FAIL
12855:20241101:185743.861 In vmware_job_schedule() queue:2 type:update_tags
12855:20241101:185743.861 End of vmware_job_schedule() type:update_tags nextcheck:18:58:43
12855:20241101:185743.861 In vmware_job_get() queue:3
12855:20241101:185743.861 End of vmware_job_get() queue:2 type:update_conf
12855:20241101:185743.861 In vmware_job_exec() type:update_conf
12855:20241101:185743.861 In zbx_vmware_service_update() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12855:20241101:185743.861 In vmware_service_cust_query_prep() cust_queries:0
12855:20241101:185743.861 End of vmware_service_cust_query_prep() cq_values:0
12855:20241101:185743.861 In vmware_service_cust_query_prep() cust_queries:0
12855:20241101:185743.861 End of vmware_service_cust_query_prep() cq_values:0
12861:20241101:185743.861 In vmware_job_get() queue:2
12861:20241101:185743.861 End of vmware_job_get() queue:1 type:update_perf_counters
12861:20241101:185743.861 In vmware_job_exec() type:update_perf_counters
12861:20241101:185743.861 End of vmware_job_exec() type:update_perf_counters ret:FAIL
12861:20241101:185743.861 In vmware_job_schedule() queue:1 type:update_perf_counters
12861:20241101:185743.861 End of vmware_job_schedule() type:update_perf_counters nextcheck:18:58:43
12861:20241101:185743.861 In vmware_job_get() queue:2
12861:20241101:185743.861 End of vmware_job_get() queue:2 type:none
12859:20241101:185743.861 In vmware_job_get() queue:3
12859:20241101:185743.861 End of vmware_job_get() queue:2 type:none
12857:20241101:185743.861 In vmware_job_get() queue:2
12857:20241101:185743.861 End of vmware_job_get() queue:2 type:none
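At 18:57:43.861 the collectors finally pick up real work: vmware_job_get() returns update_tags and update_conf jobs to process 12855 and update_perf_counters to 12861, vmware_job_exec() runs each job (update_tags and update_perf_counters return FAIL here), and vmware_job_schedule() re-queues the failed jobs with a nextcheck of 18:58:43, one minute later. A minimal sketch of that get/exec/reschedule cycle, with illustrative types and a hypothetical 60-second retry interval (not the Zabbix source):

#include <stdio.h>
#include <string.h>
#include <time.h>

#define SUCCEED	0
#define FAIL	-1

/* conceptual job entry: a type plus the next time it becomes ready */
typedef struct
{
	const char	*type;
	time_t		nextcheck;
}
job_t;

/* vmware_job_get() analogue: return the first job whose nextcheck has passed */
static job_t	*job_get(job_t *queue, size_t n, time_t now)
{
	size_t	i;

	for (i = 0; i < n; i++)
	{
		if (queue[i].nextcheck <= now)
			return &queue[i];
	}

	return NULL;	/* "type:none" in the trace */
}

/* vmware_job_exec() analogue: pretend only the configuration update succeeds */
static int	job_exec(const job_t *job)
{
	return 0 == strcmp(job->type, "update_conf") ? SUCCEED : FAIL;
}

/* vmware_job_schedule() analogue: push the job one interval into the future */
static void	job_schedule(job_t *job, time_t now, int interval)
{
	job->nextcheck = now + interval;
}

int	main(void)
{
	time_t	now = time(NULL);
	job_t	queue[] = {
		{"update_tags", now}, {"update_conf", now}, {"update_perf_counters", now}
	};
	job_t	*job;

	while (NULL != (job = job_get(queue, 3, now)))
	{
		int	ret = job_exec(job);

		printf("job %s ret:%s\n", job->type, SUCCEED == ret ? "SUCCEED" : "FAIL");
		job_schedule(job, now, 60);	/* nextcheck one minute later */
	}

	return 0;
}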
12855:20241101:185743.861 In vmware_service_authenticate() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12855:20241101:185743.909 vmware_service_authenticate() SOAP response:
[SOAP response body; XML markup was lost in capture. Recoverable fields: session key 52df8d84-16e9-91af-26ce-12eee6e3db08, user name VSPHERE.LOCAL\zabbix, login time and last active time 2024-11-01T18:57:43.915306Z, locale en/en, client address 10.50.242.76]
12855:20241101:185743.909 End of vmware_service_authenticate():SUCCEED
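The update_conf job then talks to the vSphere SOAP endpoint: vmware_service_authenticate() logs 'zabbix@vsphere.local' in against https://10.50.242.10/sdk, and the SUCCEED line above confirms a session was created. A rough illustration of such a SessionManager Login call with libcurl follows; the envelope shape follows the public vSphere Web Services API, the SOAPAction header, password and TLS handling are placeholders, and this is a sketch rather than the collector's actual request code:

#include <stdio.h>
#include <curl/curl.h>

int	main(void)
{
	/* illustrative vSphere SessionManager.Login envelope; password is a placeholder */
	static const char	*body =
		"<soapenv:Envelope xmlns:soapenv=\"http://schemas.xmlsoap.org/soap/envelope/\""
		" xmlns:vim25=\"urn:vim25\">"
		"<soapenv:Body>"
		"<vim25:Login>"
		"<vim25:_this type=\"SessionManager\">SessionManager</vim25:_this>"
		"<vim25:userName>zabbix@vsphere.local</vim25:userName>"
		"<vim25:password>CHANGE_ME</vim25:password>"
		"</vim25:Login>"
		"</soapenv:Body>"
		"</soapenv:Envelope>";

	CURL			*curl;
	CURLcode		ret;
	struct curl_slist	*headers = NULL;

	curl_global_init(CURL_GLOBAL_DEFAULT);

	if (NULL == (curl = curl_easy_init()))
		return 1;

	headers = curl_slist_append(headers, "Content-Type: text/xml; charset=utf-8");
	headers = curl_slist_append(headers, "SOAPAction: urn:vim25/8.0");

	curl_easy_setopt(curl, CURLOPT_URL, "https://10.50.242.10/sdk");
	curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
	curl_easy_setopt(curl, CURLOPT_POSTFIELDS, body);
	curl_easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0L);	/* lab setup only */

	if (CURLE_OK != (ret = curl_easy_perform(curl)))
		fprintf(stderr, "login request failed: %s\n", curl_easy_strerror(ret));

	curl_slist_free_all(headers);
	curl_easy_cleanup(curl);
	curl_global_cleanup();

	return 0;
}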
12855:20241101:185743.913 vmware_service_get_contents() SOAP response:
[SOAP response body; XML markup was lost in capture. Recoverable fields: rootFolder group-d1, propertyCollector and ViewManager references, about info 'VMware vCenter Server 8.0.3 build-24322831' (VMware, Inc., version 8.0.3, build 24322831, os linux-x64, product line vpx, API type VirtualCenter, API version 8.0.3.0, instance UUID 9a31b4b0-64a6-48e1-919a-e9f7ca1668b6, license product 'VMware VirtualCenter Server' 8.0), followed by the standard manager references (VpxSettings, UserDirectory, SessionManager, AuthorizationManager, ServiceMgr, PerfMgr, ScheduledTaskManager, AlarmManager, EventManager, TaskManager, ExtensionManager, LicenseManager, SearchIndex, FileManager, DatastoreNamespaceManager, OvfManager, DVSManager, HostProfileManager, guestOperationsManager, certificateManager, IoFilterManager and others)]
12855:20241101:185743.913 In vmware_service_get_perf_counters()
12855:20241101:185743.933 vmware_service_get_perf_counters() SOAP response:
[SOAP response body; XML markup was lost in capture. Recoverable content: the PerfMgr perfCounter catalog of roughly 399 PerfCounterInfo entries numbered from 1, each carrying a numeric key, description, counter name, group (cpu, mem, managementAgent, disk, net, sys, power, storageAdapter, virtualDisk, datastore, storagePath, vmop, clusterServices, gpu, rescpu, vcDebugInfo, vcResources), unit, rollup type (none, average, minimum, maximum, latest, summation), stats type (absolute, rate, delta) and collection levels; the capture breaks off mid-entry at counter 399]
virtual processors provisioned to the entitycorecount.provisionedCPUcpuNumbernumberlatestabsolute44400The amount of L3 cache the VM usescache.l3.occupancyCPUcpuKilobytekiloBytesaverageabsolute44401The number of virtual processors running on the hostcorecount.usageCPUcpuNumbernumberlatestabsolute44402CPU load average over the past 1 minute, sampled on every 6 secondsload.avg1minCPUcpuPercentagepercentlatestabsolute44403CPU load average over the past 5 minutes, sampled on every 6 secondsload.avg5minCPUcpuPercentagepercentlatestabsolute44404CPU load average over the past 15 minutes, sampled on every 6 secondsload.avg15minCPUcpuPercentagepercentlatestabsolute44405Total amount of memory available to the hostcapacity.provisionedMemorymemMegabytemegaByteslatestabsolute44406Percent of memory that has been reserved either through VMkernel use, by userworlds or due to VM memory reservationsreservedCapacityPctMemorymemPercentagepercentlatestabsolute44407Ratio of total requested memory and the managed memory minus 1 over the past 1 minuteovercommit.avg1minMemorymemNumbernumberlatestabsolute44408Ratio of total requested memory and the managed memory minus 1 over the past 5 minutesovercommit.avg5minMemorymemNumbernumberlatestabsolute44409Ratio of total requested memory and the managed memory minus 1 over the past 15 minutesovercommit.avg15minMemorymemNumbernumberlatestabsolute44410Total amount of machine memory on the ESXi hostphysical.totalMemorymemMegabytemegaByteslatestabsolute44411Amount of machine memory being used by everything other than VMkernelphysical.userMemorymemMegabytemegaByteslatestabsolute44412Amount of machine memory that is free on the ESXi hostphysical.freeMemorymemMegabytemegaByteslatestabsolute44413Total amount of machine memory managed by VMkernelkernel.managedMemorymemMegabytemegaByteslatestabsolute44414Mininum amount of machine memory that VMkernel likes to keep freekernel.minfreeMemorymemMegabytemegaByteslatestabsolute44415Amount of machine memory that is currently unreservedkernel.unreservedMemorymemMegabytemegaByteslatestabsolute44416Amount of physical memory that is being sharedpshare.sharedMemorymemMegabytemegaByteslatestabsolute44417Amount of machine memory that is common across World(s)pshare.commonMemorymemMegabytemegaByteslatestabsolute44418Amount of machine memory saved due to page-sharingpshare.sharedSaveMemorymemMegabytemegaByteslatestabsolute44419Current swap usageswap.currentMemorymemMegabytemegaByteslatestabsolute44420Where ESXi expects the reclaimed memory using swapping and compression to beswap.targetMemorymemMegabytemegaByteslatestabsolute44421Rate at which memory is swapped in by ESXi from diskswap.readrateMemorymemMegabytes per secondmegaBytesPerSecondaveragerate44422Rate at which memory is swapped to disk by the ESXiswap.writerateMemorymemMegabytes per secondmegaBytesPerSecondaveragerate44423Total compressed physical memoryzip.zippedMemorymemMegabytemegaByteslatestabsolute44424Saved memory by compressionzip.savedMemorymemMegabytemegaByteslatestabsolute44425Total amount of physical memory reclaimed using the vmmemctl modulesmemctl.currentMemorymemMegabytemegaByteslatestabsolute44426Total amount of physical memory ESXi would like to reclaim using the vmmemctl modulesmemctl.targetMemorymemMegabytemegaByteslatestabsolute44427Maximum amount of physical memory ESXi can reclaim using the vmmemctl modulesmemctl.maxMemorymemMegabytemegaByteslatestabsolute44428Memory reservation health state, 2->Red, 
1->Greenhealth.reservationStateMemorymemNumbernumberlatestabsolute44429Amount of Overhead memory actively usedcapacity.overheadMemorymemMegabytemegaBytesaverageabsolute44430Amount of OverheadResv memorycapacity.overheadResvMemorymemMegabytemegaBytesaverageabsolute44431Per tier consumed memory. This value is expressed in megabytescapacity.consumedMemorymemMegabytemegaByteslatestabsolute44432Per tier active memory. This value is expressed in megabytescapacity.activeMemorymemMegabytemegaByteslatestabsolute44433Current CPU power usagecapacity.usageCpuPowerpowerWattwattaverageabsolute44434Current memory power usagecapacity.usageMemPowerpowerWattwattaverageabsolute44435Current other power usagecapacity.usageOtherPowerpowerWattwattaverageabsolute44436vmkernel.downtimeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44437downtimeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44438precopy.timeMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44439rttMigration of powered on VMvmotionMicrosecondmicrosecondlatestabsolute44440dst.migration.timeMigration of powered on VMvmotionSecondsecondlatestabsolute44441mem.sizembMigration of powered on VMvmotionMegabytemegaByteslatestabsolute44442Current number of replicated virtual machinesvmsvSphere ReplicationhbrNumbernumberlatestabsolute44443Average amount of data received per secondthroughput.hbr.inboundNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44444Average amount of data transmitted per secondthroughput.hbr.outboundNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44445Average disk read latency seen by vSphere Replicationhbr.readLatencyMSVirtual diskvirtualDiskMillisecondmillisecondlatestabsolute44446Average guest I/O stall introduced by vSphere Replicationhbr.stallLatencyMSVirtual diskvirtualDiskMillisecondmillisecondlatestabsolute44447Average latency seen by vSphere Replicationlatency.hbr.outboundNetworknetMillisecondmillisecondlatestabsolute44448Number of Lightweight Delta (LWD) snapshots takennumSnapshotsvSphere Data Protection (LWD)lwdNumbernumberlatestabsolute44449APD state of the nfs volumeapdStateNFSnfsNumbernumberlatestabsolute44450Cumulative read issue time on NFS volumereadIssueTimeNFSnfsMicrosecondmicrosecondlatestabsolute44451Cumulative write issue time on NFS volumewriteIssueTimeNFSnfsMicrosecondmicrosecondlatestabsolute44452Total reads on NFS volumetotalReadsNFSnfsNumbernumberlatestabsolute44453Total reads failed on NFS volumereadsFailedNFSnfsNumbernumberlatestabsolute44454Total writes on NFS volumetotalWritesNFSnfsNumbernumberlatestabsolute44455Total writes failed on NFS volumewritesFailedNFSnfsNumbernumberlatestabsolute44456Cumulative readTime on NFS volumereadTimeNFSnfsMicrosecondmicrosecondlatestabsolute44457Cumulative writeTime on NFS volumewriteTimeNFSnfsMicrosecondmicrosecondlatestabsolute44458Total IO requests queued in NFS volumeioRequestsQueuedNFSnfsNumbernumberlatestabsolute44459Total create calls on NFS volumetotalCreateNFSnfsNumbernumberlatestabsolute44460Total create calls failed on NFS volumecreateFailedNFSnfsNumbernumberlatestabsolute44461Number of times we hit into socket buffer out of space condition for NFS volumesocketBufferFullNFSnfsNumbernumberlatestabsolute44462Total journal transactions on VMFS volumevmfs.totalTxnDatastoredatastoreNumbernumberlatestabsolute44463Total cancelled journal transactions on VMFS volumevmfs.cancelledTxnDatastoredatastoreNumbernumberlatestabsolute44464Current APD state of the VMFS 
volumevmfs.apdStateDatastoredatastoreNumbernumberlatestabsolute44465Total apd timeout events received on the VMFS volumevmfs.apdCountDatastoredatastoreNumbernumberlatestabsolute44466vVol PE is accessiblepe.isaccessiblevVol object related statsvvolNumbernumberlatestabsolute44467Total no. of read cmds done on vVol PEpe.reads.donevVol object related statsvvolNumbernumberlatestabsolute44468Total no. of write cmds done on vVol PEpe.writes.donevVol object related statsvvolNumbernumberlatestabsolute44469Total no. of cmds done on vVol PEpe.total.donevVol object related statsvvolNumbernumberlatestabsolute44470Total no. of read cmds sent on vVol PEpe.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44471Total no. of write cmds sent on vVol PEpe.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44472Total no. of cmds sent on vVol PEpe.total.sentvVol object related statsvvolNumbernumberlatestabsolute44473No. of read cmds issued on vVol PE that failedpe.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44474No. of write cmds issued on vVol PE that failedpe.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44475Total no. of cmds issued on vVol PE that failedpe.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44476Total no. of read cmds failed on vVol PEpe.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44477Total no. of write cmds failed on vVol PEpe.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44478Total no. of cmds failed on vVol PEpe.total.failedvVol object related statsvvolNumbernumberlatestabsolute44479Cumulative latency of successful reads on vVol PEpe.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44480Cumulative latency of successful writes on vVol PEpe.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44481Cumulative latency of cmds that failed before issue on vVol PEpe.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44482Cumulative latency of all issued cmds on vVol PEpe.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44483Total no. of cancel cmds sent on vVol PEpe.cancel.sentvVol object related statsvvolNumbernumberlatestabsolute44484Total no. of cancel cmds failed on vVol PEpe.cancel.failedvVol object related statsvvolNumbernumberlatestabsolute44485Total no. of device reset cmds sent on vVol PEpe.deviceresets.sentvVol object related statsvvolNumbernumberlatestabsolute44486Total no. of device reset cmds failed on vVol PEpe.deviceresets.failedvVol object related statsvvolNumbernumberlatestabsolute44487Total no. of reset cmds sent on vVol PEpe.resets.sentvVol object related statsvvolNumbernumberlatestabsolute44488Total no. of reset cmds failed on vVol PEpe.resets.failedvVol object related statsvvolNumbernumberlatestabsolute44489Total no. of unmap cmds sent on vVol PEpe.unmaps.sentvVol object related statsvvolNumbernumberlatestabsolute44490Total no. of unmap cmds failed on vVol PEpe.unmaps.failedvVol object related statsvvolNumbernumberlatestabsolute44491Total no. of read cmds done by vVol Containercontainer.reads.donevVol object related statsvvolNumbernumberlatestabsolute44492Total no. of write cmds done by vVol Containercontainer.writes.donevVol object related statsvvolNumbernumberlatestabsolute44493Total no. of cmds done by vVol Containercontainer.total.donevVol object related statsvvolNumbernumberlatestabsolute44494Total no. 
of read cmds sent by vVol Containercontainer.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44495Total no. of write cmds sent by vVol Containercontainer.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44496Total no. of cmds sent by vVol Containercontainer.total.sentvVol object related statsvvolNumbernumberlatestabsolute44497No. of read cmds issued by vVol Container that failedcontainer.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44498No. of write cmds issued by vVol Container that failedcontainer.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44499Total no. of cmds issued by vVol Container that failedcontainer.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44500Total no. of read cmds failed by vVol Containercontainer.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44501Container:Total no. of write cmds failed by vVol Containercontainer.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44502Total no. of cmds failed by vVol Containercontainer.total.failedvVol object related statsvvolNumbernumberlatestabsolute44503Cumulative latency of successful reads by vVol Containercontainer.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44504Cumulative latency of successful writes by vVol Containercontainer.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44505Cumulative latency of cmds that failed before issue by vVol Containercontainer.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44506Cumulative latency of all issued cmds by vVol Containercontainer.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44507Total no. of read cmds done by vVol Devicedevice.reads.donevVol object related statsvvolNumbernumberlatestabsolute44508Total no. of write cmds done by vVol Devicedevice.writes.donevVol object related statsvvolNumbernumberlatestabsolute44509Total no. of cmds done by vVol Devicedevice.total.donevVol object related statsvvolNumbernumberlatestabsolute44510Total no. of read cmds sent by vVol Devicedevice.reads.sentvVol object related statsvvolNumbernumberlatestabsolute44511Total no. of write cmds sent by vVol Devicedevice.writes.sentvVol object related statsvvolNumbernumberlatestabsolute44512Total no. of cmds sent by vVol Devicedevice.total.sentvVol object related statsvvolNumbernumberlatestabsolute44513No. of read cmds issued by vVol Device that faileddevice.readsissued.failedvVol object related statsvvolNumbernumberlatestabsolute44514No. of write cmds issued by vVol Device that faileddevice.writesissued.failedvVol object related statsvvolNumbernumberlatestabsolute44515Total no. of cmds issued by vVol Device that faileddevice.totalissued.failedvVol object related statsvvolNumbernumberlatestabsolute44516Total no. of read cmds failed by vVol Devicedevice.reads.failedvVol object related statsvvolNumbernumberlatestabsolute44517Total no. of write cmds failed by vVol Devicedevice.writes.failedvVol object related statsvvolNumbernumberlatestabsolute44518Total no. 
of cmds failed by vVol Devicedevice.total.failedvVol object related statsvvolNumbernumberlatestabsolute44519Cumulative latency of successful reads by vVol Devicedevice.read.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44520Cumulative latency of successful writes by vVol Devicedevice.write.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44521Cumulative latency of cmds that failed before issue by vVol Devicedevice.issue.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44522Cumulative latency of all issued cmds by vVol Devicedevice.total.latencyvVol object related statsvvolMicrosecondmicrosecondlatestabsolute44523Total no. of cancel cmds sent by vVol Devicedevice.cancel.sentvVol object related statsvvolNumbernumberlatestabsolute44524Total no. of cancel cmds failed by vVol Devicedevice.cancel.failedvVol object related statsvvolNumbernumberlatestabsolute44525Total no. of device reset cmds sent by vVol Devicedevice.deviceresets.sentvVol object related statsvvolNumbernumberlatestabsolute44526Total no. of device reset cmds failed by vVol Devicedevice.deviceresets.failedvVol object related statsvvolNumbernumberlatestabsolute44527Total no. of reset cmds sent by vVol Devicedevice.resets.sentvVol object related statsvvolNumbernumberlatestabsolute44528Total no. of reset cmds failed by vVol Devicedevice.resets.failedvVol object related statsvvolNumbernumberlatestabsolute44529Total no. of unmap cmds sent by vVol Devicedevice.unmaps.sentvVol object related statsvvolNumbernumberlatestabsolute44530Total no. of unmap cmds failed by vVol Devicedevice.unmaps.failedvVol object related statsvvolNumbernumberlatestabsolute44531CPU time spent waiting for swap-inswapwaitCPUcpuMillisecondmillisecondsummationdelta33532CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentnonerate44533CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentmaximumrate44534CPU utilization as a percentage during the interval (CPU usage and CPU utilization might be different due to power management technologies or hyper-threading)utilizationCPUcpuPercentagepercentminimumrate44535CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentnonerate44536CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentaveragerate23537CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentmaximumrate44538CPU utilization of the corresponding core (if hyper-threading is enabled) as a percentage during the interval (A core is utilized if either or both of its logical CPUs are utilized)coreUtilizationCPUcpuPercentagepercentminimumrate44539Total CPU capacity reserved by and available for virtual machinestotalCapacityCPUcpuMegahertzmegaHertzaverageabsolute23540Percent of time the virtual machine is unable to run because it is contending for 
access to the physical CPU(s)latencyCPUcpuPercentagepercentaveragerate23541CPU resources devoted by the ESX schedulerentitlementCPUcpuMegahertzmegaHertzlatestabsolute23542The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limitdemandCPUcpuMegahertzmegaHertzaverageabsolute23543Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraintscostopCPUcpuMillisecondmillisecondsummationdelta23544Time the virtual machine is ready to run, but is not run due to maxing out its CPU limit settingmaxlimitedCPUcpuMillisecondmillisecondsummationdelta23545Time the virtual machine was interrupted to perform system services on behalf of itself or other virtual machinesoverlapCPUcpuMillisecondmillisecondsummationdelta33546Time the virtual machine is scheduled to runrunCPUcpuMillisecondmillisecondsummationdelta23547CPU resource entitlement to CPU demand ratio (in percents)demandEntitlementRatioCPUcpuPercentagepercentlatestabsolute44548Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPUreadinessCPUcpuPercentagepercentaveragerate44549Virtual CPU usage as a percentage during the intervalusage.vcpusCPUcpuPercentagepercentaveragerate44550Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesnoneabsolute44551Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesaverageabsolute23552Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesmaximumabsolute44553Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesminimumabsolute44554Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesnoneabsolute44555Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesaverageabsolute23556Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesmaximumabsolute44557Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesminimumabsolute44558Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesnoneabsolute44559Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesaverageabsolute23560Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesmaximumabsolute44561Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesminimumabsolute44562Amount of guest physical memory that is being actively written by guest. 
Activeness is estimated by ESXiactivewriteMemorymemKilobytekiloBytesaverageabsolute23563Host physical memory reserved by ESXi, for its data structures, for running the virtual machineoverheadMaxMemorymemKilobytekiloBytesaverageabsolute23564Total reservation, available and consumed, for powered-on virtual machinestotalCapacityMemorymemMegabytemegaBytesaverageabsolute23565Amount of guest physical memory pages compressed by ESXizippedMemorymemKilobytekiloByteslatestabsolute23566Host physical memory, reclaimed from a virtual machine, by memory compression. This value is less than the value of 'Compressed' memoryzipSavedMemorymemKilobytekiloByteslatestabsolute23567Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memorylatencyMemorymemPercentagepercentaverageabsolute23568Amount of host physical memory the virtual machine deserves, as determined by ESXientitlementMemorymemKilobytekiloBytesaverageabsolute23569Threshold of free host physical memory below which ESXi will begin actively reclaiming memory from virtual machines by swapping, compression and ballooninglowfreethresholdMemorymemKilobytekiloBytesaverageabsolute23570Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesnoneabsolute44571Rate at which guest physical memory is swapped in from the host swap cachellSwapInRateMemorymemKilobytes per secondkiloBytesPerSecondaveragerate23572Rate at which guest physical memory is swapped out to the host swap cachellSwapOutRateMemorymemKilobytes per secondkiloBytesPerSecondaveragerate23573Estimate of the host physical memory, from Overhead consumed, that is actively read or written to by ESXioverheadTouchedMemorymemKilobytekiloBytesaverageabsolute44574Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesaverageabsolute44575Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesmaximumabsolute44576Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesminimumabsolute44577Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesnoneabsolute44578Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesaverageabsolute44579Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesmaximumabsolute44580Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesminimumabsolute44581Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesnoneabsolute44582Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesaverageabsolute44583Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesmaximumabsolute44584Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesminimumabsolute44585Space used for holding VMFS Pointer Blocks in memoryvmfs.pbc.sizeMemorymemMegabytemegaByteslatestabsolute44586Maximum size the VMFS Pointer Block Cache can grow tovmfs.pbc.sizeMaxMemorymemMegabytemegaByteslatestabsolute44587Amount of file blocks whose addresses are cached in the VMFS PB Cachevmfs.pbc.workingSetMemorymemTerabyteteraByteslatestabsolute44588Maximum amount of file blocks whose 
addresses are cached in the VMFS PB Cachevmfs.pbc.workingSetMaxMemorymemTerabyteteraByteslatestabsolute44589Amount of VMFS heap used by the VMFS PB Cachevmfs.pbc.overheadMemorymemKilobytekiloByteslatestabsolute44590Trailing average of the ratio of capacity misses to compulsory misses for the VMFS PB Cachevmfs.pbc.capMissRatioMemorymemPercentagepercentlatestabsolute44591Number of Storage commands issued during the collection intervalcommandsDiskdiskNumbernumbersummationdelta23592Average amount of time, in milliseconds, to read from the physical devicedeviceReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23593Average amount of time, in milliseconds, spent by VMkernel to process each Storage read commandkernelReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23594Average amount of time taken during the collection interval to process a Storage read command issued from the guest OS to the virtual machinetotalReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23595Average amount of time spent in the VMkernel queue, per Storage read command, during the collection intervalqueueReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23596Average amount of time, in milliseconds, to write to the physical devicedeviceWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23597Average amount of time, in milliseconds, spent by VMkernel to process each Storage write commandkernelWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23598Average amount of time taken during the collection interval to process a Storage write command issued by the guest OS to the virtual machinetotalWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23599Average amount of time spent in the VMkernel queue, per Storage write command, during the collection intervalqueueWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23600Average amount of time, in milliseconds, to complete a Storage command from the physical devicedeviceLatencyDiskdiskMillisecondmillisecondaverageabsolute13601Average amount of time, in milliseconds, spent by VMkernel to process each Storage commandkernelLatencyDiskdiskMillisecondmillisecondaverageabsolute23602Average amount of time spent in the VMkernel queue, per Storage command, during the collection intervalqueueLatencyDiskdiskMillisecondmillisecondaverageabsolute23603Maximum queue depthmaxQueueDepthDiskdiskNumbernumberaverageabsolute13604Average number of Storage commands issued per second during the collection intervalcommandsAveragedDiskdiskNumbernumberaveragerate23605Number of receives droppeddroppedRxNetworknetNumbernumbersummationdelta23606Number of transmits droppeddroppedTxNetworknetNumbernumbersummationdelta23607Average amount of data received per secondbytesRxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate23608Average amount of data transmitted per secondbytesTxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate23609Number of broadcast packets received during the sampling intervalbroadcastRxNetworknetNumbernumbersummationdelta23610Number of broadcast packets transmitted during the sampling intervalbroadcastTxNetworknetNumbernumbersummationdelta23611Number of multicast packets received during the sampling intervalmulticastRxNetworknetNumbernumbersummationdelta23612Number of multicast packets transmitted during the sampling intervalmulticastTxNetworknetNumbernumbersummationdelta23613Number of packets with errors received during the sampling intervalerrorsRxNetworknetNumbernumbersummationdelta23614Number of packets with errors transmitted 
during the sampling intervalerrorsTxNetworknetNumbernumbersummationdelta23615Number of frames with unknown protocol received during the sampling intervalunknownProtosNetworknetNumbernumbersummationdelta23616Average amount of data received per second by a pNicpnicBytesRxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44617Average amount of data transmitted per second through a pNicpnicBytesTxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44618Number of heartbeats issued per virtual machine during the intervalheartbeatSystemsysNumbernumberlatestabsolute44619Amount of disk space usage for each mount pointdiskUsageSystemsysPercentagepercentlatestabsolute33620Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertznonerate44621Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzaveragerate33622Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzmaximumrate44623Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzminimumrate44624Memory touched by the system resource groupresourceMemTouchedSystemsysKilobytekiloByteslatestabsolute33625Memory mapped by the system resource groupresourceMemMappedSystemsysKilobytekiloByteslatestabsolute33626Memory saved due to sharing by the system resource groupresourceMemSharedSystemsysKilobytekiloByteslatestabsolute33627Memory swapped out by the system resource groupresourceMemSwappedSystemsysKilobytekiloByteslatestabsolute33628Overhead memory consumed by the system resource groupresourceMemOverheadSystemsysKilobytekiloByteslatestabsolute33629Memory shared by the system resource groupresourceMemCowSystemsysKilobytekiloByteslatestabsolute33630Zero filled memory used by the system resource groupresourceMemZeroSystemsysKilobytekiloByteslatestabsolute33631CPU running average over 1 minute of the system resource groupresourceCpuRun1SystemsysPercentagepercentlatestabsolute33632CPU active average over 1 minute of the system resource groupresourceCpuAct1SystemsysPercentagepercentlatestabsolute33633CPU maximum limited over 1 minute of the system resource groupresourceCpuMaxLimited1SystemsysPercentagepercentlatestabsolute33634CPU running average over 5 minutes of the system resource groupresourceCpuRun5SystemsysPercentagepercentlatestabsolute33635CPU active average over 5 minutes of the system resource groupresourceCpuAct5SystemsysPercentagepercentlatestabsolute33636CPU maximum limited over 5 minutes of the system resource groupresourceCpuMaxLimited5SystemsysPercentagepercentlatestabsolute33637CPU allocation reservation (in MHz) of the system resource groupresourceCpuAllocMinSystemsysMegahertzmegaHertzlatestabsolute33638CPU allocation limit (in MHz) of the system resource groupresourceCpuAllocMaxSystemsysMegahertzmegaHertzlatestabsolute33639CPU allocation shares of the system resource groupresourceCpuAllocSharesSystemsysNumbernumberlatestabsolute33640Memory allocation reservation (in KB) of the system resource groupresourceMemAllocMinSystemsysKilobytekiloByteslatestabsolute33641Memory allocation limit (in KB) of the system resource groupresourceMemAllocMaxSystemsysKilobytekiloByteslatestabsolute33642Memory allocation shares of the system resource groupresourceMemAllocSharesSystemsysNumbernumberlatestabsolute33643Total time elapsed, in seconds, since last 
operating system boot-uposUptimeSystemsysSecondsecondlatestabsolute44644Memory consumed by the system resource groupresourceMemConsumedSystemsysKilobytekiloByteslatestabsolute44645Number of file descriptors used by the system resource groupresourceFdUsageSystemsysNumbernumberlatestabsolute44646CPU active peak over 1 minuteactpk1Resource group CPUrescpuPercentagepercentlatestabsolute33647CPU running average over 1 minuterunav1Resource group CPUrescpuPercentagepercentlatestabsolute33648CPU active average over 5 minutesactav5Resource group CPUrescpuPercentagepercentlatestabsolute33649CPU active peak over 5 minutesactpk5Resource group CPUrescpuPercentagepercentlatestabsolute33650CPU running average over 5 minutesrunav5Resource group CPUrescpuPercentagepercentlatestabsolute33651CPU active average over 15 minutesactav15Resource group CPUrescpuPercentagepercentlatestabsolute33652CPU active peak over 15 minutesactpk15Resource group CPUrescpuPercentagepercentlatestabsolute33653CPU running average over 15 minutesrunav15Resource group CPUrescpuPercentagepercentlatestabsolute33654CPU running peak over 1 minuterunpk1Resource group CPUrescpuPercentagepercentlatestabsolute33655Amount of CPU resources over the limit that were refused, average over 1 minutemaxLimited1Resource group CPUrescpuPercentagepercentlatestabsolute33656CPU running peak over 5 minutesrunpk5Resource group CPUrescpuPercentagepercentlatestabsolute33657Amount of CPU resources over the limit that were refused, average over 5 minutesmaxLimited5Resource group CPUrescpuPercentagepercentlatestabsolute33658CPU running peak over 15 minutesrunpk15Resource group CPUrescpuPercentagepercentlatestabsolute33659Amount of CPU resources over the limit that were refused, average over 15 minutesmaxLimited15Resource group CPUrescpuPercentagepercentlatestabsolute33660Group CPU sample countsampleCountResource group CPUrescpuNumbernumberlatestabsolute33661Group CPU sample periodsamplePeriodResource group CPUrescpuMillisecondmillisecondlatestabsolute33662Amount of total configured memory that is available for usememUsedManagement agentmanagementAgentKilobytekiloBytesaverageabsolute33663Sum of the memory swapped by all powered-on virtual machines on the hostswapUsedManagement agentmanagementAgentKilobytekiloBytesaverageabsolute33664Amount of Service Console CPU usagecpuUsageManagement agentmanagementAgentMegahertzmegaHertzaveragerate33665Average number of commands issued per second on the storage path during the collection intervalcommandsAveragedStorage pathstoragePathNumbernumberaveragerate33666Average number of read commands issued per second on the storage path during the collection intervalnumberReadAveragedStorage pathstoragePathNumbernumberaveragerate33667Average number of write commands issued per second on the storage path during the collection intervalnumberWriteAveragedStorage pathstoragePathNumbernumberaveragerate33668Rate of reading data on the storage pathreadStorage pathstoragePathKilobytes per secondkiloBytesPerSecondaveragerate33669Rate of writing data on the storage pathwriteStorage pathstoragePathKilobytes per secondkiloBytesPerSecondaveragerate33670The average time a read issued on the storage path takestotalReadLatencyStorage pathstoragePathMillisecondmillisecondaverageabsolute33671The average time a write issued on the storage path takestotalWriteLatencyStorage pathstoragePathMillisecondmillisecondaverageabsolute33672Average read request size in bytesreadIOSizeVirtual diskvirtualDiskNumbernumberlatestabsolute44673Average write request size 
in byteswriteIOSizeVirtual diskvirtualDiskNumbernumberlatestabsolute44674Number of seeks during the interval that were less than 64 LBNs apartsmallSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44675Number of seeks during the interval that were between 64 and 8192 LBNs apartmediumSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44676Number of seeks during the interval that were greater than 8192 LBNs apartlargeSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44677Read latency in microsecondsreadLatencyUSVirtual diskvirtualDiskMicrosecondmicrosecondlatestabsolute44678Write latency in microsecondswriteLatencyUSVirtual diskvirtualDiskMicrosecondmicrosecondlatestabsolute44679Storage I/O Control datastore maximum queue depthdatastoreMaxQueueDepthDatastoredatastoreNumbernumberlatestabsolute13680Unmapped size in MBunmapSizeDatastoredatastoreMegabytemegaBytessummationdelta44681Number of unmap IOs issuedunmapIOsDatastoredatastoreNumbernumbersummationdelta44682Current number of replicated virtual machineshbrNumVmsvSphere ReplicationhbrNumbernumberaverageabsolute44683Average amount of data received per secondhbrNetRxvSphere ReplicationhbrKilobytes per secondkiloBytesPerSecondaveragerate44684Average amount of data transmitted per secondhbrNetTxvSphere ReplicationhbrKilobytes per secondkiloBytesPerSecondaveragerate44685Average network latency seen by vSphere ReplicationhbrNetLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44686Average disk read latency seen by vSphere ReplicationhbrDiskReadLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44687Average guest I/O stall introduced by vSphere ReplicationhbrDiskStallLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44688Average amount of successful transfer time per diskhbrDiskTransferSuccessvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44689Average amount of idle time per diskhbrDiskTransferIdlevSphere ReplicationhbrMillisecondmillisecondaverageabsolute44690Average amount of data in KB successfully transferred per diskhbrDiskTransferBytesvSphere ReplicationhbrKilobytekiloBytesaverageabsolute44691Number of caches controlled by the virtual flash modulenumActiveVMDKsVirtual flash module related statistical valuesvflashModuleNumbernumberlatestabsolute44692Read IOPSreadIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44693Read throughput in kBpsreadThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44694Average read latency in msreadAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44695Max read latency in msreadMaxLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondlatestabsolute44696Cache hit rate percentagereadCacheHitRatevSAN DOM object related statistical valuesvsanDomObjPercentagepercentlatestabsolute44697Read congestionreadCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44698Write IOPSwriteIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44699Write throughput in kBpswriteThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44700Average write latency in mswriteAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44701Max write latency in mswriteMaxLatencyvSAN DOM object related statistical 
valuesvsanDomObjMillisecondmillisecondlatestabsolute44702Write congestionwriteCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44703Recovery write IOPSrecoveryWriteIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44704Recovery write through-put in kBpsrecoveryWriteThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44705Average recovery write latency in msrecoveryWriteAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44706Max recovery write latency in msrecoveryWriteMaxLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondlatestabsolute44707Recovery write congestionrecoveryWriteCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44708The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentnoneabsolute44709The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentmaximumabsolute44710The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentminimumabsolute44711The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesnoneabsolute44712The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesmaximumabsolute44713The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesminimumabsolute44714The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentnoneabsolute44715The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentaverageabsolute44716The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentmaximumabsolute44717The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentminimumabsolute44718The amount of GPU memory used in gigabytesmem.used.gbGPUgpuGigabytegigaByteslatestabsolute33719The amount of GPU memory reserved in gigabytesmem.reserved.gbGPUgpuGigabytegigaByteslatestabsolute33720The total amount of GPU memory in gigabytesmem.total.gbGPUgpuGigabytegigaByteslatestabsolute33721Persistent memory available reservation on a host.available.reservationPMEMpmemMegabytemegaByteslatestabsolute44722Persistent memory reservation managed by DRS on a host.drsmanaged.reservationPMEMpmemMegabytemegaByteslatestabsolute44723Total count of virtual CPUs in VMnumVCPUsVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44724Minimum clock speed of the vCPUs during last stats intervalvcpusMhzMinVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44725Maximum clock speed of the vCPUs during last stats intervalvcpusMhzMaxVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44726Average clock speed of the vCPUs during last stats intervalvcpusMhzMeanVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44727Actual clock speed of host CPUcpuSpeedVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44728Minimum overhead heap memory usage since the VM started runningoverheadMemSizeMinVMX Stats for VMX componentsvmxMegabytemegaByteslatestabsolute44729Maximum overhead heap memory usage since the VM started runningoverheadMemSizeMaxVMX Stats for VMX componentsvmxMegabytemegaByteslatestabsolute44730vigor.opsTotalVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44731poll.itersPerSVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44732userRpc.opsPerSVMX Stats 
for VMX componentsvmxNumbernumberlatestabsolute44
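Editor's note: the metadata dump above lists vCenter PerfCounterInfo records, and the trace lines below show the collector registering each counter under two lookup paths, "group/name[rollup]" and "group/name[rollup,statsType]", both pointing at the counter's numeric ID. The following is a minimal illustrative sketch in Python of that mapping; it is not Zabbix's actual C implementation, and the type and helper names (CounterInfo, lookup_keys) are assumptions introduced here.

# Sketch only: models one PerfCounterInfo record and derives the two
# lookup paths that appear in the "adding performance counter" lines below.
from dataclasses import dataclass

@dataclass
class CounterInfo:
    counter_id: int     # numeric counter key, e.g. 2 for cpu/usage[average]
    group_key: str      # e.g. "cpu", "mem", "vcDebugInfo"
    name_key: str       # e.g. "usage", "swapin"
    rollup_type: str    # none | average | minimum | maximum | latest | summation
    stats_type: str     # absolute | delta | rate
    unit_key: str       # e.g. "percent", "kiloBytes"
    level: int          # statistics level (1-4)

def lookup_keys(c: CounterInfo) -> list[str]:
    """Build the two paths logged as 'adding performance counter <path>:<id>'."""
    base = f"{c.group_key}/{c.name_key}"
    return [f"{base}[{c.rollup_type}]",
            f"{base}[{c.rollup_type},{c.stats_type}]"]

# Example: counter 2 from the log below -> cpu/usage[average] and cpu/usage[average,rate]
usage_avg = CounterInfo(2, "cpu", "usage", "average", "rate", "percent", 1)
for path in lookup_keys(usage_avg):
    print(f"adding performance counter {path}:{usage_avg.counter_id}")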
12855:20241101:185743.937 adding performance counter cpu/usage[none]:1
12855:20241101:185743.937 adding performance counter cpu/usage[none,rate]:1
12855:20241101:185743.937 adding performance counter cpu/usage[average]:2
12855:20241101:185743.937 adding performance counter cpu/usage[average,rate]:2
12855:20241101:185743.937 adding performance counter cpu/usage[minimum]:3
12855:20241101:185743.937 adding performance counter cpu/usage[minimum,rate]:3
12855:20241101:185743.937 adding performance counter cpu/usage[maximum]:4
12855:20241101:185743.937 adding performance counter cpu/usage[maximum,rate]:4
12855:20241101:185743.938 adding performance counter cpu/usagemhz[none]:5
12855:20241101:185743.938 adding performance counter cpu/usagemhz[none,rate]:5
12855:20241101:185743.938 adding performance counter cpu/usagemhz[average]:6
12855:20241101:185743.938 adding performance counter cpu/usagemhz[average,rate]:6
12855:20241101:185743.938 adding performance counter cpu/usagemhz[minimum]:7
12855:20241101:185743.938 adding performance counter cpu/usagemhz[minimum,rate]:7
12855:20241101:185743.938 adding performance counter cpu/usagemhz[maximum]:8
12855:20241101:185743.938 adding performance counter cpu/usagemhz[maximum,rate]:8
12855:20241101:185743.938 adding performance counter cpu/reservedCapacity[average]:9
12855:20241101:185743.938 adding performance counter cpu/reservedCapacity[average,absolute]:9
12855:20241101:185743.938 adding performance counter cpu/system[summation]:10
12855:20241101:185743.938 adding performance counter cpu/system[summation,delta]:10
12855:20241101:185743.938 adding performance counter cpu/wait[summation]:11
12855:20241101:185743.938 adding performance counter cpu/wait[summation,delta]:11
12855:20241101:185743.938 adding performance counter cpu/ready[summation]:12
12855:20241101:185743.938 adding performance counter cpu/ready[summation,delta]:12
12855:20241101:185743.938 adding performance counter cpu/idle[summation]:13
12855:20241101:185743.938 adding performance counter cpu/idle[summation,delta]:13
12855:20241101:185743.938 adding performance counter cpu/used[summation]:14
12855:20241101:185743.938 adding performance counter cpu/used[summation,delta]:14
12855:20241101:185743.938 adding performance counter cpu/capacity.provisioned[average]:15
12855:20241101:185743.938 adding performance counter cpu/capacity.provisioned[average,absolute]:15
12855:20241101:185743.938 adding performance counter cpu/capacity.entitlement[average]:16
12855:20241101:185743.938 adding performance counter cpu/capacity.entitlement[average,absolute]:16
12855:20241101:185743.938 adding performance counter cpu/capacity.usage[average]:17
12855:20241101:185743.938 adding performance counter cpu/capacity.usage[average,rate]:17
12855:20241101:185743.938 adding performance counter cpu/capacity.demand[average]:18
12855:20241101:185743.938 adding performance counter cpu/capacity.demand[average,absolute]:18
12855:20241101:185743.938 adding performance counter cpu/capacity.contention[average]:19
12855:20241101:185743.938 adding performance counter cpu/capacity.contention[average,rate]:19
12855:20241101:185743.938 adding performance counter cpu/corecount.provisioned[average]:20
12855:20241101:185743.938 adding performance counter cpu/corecount.provisioned[average,absolute]:20
12855:20241101:185743.938 adding performance counter cpu/corecount.usage[average]:21
12855:20241101:185743.938 adding performance counter cpu/corecount.usage[average,absolute]:21
12855:20241101:185743.938 adding performance counter cpu/corecount.contention[average]:22
12855:20241101:185743.938 adding performance counter cpu/corecount.contention[average,rate]:22
12855:20241101:185743.939 adding performance counter mem/usage[none]:23
12855:20241101:185743.939 adding performance counter mem/usage[none,absolute]:23
12855:20241101:185743.939 adding performance counter mem/usage[average]:24
12855:20241101:185743.939 adding performance counter mem/usage[average,absolute]:24
12855:20241101:185743.939 adding performance counter mem/usage[minimum]:25
12855:20241101:185743.939 adding performance counter mem/usage[minimum,absolute]:25
12855:20241101:185743.939 adding performance counter mem/usage[maximum]:26
12855:20241101:185743.939 adding performance counter mem/usage[maximum,absolute]:26
12855:20241101:185743.939 adding performance counter mem/reservedCapacity[average]:27
12855:20241101:185743.939 adding performance counter mem/reservedCapacity[average,absolute]:27
12855:20241101:185743.939 adding performance counter mem/granted[none]:28
12855:20241101:185743.939 adding performance counter mem/granted[none,absolute]:28
12855:20241101:185743.939 adding performance counter mem/granted[average]:29
12855:20241101:185743.939 adding performance counter mem/granted[average,absolute]:29
12855:20241101:185743.939 adding performance counter mem/granted[minimum]:30
12855:20241101:185743.939 adding performance counter mem/granted[minimum,absolute]:30
12855:20241101:185743.939 adding performance counter mem/granted[maximum]:31
12855:20241101:185743.939 adding performance counter mem/granted[maximum,absolute]:31
12855:20241101:185743.939 adding performance counter mem/active[none]:32
12855:20241101:185743.939 adding performance counter mem/active[none,absolute]:32
12855:20241101:185743.939 adding performance counter mem/active[average]:33
12855:20241101:185743.939 adding performance counter mem/active[average,absolute]:33
12855:20241101:185743.939 adding performance counter mem/active[minimum]:34
12855:20241101:185743.939 adding performance counter mem/active[minimum,absolute]:34
12855:20241101:185743.939 adding performance counter mem/active[maximum]:35
12855:20241101:185743.939 adding performance counter mem/active[maximum,absolute]:35
12855:20241101:185743.939 adding performance counter mem/shared[none]:36
12855:20241101:185743.939 adding performance counter mem/shared[none,absolute]:36
12855:20241101:185743.939 adding performance counter mem/shared[average]:37
12855:20241101:185743.939 adding performance counter mem/shared[average,absolute]:37
12855:20241101:185743.939 adding performance counter mem/shared[minimum]:38
12855:20241101:185743.939 adding performance counter mem/shared[minimum,absolute]:38
12855:20241101:185743.939 adding performance counter mem/shared[maximum]:39
12855:20241101:185743.939 adding performance counter mem/shared[maximum,absolute]:39
12855:20241101:185743.939 adding performance counter mem/zero[none]:40
12855:20241101:185743.939 adding performance counter mem/zero[none,absolute]:40
12855:20241101:185743.940 adding performance counter mem/zero[average]:41
12855:20241101:185743.940 adding performance counter mem/zero[average,absolute]:41
12855:20241101:185743.940 adding performance counter mem/zero[minimum]:42
12855:20241101:185743.940 adding performance counter mem/zero[minimum,absolute]:42
12855:20241101:185743.940 adding performance counter mem/zero[maximum]:43
12855:20241101:185743.940 adding performance counter mem/zero[maximum,absolute]:43
12855:20241101:185743.940 adding performance counter mem/unreserved[none]:44
12855:20241101:185743.940 adding performance counter mem/unreserved[none,absolute]:44
12855:20241101:185743.940 adding performance counter mem/unreserved[average]:45
12855:20241101:185743.940 adding performance counter mem/unreserved[average,absolute]:45
12855:20241101:185743.940 adding performance counter mem/unreserved[minimum]:46
12855:20241101:185743.940 adding performance counter mem/unreserved[minimum,absolute]:46
12855:20241101:185743.940 adding performance counter mem/unreserved[maximum]:47
12855:20241101:185743.940 adding performance counter mem/unreserved[maximum,absolute]:47
12855:20241101:185743.940 adding performance counter mem/swapused[none]:48
12855:20241101:185743.940 adding performance counter mem/swapused[none,absolute]:48
12855:20241101:185743.940 adding performance counter mem/swapused[average]:49
12855:20241101:185743.940 adding performance counter mem/swapused[average,absolute]:49
12855:20241101:185743.940 adding performance counter mem/swapused[minimum]:50
12855:20241101:185743.940 adding performance counter mem/swapused[minimum,absolute]:50
12855:20241101:185743.940 adding performance counter mem/swapused[maximum]:51
12855:20241101:185743.940 adding performance counter mem/swapused[maximum,absolute]:51
12855:20241101:185743.940 adding performance counter mem/swapunreserved[none]:52
12855:20241101:185743.940 adding performance counter mem/swapunreserved[none,absolute]:52
12855:20241101:185743.940 adding performance counter mem/swapunreserved[average]:53
12855:20241101:185743.940 adding performance counter mem/swapunreserved[average,absolute]:53
12855:20241101:185743.940 adding performance counter mem/swapunreserved[minimum]:54
12855:20241101:185743.940 adding performance counter mem/swapunreserved[minimum,absolute]:54
12855:20241101:185743.940 adding performance counter mem/swapunreserved[maximum]:55
12855:20241101:185743.940 adding performance counter mem/swapunreserved[maximum,absolute]:55
12855:20241101:185743.940 adding performance counter mem/sharedcommon[none]:56
12855:20241101:185743.940 adding performance counter mem/sharedcommon[none,absolute]:56
12855:20241101:185743.940 adding performance counter mem/sharedcommon[average]:57
12855:20241101:185743.940 adding performance counter mem/sharedcommon[average,absolute]:57
12855:20241101:185743.940 adding performance counter mem/sharedcommon[minimum]:58
12855:20241101:185743.940 adding performance counter mem/sharedcommon[minimum,absolute]:58
12855:20241101:185743.941 adding performance counter mem/sharedcommon[maximum]:59
12855:20241101:185743.941 adding performance counter mem/sharedcommon[maximum,absolute]:59
12855:20241101:185743.941 adding performance counter mem/heap[none]:60
12855:20241101:185743.941 adding performance counter mem/heap[none,absolute]:60
12855:20241101:185743.941 adding performance counter mem/heap[average]:61
12855:20241101:185743.941 adding performance counter mem/heap[average,absolute]:61
12855:20241101:185743.941 adding performance counter mem/heap[minimum]:62
12855:20241101:185743.941 adding performance counter mem/heap[minimum,absolute]:62
12855:20241101:185743.941 adding performance counter mem/heap[maximum]:63
12855:20241101:185743.941 adding performance counter mem/heap[maximum,absolute]:63
12855:20241101:185743.941 adding performance counter mem/heapfree[none]:64
12855:20241101:185743.941 adding performance counter mem/heapfree[none,absolute]:64
12855:20241101:185743.941 adding performance counter mem/heapfree[average]:65
12855:20241101:185743.941 adding performance counter mem/heapfree[average,absolute]:65
12855:20241101:185743.941 adding performance counter mem/heapfree[minimum]:66
12855:20241101:185743.941 adding performance counter mem/heapfree[minimum,absolute]:66
12855:20241101:185743.941 adding performance counter mem/heapfree[maximum]:67
12855:20241101:185743.941 adding performance counter mem/heapfree[maximum,absolute]:67
12855:20241101:185743.941 adding performance counter mem/state[latest]:68
12855:20241101:185743.941 adding performance counter mem/state[latest,absolute]:68
12855:20241101:185743.941 adding performance counter mem/swapped[none]:69
12855:20241101:185743.941 adding performance counter mem/swapped[none,absolute]:69
12855:20241101:185743.941 adding performance counter mem/swapped[average]:70
12855:20241101:185743.941 adding performance counter mem/swapped[average,absolute]:70
12855:20241101:185743.941 adding performance counter mem/swapped[minimum]:71
12855:20241101:185743.941 adding performance counter mem/swapped[minimum,absolute]:71
12855:20241101:185743.941 adding performance counter mem/swapped[maximum]:72
12855:20241101:185743.941 adding performance counter mem/swapped[maximum,absolute]:72
12855:20241101:185743.941 adding performance counter mem/swaptarget[none]:73
12855:20241101:185743.941 adding performance counter mem/swaptarget[none,absolute]:73
12855:20241101:185743.941 adding performance counter mem/swaptarget[average]:74
12855:20241101:185743.941 adding performance counter mem/swaptarget[average,absolute]:74
12855:20241101:185743.941 adding performance counter mem/swaptarget[minimum]:75
12855:20241101:185743.941 adding performance counter mem/swaptarget[minimum,absolute]:75
12855:20241101:185743.941 adding performance counter mem/swaptarget[maximum]:76
12855:20241101:185743.941 adding performance counter mem/swaptarget[maximum,absolute]:76
12855:20241101:185743.942 adding performance counter mem/swapIn[none]:77
12855:20241101:185743.942 adding performance counter mem/swapIn[none,absolute]:77
12855:20241101:185743.942 adding performance counter mem/swapIn[average]:78
12855:20241101:185743.942 adding performance counter mem/swapIn[average,absolute]:78
12855:20241101:185743.942 adding performance counter mem/swapIn[minimum]:79
12855:20241101:185743.942 adding performance counter mem/swapIn[minimum,absolute]:79
12855:20241101:185743.942 adding performance counter mem/swapIn[maximum]:80
12855:20241101:185743.942 adding performance counter mem/swapIn[maximum,absolute]:80
12855:20241101:185743.942 adding performance counter mem/swapOut[none]:81
12855:20241101:185743.942 adding performance counter mem/swapOut[none,absolute]:81
12855:20241101:185743.942 adding performance counter mem/swapOut[average]:82
12855:20241101:185743.942 adding performance counter mem/swapOut[average,absolute]:82
12855:20241101:185743.942 adding performance counter mem/swapOut[minimum]:83
12855:20241101:185743.942 adding performance counter mem/swapOut[minimum,absolute]:83
12855:20241101:185743.942 adding performance counter mem/swapOut[maximum]:84
12855:20241101:185743.942 adding performance counter mem/swapOut[maximum,absolute]:84
12855:20241101:185743.942 adding performance counter mem/swapinRate[average]:85
12855:20241101:185743.942 adding performance counter mem/swapinRate[average,rate]:85
12855:20241101:185743.942 adding performance counter mem/swapoutRate[average]:86
12855:20241101:185743.942 adding performance counter mem/swapoutRate[average,rate]:86
12855:20241101:185743.942 adding performance counter managementAgent/swapOut[average]:87
12855:20241101:185743.942 adding performance counter managementAgent/swapOut[average,rate]:87
12855:20241101:185743.942 adding performance counter managementAgent/swapIn[average]:88
12855:20241101:185743.942 adding performance counter managementAgent/swapIn[average,rate]:88
12855:20241101:185743.942 adding performance counter mem/vmmemctl[none]:89
12855:20241101:185743.942 adding performance counter mem/vmmemctl[none,absolute]:89
12855:20241101:185743.942 adding performance counter mem/vmmemctl[average]:90
12855:20241101:185743.942 adding performance counter mem/vmmemctl[average,absolute]:90
12855:20241101:185743.942 adding performance counter mem/vmmemctl[minimum]:91
12855:20241101:185743.942 adding performance counter mem/vmmemctl[minimum,absolute]:91
12855:20241101:185743.942 adding performance counter mem/vmmemctl[maximum]:92
12855:20241101:185743.942 adding performance counter mem/vmmemctl[maximum,absolute]:92
12855:20241101:185743.942 adding performance counter mem/vmmemctltarget[none]:93
12855:20241101:185743.942 adding performance counter mem/vmmemctltarget[none,absolute]:93
12855:20241101:185743.942 adding performance counter mem/vmmemctltarget[average]:94
12855:20241101:185743.943 adding performance counter mem/vmmemctltarget[average,absolute]:94
12855:20241101:185743.943 adding performance counter mem/vmmemctltarget[minimum]:95
12855:20241101:185743.943 adding performance counter mem/vmmemctltarget[minimum,absolute]:95
12855:20241101:185743.943 adding performance counter mem/vmmemctltarget[maximum]:96
12855:20241101:185743.943 adding performance counter mem/vmmemctltarget[maximum,absolute]:96
12855:20241101:185743.943 adding performance counter mem/consumed[none]:97
12855:20241101:185743.943 adding performance counter mem/consumed[none,absolute]:97
12855:20241101:185743.943 adding performance counter mem/consumed[average]:98
12855:20241101:185743.943 adding performance counter mem/consumed[average,absolute]:98
12855:20241101:185743.943 adding performance counter mem/consumed[minimum]:99
12855:20241101:185743.943 adding performance counter mem/consumed[minimum,absolute]:99
12855:20241101:185743.943 adding performance counter mem/consumed[maximum]:100
12855:20241101:185743.943 adding performance counter mem/consumed[maximum,absolute]:100
12855:20241101:185743.943 adding performance counter mem/overhead[none]:101
12855:20241101:185743.943 adding performance counter mem/overhead[none,absolute]:101
12855:20241101:185743.943 adding performance counter mem/overhead[average]:102
12855:20241101:185743.943 adding performance counter mem/overhead[average,absolute]:102
12855:20241101:185743.943 adding performance counter mem/overhead[minimum]:103
12855:20241101:185743.943 adding performance counter mem/overhead[minimum,absolute]:103
12855:20241101:185743.943 adding performance counter mem/overhead[maximum]:104
12855:20241101:185743.943 adding performance counter mem/overhead[maximum,absolute]:104
12855:20241101:185743.943 adding performance counter mem/compressed[average]:105
12855:20241101:185743.943 adding performance counter mem/compressed[average,absolute]:105
12855:20241101:185743.943 adding performance counter mem/compressionRate[average]:106
12855:20241101:185743.943 adding performance counter mem/compressionRate[average,rate]:106
12855:20241101:185743.943 adding performance counter mem/decompressionRate[average]:107
12855:20241101:185743.943 adding performance counter mem/decompressionRate[average,rate]:107
12855:20241101:185743.943 adding performance counter mem/capacity.provisioned[average]:108
12855:20241101:185743.943 adding performance counter mem/capacity.provisioned[average,absolute]:108
12855:20241101:185743.943 adding performance counter mem/capacity.entitlement[average]:109
12855:20241101:185743.943 adding performance counter mem/capacity.entitlement[average,absolute]:109
12855:20241101:185743.943 adding performance counter mem/capacity.usable[average]:110
12855:20241101:185743.943 adding performance counter mem/capacity.usable[average,absolute]:110
12855:20241101:185743.943 adding performance counter mem/capacity.usage[average]:111
12855:20241101:185743.943 adding performance counter mem/capacity.usage[average,absolute]:111
12855:20241101:185743.944 adding performance counter mem/capacity.contention[average]:112
12855:20241101:185743.944 adding performance counter mem/capacity.contention[average,rate]:112
12855:20241101:185743.944 adding performance counter mem/capacity.usage.vm[average]:113
12855:20241101:185743.944 adding performance counter mem/capacity.usage.vm[average,absolute]:113
12855:20241101:185743.944 adding performance counter mem/capacity.usage.vmOvrhd[average]:114
12855:20241101:185743.944 adding performance counter mem/capacity.usage.vmOvrhd[average,absolute]:114
12855:20241101:185743.944 adding performance counter mem/capacity.usage.vmkOvrhd[average]:115
12855:20241101:185743.944 adding performance counter mem/capacity.usage.vmkOvrhd[average,absolute]:115
12855:20241101:185743.944 adding performance counter mem/capacity.usage.userworld[average]:116
12855:20241101:185743.944 adding performance counter mem/capacity.usage.userworld[average,absolute]:116
12855:20241101:185743.944 adding performance counter mem/reservedCapacity.vm[average]:117
12855:20241101:185743.944 adding performance counter mem/reservedCapacity.vm[average,absolute]:117
12855:20241101:185743.944 adding performance counter mem/reservedCapacity.vmOvhd[average]:118
12855:20241101:185743.944 adding performance counter mem/reservedCapacity.vmOvhd[average,absolute]:118
12855:20241101:185743.944 adding performance counter mem/reservedCapacity.vmkOvrhd[average]:119
12855:20241101:185743.944 adding performance counter mem/reservedCapacity.vmkOvrhd[average,absolute]:119
12855:20241101:185743.944 adding performance counter mem/reservedCapacity.userworld[average]:120
12855:20241101:185743.944 adding performance counter mem/reservedCapacity.userworld[average,absolute]:120
12855:20241101:185743.944 adding performance counter mem/reservedCapacityPct[average]:121
12855:20241101:185743.944 adding performance counter mem/reservedCapacityPct[average,absolute]:121
12855:20241101:185743.944 adding performance counter mem/consumed.vms[average]:122
12855:20241101:185743.944 adding performance counter mem/consumed.vms[average,absolute]:122
12855:20241101:185743.944 adding performance counter mem/consumed.userworlds[average]:123
12855:20241101:185743.944 adding performance counter mem/consumed.userworlds[average,absolute]:123
12855:20241101:185743.944 adding performance counter mem/bandwidth.read[latest]:124
12855:20241101:185743.944 adding performance counter mem/bandwidth.read[latest,absolute]:124
12855:20241101:185743.944 adding performance counter mem/bandwidth.write[latest]:125
12855:20241101:185743.944 adding performance counter mem/bandwidth.write[latest,absolute]:125
12855:20241101:185743.944 adding performance counter mem/bandwidth.total[latest]:126
12855:20241101:185743.944 adding performance counter mem/bandwidth.total[latest,absolute]:126
12855:20241101:185743.944 adding performance counter mem/vm.bandwidth.read[latest]:127
12855:20241101:185743.944 adding performance counter mem/vm.bandwidth.read[latest,absolute]:127
12855:20241101:185743.944 adding performance counter mem/missrate[latest]:128
12855:20241101:185743.944 adding performance counter mem/missrate[latest,absolute]:128
12855:20241101:185743.944 adding performance counter mem/latency.read[latest]:129
12855:20241101:185743.944 adding performance counter mem/latency.read[latest,absolute]:129
12855:20241101:185743.945 adding performance counter mem/latency.write[latest]:130
12855:20241101:185743.945 adding performance counter mem/latency.write[latest,absolute]:130
12855:20241101:185743.945 adding performance counter disk/usage[none]:131
12855:20241101:185743.945 adding performance counter disk/usage[none,rate]:131
12855:20241101:185743.945 adding performance counter disk/usage[average]:132
12855:20241101:185743.945 adding performance counter disk/usage[average,rate]:132
12855:20241101:185743.945 adding performance counter disk/usage[minimum]:133
12855:20241101:185743.945 adding performance counter disk/usage[minimum,rate]:133
12855:20241101:185743.945 adding performance counter disk/usage[maximum]:134
12855:20241101:185743.945 adding performance counter disk/usage[maximum,rate]:134
12855:20241101:185743.945 adding performance counter disk/numberRead[summation]:135
12855:20241101:185743.945 adding performance counter disk/numberRead[summation,delta]:135
12855:20241101:185743.945 adding performance counter disk/numberWrite[summation]:136
12855:20241101:185743.945 adding performance counter disk/numberWrite[summation,delta]:136
12855:20241101:185743.945 adding performance counter disk/read[average]:137
12855:20241101:185743.945 adding performance counter disk/read[average,rate]:137
12855:20241101:185743.945 adding performance counter disk/write[average]:138
12855:20241101:185743.945 adding performance counter disk/write[average,rate]:138
12855:20241101:185743.945 adding performance counter disk/totalLatency[average]:139
12855:20241101:185743.945 adding performance counter disk/totalLatency[average,absolute]:139
12855:20241101:185743.945 adding performance counter disk/maxTotalLatency[latest]:140
12855:20241101:185743.945 adding performance counter disk/maxTotalLatency[latest,absolute]:140
12855:20241101:185743.945 adding performance counter disk/commandsAborted[summation]:141
12855:20241101:185743.945 adding performance counter disk/commandsAborted[summation,delta]:141
12855:20241101:185743.945 adding performance counter disk/busResets[summation]:142
12855:20241101:185743.945 adding performance counter disk/busResets[summation,delta]:142
12855:20241101:185743.945 adding performance counter disk/numberReadAveraged[average]:143
12855:20241101:185743.945 adding performance counter disk/numberReadAveraged[average,rate]:143
12855:20241101:185743.945 adding performance counter disk/numberWriteAveraged[average]:144
12855:20241101:185743.945 adding performance counter disk/numberWriteAveraged[average,rate]:144
12855:20241101:185743.945 adding performance counter disk/throughput.usage[average]:145
12855:20241101:185743.945 adding performance counter disk/throughput.usage[average,rate]:145
12855:20241101:185743.945 adding performance counter disk/throughput.contention[average]:146
12855:20241101:185743.945 adding performance counter disk/throughput.contention[average,absolute]:146
12855:20241101:185743.945 adding performance counter disk/scsiReservationConflicts[summation]:147
12855:20241101:185743.945 adding performance counter disk/scsiReservationConflicts[summation,delta]:147
12855:20241101:185743.946 adding performance counter disk/scsiReservationCnflctsPct[average]:148
12855:20241101:185743.946 adding performance counter disk/scsiReservationCnflctsPct[average,absolute]:148
12855:20241101:185743.946 adding performance counter net/usage[none]:149
12855:20241101:185743.946 adding performance counter net/usage[none,rate]:149
12855:20241101:185743.946 adding performance counter net/usage[average]:150
12855:20241101:185743.946 adding performance counter net/usage[average,rate]:150
12855:20241101:185743.946 adding performance counter net/usage[minimum]:151
12855:20241101:185743.946 adding performance counter net/usage[minimum,rate]:151
12855:20241101:185743.946 adding performance counter net/usage[maximum]:152
12855:20241101:185743.946 adding performance counter net/usage[maximum,rate]:152
12855:20241101:185743.946 adding performance counter net/packetsRx[summation]:153
12855:20241101:185743.946 adding performance counter net/packetsRx[summation,delta]:153
12855:20241101:185743.946 adding performance counter net/packetsTx[summation]:154
12855:20241101:185743.946 adding performance counter net/packetsTx[summation,delta]:154
12855:20241101:185743.946 adding performance counter net/received[average]:155
12855:20241101:185743.946 adding performance counter net/received[average,rate]:155
12855:20241101:185743.946 adding performance counter net/transmitted[average]:156
12855:20241101:185743.946 adding performance counter net/transmitted[average,rate]:156
12855:20241101:185743.946 adding performance counter net/throughput.provisioned[average]:157
12855:20241101:185743.946 adding performance counter net/throughput.provisioned[average,absolute]:157
12855:20241101:185743.946 adding performance counter net/throughput.usable[average]:158
12855:20241101:185743.946 adding performance counter net/throughput.usable[average,absolute]:158
12855:20241101:185743.946 adding performance counter net/throughput.usage[average]:159
12855:20241101:185743.946 adding performance counter net/throughput.usage[average,rate]:159
12855:20241101:185743.946 adding performance counter net/throughput.contention[summation]:160
12855:20241101:185743.946 adding performance counter net/throughput.contention[summation,delta]:160
12855:20241101:185743.946 adding performance counter net/throughput.packetsPerSec[average]:161
12855:20241101:185743.946 adding performance counter net/throughput.packetsPerSec[average,rate]:161
12855:20241101:185743.946 adding performance counter sys/uptime[latest]:162
12855:20241101:185743.946 adding performance counter sys/uptime[latest,absolute]:162
12855:20241101:185743.946 adding performance counter sys/heartbeat[summation]:163
12855:20241101:185743.946 adding performance counter sys/heartbeat[summation,delta]:163
12855:20241101:185743.946 adding performance counter power/power[average]:164
12855:20241101:185743.946 adding performance counter power/power[average,rate]:164
12855:20241101:185743.946 adding performance counter power/powerCap[average]:165
12855:20241101:185743.947 adding performance counter power/powerCap[average,absolute]:165
12855:20241101:185743.947 adding performance counter power/energy[summation]:166
12855:20241101:185743.947 adding performance counter power/energy[summation,delta]:166
12855:20241101:185743.947 adding performance counter power/capacity.usagePct[average]:167
12855:20241101:185743.947 adding performance counter power/capacity.usagePct[average,absolute]:167
12855:20241101:185743.947 adding performance counter storageAdapter/commandsAveraged[average]:168
12855:20241101:185743.947 adding performance counter storageAdapter/commandsAveraged[average,rate]:168
12855:20241101:185743.947 adding performance counter storageAdapter/numberReadAveraged[average]:169
12855:20241101:185743.947 adding performance counter storageAdapter/numberReadAveraged[average,rate]:169
12855:20241101:185743.947 adding performance counter storageAdapter/numberWriteAveraged[average]:170
12855:20241101:185743.947 adding performance counter storageAdapter/numberWriteAveraged[average,rate]:170
12855:20241101:185743.947 adding performance counter storageAdapter/read[average]:171
12855:20241101:185743.947 adding performance counter storageAdapter/read[average,rate]:171
12855:20241101:185743.947 adding performance counter storageAdapter/write[average]:172
12855:20241101:185743.947 adding performance counter storageAdapter/write[average,rate]:172
12855:20241101:185743.947 adding performance counter storageAdapter/totalReadLatency[average]:173
12855:20241101:185743.947 adding performance counter storageAdapter/totalReadLatency[average,absolute]:173
12855:20241101:185743.947 adding performance counter storageAdapter/totalWriteLatency[average]:174
12855:20241101:185743.947 adding performance counter storageAdapter/totalWriteLatency[average,absolute]:174
12855:20241101:185743.947 adding performance counter storageAdapter/maxTotalLatency[latest]:175
12855:20241101:185743.947 adding performance counter storageAdapter/maxTotalLatency[latest,absolute]:175
12855:20241101:185743.947 adding performance counter storageAdapter/throughput.cont[average]:176
12855:20241101:185743.947 adding performance counter storageAdapter/throughput.cont[average,absolute]:176
12855:20241101:185743.947 adding performance counter storageAdapter/OIOsPct[average]:177
12855:20241101:185743.947 adding performance counter storageAdapter/OIOsPct[average,absolute]:177
12855:20241101:185743.947 adding performance counter virtualDisk/numberReadAveraged[average]:178
12855:20241101:185743.947 adding performance counter virtualDisk/numberReadAveraged[average,rate]:178
12855:20241101:185743.947 adding performance counter virtualDisk/numberWriteAveraged[average]:179
12855:20241101:185743.947 adding performance counter virtualDisk/numberWriteAveraged[average,rate]:179
12855:20241101:185743.947 adding performance counter virtualDisk/read[average]:180
12855:20241101:185743.947 adding performance counter virtualDisk/read[average,rate]:180
12855:20241101:185743.947 adding performance counter virtualDisk/write[average]:181
12855:20241101:185743.947 adding performance counter virtualDisk/write[average,rate]:181
12855:20241101:185743.947 adding performance counter virtualDisk/totalReadLatency[average]:182
12855:20241101:185743.947 adding performance counter virtualDisk/totalReadLatency[average,absolute]:182
12855:20241101:185743.948 adding performance counter virtualDisk/totalWriteLatency[average]:183
12855:20241101:185743.948 adding performance counter virtualDisk/totalWriteLatency[average,absolute]:183
12855:20241101:185743.948 adding performance counter virtualDisk/throughput.cont[average]:184
12855:20241101:185743.948 adding performance counter virtualDisk/throughput.cont[average,absolute]:184
12855:20241101:185743.948 adding performance counter datastore/numberReadAveraged[average]:185
12855:20241101:185743.948 adding performance counter datastore/numberReadAveraged[average,rate]:185
12855:20241101:185743.948 adding performance counter datastore/numberWriteAveraged[average]:186
12855:20241101:185743.948 adding performance counter datastore/numberWriteAveraged[average,rate]:186
12855:20241101:185743.948 adding performance counter datastore/read[average]:187
12855:20241101:185743.948 adding performance counter datastore/read[average,rate]:187
12855:20241101:185743.948 adding performance counter datastore/write[average]:188
12855:20241101:185743.948 adding performance counter datastore/write[average,rate]:188
12855:20241101:185743.948 adding performance counter datastore/totalReadLatency[average]:189
12855:20241101:185743.948 adding performance counter datastore/totalReadLatency[average,absolute]:189
12855:20241101:185743.948 adding performance counter datastore/totalWriteLatency[average]:190
12855:20241101:185743.948 adding performance counter datastore/totalWriteLatency[average,absolute]:190
12855:20241101:185743.948 adding performance counter datastore/maxTotalLatency[latest]:191
12855:20241101:185743.948 adding performance counter datastore/maxTotalLatency[latest,absolute]:191
12855:20241101:185743.948 adding performance counter datastore/datastoreIops[average]:192
12855:20241101:185743.948 adding performance counter datastore/datastoreIops[average,absolute]:192
12855:20241101:185743.948 adding performance counter datastore/sizeNormalizedDatastoreLatency[average]:193
12855:20241101:185743.948 adding performance counter datastore/sizeNormalizedDatastoreLatency[average,absolute]:193
12855:20241101:185743.948 adding performance counter datastore/throughput.usage[average]:194
12855:20241101:185743.948 adding performance counter datastore/throughput.usage[average,absolute]:194
12855:20241101:185743.948 adding performance counter datastore/throughput.contention[average]:195
12855:20241101:185743.948 adding performance counter datastore/throughput.contention[average,absolute]:195
12855:20241101:185743.948 adding performance counter datastore/busResets[summation]:196
12855:20241101:185743.948 adding performance counter datastore/busResets[summation,delta]:196
12855:20241101:185743.948 adding performance counter datastore/commandsAborted[summation]:197
12855:20241101:185743.948 adding performance counter datastore/commandsAborted[summation,delta]:197
12855:20241101:185743.948 adding performance counter datastore/siocActiveTimePercentage[average]:198
12855:20241101:185743.948 adding performance counter datastore/siocActiveTimePercentage[average,absolute]:198
12855:20241101:185743.948 adding performance counter storagePath/throughput.cont[average]:199
12855:20241101:185743.948 adding performance counter storagePath/throughput.cont[average,absolute]:199
12855:20241101:185743.948 adding performance counter storagePath/maxTotalLatency[latest]:200
12855:20241101:185743.948 adding performance counter storagePath/maxTotalLatency[latest,absolute]:200
12855:20241101:185743.949 adding performance counter virtualDisk/throughput.usage[average]:201
12855:20241101:185743.949 adding performance counter virtualDisk/throughput.usage[average,rate]:201
12855:20241101:185743.949 adding performance counter virtualDisk/commandsAborted[summation]:202
12855:20241101:185743.949 adding performance counter virtualDisk/commandsAborted[summation,delta]:202
12855:20241101:185743.949 adding performance counter virtualDisk/busResets[summation]:203
12855:20241101:185743.949 adding performance counter virtualDisk/busResets[summation,delta]:203
12855:20241101:185743.949 adding performance counter storageAdapter/outstandingIOs[average]:204
12855:20241101:185743.949 adding performance counter storageAdapter/outstandingIOs[average,absolute]:204
12855:20241101:185743.949 adding performance counter storageAdapter/queued[average]:205
12855:20241101:185743.949 adding performance counter storageAdapter/queued[average,absolute]:205
12855:20241101:185743.949 adding performance counter storageAdapter/queueDepth[average]:206
12855:20241101:185743.949 adding performance counter storageAdapter/queueDepth[average,absolute]:206
12855:20241101:185743.949 adding performance counter storageAdapter/queueLatency[average]:207
12855:20241101:185743.949 adding performance counter storageAdapter/queueLatency[average,absolute]:207
12855:20241101:185743.949 adding performance counter storageAdapter/throughput.usag[average]:208
12855:20241101:185743.949 adding performance counter storageAdapter/throughput.usag[average,rate]:208
12855:20241101:185743.949 adding performance counter storagePath/busResets[summation]:209
12855:20241101:185743.949 adding performance counter storagePath/busResets[summation,delta]:209
12855:20241101:185743.949 adding performance counter storagePath/commandsAborted[summation]:210
12855:20241101:185743.949 adding performance counter storagePath/commandsAborted[summation,delta]:210
12855:20241101:185743.949 adding performance counter storagePath/throughput.usage[average]:211
12855:20241101:185743.949 adding performance counter storagePath/throughput.usage[average,rate]:211
12855:20241101:185743.949 adding performance counter net/throughput.usage.vm[average]:212
12855:20241101:185743.949 adding performance counter net/throughput.usage.vm[average,rate]:212
12855:20241101:185743.949 adding performance counter net/throughput.usage.nfs[average]:213
12855:20241101:185743.949 adding performance counter net/throughput.usage.nfs[average,rate]:213
12855:20241101:185743.949 adding performance counter net/throughput.usage.vmotion[average]:214
12855:20241101:185743.949 adding performance counter net/throughput.usage.vmotion[average,rate]:214
12855:20241101:185743.949 adding performance counter net/throughput.usage.ft[average]:215
12855:20241101:185743.949 adding performance counter net/throughput.usage.ft[average,rate]:215
12855:20241101:185743.949 adding performance counter net/throughput.usage.iscsi[average]:216
12855:20241101:185743.949 adding performance counter net/throughput.usage.iscsi[average,rate]:216
12855:20241101:185743.949 adding performance counter net/throughput.usage.hbr[average]:217
12855:20241101:185743.949 adding performance counter net/throughput.usage.hbr[average,rate]:217
12855:20241101:185743.949 adding performance counter power/capacity.usable[average]:218
12855:20241101:185743.949 adding performance counter power/capacity.usable[average,absolute]:218
12855:20241101:185743.950 adding performance counter power/capacity.usage[average]:219
12855:20241101:185743.950 adding performance counter power/capacity.usage[average,absolute]:219
12855:20241101:185743.950 adding performance counter power/capacity.usageIdle[average]:220
12855:20241101:185743.950 adding performance counter power/capacity.usageIdle[average,absolute]:220
12855:20241101:185743.950 adding performance counter power/capacity.usageSystem[average]:221
12855:20241101:185743.950 adding performance counter power/capacity.usageSystem[average,absolute]:221
12855:20241101:185743.950 adding performance counter power/capacity.usageVm[average]:222
12855:20241101:185743.950 adding performance counter power/capacity.usageVm[average,absolute]:222
12855:20241101:185743.950 adding performance counter power/capacity.usageStatic[average]:223
12855:20241101:185743.950 adding performance counter power/capacity.usageStatic[average,absolute]:223
12855:20241101:185743.950 adding performance counter cpu/cpuentitlement[latest]:224
12855:20241101:185743.950 adding performance counter cpu/cpuentitlement[latest,absolute]:224
12855:20241101:185743.950 adding performance counter mem/mementitlement[latest]:225
12855:20241101:185743.950 adding performance counter mem/mementitlement[latest,absolute]:225
12855:20241101:185743.950 adding performance counter clusterServices/vmDrsScore[latest]:226
12855:20241101:185743.950 adding performance counter clusterServices/vmDrsScore[latest,absolute]:226
12855:20241101:185743.950 adding performance counter clusterServices/cpufairness[latest]:227
12855:20241101:185743.950 adding performance counter clusterServices/cpufairness[latest,absolute]:227
12855:20241101:185743.950 adding performance counter clusterServices/memfairness[latest]:228
12855:20241101:185743.950 adding performance counter clusterServices/memfairness[latest,absolute]:228
12855:20241101:185743.950 adding performance counter net/throughput.pktsTx[average]:229
12855:20241101:185743.950 adding performance counter net/throughput.pktsTx[average,absolute]:229
12855:20241101:185743.950 adding performance counter net/throughput.pktsTxMulticast[average]:230
12855:20241101:185743.950 adding performance counter net/throughput.pktsTxMulticast[average,absolute]:230
12855:20241101:185743.950 adding performance counter net/throughput.pktsTxBroadcast[average]:231
12855:20241101:185743.950 adding performance counter net/throughput.pktsTxBroadcast[average,absolute]:231
12855:20241101:185743.950 adding performance counter net/throughput.pktsRx[average]:232
12855:20241101:185743.950 adding performance counter net/throughput.pktsRx[average,absolute]:232
12855:20241101:185743.950 adding performance counter net/throughput.pktsRxMulticast[average]:233
12855:20241101:185743.950 adding performance counter net/throughput.pktsRxMulticast[average,absolute]:233
12855:20241101:185743.950 adding performance counter net/throughput.pktsRxBroadcast[average]:234
12855:20241101:185743.950 adding performance counter net/throughput.pktsRxBroadcast[average,absolute]:234
12855:20241101:185743.950 adding performance counter net/throughput.droppedTx[average]:235
12855:20241101:185743.950 adding performance counter net/throughput.droppedTx[average,absolute]:235
12855:20241101:185743.951 adding performance counter net/throughput.droppedRx[average]:236
12855:20241101:185743.951 adding performance counter net/throughput.droppedRx[average,absolute]:236
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsTx[average]:237
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsTx[average,absolute]:237
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsTxMcast[average]:238
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsTxMcast[average,absolute]:238
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsTxBcast[average]:239
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsTxBcast[average,absolute]:239
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsRx[average]:240
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsRx[average,absolute]:240
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsRxMcast[average]:241
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsRxMcast[average,absolute]:241
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsRxBcast[average]:242
12855:20241101:185743.951 adding performance counter net/throughput.vds.pktsRxBcast[average,absolute]:242
12855:20241101:185743.951 adding performance counter net/throughput.vds.droppedTx[average]:243
12855:20241101:185743.951 adding performance counter net/throughput.vds.droppedTx[average,absolute]:243
12855:20241101:185743.951 adding performance counter net/throughput.vds.droppedRx[average]:244
12855:20241101:185743.951 adding performance counter net/throughput.vds.droppedRx[average,absolute]:244
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagTx[average]:245
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagTx[average,absolute]:245
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagTxMcast[average]:246
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagTxMcast[average,absolute]:246
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagTxBcast[average]:247
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagTxBcast[average,absolute]:247
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagRx[average]:248
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagRx[average,absolute]:248
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagRxMcast[average]:249
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagRxMcast[average,absolute]:249
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagRxBcast[average]:250
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagRxBcast[average,absolute]:250
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagDropTx[average]:251
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagDropTx[average,absolute]:251
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagDropRx[average]:252
12855:20241101:185743.951 adding performance counter net/throughput.vds.lagDropRx[average,absolute]:252
12855:20241101:185743.951 adding performance counter vmop/numPoweron[latest]:253
12855:20241101:185743.951 adding performance counter vmop/numPoweron[latest,absolute]:253
12855:20241101:185743.952 adding performance counter vmop/numPoweroff[latest]:254
12855:20241101:185743.952 adding performance counter vmop/numPoweroff[latest,absolute]:254
12855:20241101:185743.952 adding performance counter vmop/numSuspend[latest]:255
12855:20241101:185743.952 adding performance counter vmop/numSuspend[latest,absolute]:255
12855:20241101:185743.952 adding performance counter vmop/numReset[latest]:256
12855:20241101:185743.952 adding performance counter vmop/numReset[latest,absolute]:256
12855:20241101:185743.952 adding performance counter vmop/numRebootGuest[latest]:257
12855:20241101:185743.952 adding performance counter vmop/numRebootGuest[latest,absolute]:257
12855:20241101:185743.952 adding performance counter vmop/numStandbyGuest[latest]:258
12855:20241101:185743.952 adding performance counter vmop/numStandbyGuest[latest,absolute]:258
12855:20241101:185743.952 adding performance counter vmop/numShutdownGuest[latest]:259
12855:20241101:185743.952 adding performance counter vmop/numShutdownGuest[latest,absolute]:259
12855:20241101:185743.952 adding performance counter vmop/numCreate[latest]:260
12855:20241101:185743.952 adding performance counter vmop/numCreate[latest,absolute]:260
12855:20241101:185743.952 adding performance counter vmop/numDestroy[latest]:261
12855:20241101:185743.952 adding performance counter vmop/numDestroy[latest,absolute]:261
12855:20241101:185743.952 adding performance counter vmop/numRegister[latest]:262
12855:20241101:185743.952 adding performance counter vmop/numRegister[latest,absolute]:262
12855:20241101:185743.952 adding performance counter vmop/numUnregister[latest]:263
12855:20241101:185743.952 adding performance counter vmop/numUnregister[latest,absolute]:263
12855:20241101:185743.952 adding performance counter vmop/numReconfigure[latest]:264
12855:20241101:185743.952 adding performance counter vmop/numReconfigure[latest,absolute]:264
12855:20241101:185743.952 adding performance counter vmop/numClone[latest]:265
12855:20241101:185743.952 adding performance counter vmop/numClone[latest,absolute]:265
12855:20241101:185743.952 adding performance counter vmop/numDeploy[latest]:266
12855:20241101:185743.952 adding performance counter vmop/numDeploy[latest,absolute]:266
12855:20241101:185743.952 adding performance counter vmop/numChangeHost[latest]:267
12855:20241101:185743.952 adding performance counter vmop/numChangeHost[latest,absolute]:267
12855:20241101:185743.952 adding performance counter vmop/numChangeDS[latest]:268
12855:20241101:185743.952 adding performance counter vmop/numChangeDS[latest,absolute]:268
12855:20241101:185743.952 adding performance counter vmop/numChangeHostDS[latest]:269
12855:20241101:185743.952 adding performance counter vmop/numChangeHostDS[latest,absolute]:269
12855:20241101:185743.952 adding performance counter vmop/numVMotion[latest]:270
12855:20241101:185743.952 adding performance counter vmop/numVMotion[latest,absolute]:270
12855:20241101:185743.953 adding performance counter vmop/numSVMotion[latest]:271
12855:20241101:185743.953 adding performance counter vmop/numSVMotion[latest,absolute]:271
12855:20241101:185743.953 adding performance counter vmop/numXVMotion[latest]:272
12855:20241101:185743.953 adding performance counter vmop/numXVMotion[latest,absolute]:272
12855:20241101:185743.953 adding performance counter clusterServices/effectivecpu[average]:273
12855:20241101:185743.953 adding performance counter clusterServices/effectivecpu[average,rate]:273
12855:20241101:185743.953 adding performance counter clusterServices/effectivemem[average]:274
12855:20241101:185743.953 adding performance counter clusterServices/effectivemem[average,absolute]:274
12855:20241101:185743.953 adding performance counter cpu/totalmhz[average]:275
12855:20241101:185743.953 adding performance counter cpu/totalmhz[average,rate]:275
12855:20241101:185743.953 adding performance counter mem/totalmb[average]:276
12855:20241101:185743.953 adding performance counter mem/totalmb[average,absolute]:276
12855:20241101:185743.953 adding performance counter clusterServices/clusterDrsScore[latest]:277
12855:20241101:185743.953 adding performance counter clusterServices/clusterDrsScore[latest,absolute]:277
12855:20241101:185743.953 adding performance counter clusterServices/failover[latest]:278
12855:20241101:185743.953 adding performance counter clusterServices/failover[latest,absolute]:278
12855:20241101:185743.953 adding performance counter gpu/utilization[average]:279
12855:20241101:185743.953 adding performance counter gpu/utilization[average,absolute]:279
12855:20241101:185743.953 adding performance counter gpu/mem.used[average]:280
12855:20241101:185743.953 adding performance counter gpu/mem.used[average,absolute]:280
12855:20241101:185743.953 adding performance counter gpu/mem.reserved[latest]:281
12855:20241101:185743.953 adding performance counter gpu/mem.reserved[latest,absolute]:281
12855:20241101:185743.953 adding performance counter gpu/power.used[latest]:282
12855:20241101:185743.953 adding performance counter gpu/power.used[latest,absolute]:282
12855:20241101:185743.953 adding performance counter gpu/temperature[average]:283
12855:20241101:185743.953 adding performance counter gpu/temperature[average,absolute]:283
12855:20241101:185743.953 adding performance counter gpu/mem.total[latest]:284
12855:20241101:185743.953 adding performance counter gpu/mem.total[latest,absolute]:284
12855:20241101:185743.953 adding performance counter disk/used[latest]:285
12855:20241101:185743.953 adding performance counter disk/used[latest,absolute]:285
12855:20241101:185743.953 adding performance counter disk/provisioned[latest]:286
12855:20241101:185743.953 adding performance counter disk/provisioned[latest,absolute]:286
12855:20241101:185743.953 adding performance counter disk/capacity[latest]:287
12855:20241101:185743.953 adding performance counter disk/capacity[latest,absolute]:287
12855:20241101:185743.954 adding performance counter disk/unshared[latest]:288
12855:20241101:185743.954 adding performance counter disk/unshared[latest,absolute]:288
12855:20241101:185743.954 adding performance counter disk/actualused[latest]:289
12855:20241101:185743.954 adding performance counter disk/actualused[latest,absolute]:289
12855:20241101:185743.954 adding performance counter disk/deltaused[latest]:290
12855:20241101:185743.954 adding performance counter disk/deltaused[latest,absolute]:290
12855:20241101:185743.954 adding performance counter disk/capacity.provisioned[average]:291
12855:20241101:185743.954 adding performance counter disk/capacity.provisioned[average,absolute]:291
12855:20241101:185743.954 adding performance counter disk/capacity.usage[average]:292
12855:20241101:185743.954 adding performance counter disk/capacity.usage[average,absolute]:292
12855:20241101:185743.954 adding performance counter disk/capacity.contention[average]:293
12855:20241101:185743.954 adding performance counter disk/capacity.contention[average,absolute]:293
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationlatencystats[maximum]:294
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationlatencystats[maximum,absolute]:294
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationlatencystats[minimum]:295
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationlatencystats[minimum,absolute]:295
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationlatencystats[summation]:296
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationlatencystats[summation,absolute]:296
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationstats[maximum]:297
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationstats[maximum,absolute]:297
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationstats[minimum]:298
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationstats[minimum,absolute]:298
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationstats[summation]:299
12855:20241101:185743.954 adding performance counter vcDebugInfo/activationstats[summation,absolute]:299
12855:20241101:185743.954 adding performance counter vcResources/buffersz[average]:300
12855:20241101:185743.954 adding performance counter vcResources/buffersz[average,absolute]:300
12855:20241101:185743.954 adding performance counter vcResources/cachesz[average]:301
12855:20241101:185743.954 adding performance counter vcResources/cachesz[average,absolute]:301
12855:20241101:185743.954 adding performance counter vcResources/ctxswitchesrate[average]:302
12855:20241101:185743.954 adding performance counter vcResources/ctxswitchesrate[average,rate]:302
12855:20241101:185743.954 adding performance counter vcResources/diskreadsectorrate[average]:303
12855:20241101:185743.954 adding performance counter vcResources/diskreadsectorrate[average,rate]:303
12855:20241101:185743.954 adding performance counter vcResources/diskreadsrate[average]:304
12855:20241101:185743.954 adding performance counter vcResources/diskreadsrate[average,rate]:304
12855:20241101:185743.954 adding performance counter vcResources/diskwritesectorrate[average]:305
12855:20241101:185743.954 adding performance counter vcResources/diskwritesectorrate[average,rate]:305
12855:20241101:185743.955 adding performance counter vcResources/diskwritesrate[average]:306
12855:20241101:185743.955 adding performance counter vcResources/diskwritesrate[average,rate]:306
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsynclatencystats[maximum]:307
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsynclatencystats[maximum,absolute]:307
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsynclatencystats[minimum]:308
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsynclatencystats[minimum,absolute]:308
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsynclatencystats[summation]:309
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsynclatencystats[summation,absolute]:309
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsyncstats[maximum]:310
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsyncstats[maximum,absolute]:310
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsyncstats[minimum]:311
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsyncstats[minimum,absolute]:311
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsyncstats[summation]:312
12855:20241101:185743.955 adding performance counter vcDebugInfo/hostsyncstats[summation,absolute]:312
12855:20241101:185743.955 adding performance counter vcDebugInfo/inventorystats[maximum]:313
12855:20241101:185743.955 adding performance counter vcDebugInfo/inventorystats[maximum,absolute]:313
12855:20241101:185743.955 adding performance counter vcDebugInfo/inventorystats[minimum]:314
12855:20241101:185743.955 adding performance counter vcDebugInfo/inventorystats[minimum,absolute]:314
12855:20241101:185743.955 adding performance counter vcDebugInfo/inventorystats[summation]:315
12855:20241101:185743.955 adding performance counter vcDebugInfo/inventorystats[summation,absolute]:315
12855:20241101:185743.955 adding performance counter vcDebugInfo/lockstats[maximum]:316
12855:20241101:185743.955 adding performance counter vcDebugInfo/lockstats[maximum,absolute]:316
12855:20241101:185743.955 adding performance counter vcDebugInfo/lockstats[minimum]:317
12855:20241101:185743.955 adding performance counter vcDebugInfo/lockstats[minimum,absolute]:317
12855:20241101:185743.955 adding performance counter vcDebugInfo/lockstats[summation]:318
12855:20241101:185743.955 adding performance counter vcDebugInfo/lockstats[summation,absolute]:318
12855:20241101:185743.955 adding performance counter vcDebugInfo/lrostats[maximum]:319
12855:20241101:185743.955 adding performance counter vcDebugInfo/lrostats[maximum,absolute]:319
12855:20241101:185743.955 adding performance counter vcDebugInfo/lrostats[minimum]:320
12855:20241101:185743.955 adding performance counter vcDebugInfo/lrostats[minimum,absolute]:320
12855:20241101:185743.955 adding performance counter vcDebugInfo/lrostats[summation]:321
12855:20241101:185743.955 adding performance counter vcDebugInfo/lrostats[summation,absolute]:321
12855:20241101:185743.955 adding performance counter vcDebugInfo/miscstats[maximum]:322
12855:20241101:185743.955 adding performance counter vcDebugInfo/miscstats[maximum,absolute]:322
12855:20241101:185743.956 adding performance counter vcDebugInfo/miscstats[minimum]:323
12855:20241101:185743.956 adding performance counter vcDebugInfo/miscstats[minimum,absolute]:323
12855:20241101:185743.956 adding performance counter vcDebugInfo/miscstats[summation]:324
12855:20241101:185743.956 adding performance counter vcDebugInfo/miscstats[summation,absolute]:324
12855:20241101:185743.956 adding performance counter vcDebugInfo/morefregstats[maximum]:325
12855:20241101:185743.956 adding performance counter vcDebugInfo/morefregstats[maximum,absolute]:325
12855:20241101:185743.956 adding performance counter vcDebugInfo/morefregstats[minimum]:326
12855:20241101:185743.956 adding performance counter vcDebugInfo/morefregstats[minimum,absolute]:326
12855:20241101:185743.956 adding performance counter vcDebugInfo/morefregstats[summation]:327
12855:20241101:185743.956 adding performance counter vcDebugInfo/morefregstats[summation,absolute]:327
12855:20241101:185743.956 adding performance counter vcResources/packetrecvrate[average]:328
12855:20241101:185743.956 adding performance counter vcResources/packetrecvrate[average,rate]:328
12855:20241101:185743.956 adding performance counter vcResources/packetsentrate[average]:329
12855:20241101:185743.956 adding performance counter vcResources/packetsentrate[average,rate]:329
12855:20241101:185743.956 adding performance counter vcResources/systemcpuusage[average]:330
12855:20241101:185743.956 adding performance counter vcResources/systemcpuusage[average,rate]:330
12855:20241101:185743.956 adding performance counter vcResources/pagefaultrate[average]:331
12855:20241101:185743.956 adding performance counter vcResources/pagefaultrate[average,rate]:331
12855:20241101:185743.956 adding performance counter vcResources/physicalmemusage[average]:332
12855:20241101:185743.956 adding performance counter vcResources/physicalmemusage[average,absolute]:332
12855:20241101:185743.956 adding performance counter vcResources/priviledgedcpuusage[average]:333
12855:20241101:185743.956 adding performance counter vcResources/priviledgedcpuusage[average,rate]:333
12855:20241101:185743.956 adding performance counter vcDebugInfo/scoreboard[maximum]:334
12855:20241101:185743.956 adding performance counter vcDebugInfo/scoreboard[maximum,absolute]:334
12855:20241101:185743.956 adding performance counter vcDebugInfo/scoreboard[minimum]:335
12855:20241101:185743.956 adding performance counter vcDebugInfo/scoreboard[minimum,absolute]:335
12855:20241101:185743.956 adding performance counter vcDebugInfo/scoreboard[summation]:336
12855:20241101:185743.956 adding performance counter vcDebugInfo/scoreboard[summation,absolute]:336
12855:20241101:185743.956 adding performance counter vcDebugInfo/sessionstats[maximum]:337
12855:20241101:185743.956 adding performance counter vcDebugInfo/sessionstats[maximum,absolute]:337
12855:20241101:185743.956 adding performance counter vcDebugInfo/sessionstats[minimum]:338
12855:20241101:185743.956 adding performance counter vcDebugInfo/sessionstats[minimum,absolute]:338
12855:20241101:185743.956 adding performance counter vcDebugInfo/sessionstats[summation]:339
12855:20241101:185743.956 adding performance counter vcDebugInfo/sessionstats[summation,absolute]:339
12855:20241101:185743.957 adding performance counter vcResources/syscallsrate[average]:340
12855:20241101:185743.957 adding performance counter vcResources/syscallsrate[average,rate]:340
12855:20241101:185743.957 adding performance counter vcDebugInfo/systemstats[maximum]:341
12855:20241101:185743.957 adding performance counter vcDebugInfo/systemstats[maximum,absolute]:341
12855:20241101:185743.957 adding performance counter vcDebugInfo/systemstats[minimum]:342
12855:20241101:185743.957 adding performance counter vcDebugInfo/systemstats[minimum,absolute]:342
12855:20241101:185743.957 adding performance counter vcDebugInfo/systemstats[summation]:343
12855:20241101:185743.957 adding performance counter vcDebugInfo/systemstats[summation,absolute]:343
12855:20241101:185743.957 adding performance counter vcResources/usercpuusage[average]:344
12855:20241101:185743.957 adding performance counter vcResources/usercpuusage[average,rate]:344
12855:20241101:185743.957 adding performance counter vcDebugInfo/vcservicestats[maximum]:345
12855:20241101:185743.957 adding performance counter vcDebugInfo/vcservicestats[maximum,absolute]:345
12855:20241101:185743.957 adding performance counter vcDebugInfo/vcservicestats[minimum]:346
12855:20241101:185743.957 adding performance counter vcDebugInfo/vcservicestats[minimum,absolute]:346
12855:20241101:185743.957 adding performance counter vcDebugInfo/vcservicestats[summation]:347
12855:20241101:185743.957 adding performance counter vcDebugInfo/vcservicestats[summation,absolute]:347
12855:20241101:185743.957 adding performance counter vcResources/virtualmemusage[average]:348
12855:20241101:185743.957 adding performance counter vcResources/virtualmemusage[average,absolute]:348
12855:20241101:185743.957 adding performance counter virtualDisk/readOIO[latest]:349
12855:20241101:185743.957 adding performance counter virtualDisk/readOIO[latest,absolute]:349
12855:20241101:185743.957 adding performance counter virtualDisk/writeOIO[latest]:350
12855:20241101:185743.957 adding performance counter virtualDisk/writeOIO[latest,absolute]:350
12855:20241101:185743.957 adding performance counter virtualDisk/readLoadMetric[latest]:351
12855:20241101:185743.957 adding performance counter virtualDisk/readLoadMetric[latest,absolute]:351
12855:20241101:185743.957 adding performance counter virtualDisk/writeLoadMetric[latest]:352
12855:20241101:185743.957 adding performance counter virtualDisk/writeLoadMetric[latest,absolute]:352
12855:20241101:185743.957 adding performance counter rescpu/actav1[latest]:353
12855:20241101:185743.957 adding performance counter rescpu/actav1[latest,absolute]:353
12855:20241101:185743.957 adding performance counter datastore/datastoreReadBytes[latest]:354
12855:20241101:185743.957 adding performance counter datastore/datastoreReadBytes[latest,absolute]:354
12855:20241101:185743.957 adding performance counter datastore/datastoreWriteBytes[latest]:355
12855:20241101:185743.957 adding performance counter datastore/datastoreWriteBytes[latest,absolute]:355
12855:20241101:185743.958 adding performance counter datastore/datastoreReadIops[latest]:356
12855:20241101:185743.958 adding performance counter datastore/datastoreReadIops[latest,absolute]:356
12855:20241101:185743.958 adding performance counter datastore/datastoreWriteIops[latest]:357
12855:20241101:185743.958 adding performance counter datastore/datastoreWriteIops[latest,absolute]:357
12855:20241101:185743.958 adding performance counter datastore/datastoreReadOIO[latest]:358
12855:20241101:185743.958 adding performance counter datastore/datastoreReadOIO[latest,absolute]:358
12855:20241101:185743.958 adding performance counter datastore/datastoreWriteOIO[latest]:359
12855:20241101:185743.958 adding performance counter datastore/datastoreWriteOIO[latest,absolute]:359
12855:20241101:185743.958 adding performance counter datastore/datastoreNormalReadLatency[latest]:360
12855:20241101:185743.958 adding performance counter datastore/datastoreNormalReadLatency[latest,absolute]:360
12855:20241101:185743.958 adding performance counter datastore/datastoreNormalWriteLatency[latest]:361
12855:20241101:185743.958 adding performance counter datastore/datastoreNormalWriteLatency[latest,absolute]:361
12855:20241101:185743.958 adding performance counter datastore/datastoreReadLoadMetric[latest]:362
12855:20241101:185743.958 adding performance counter datastore/datastoreReadLoadMetric[latest,absolute]:362
12855:20241101:185743.958 adding performance counter datastore/datastoreWriteLoadMetric[latest]:363
12855:20241101:185743.958 adding performance counter datastore/datastoreWriteLoadMetric[latest,absolute]:363
12855:20241101:185743.958 adding performance counter datastore/datastoreVMObservedLatency[latest]:364
12855:20241101:185743.958 adding performance counter datastore/datastoreVMObservedLatency[latest,absolute]:364
12855:20241101:185743.958 adding performance counter disk/scsiReservationCnflctsPct[average]:365
12855:20241101:185743.958 adding performance counter disk/scsiReservationCnflctsPct[average,rate]:365
12855:20241101:185743.958 adding performance counter disk/read[latest]:366
12855:20241101:185743.958 adding performance counter disk/read[latest,absolute]:366
12855:20241101:185743.958 adding performance counter disk/readFailed[latest]:367
12855:20241101:185743.958 adding performance counter disk/readFailed[latest,absolute]:367
12855:20241101:185743.958 adding performance counter disk/write[latest]:368
12855:20241101:185743.958 adding performance counter disk/write[latest,absolute]:368
12855:20241101:185743.958 adding performance counter disk/writeFailed[latest]:369
12855:20241101:185743.958 adding performance counter disk/writeFailed[latest,absolute]:369
12855:20241101:185743.958 adding performance counter disk/commands.success[latest]:370
12855:20241101:185743.958 adding performance counter disk/commands.success[latest,absolute]:370
12855:20241101:185743.958 adding performance counter disk/commands.failed[latest]:371
12855:20241101:185743.958 adding performance counter disk/commands.failed[latest,absolute]:371
12855:20241101:185743.958 adding performance counter disk/commands.queued[latest]:372
12855:20241101:185743.958 adding performance counter disk/commands.queued[latest,absolute]:372
12855:20241101:185743.958 adding performance counter disk/commands.active[latest]:373
12855:20241101:185743.958 adding performance counter disk/commands.active[latest,absolute]:373
12855:20241101:185743.959 adding performance counter disk/state[latest]:374
12855:20241101:185743.959 adding performance counter disk/state[latest,absolute]:374
12855:20241101:185743.959 adding performance counter disk/TM.abort[latest]:375
12855:20241101:185743.959 adding performance counter disk/TM.abort[latest,absolute]:375
12855:20241101:185743.959 adding performance counter disk/TM.abortRetry[latest]:376
12855:20241101:185743.959 adding performance counter disk/TM.abortRetry[latest,absolute]:376
12855:20241101:185743.959 adding performance counter disk/TM.abortFailed[latest]:377
12855:20241101:185743.959 adding performance counter disk/TM.abortFailed[latest,absolute]:377
12855:20241101:185743.959 adding performance counter disk/TM.virtReset[latest]:378
12855:20241101:185743.959 adding performance counter disk/TM.virtReset[latest,absolute]:378
12855:20241101:185743.959 adding performance counter disk/TM.virtResetRetry[latest]:379
12855:20241101:185743.959 adding performance counter disk/TM.virtResetRetry[latest,absolute]:379
12855:20241101:185743.959 adding performance counter disk/TM.virtResetFailed[latest]:380
12855:20241101:185743.959 adding performance counter disk/TM.virtResetFailed[latest,absolute]:380
12855:20241101:185743.959 adding performance counter disk/TM.lunReset[latest]:381
12855:20241101:185743.959 adding performance counter disk/TM.lunReset[latest,absolute]:381
12855:20241101:185743.959 adding performance counter disk/TM.lunResetRetry[latest]:382
12855:20241101:185743.959 adding performance counter disk/TM.lunResetRetry[latest,absolute]:382
12855:20241101:185743.959 adding performance counter disk/TM.lunResetFailed[latest]:383
12855:20241101:185743.959 adding performance counter disk/TM.lunResetFailed[latest,absolute]:383
12855:20241101:185743.959 adding performance counter disk/TM.deviceReset[latest]:384
12855:20241101:185743.959 adding performance counter disk/TM.deviceReset[latest,absolute]:384
12855:20241101:185743.959 adding performance counter disk/TM.deviceResetRetry[latest]:385
12855:20241101:185743.959 adding performance counter disk/TM.deviceResetRetry[latest,absolute]:385
12855:20241101:185743.959 adding performance counter disk/TM.deviceResetFailed[latest]:386
12855:20241101:185743.959 adding performance counter disk/TM.deviceResetFailed[latest,absolute]:386
12855:20241101:185743.959 adding performance counter disk/TM.busReset[latest]:387
12855:20241101:185743.959 adding performance counter disk/TM.busReset[latest,absolute]:387
12855:20241101:185743.959 adding performance counter disk/TM.busResetRetry[latest]:388
12855:20241101:185743.959 adding performance counter disk/TM.busResetRetry[latest,absolute]:388
12855:20241101:185743.959 adding performance counter disk/TM.busResetFailed[latest]:389
12855:20241101:185743.959 adding performance counter disk/TM.busResetFailed[latest,absolute]:389
12855:20241101:185743.959 adding performance counter disk/latency.qavg[latest]:390
12855:20241101:185743.959 adding performance counter disk/latency.qavg[latest,absolute]:390
12855:20241101:185743.960 adding performance counter disk/latency.davg[latest]:391
12855:20241101:185743.960 adding performance counter disk/latency.davg[latest,absolute]:391
12855:20241101:185743.960 adding performance counter disk/latency.kavg[latest]:392
12855:20241101:185743.960 adding performance counter disk/latency.kavg[latest,absolute]:392
12855:20241101:185743.960 adding performance counter disk/latency.gavg[latest]:393
12855:20241101:185743.960 adding performance counter disk/latency.gavg[latest,absolute]:393
12855:20241101:185743.960 adding performance counter storageAdapter/outstandingIOs[latest]:394
12855:20241101:185743.960 adding performance counter storageAdapter/outstandingIOs[latest,absolute]:394
12855:20241101:185743.960 adding performance counter storageAdapter/queued[latest]:395
12855:20241101:185743.960 adding performance counter storageAdapter/queued[latest,absolute]:395
12855:20241101:185743.960 adding performance counter storageAdapter/queueDepth[latest]:396
12855:20241101:185743.960 adding performance counter storageAdapter/queueDepth[latest,absolute]:396
12855:20241101:185743.960 adding performance counter cpu/partnerBusyTime[average]:397
12855:20241101:185743.960 adding performance counter cpu/partnerBusyTime[average,rate]:397
12855:20241101:185743.960 adding performance counter cpu/utilization[average]:398
12855:20241101:185743.960 adding performance counter cpu/utilization[average,rate]:398
12855:20241101:185743.960 adding performance counter cpu/corecount.provisioned[latest]:399
12855:20241101:185743.960 adding performance counter cpu/corecount.provisioned[latest,absolute]:399
12855:20241101:185743.960 adding performance counter cpu/cache.l3.occupancy[average]:400
12855:20241101:185743.960 adding performance counter cpu/cache.l3.occupancy[average,absolute]:400
12855:20241101:185743.960 adding performance counter cpu/corecount.usage[latest]:401
12855:20241101:185743.960 adding performance counter cpu/corecount.usage[latest,absolute]:401
12855:20241101:185743.960 adding performance counter cpu/load.avg1min[latest]:402
12855:20241101:185743.960 adding performance counter cpu/load.avg1min[latest,absolute]:402
12855:20241101:185743.960 adding performance counter cpu/load.avg5min[latest]:403
12855:20241101:185743.960 adding performance counter cpu/load.avg5min[latest,absolute]:403
12855:20241101:185743.960 adding performance counter cpu/load.avg15min[latest]:404
12855:20241101:185743.960 adding performance counter cpu/load.avg15min[latest,absolute]:404
12855:20241101:185743.960 adding performance counter mem/capacity.provisioned[latest]:405
12855:20241101:185743.960 adding performance counter mem/capacity.provisioned[latest,absolute]:405
12855:20241101:185743.960 adding performance counter mem/reservedCapacityPct[latest]:406
12855:20241101:185743.960 adding performance counter mem/reservedCapacityPct[latest,absolute]:406
12855:20241101:185743.960 adding performance counter mem/overcommit.avg1min[latest]:407
12855:20241101:185743.960 adding performance counter mem/overcommit.avg1min[latest,absolute]:407
12855:20241101:185743.961 adding performance counter mem/overcommit.avg5min[latest]:408
12855:20241101:185743.961 adding performance counter mem/overcommit.avg5min[latest,absolute]:408
12855:20241101:185743.961 adding performance counter mem/overcommit.avg15min[latest]:409
12855:20241101:185743.961 adding performance counter mem/overcommit.avg15min[latest,absolute]:409
12855:20241101:185743.961 adding performance counter mem/physical.total[latest]:410
12855:20241101:185743.961 adding performance counter mem/physical.total[latest,absolute]:410
12855:20241101:185743.961 adding performance counter mem/physical.user[latest]:411
12855:20241101:185743.961 adding performance counter mem/physical.user[latest,absolute]:411
12855:20241101:185743.961 adding performance counter mem/physical.free[latest]:412
12855:20241101:185743.961 adding performance counter mem/physical.free[latest,absolute]:412
12855:20241101:185743.961 adding performance counter mem/kernel.managed[latest]:413
12855:20241101:185743.961 adding performance counter mem/kernel.managed[latest,absolute]:413
12855:20241101:185743.961 adding performance counter mem/kernel.minfree[latest]:414
12855:20241101:185743.961 adding performance counter mem/kernel.minfree[latest,absolute]:414
12855:20241101:185743.961 adding performance counter mem/kernel.unreserved[latest]:415
12855:20241101:185743.961 adding performance counter mem/kernel.unreserved[latest,absolute]:415
12855:20241101:185743.961 adding performance counter mem/pshare.shared[latest]:416
12855:20241101:185743.961 adding performance counter mem/pshare.shared[latest,absolute]:416
12855:20241101:185743.961 adding performance counter mem/pshare.common[latest]:417
12855:20241101:185743.961 adding performance counter mem/pshare.common[latest,absolute]:417
12855:20241101:185743.961 adding performance counter mem/pshare.sharedSave[latest]:418
12855:20241101:185743.961 adding performance counter mem/pshare.sharedSave[latest,absolute]:418
12855:20241101:185743.961 adding performance counter mem/swap.current[latest]:419
12855:20241101:185743.961 adding performance counter mem/swap.current[latest,absolute]:419
12855:20241101:185743.961 adding performance counter mem/swap.target[latest]:420
12855:20241101:185743.961 adding performance counter mem/swap.target[latest,absolute]:420
12855:20241101:185743.961 adding performance counter mem/swap.readrate[average]:421
12855:20241101:185743.961 adding performance counter mem/swap.readrate[average,rate]:421
12855:20241101:185743.961 adding performance counter mem/swap.writerate[average]:422
12855:20241101:185743.961 adding performance counter mem/swap.writerate[average,rate]:422
12855:20241101:185743.961 adding performance counter mem/zip.zipped[latest]:423
12855:20241101:185743.961 adding performance counter mem/zip.zipped[latest,absolute]:423
12855:20241101:185743.961 adding performance counter mem/zip.saved[latest]:424
12855:20241101:185743.961 adding performance counter mem/zip.saved[latest,absolute]:424
12855:20241101:185743.961 adding performance counter mem/memctl.current[latest]:425
12855:20241101:185743.962 adding performance counter mem/memctl.current[latest,absolute]:425
12855:20241101:185743.962 adding performance counter mem/memctl.target[latest]:426
12855:20241101:185743.962 adding performance counter mem/memctl.target[latest,absolute]:426
12855:20241101:185743.962 adding performance counter mem/memctl.max[latest]:427
12855:20241101:185743.962 adding performance counter mem/memctl.max[latest,absolute]:427
12855:20241101:185743.962 adding performance counter mem/health.reservationState[latest]:428
12855:20241101:185743.962 adding performance counter mem/health.reservationState[latest,absolute]:428
12855:20241101:185743.962 adding performance counter mem/capacity.overhead[average]:429
12855:20241101:185743.962 adding performance counter mem/capacity.overhead[average,absolute]:429
12855:20241101:185743.962 adding performance counter mem/capacity.overheadResv[average]:430
12855:20241101:185743.962 adding performance counter mem/capacity.overheadResv[average,absolute]:430
12855:20241101:185743.962 adding performance counter mem/capacity.consumed[latest]:431
12855:20241101:185743.962 adding performance counter mem/capacity.consumed[latest,absolute]:431
12855:20241101:185743.962 adding performance counter mem/capacity.active[latest]:432
12855:20241101:185743.962 adding performance counter mem/capacity.active[latest,absolute]:432
12855:20241101:185743.962 adding performance counter power/capacity.usageCpu[average]:433
12855:20241101:185743.962 adding performance counter power/capacity.usageCpu[average,absolute]:433
12855:20241101:185743.962 adding performance counter power/capacity.usageMem[average]:434
12855:20241101:185743.962 adding performance counter power/capacity.usageMem[average,absolute]:434
12855:20241101:185743.962 adding performance counter power/capacity.usageOther[average]:435
12855:20241101:185743.962 adding performance counter power/capacity.usageOther[average,absolute]:435
12855:20241101:185743.962 adding performance counter vmotion/vmkernel.downtime[latest]:436
12855:20241101:185743.962 adding performance counter vmotion/vmkernel.downtime[latest,absolute]:436
12855:20241101:185743.962 adding performance counter vmotion/downtime[latest]:437
12855:20241101:185743.962 adding performance counter vmotion/downtime[latest,absolute]:437
12855:20241101:185743.962 adding performance counter vmotion/precopy.time[latest]:438
12855:20241101:185743.962 adding performance counter vmotion/precopy.time[latest,absolute]:438
12855:20241101:185743.962 adding performance counter vmotion/rtt[latest]:439
12855:20241101:185743.962 adding performance counter vmotion/rtt[latest,absolute]:439
12855:20241101:185743.962 adding performance counter vmotion/dst.migration.time[latest]:440
12855:20241101:185743.962 adding performance counter vmotion/dst.migration.time[latest,absolute]:440
12855:20241101:185743.962 adding performance counter vmotion/mem.sizemb[latest]:441
12855:20241101:185743.962 adding performance counter vmotion/mem.sizemb[latest,absolute]:441
12855:20241101:185743.962 adding performance counter hbr/vms[latest]:442
12855:20241101:185743.962 adding performance counter hbr/vms[latest,absolute]:442
12855:20241101:185743.963 adding performance counter net/throughput.hbr.inbound[average]:443
12855:20241101:185743.963 adding performance counter net/throughput.hbr.inbound[average,rate]:443
12855:20241101:185743.963 adding performance counter net/throughput.hbr.outbound[average]:444
12855:20241101:185743.963 adding performance counter net/throughput.hbr.outbound[average,rate]:444
12855:20241101:185743.963 adding performance counter virtualDisk/hbr.readLatencyMS[latest]:445
12855:20241101:185743.963 adding performance counter virtualDisk/hbr.readLatencyMS[latest,absolute]:445
12855:20241101:185743.963 adding performance counter virtualDisk/hbr.stallLatencyMS[latest]:446
12855:20241101:185743.963 adding performance counter virtualDisk/hbr.stallLatencyMS[latest,absolute]:446
12855:20241101:185743.963 adding performance counter net/latency.hbr.outbound[latest]:447
12855:20241101:185743.963 adding performance counter net/latency.hbr.outbound[latest,absolute]:447
12855:20241101:185743.963 adding performance counter lwd/numSnapshots[latest]:448
12855:20241101:185743.963 adding performance counter lwd/numSnapshots[latest,absolute]:448
12855:20241101:185743.963 adding performance counter nfs/apdState[latest]:449
12855:20241101:185743.963 adding performance counter nfs/apdState[latest,absolute]:449
12855:20241101:185743.963 adding performance counter nfs/readIssueTime[latest]:450
12855:20241101:185743.963 adding performance counter nfs/readIssueTime[latest,absolute]:450
12855:20241101:185743.963 adding performance counter nfs/writeIssueTime[latest]:451
12855:20241101:185743.963 adding performance counter nfs/writeIssueTime[latest,absolute]:451
12855:20241101:185743.963 adding performance counter nfs/totalReads[latest]:452
12855:20241101:185743.963 adding performance counter nfs/totalReads[latest,absolute]:452
12855:20241101:185743.963 adding performance counter nfs/readsFailed[latest]:453
12855:20241101:185743.963 adding performance counter nfs/readsFailed[latest,absolute]:453
12855:20241101:185743.963 adding performance counter nfs/totalWrites[latest]:454
12855:20241101:185743.963 adding performance counter nfs/totalWrites[latest,absolute]:454
12855:20241101:185743.963 adding performance counter nfs/writesFailed[latest]:455
12855:20241101:185743.963 adding performance counter nfs/writesFailed[latest,absolute]:455
12855:20241101:185743.963 adding performance counter nfs/readTime[latest]:456
12855:20241101:185743.963 adding performance counter nfs/readTime[latest,absolute]:456
12855:20241101:185743.963 adding performance counter nfs/writeTime[latest]:457
12855:20241101:185743.963 adding performance counter nfs/writeTime[latest,absolute]:457
12855:20241101:185743.963 adding performance counter nfs/ioRequestsQueued[latest]:458
12855:20241101:185743.963 adding performance counter nfs/ioRequestsQueued[latest,absolute]:458
12855:20241101:185743.963 adding performance counter nfs/totalCreate[latest]:459
12855:20241101:185743.963 adding performance counter nfs/totalCreate[latest,absolute]:459
12855:20241101:185743.964 adding performance counter nfs/createFailed[latest]:460
12855:20241101:185743.964 adding performance counter nfs/createFailed[latest,absolute]:460
12855:20241101:185743.964 adding performance counter nfs/socketBufferFull[latest]:461
12855:20241101:185743.964 adding performance counter nfs/socketBufferFull[latest,absolute]:461
12855:20241101:185743.964 adding performance counter datastore/vmfs.totalTxn[latest]:462
12855:20241101:185743.964 adding performance counter datastore/vmfs.totalTxn[latest,absolute]:462
12855:20241101:185743.964 adding performance counter datastore/vmfs.cancelledTxn[latest]:463
12855:20241101:185743.964 adding performance counter datastore/vmfs.cancelledTxn[latest,absolute]:463
12855:20241101:185743.964 adding performance counter datastore/vmfs.apdState[latest]:464
12855:20241101:185743.964 adding performance counter datastore/vmfs.apdState[latest,absolute]:464
12855:20241101:185743.964 adding performance counter datastore/vmfs.apdCount[latest]:465
12855:20241101:185743.964 adding performance counter datastore/vmfs.apdCount[latest,absolute]:465
12855:20241101:185743.964 adding performance counter vvol/pe.isaccessible[latest]:466
12855:20241101:185743.964 adding performance counter vvol/pe.isaccessible[latest,absolute]:466
12855:20241101:185743.964 adding performance counter vvol/pe.reads.done[latest]:467
12855:20241101:185743.964 adding performance counter vvol/pe.reads.done[latest,absolute]:467
12855:20241101:185743.964 adding performance counter vvol/pe.writes.done[latest]:468
12855:20241101:185743.964 adding performance counter vvol/pe.writes.done[latest,absolute]:468
12855:20241101:185743.964 adding performance counter vvol/pe.total.done[latest]:469
12855:20241101:185743.964 adding performance counter vvol/pe.total.done[latest,absolute]:469
12855:20241101:185743.964 adding performance counter vvol/pe.reads.sent[latest]:470
12855:20241101:185743.964 adding performance counter vvol/pe.reads.sent[latest,absolute]:470
12855:20241101:185743.964 adding performance counter vvol/pe.writes.sent[latest]:471
12855:20241101:185743.964 adding performance counter vvol/pe.writes.sent[latest,absolute]:471
12855:20241101:185743.964 adding performance counter vvol/pe.total.sent[latest]:472
12855:20241101:185743.964 adding performance counter vvol/pe.total.sent[latest,absolute]:472
12855:20241101:185743.964 adding performance counter vvol/pe.readsissued.failed[latest]:473
12855:20241101:185743.964 adding performance counter vvol/pe.readsissued.failed[latest,absolute]:473
12855:20241101:185743.964 adding performance counter vvol/pe.writesissued.failed[latest]:474
12855:20241101:185743.964 adding performance counter vvol/pe.writesissued.failed[latest,absolute]:474
12855:20241101:185743.964 adding performance counter vvol/pe.totalissued.failed[latest]:475
12855:20241101:185743.964 adding performance counter vvol/pe.totalissued.failed[latest,absolute]:475
12855:20241101:185743.964 adding performance counter vvol/pe.reads.failed[latest]:476
12855:20241101:185743.964 adding performance counter vvol/pe.reads.failed[latest,absolute]:476
12855:20241101:185743.965 adding performance counter vvol/pe.writes.failed[latest]:477
12855:20241101:185743.965 adding performance counter vvol/pe.writes.failed[latest,absolute]:477
12855:20241101:185743.965 adding performance counter vvol/pe.total.failed[latest]:478
12855:20241101:185743.965 adding performance counter vvol/pe.total.failed[latest,absolute]:478
12855:20241101:185743.965 adding performance counter vvol/pe.read.latency[latest]:479
12855:20241101:185743.965 adding performance counter vvol/pe.read.latency[latest,absolute]:479
12855:20241101:185743.965 adding performance counter vvol/pe.write.latency[latest]:480
12855:20241101:185743.965 adding performance counter vvol/pe.write.latency[latest,absolute]:480
12855:20241101:185743.965 adding performance counter vvol/pe.issue.latency[latest]:481
12855:20241101:185743.965 adding performance counter vvol/pe.issue.latency[latest,absolute]:481
12855:20241101:185743.965 adding performance counter vvol/pe.total.latency[latest]:482
12855:20241101:185743.965 adding performance counter vvol/pe.total.latency[latest,absolute]:482
12855:20241101:185743.965 adding performance counter vvol/pe.cancel.sent[latest]:483
12855:20241101:185743.965 adding performance counter vvol/pe.cancel.sent[latest,absolute]:483
12855:20241101:185743.965 adding performance counter vvol/pe.cancel.failed[latest]:484
12855:20241101:185743.965 adding performance counter vvol/pe.cancel.failed[latest,absolute]:484
12855:20241101:185743.965 adding performance counter vvol/pe.deviceresets.sent[latest]:485
12855:20241101:185743.965 adding performance counter vvol/pe.deviceresets.sent[latest,absolute]:485
12855:20241101:185743.965 adding performance counter vvol/pe.deviceresets.failed[latest]:486
12855:20241101:185743.965 adding performance counter vvol/pe.deviceresets.failed[latest,absolute]:486
12855:20241101:185743.965 adding performance counter vvol/pe.resets.sent[latest]:487
12855:20241101:185743.965 adding performance counter vvol/pe.resets.sent[latest,absolute]:487
12855:20241101:185743.965 adding performance counter vvol/pe.resets.failed[latest]:488
12855:20241101:185743.965 adding performance counter vvol/pe.resets.failed[latest,absolute]:488
12855:20241101:185743.965 adding performance counter vvol/pe.unmaps.sent[latest]:489
12855:20241101:185743.965 adding performance counter vvol/pe.unmaps.sent[latest,absolute]:489
12855:20241101:185743.965 adding performance counter vvol/pe.unmaps.failed[latest]:490
12855:20241101:185743.965 adding performance counter vvol/pe.unmaps.failed[latest,absolute]:490
12855:20241101:185743.965 adding performance counter vvol/container.reads.done[latest]:491
12855:20241101:185743.965 adding performance counter vvol/container.reads.done[latest,absolute]:491
12855:20241101:185743.965 adding performance counter vvol/container.writes.done[latest]:492
12855:20241101:185743.965 adding performance counter vvol/container.writes.done[latest,absolute]:492
12855:20241101:185743.965 adding performance counter vvol/container.total.done[latest]:493
12855:20241101:185743.965 adding performance counter vvol/container.total.done[latest,absolute]:493
12855:20241101:185743.966 adding performance counter vvol/container.reads.sent[latest]:494
12855:20241101:185743.966 adding performance counter vvol/container.reads.sent[latest,absolute]:494
12855:20241101:185743.966 adding performance counter vvol/container.writes.sent[latest]:495
12855:20241101:185743.966 adding performance counter vvol/container.writes.sent[latest,absolute]:495
12855:20241101:185743.966 adding performance counter vvol/container.total.sent[latest]:496
12855:20241101:185743.966 adding performance counter vvol/container.total.sent[latest,absolute]:496
12855:20241101:185743.966 adding performance counter vvol/container.readsissued.failed[latest]:497
12855:20241101:185743.966 adding performance counter vvol/container.readsissued.failed[latest,absolute]:497
12855:20241101:185743.966 adding performance counter vvol/container.writesissued.failed[latest]:498
12855:20241101:185743.966 adding performance counter vvol/container.writesissued.failed[latest,absolute]:498
12855:20241101:185743.966 adding performance counter vvol/container.totalissued.failed[latest]:499
12855:20241101:185743.966 adding performance counter vvol/container.totalissued.failed[latest,absolute]:499
12855:20241101:185743.966 adding performance counter vvol/container.reads.failed[latest]:500
12855:20241101:185743.966 adding performance counter vvol/container.reads.failed[latest,absolute]:500
12855:20241101:185743.966 adding performance counter vvol/container.writes.failed[latest]:501
12855:20241101:185743.966 adding performance counter vvol/container.writes.failed[latest,absolute]:501
12855:20241101:185743.966 adding performance counter vvol/container.total.failed[latest]:502
12855:20241101:185743.966 adding performance counter vvol/container.total.failed[latest,absolute]:502
12855:20241101:185743.966 adding performance counter vvol/container.read.latency[latest]:503
12855:20241101:185743.966 adding performance counter vvol/container.read.latency[latest,absolute]:503
12855:20241101:185743.966 adding performance counter vvol/container.write.latency[latest]:504
12855:20241101:185743.966 adding performance counter vvol/container.write.latency[latest,absolute]:504
12855:20241101:185743.966 adding performance counter vvol/container.issue.latency[latest]:505
12855:20241101:185743.966 adding performance counter vvol/container.issue.latency[latest,absolute]:505
12855:20241101:185743.966 adding performance counter vvol/container.total.latency[latest]:506
12855:20241101:185743.966 adding performance counter vvol/container.total.latency[latest,absolute]:506
12855:20241101:185743.966 adding performance counter vvol/device.reads.done[latest]:507
12855:20241101:185743.966 adding performance counter vvol/device.reads.done[latest,absolute]:507
12855:20241101:185743.966 adding performance counter vvol/device.writes.done[latest]:508
12855:20241101:185743.966 adding performance counter vvol/device.writes.done[latest,absolute]:508
12855:20241101:185743.966 adding performance counter vvol/device.total.done[latest]:509
12855:20241101:185743.966 adding performance counter vvol/device.total.done[latest,absolute]:509
12855:20241101:185743.966 adding performance counter vvol/device.reads.sent[latest]:510
12855:20241101:185743.966 adding performance counter vvol/device.reads.sent[latest,absolute]:510
12855:20241101:185743.967 adding performance counter vvol/device.writes.sent[latest]:511
12855:20241101:185743.967 adding performance counter vvol/device.writes.sent[latest,absolute]:511
12855:20241101:185743.967 adding performance counter vvol/device.total.sent[latest]:512
12855:20241101:185743.967 adding performance counter vvol/device.total.sent[latest,absolute]:512
12855:20241101:185743.967 adding performance counter vvol/device.readsissued.failed[latest]:513
12855:20241101:185743.967 adding performance counter vvol/device.readsissued.failed[latest,absolute]:513
12855:20241101:185743.967 adding performance counter vvol/device.writesissued.failed[latest]:514
12855:20241101:185743.967 adding performance counter vvol/device.writesissued.failed[latest,absolute]:514
12855:20241101:185743.967 adding performance counter vvol/device.totalissued.failed[latest]:515
12855:20241101:185743.967 adding performance counter vvol/device.totalissued.failed[latest,absolute]:515
12855:20241101:185743.967 adding performance counter vvol/device.reads.failed[latest]:516
12855:20241101:185743.967 adding performance counter vvol/device.reads.failed[latest,absolute]:516
12855:20241101:185743.967 adding performance counter vvol/device.writes.failed[latest]:517
12855:20241101:185743.967 adding performance counter vvol/device.writes.failed[latest,absolute]:517
12855:20241101:185743.967 adding performance counter vvol/device.total.failed[latest]:518
12855:20241101:185743.967 adding performance counter vvol/device.total.failed[latest,absolute]:518
12855:20241101:185743.967 adding performance counter vvol/device.read.latency[latest]:519
12855:20241101:185743.967 adding performance counter vvol/device.read.latency[latest,absolute]:519
12855:20241101:185743.967 adding performance counter vvol/device.write.latency[latest]:520
12855:20241101:185743.967 adding performance counter vvol/device.write.latency[latest,absolute]:520
12855:20241101:185743.967 adding performance counter vvol/device.issue.latency[latest]:521
12855:20241101:185743.967 adding performance counter vvol/device.issue.latency[latest,absolute]:521
12855:20241101:185743.967 adding performance counter vvol/device.total.latency[latest]:522
12855:20241101:185743.967 adding performance counter vvol/device.total.latency[latest,absolute]:522
12855:20241101:185743.967 adding performance counter vvol/device.cancel.sent[latest]:523
12855:20241101:185743.967 adding performance counter vvol/device.cancel.sent[latest,absolute]:523
12855:20241101:185743.967 adding performance counter vvol/device.cancel.failed[latest]:524
12855:20241101:185743.967 adding performance counter vvol/device.cancel.failed[latest,absolute]:524
12855:20241101:185743.967 adding performance counter vvol/device.deviceresets.sent[latest]:525
12855:20241101:185743.967 adding performance counter vvol/device.deviceresets.sent[latest,absolute]:525
12855:20241101:185743.967 adding performance counter vvol/device.deviceresets.failed[latest]:526
12855:20241101:185743.967 adding performance counter vvol/device.deviceresets.failed[latest,absolute]:526
12855:20241101:185743.967 adding performance counter vvol/device.resets.sent[latest]:527
12855:20241101:185743.967 adding performance counter vvol/device.resets.sent[latest,absolute]:527
12855:20241101:185743.968 adding performance counter vvol/device.resets.failed[latest]:528
12855:20241101:185743.968 adding performance counter vvol/device.resets.failed[latest,absolute]:528
12855:20241101:185743.968 adding performance counter vvol/device.unmaps.sent[latest]:529
12855:20241101:185743.968 adding performance counter vvol/device.unmaps.sent[latest,absolute]:529
12855:20241101:185743.968 adding performance counter vvol/device.unmaps.failed[latest]:530
12855:20241101:185743.968 adding performance counter vvol/device.unmaps.failed[latest,absolute]:530
12855:20241101:185743.968 adding performance counter cpu/swapwait[summation]:531
12855:20241101:185743.968 adding performance counter cpu/swapwait[summation,delta]:531
12855:20241101:185743.968 adding performance counter cpu/utilization[none]:532
12855:20241101:185743.968 adding performance counter cpu/utilization[none,rate]:532
12855:20241101:185743.968 adding performance counter cpu/utilization[maximum]:533
12855:20241101:185743.968 adding performance counter cpu/utilization[maximum,rate]:533
12855:20241101:185743.968 adding performance counter cpu/utilization[minimum]:534
12855:20241101:185743.968 adding performance counter cpu/utilization[minimum,rate]:534
12855:20241101:185743.968 adding performance counter cpu/coreUtilization[none]:535
12855:20241101:185743.968 adding performance counter cpu/coreUtilization[none,rate]:535
12855:20241101:185743.968 adding performance counter cpu/coreUtilization[average]:536
12855:20241101:185743.968 adding performance counter cpu/coreUtilization[average,rate]:536
12855:20241101:185743.968 adding performance counter cpu/coreUtilization[maximum]:537
12855:20241101:185743.968 adding performance counter cpu/coreUtilization[maximum,rate]:537
12855:20241101:185743.968 adding performance counter cpu/coreUtilization[minimum]:538
12855:20241101:185743.968 adding performance counter cpu/coreUtilization[minimum,rate]:538
12855:20241101:185743.968 adding performance counter cpu/totalCapacity[average]:539
12855:20241101:185743.968 adding performance counter cpu/totalCapacity[average,absolute]:539
12855:20241101:185743.968 adding performance counter cpu/latency[average]:540
12855:20241101:185743.968 adding performance counter cpu/latency[average,rate]:540
12855:20241101:185743.968 adding performance counter cpu/entitlement[latest]:541
12855:20241101:185743.968 adding performance counter cpu/entitlement[latest,absolute]:541
12855:20241101:185743.968 adding performance counter cpu/demand[average]:542
12855:20241101:185743.968 adding performance counter cpu/demand[average,absolute]:542
12855:20241101:185743.968 adding performance counter cpu/costop[summation]:543
12855:20241101:185743.968 adding performance counter cpu/costop[summation,delta]:543
12855:20241101:185743.968 adding performance counter cpu/maxlimited[summation]:544
12855:20241101:185743.968 adding performance counter cpu/maxlimited[summation,delta]:544
12855:20241101:185743.968 adding performance counter cpu/overlap[summation]:545
12855:20241101:185743.968 adding performance counter cpu/overlap[summation,delta]:545
12855:20241101:185743.969 adding performance counter cpu/run[summation]:546
12855:20241101:185743.969 adding performance counter cpu/run[summation,delta]:546
12855:20241101:185743.969 adding performance counter cpu/demandEntitlementRatio[latest]:547
12855:20241101:185743.969 adding performance counter cpu/demandEntitlementRatio[latest,absolute]:547
12855:20241101:185743.969 adding performance counter cpu/readiness[average]:548
12855:20241101:185743.969 adding performance counter cpu/readiness[average,rate]:548
12855:20241101:185743.969 adding performance counter cpu/usage.vcpus[average]:549
12855:20241101:185743.969 adding performance counter cpu/usage.vcpus[average,rate]:549
12855:20241101:185743.969 adding performance counter mem/swapin[none]:550
12855:20241101:185743.969 adding performance counter mem/swapin[none,absolute]:550
12855:20241101:185743.969 adding performance counter mem/swapin[average]:551
12855:20241101:185743.969 adding performance counter mem/swapin[average,absolute]:551
12855:20241101:185743.969 adding performance counter mem/swapin[maximum]:552
12855:20241101:185743.969 adding performance counter mem/swapin[maximum,absolute]:552
12855:20241101:185743.969 adding performance counter mem/swapin[minimum]:553
12855:20241101:185743.969 adding performance counter mem/swapin[minimum,absolute]:553
12855:20241101:185743.969 adding performance counter mem/swapout[none]:554
12855:20241101:185743.969 adding performance counter mem/swapout[none,absolute]:554
12855:20241101:185743.969 adding performance counter mem/swapout[average]:555
12855:20241101:185743.969 adding performance counter mem/swapout[average,absolute]:555
12855:20241101:185743.969 adding performance counter mem/swapout[maximum]:556
12855:20241101:185743.969 adding performance counter mem/swapout[maximum,absolute]:556
12855:20241101:185743.969 adding performance counter mem/swapout[minimum]:557
12855:20241101:185743.969 adding performance counter mem/swapout[minimum,absolute]:557
12855:20241101:185743.969 adding performance counter mem/sysUsage[none]:558
12855:20241101:185743.969 adding performance counter mem/sysUsage[none,absolute]:558
12855:20241101:185743.969 adding performance counter mem/sysUsage[average]:559
12855:20241101:185743.969 adding performance counter mem/sysUsage[average,absolute]:559
12855:20241101:185743.969 adding performance counter mem/sysUsage[maximum]:560
12855:20241101:185743.969 adding performance counter mem/sysUsage[maximum,absolute]:560
12855:20241101:185743.969 adding performance counter mem/sysUsage[minimum]:561
12855:20241101:185743.969 adding performance counter mem/sysUsage[minimum,absolute]:561
12855:20241101:185743.969 adding performance counter mem/activewrite[average]:562
12855:20241101:185743.969 adding performance counter mem/activewrite[average,absolute]:562
12855:20241101:185743.970 adding performance counter mem/overheadMax[average]:563
12855:20241101:185743.970 adding performance counter mem/overheadMax[average,absolute]:563
12855:20241101:185743.970 adding performance counter mem/totalCapacity[average]:564
12855:20241101:185743.970 adding performance counter mem/totalCapacity[average,absolute]:564
12855:20241101:185743.970 adding performance counter mem/zipped[latest]:565
12855:20241101:185743.970 adding performance counter mem/zipped[latest,absolute]:565
12855:20241101:185743.970 adding performance counter mem/zipSaved[latest]:566
12855:20241101:185743.970 adding performance counter mem/zipSaved[latest,absolute]:566
12855:20241101:185743.970 adding performance counter mem/latency[average]:567
12855:20241101:185743.970 adding performance counter mem/latency[average,absolute]:567
12855:20241101:185743.970 adding performance counter mem/entitlement[average]:568
12855:20241101:185743.970 adding performance counter mem/entitlement[average,absolute]:568
12855:20241101:185743.970 adding performance counter mem/lowfreethreshold[average]:569
12855:20241101:185743.970 adding performance counter mem/lowfreethreshold[average,absolute]:569
12855:20241101:185743.970 adding performance counter mem/llSwapUsed[none]:570
12855:20241101:185743.970 adding performance counter mem/llSwapUsed[none,absolute]:570
12855:20241101:185743.970 adding performance counter mem/llSwapInRate[average]:571
12855:20241101:185743.970 adding performance counter mem/llSwapInRate[average,rate]:571
12855:20241101:185743.970 adding performance counter mem/llSwapOutRate[average]:572
12855:20241101:185743.970 adding performance counter mem/llSwapOutRate[average,rate]:572
12855:20241101:185743.970 adding performance counter mem/overheadTouched[average]:573
12855:20241101:185743.970 adding performance counter mem/overheadTouched[average,absolute]:573
12855:20241101:185743.970 adding performance counter mem/llSwapUsed[average]:574
12855:20241101:185743.970 adding performance counter mem/llSwapUsed[average,absolute]:574
12855:20241101:185743.970 adding performance counter mem/llSwapUsed[maximum]:575
12855:20241101:185743.970 adding performance counter mem/llSwapUsed[maximum,absolute]:575
12855:20241101:185743.970 adding performance counter mem/llSwapUsed[minimum]:576
12855:20241101:185743.970 adding performance counter mem/llSwapUsed[minimum,absolute]:576
12855:20241101:185743.970 adding performance counter mem/llSwapIn[none]:577
12855:20241101:185743.970 adding performance counter mem/llSwapIn[none,absolute]:577
12855:20241101:185743.970 adding performance counter mem/llSwapIn[average]:578
12855:20241101:185743.970 adding performance counter mem/llSwapIn[average,absolute]:578
12855:20241101:185743.970 adding performance counter mem/llSwapIn[maximum]:579
12855:20241101:185743.970 adding performance counter mem/llSwapIn[maximum,absolute]:579
12855:20241101:185743.971 adding performance counter mem/llSwapIn[minimum]:580
12855:20241101:185743.971 adding performance counter mem/llSwapIn[minimum,absolute]:580
12855:20241101:185743.971 adding performance counter mem/llSwapOut[none]:581
12855:20241101:185743.971 adding performance counter mem/llSwapOut[none,absolute]:581
12855:20241101:185743.971 adding performance counter mem/llSwapOut[average]:582
12855:20241101:185743.971 adding performance counter mem/llSwapOut[average,absolute]:582
12855:20241101:185743.971 adding performance counter mem/llSwapOut[maximum]:583
12855:20241101:185743.971 adding performance counter mem/llSwapOut[maximum,absolute]:583
12855:20241101:185743.971 adding performance counter mem/llSwapOut[minimum]:584
12855:20241101:185743.971 adding performance counter mem/llSwapOut[minimum,absolute]:584
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.size[latest]:585
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.size[latest,absolute]:585
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.sizeMax[latest]:586
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.sizeMax[latest,absolute]:586
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.workingSet[latest]:587
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.workingSet[latest,absolute]:587
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.workingSetMax[latest]:588
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.workingSetMax[latest,absolute]:588
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.overhead[latest]:589
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.overhead[latest,absolute]:589
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.capMissRatio[latest]:590
12855:20241101:185743.971 adding performance counter mem/vmfs.pbc.capMissRatio[latest,absolute]:590
12855:20241101:185743.971 adding performance counter disk/commands[summation]:591
12855:20241101:185743.971 adding performance counter disk/commands[summation,delta]:591
12855:20241101:185743.971 adding performance counter disk/deviceReadLatency[average]:592
12855:20241101:185743.971 adding performance counter disk/deviceReadLatency[average,absolute]:592
12855:20241101:185743.971 adding performance counter disk/kernelReadLatency[average]:593
12855:20241101:185743.971 adding performance counter disk/kernelReadLatency[average,absolute]:593
12855:20241101:185743.971 adding performance counter disk/totalReadLatency[average]:594
12855:20241101:185743.971 adding performance counter disk/totalReadLatency[average,absolute]:594
12855:20241101:185743.971 adding performance counter disk/queueReadLatency[average]:595
12855:20241101:185743.971 adding performance counter disk/queueReadLatency[average,absolute]:595
12855:20241101:185743.971 adding performance counter disk/deviceWriteLatency[average]:596
12855:20241101:185743.971 adding performance counter disk/deviceWriteLatency[average,absolute]:596
12855:20241101:185743.971 adding performance counter disk/kernelWriteLatency[average]:597
12855:20241101:185743.971 adding performance counter disk/kernelWriteLatency[average,absolute]:597
12855:20241101:185743.972 adding performance counter disk/totalWriteLatency[average]:598
12855:20241101:185743.972 adding performance counter disk/totalWriteLatency[average,absolute]:598
12855:20241101:185743.972 adding performance counter disk/queueWriteLatency[average]:599
12855:20241101:185743.972 adding performance counter disk/queueWriteLatency[average,absolute]:599
12855:20241101:185743.972 adding performance counter disk/deviceLatency[average]:600
12855:20241101:185743.972 adding performance counter disk/deviceLatency[average,absolute]:600
12855:20241101:185743.972 adding performance counter disk/kernelLatency[average]:601
12855:20241101:185743.972 adding performance counter disk/kernelLatency[average,absolute]:601
12855:20241101:185743.972 adding performance counter disk/queueLatency[average]:602
12855:20241101:185743.972 adding performance counter disk/queueLatency[average,absolute]:602
12855:20241101:185743.972 adding performance counter disk/maxQueueDepth[average]:603
12855:20241101:185743.972 adding performance counter disk/maxQueueDepth[average,absolute]:603
12855:20241101:185743.972 adding performance counter disk/commandsAveraged[average]:604
12855:20241101:185743.972 adding performance counter disk/commandsAveraged[average,rate]:604
12855:20241101:185743.972 adding performance counter net/droppedRx[summation]:605
12855:20241101:185743.972 adding performance counter net/droppedRx[summation,delta]:605
12855:20241101:185743.972 adding performance counter net/droppedTx[summation]:606
12855:20241101:185743.972 adding performance counter net/droppedTx[summation,delta]:606
12855:20241101:185743.972 adding performance counter net/bytesRx[average]:607
12855:20241101:185743.972 adding performance counter net/bytesRx[average,rate]:607
12855:20241101:185743.972 adding performance counter net/bytesTx[average]:608
12855:20241101:185743.972 adding performance counter net/bytesTx[average,rate]:608
12855:20241101:185743.972 adding performance counter net/broadcastRx[summation]:609
12855:20241101:185743.972 adding performance counter net/broadcastRx[summation,delta]:609
12855:20241101:185743.972 adding performance counter net/broadcastTx[summation]:610
12855:20241101:185743.972 adding performance counter net/broadcastTx[summation,delta]:610
12855:20241101:185743.972 adding performance counter net/multicastRx[summation]:611
12855:20241101:185743.972 adding performance counter net/multicastRx[summation,delta]:611
12855:20241101:185743.972 adding performance counter net/multicastTx[summation]:612
12855:20241101:185743.972 adding performance counter net/multicastTx[summation,delta]:612
12855:20241101:185743.972 adding performance counter net/errorsRx[summation]:613
12855:20241101:185743.972 adding performance counter net/errorsRx[summation,delta]:613
12855:20241101:185743.972 adding performance counter net/errorsTx[summation]:614
12855:20241101:185743.972 adding performance counter net/errorsTx[summation,delta]:614
12855:20241101:185743.973 adding performance counter net/unknownProtos[summation]:615
12855:20241101:185743.973 adding performance counter net/unknownProtos[summation,delta]:615
12855:20241101:185743.973 adding performance counter net/pnicBytesRx[average]:616
12855:20241101:185743.973 adding performance counter net/pnicBytesRx[average,rate]:616
12855:20241101:185743.973 adding performance counter net/pnicBytesTx[average]:617
12855:20241101:185743.973 adding performance counter net/pnicBytesTx[average,rate]:617
12855:20241101:185743.973 adding performance counter sys/heartbeat[latest]:618
12855:20241101:185743.973 adding performance counter sys/heartbeat[latest,absolute]:618
12855:20241101:185743.973 adding performance counter sys/diskUsage[latest]:619
12855:20241101:185743.973 adding performance counter sys/diskUsage[latest,absolute]:619
12855:20241101:185743.973 adding performance counter sys/resourceCpuUsage[none]:620
12855:20241101:185743.973 adding performance counter sys/resourceCpuUsage[none,rate]:620
12855:20241101:185743.973 adding performance counter sys/resourceCpuUsage[average]:621
12855:20241101:185743.973 adding performance counter sys/resourceCpuUsage[average,rate]:621
12855:20241101:185743.973 adding performance counter sys/resourceCpuUsage[maximum]:622
12855:20241101:185743.973 adding performance counter sys/resourceCpuUsage[maximum,rate]:622
12855:20241101:185743.973 adding performance counter sys/resourceCpuUsage[minimum]:623
12855:20241101:185743.973 adding performance counter sys/resourceCpuUsage[minimum,rate]:623
12855:20241101:185743.973 adding performance counter sys/resourceMemTouched[latest]:624
12855:20241101:185743.973 adding performance counter sys/resourceMemTouched[latest,absolute]:624
12855:20241101:185743.973 adding performance counter sys/resourceMemMapped[latest]:625
12855:20241101:185743.973 adding performance counter sys/resourceMemMapped[latest,absolute]:625
12855:20241101:185743.973 adding performance counter sys/resourceMemShared[latest]:626
12855:20241101:185743.973 adding performance counter sys/resourceMemShared[latest,absolute]:626
12855:20241101:185743.973 adding performance counter sys/resourceMemSwapped[latest]:627
12855:20241101:185743.973 adding performance counter sys/resourceMemSwapped[latest,absolute]:627
12855:20241101:185743.973 adding performance counter sys/resourceMemOverhead[latest]:628
12855:20241101:185743.973 adding performance counter sys/resourceMemOverhead[latest,absolute]:628
12855:20241101:185743.973 adding performance counter sys/resourceMemCow[latest]:629
12855:20241101:185743.973 adding performance counter sys/resourceMemCow[latest,absolute]:629
12855:20241101:185743.973 adding performance counter sys/resourceMemZero[latest]:630
12855:20241101:185743.973 adding performance counter sys/resourceMemZero[latest,absolute]:630
12855:20241101:185743.973 adding performance counter sys/resourceCpuRun1[latest]:631
12855:20241101:185743.973 adding performance counter sys/resourceCpuRun1[latest,absolute]:631
12855:20241101:185743.974 adding performance counter sys/resourceCpuAct1[latest]:632
12855:20241101:185743.974 adding performance counter sys/resourceCpuAct1[latest,absolute]:632
12855:20241101:185743.974 adding performance counter sys/resourceCpuMaxLimited1[latest]:633
12855:20241101:185743.974 adding performance counter sys/resourceCpuMaxLimited1[latest,absolute]:633
12855:20241101:185743.974 adding performance counter sys/resourceCpuRun5[latest]:634
12855:20241101:185743.974 adding performance counter sys/resourceCpuRun5[latest,absolute]:634
12855:20241101:185743.974 adding performance counter sys/resourceCpuAct5[latest]:635
12855:20241101:185743.974 adding performance counter sys/resourceCpuAct5[latest,absolute]:635
12855:20241101:185743.974 adding performance counter sys/resourceCpuMaxLimited5[latest]:636
12855:20241101:185743.974 adding performance counter sys/resourceCpuMaxLimited5[latest,absolute]:636
12855:20241101:185743.974 adding performance counter sys/resourceCpuAllocMin[latest]:637
12855:20241101:185743.974 adding performance counter sys/resourceCpuAllocMin[latest,absolute]:637
12855:20241101:185743.974 adding performance counter sys/resourceCpuAllocMax[latest]:638
12855:20241101:185743.974 adding performance counter sys/resourceCpuAllocMax[latest,absolute]:638
12855:20241101:185743.974 adding performance counter sys/resourceCpuAllocShares[latest]:639
12855:20241101:185743.974 adding performance counter sys/resourceCpuAllocShares[latest,absolute]:639
12855:20241101:185743.974 adding performance counter sys/resourceMemAllocMin[latest]:640
12855:20241101:185743.974 adding performance counter sys/resourceMemAllocMin[latest,absolute]:640
12855:20241101:185743.974 adding performance counter sys/resourceMemAllocMax[latest]:641
12855:20241101:185743.974 adding performance counter sys/resourceMemAllocMax[latest,absolute]:641
12855:20241101:185743.974 adding performance counter sys/resourceMemAllocShares[latest]:642
12855:20241101:185743.974 adding performance counter sys/resourceMemAllocShares[latest,absolute]:642
12855:20241101:185743.974 adding performance counter sys/osUptime[latest]:643
12855:20241101:185743.974 adding performance counter sys/osUptime[latest,absolute]:643
12855:20241101:185743.974 adding performance counter sys/resourceMemConsumed[latest]:644
12855:20241101:185743.974 adding performance counter sys/resourceMemConsumed[latest,absolute]:644
12855:20241101:185743.974 adding performance counter sys/resourceFdUsage[latest]:645
12855:20241101:185743.974 adding performance counter sys/resourceFdUsage[latest,absolute]:645
12855:20241101:185743.974 adding performance counter rescpu/actpk1[latest]:646
12855:20241101:185743.974 adding performance counter rescpu/actpk1[latest,absolute]:646
12855:20241101:185743.974 adding performance counter rescpu/runav1[latest]:647
12855:20241101:185743.974 adding performance counter rescpu/runav1[latest,absolute]:647
12855:20241101:185743.974 adding performance counter rescpu/actav5[latest]:648
12855:20241101:185743.974 adding performance counter rescpu/actav5[latest,absolute]:648
12855:20241101:185743.974 adding performance counter rescpu/actpk5[latest]:649
12855:20241101:185743.974 adding performance counter rescpu/actpk5[latest,absolute]:649
12855:20241101:185743.975 adding performance counter rescpu/runav5[latest]:650
12855:20241101:185743.975 adding performance counter rescpu/runav5[latest,absolute]:650
12855:20241101:185743.975 adding performance counter rescpu/actav15[latest]:651
12855:20241101:185743.975 adding performance counter rescpu/actav15[latest,absolute]:651
12855:20241101:185743.975 adding performance counter rescpu/actpk15[latest]:652
12855:20241101:185743.975 adding performance counter rescpu/actpk15[latest,absolute]:652
12855:20241101:185743.975 adding performance counter rescpu/runav15[latest]:653
12855:20241101:185743.975 adding performance counter rescpu/runav15[latest,absolute]:653
12855:20241101:185743.975 adding performance counter rescpu/runpk1[latest]:654
12855:20241101:185743.975 adding performance counter rescpu/runpk1[latest,absolute]:654
12855:20241101:185743.975 adding performance counter rescpu/maxLimited1[latest]:655
12855:20241101:185743.975 adding performance counter rescpu/maxLimited1[latest,absolute]:655
12855:20241101:185743.975 adding performance counter rescpu/runpk5[latest]:656
12855:20241101:185743.975 adding performance counter rescpu/runpk5[latest,absolute]:656
12855:20241101:185743.975 adding performance counter rescpu/maxLimited5[latest]:657
12855:20241101:185743.975 adding performance counter rescpu/maxLimited5[latest,absolute]:657
12855:20241101:185743.975 adding performance counter rescpu/runpk15[latest]:658
12855:20241101:185743.975 adding performance counter rescpu/runpk15[latest,absolute]:658
12855:20241101:185743.975 adding performance counter rescpu/maxLimited15[latest]:659
12855:20241101:185743.975 adding performance counter rescpu/maxLimited15[latest,absolute]:659
12855:20241101:185743.975 adding performance counter rescpu/sampleCount[latest]:660
12855:20241101:185743.975 adding performance counter rescpu/sampleCount[latest,absolute]:660
12855:20241101:185743.975 adding performance counter rescpu/samplePeriod[latest]:661
12855:20241101:185743.975 adding performance counter rescpu/samplePeriod[latest,absolute]:661
12855:20241101:185743.975 adding performance counter managementAgent/memUsed[average]:662
12855:20241101:185743.975 adding performance counter managementAgent/memUsed[average,absolute]:662
12855:20241101:185743.975 adding performance counter managementAgent/swapUsed[average]:663
12855:20241101:185743.975 adding performance counter managementAgent/swapUsed[average,absolute]:663
12855:20241101:185743.975 adding performance counter managementAgent/cpuUsage[average]:664
12855:20241101:185743.975 adding performance counter managementAgent/cpuUsage[average,rate]:664
12855:20241101:185743.975 adding performance counter storagePath/commandsAveraged[average]:665
12855:20241101:185743.975 adding performance counter storagePath/commandsAveraged[average,rate]:665
12855:20241101:185743.975 adding performance counter storagePath/numberReadAveraged[average]:666
12855:20241101:185743.975 adding performance counter storagePath/numberReadAveraged[average,rate]:666
12855:20241101:185743.976 adding performance counter storagePath/numberWriteAveraged[average]:667
12855:20241101:185743.976 adding performance counter storagePath/numberWriteAveraged[average,rate]:667
12855:20241101:185743.976 adding performance counter storagePath/read[average]:668
12855:20241101:185743.976 adding performance counter storagePath/read[average,rate]:668
12855:20241101:185743.976 adding performance counter storagePath/write[average]:669
12855:20241101:185743.976 adding performance counter storagePath/write[average,rate]:669
12855:20241101:185743.976 adding performance counter storagePath/totalReadLatency[average]:670
12855:20241101:185743.976 adding performance counter storagePath/totalReadLatency[average,absolute]:670
12855:20241101:185743.976 adding performance counter storagePath/totalWriteLatency[average]:671
12855:20241101:185743.976 adding performance counter storagePath/totalWriteLatency[average,absolute]:671
12855:20241101:185743.976 adding performance counter virtualDisk/readIOSize[latest]:672
12855:20241101:185743.976 adding performance counter virtualDisk/readIOSize[latest,absolute]:672
12855:20241101:185743.976 adding performance counter virtualDisk/writeIOSize[latest]:673
12855:20241101:185743.976 adding performance counter virtualDisk/writeIOSize[latest,absolute]:673
12855:20241101:185743.976 adding performance counter virtualDisk/smallSeeks[latest]:674
12855:20241101:185743.976 adding performance counter virtualDisk/smallSeeks[latest,absolute]:674
12855:20241101:185743.976 adding performance counter virtualDisk/mediumSeeks[latest]:675
12855:20241101:185743.976 adding performance counter virtualDisk/mediumSeeks[latest,absolute]:675
12855:20241101:185743.976 adding performance counter virtualDisk/largeSeeks[latest]:676
12855:20241101:185743.976 adding performance counter virtualDisk/largeSeeks[latest,absolute]:676
12855:20241101:185743.976 adding performance counter virtualDisk/readLatencyUS[latest]:677
12855:20241101:185743.976 adding performance counter virtualDisk/readLatencyUS[latest,absolute]:677
12855:20241101:185743.976 adding performance counter virtualDisk/writeLatencyUS[latest]:678
12855:20241101:185743.976 adding performance counter virtualDisk/writeLatencyUS[latest,absolute]:678
12855:20241101:185743.976 adding performance counter datastore/datastoreMaxQueueDepth[latest]:679
12855:20241101:185743.976 adding performance counter datastore/datastoreMaxQueueDepth[latest,absolute]:679
12855:20241101:185743.976 adding performance counter datastore/unmapSize[summation]:680
12855:20241101:185743.976 adding performance counter datastore/unmapSize[summation,delta]:680
12855:20241101:185743.976 adding performance counter datastore/unmapIOs[summation]:681
12855:20241101:185743.976 adding performance counter datastore/unmapIOs[summation,delta]:681
12855:20241101:185743.976 adding performance counter hbr/hbrNumVms[average]:682
12855:20241101:185743.976 adding performance counter hbr/hbrNumVms[average,absolute]:682
12855:20241101:185743.976 adding performance counter hbr/hbrNetRx[average]:683
12855:20241101:185743.976 adding performance counter hbr/hbrNetRx[average,rate]:683
12855:20241101:185743.976 adding performance counter hbr/hbrNetTx[average]:684
12855:20241101:185743.976 adding performance counter hbr/hbrNetTx[average,rate]:684
12855:20241101:185743.977 adding performance counter hbr/hbrNetLatency[average]:685
12855:20241101:185743.977 adding performance counter hbr/hbrNetLatency[average,absolute]:685
12855:20241101:185743.977 adding performance counter hbr/hbrDiskReadLatency[average]:686
12855:20241101:185743.977 adding performance counter hbr/hbrDiskReadLatency[average,absolute]:686
12855:20241101:185743.977 adding performance counter hbr/hbrDiskStallLatency[average]:687
12855:20241101:185743.977 adding performance counter hbr/hbrDiskStallLatency[average,absolute]:687
12855:20241101:185743.977 adding performance counter hbr/hbrDiskTransferSuccess[average]:688
12855:20241101:185743.977 adding performance counter hbr/hbrDiskTransferSuccess[average,absolute]:688
12855:20241101:185743.977 adding performance counter hbr/hbrDiskTransferIdle[average]:689
12855:20241101:185743.977 adding performance counter hbr/hbrDiskTransferIdle[average,absolute]:689
12855:20241101:185743.977 adding performance counter hbr/hbrDiskTransferBytes[average]:690
12855:20241101:185743.977 adding performance counter hbr/hbrDiskTransferBytes[average,absolute]:690
12855:20241101:185743.977 adding performance counter vflashModule/numActiveVMDKs[latest]:691
12855:20241101:185743.977 adding performance counter vflashModule/numActiveVMDKs[latest,absolute]:691
12855:20241101:185743.977 adding performance counter vsanDomObj/readIops[average]:692
12855:20241101:185743.977 adding performance counter vsanDomObj/readIops[average,rate]:692
12855:20241101:185743.977 adding performance counter vsanDomObj/readThroughput[average]:693
12855:20241101:185743.977 adding performance counter vsanDomObj/readThroughput[average,rate]:693
12855:20241101:185743.977 adding performance counter vsanDomObj/readAvgLatency[average]:694
12855:20241101:185743.977 adding performance counter vsanDomObj/readAvgLatency[average,absolute]:694
12855:20241101:185743.977 adding performance counter vsanDomObj/readMaxLatency[latest]:695
12855:20241101:185743.977 adding performance counter vsanDomObj/readMaxLatency[latest,absolute]:695
12855:20241101:185743.977 adding performance counter vsanDomObj/readCacheHitRate[latest]:696
12855:20241101:185743.977 adding performance counter vsanDomObj/readCacheHitRate[latest,absolute]:696
12855:20241101:185743.977 adding performance counter vsanDomObj/readCongestion[average]:697
12855:20241101:185743.977 adding performance counter vsanDomObj/readCongestion[average,rate]:697
12855:20241101:185743.977 adding performance counter vsanDomObj/writeIops[average]:698
12855:20241101:185743.977 adding performance counter vsanDomObj/writeIops[average,rate]:698
12855:20241101:185743.977 adding performance counter vsanDomObj/writeThroughput[average]:699
12855:20241101:185743.977 adding performance counter vsanDomObj/writeThroughput[average,rate]:699
12855:20241101:185743.977 adding performance counter vsanDomObj/writeAvgLatency[average]:700
12855:20241101:185743.977 adding performance counter vsanDomObj/writeAvgLatency[average,absolute]:700
12855:20241101:185743.977 adding performance counter vsanDomObj/writeMaxLatency[latest]:701
12855:20241101:185743.977 adding performance counter vsanDomObj/writeMaxLatency[latest,absolute]:701
12855:20241101:185743.978 adding performance counter vsanDomObj/writeCongestion[average]:702
12855:20241101:185743.978 adding performance counter vsanDomObj/writeCongestion[average,rate]:702
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteIops[average]:703
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteIops[average,rate]:703
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteThroughput[average]:704
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteThroughput[average,rate]:704
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average]:705
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average,absolute]:705
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest]:706
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest,absolute]:706
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteCongestion[average]:707
12855:20241101:185743.978 adding performance counter vsanDomObj/recoveryWriteCongestion[average,rate]:707
12855:20241101:185743.978 adding performance counter gpu/utilization[none]:708
12855:20241101:185743.978 adding performance counter gpu/utilization[none,absolute]:708
12855:20241101:185743.978 adding performance counter gpu/utilization[maximum]:709
12855:20241101:185743.978 adding performance counter gpu/utilization[maximum,absolute]:709
12855:20241101:185743.978 adding performance counter gpu/utilization[minimum]:710
12855:20241101:185743.978 adding performance counter gpu/utilization[minimum,absolute]:710
12855:20241101:185743.978 adding performance counter gpu/mem.used[none]:711
12855:20241101:185743.978 adding performance counter gpu/mem.used[none,absolute]:711
12855:20241101:185743.978 adding performance counter gpu/mem.used[maximum]:712
12855:20241101:185743.978 adding performance counter gpu/mem.used[maximum,absolute]:712
12855:20241101:185743.978 adding performance counter gpu/mem.used[minimum]:713
12855:20241101:185743.978 adding performance counter gpu/mem.used[minimum,absolute]:713
12855:20241101:185743.978 adding performance counter gpu/mem.usage[none]:714
12855:20241101:185743.978 adding performance counter gpu/mem.usage[none,absolute]:714
12855:20241101:185743.978 adding performance counter gpu/mem.usage[average]:715
12855:20241101:185743.978 adding performance counter gpu/mem.usage[average,absolute]:715
12855:20241101:185743.978 adding performance counter gpu/mem.usage[maximum]:716
12855:20241101:185743.978 adding performance counter gpu/mem.usage[maximum,absolute]:716
12855:20241101:185743.978 adding performance counter gpu/mem.usage[minimum]:717
12855:20241101:185743.978 adding performance counter gpu/mem.usage[minimum,absolute]:717
12855:20241101:185743.978 Unknown performance counter 718 type of unitInfo:gigaBytes
12855:20241101:185743.978 adding performance counter gpu/mem.used.gb[latest]:718
12855:20241101:185743.978 Unknown performance counter 718 type of unitInfo:gigaBytes
12855:20241101:185743.978 adding performance counter gpu/mem.used.gb[latest,absolute]:718
12855:20241101:185743.979 Unknown performance counter 719 type of unitInfo:gigaBytes
12855:20241101:185743.979 adding performance counter gpu/mem.reserved.gb[latest]:719
12855:20241101:185743.979 Unknown performance counter 719 type of unitInfo:gigaBytes
12855:20241101:185743.979 adding performance counter gpu/mem.reserved.gb[latest,absolute]:719
12855:20241101:185743.979 Unknown performance counter 720 type of unitInfo:gigaBytes
12855:20241101:185743.979 adding performance counter gpu/mem.total.gb[latest]:720
12855:20241101:185743.979 Unknown performance counter 720 type of unitInfo:gigaBytes
12855:20241101:185743.979 adding performance counter gpu/mem.total.gb[latest,absolute]:720
12855:20241101:185743.979 adding performance counter pmem/available.reservation[latest]:721
12855:20241101:185743.979 adding performance counter pmem/available.reservation[latest,absolute]:721
12855:20241101:185743.979 adding performance counter pmem/drsmanaged.reservation[latest]:722
12855:20241101:185743.979 adding performance counter pmem/drsmanaged.reservation[latest,absolute]:722
12855:20241101:185743.979 adding performance counter vmx/numVCPUs[latest]:723
12855:20241101:185743.979 adding performance counter vmx/numVCPUs[latest,absolute]:723
12855:20241101:185743.979 adding performance counter vmx/vcpusMhzMin[latest]:724
12855:20241101:185743.979 adding performance counter vmx/vcpusMhzMin[latest,absolute]:724
12855:20241101:185743.979 adding performance counter vmx/vcpusMhzMax[latest]:725
12855:20241101:185743.979 adding performance counter vmx/vcpusMhzMax[latest,absolute]:725
12855:20241101:185743.979 adding performance counter vmx/vcpusMhzMean[latest]:726
12855:20241101:185743.979 adding performance counter vmx/vcpusMhzMean[latest,absolute]:726
12855:20241101:185743.979 adding performance counter vmx/cpuSpeed[latest]:727
12855:20241101:185743.979 adding performance counter vmx/cpuSpeed[latest,absolute]:727
12855:20241101:185743.979 adding performance counter vmx/overheadMemSizeMin[latest]:728
12855:20241101:185743.979 adding performance counter vmx/overheadMemSizeMin[latest,absolute]:728
12855:20241101:185743.979 adding performance counter vmx/overheadMemSizeMax[latest]:729
12855:20241101:185743.979 adding performance counter vmx/overheadMemSizeMax[latest,absolute]:729
12855:20241101:185743.979 adding performance counter vmx/vigor.opsTotal[latest]:730
12855:20241101:185743.979 adding performance counter vmx/vigor.opsTotal[latest,absolute]:730
12855:20241101:185743.979 adding performance counter vmx/poll.itersPerS[latest]:731
12855:20241101:185743.979 adding performance counter vmx/poll.itersPerS[latest,absolute]:731
12855:20241101:185743.979 adding performance counter vmx/userRpc.opsPerS[latest]:732
12855:20241101:185743.979 adding performance counter vmx/userRpc.opsPerS[latest,absolute]:732
12855:20241101:185743.980 End of vmware_service_get_perf_counters():SUCCEED
12855:20241101:185743.981 In vmware_service_get_evt_severity()
12855:20241101:185744.029 vmware_service_get_evt_severity() SOAP response:
EventManagerdescriptionInformationinfoWarningwarningErrorerrorUseruserExtendedEventImport certificate successinfoImport certificate succeeded.Import certificate succeeded.Import certificate succeeded.Import certificate succeeded.ad.event.ImportCertEvent|Import certificate succeeded. <EventLongDescription id="ad.event.ImportCertEvent"> <description> Import certificate succeeded </description> </EventLongDescription> ExtendedEventImport certificate failureerrorImport certificate failed.Import certificate failed.Import certificate failed.Import certificate failed.ad.event.ImportCertFailedEvent|Import certificate failed. <EventLongDescription id="ad.event.ImportCertFailedEvent"> <description> Import certificate failed </description> </EventLongDescription> ExtendedEventJoin domain successinfoJoin domain succeeded.Join domain succeeded.Join domain succeeded.Join domain succeeded.ad.event.JoinDomainEvent|Join domain succeeded. <EventLongDescription id="ad.event.JoinDomainEvent"> <description> Join domain succeeded </description> </EventLongDescription> ExtendedEventJoin domain failureerrorJoin domain failed.Join domain failed.Join domain failed.Join domain failed.ad.event.JoinDomainFailedEvent|Join domain failed. <EventLongDescription id="ad.event.JoinDomainFailedEvent"> <description> Join domain failed </description> </EventLongDescription> ExtendedEventLeave domain successinfoLeave domain succeeded.Leave domain succeeded.Leave domain succeeded.Leave domain succeeded.ad.event.LeaveDomainEvent|Leave domain succeeded. <EventLongDescription id="ad.event.LeaveDomainEvent"> <description> Leave domain succeeded </description> </EventLongDescription> ExtendedEventLeave domain failureerrorLeave domain failed.Leave domain failed.Leave domain failed.Leave domain failed.ad.event.LeaveDomainFailedEvent|Leave domain failed. 
<EventLongDescription id="ad.event.LeaveDomainFailedEvent"> <description> Leave domain failed </description> </EventLongDescription> ExtendedEventBackup job failederrorcom.vmware.applmgmt.backup.job.failed.event|Backup job failed <EventLongDescription id="com.vmware.applmgmt.backup.job.failed.event"> <description> Backup job failed </description> <cause> <description> Backup job failed </description> <action> Check backup server connectivity and available space </action> </cause> </EventLongDescription> ExtendedEventBackup job finished successfullyinfocom.vmware.applmgmt.backup.job.finished.event|Backup job finished successfully <EventLongDescription id="com.vmware.applmgmt.backup.job.finished.event"> <description> Backup job finished successfully </description> <cause> <description> Backup job finished successfully </description> </cause> </EventLongDescription> ExtendedEventGlobal Permission created for user with role and propagation.infocom.vmware.cis.CreateGlobalPermission|Global Permission created for user {User} with role {Role} and propagation {Propagation}.EventExPermission created for user on item with role.infocom.vmware.cis.CreatePermission|Permission created for user {User} on item {DocType} with role {Role}.EventExGlobal Permission removed for user.infocom.vmware.cis.RemoveGlobalPermission|Global Permission removed for user {User}.EventExPermission removed for user on iteminfocom.vmware.cis.RemovePermission|Permission removed for user {User} on item {DocType}EventExUser attached tag(s) to object(s)com.vmware.cis.tagging.attach|User {User} attached tag(s) {Tag} to object(s) {Object}EventExUser detached tag(s) from object(s)com.vmware.cis.tagging.detach|User {User} detached tag(s) {Tag} from object(s) {Object}ExtendedEventHttpNfc service disabled - missing configurationerrorHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationcom.vmware.configuration.httpnfc.missing|HttpNfc service is disabled because of missing configuration. Please check vpxa configuration file and correct the error and reconnect host. <EventLongDescription id="com.vmware.configuration.httpnfc.missing"> <description> The HttpNfc service is disabled because of missing configuration section in vpxa.cfg. Please check vpxa configuration file and correct the error and reconnect host. </description> <cause> <description>The vpxa configuration file requires a configuration section for HttpNfc</description> <action>Please check vpxa configuration file and correct the error and reconnect host.</action> </cause> </EventLongDescription> EventExAdded Licenseinfocom.vmware.license.AddLicenseEvent|License {licenseKey} added to VirtualCenterEventExAssigned Licenseinfocom.vmware.license.AssignLicenseEvent|License {licenseKey} assigned to asset {entityName} with id {entityId}EventExDownload License Informationwarningcom.vmware.license.DLFDownloadFailedEvent|Failed to download license information from the host {hostname} due to {errorReason.@enum.com.vmware.license.DLFDownloadFailedEvent.DLFDownloadFailedReason}EventExDefault License Keys Updatedinfocom.vmware.license.DefaultLicenseKeysUpdatedEvent|Default License Keys for asset {entityName} have been updatedEventExHost License Edition Not Allowedwarningcom.vmware.license.HostLicenseEditionNotAllowedEvent|The host is licensed with {edition}. 
The license edition of vCenter Server does not support {edition}.ExtendedEventHost license or evaluation period has expiredwarningcom.vmware.license.HostLicenseExpiredEvent|Expired host license or evaluation period. <EventLongDescription id="com.vmware.license.HostLicenseExpiredEvent"> <description> Host license or evaluation period has expired. </description> <cause> <description>Expired host license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventHost time-limited license has expiredwarningcom.vmware.license.HostSubscriptionLicenseExpiredEvent|Expired host time-limited license. <EventLongDescription id="com.vmware.license.HostSubscriptionLicenseExpiredEvent"> <description> Host time-limited license has expired. </description> <cause> <description>Expired host time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> EventExLicense assignment faultsinfocom.vmware.license.LicenseAssignFailedEvent|License assignment on the host fails. Reasons: {errorMessage.@enum.com.vmware.license.LicenseAssignError}. <EventLongDescription id="com.vmware.license.LicenseAssignFailedEvent"> <description> The host license assignment succeeds on vCenter Server but can not be successfully pushed down to the host. Any license assignment to a host proceeds in two stages. In the first stage vCenter Server does preliminary checks on the license key, the license state of the host and determines if the requested assignment is valid. If so, it stores this assignment locally in its database. In the second stage, vCenter Server pushes the newly assigned license to the host. During the second stage the host might reject the assignment under certain circumstances. These circumstances usually result from a mismatch of the information available to vCenter Server and the host concerned. Any such discrepancies are notified to the user via this event. This event lists the reason because of which it was logged and also shows up as a configuration issue on the vSphere Client. </description> <cause> <description>License expiry information mismatch between vCenter Server and host</description> <action>If the system time on the machine running vCenter Server and host are not in sync then put them in sync</action> </cause> <cause> <description>The license key is a per Virtual Machine key and the number of powered on Virtual Machines is larger than the maximum limit of the key</description> <action>Use a different key with a larger capacity</action> </cause> </EventLongDescription> EventExLicense Capacity Exceededwarningcom.vmware.license.LicenseCapacityExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the license capacity ({capacity} {costUnitText})EventExLicense ExpirywarningYour host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires.com.vmware.license.LicenseExpiryEvent|Your host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires. <EventLongDescription id="com.vmware.license.LicenseExpiryEvent"> <description> If a host is assigned a temporary license (a license key with an expiry), this event is logged in order to provide users an advanced warning on the imminent expiry of the license key. The event logging starts 15 days prior to the expiry of the license key. 
This event also shows up on the host summary page as a configuration issue on the vSphere Client. </description> <cause> <description>License key is about to expire or has expired</description> <action>Assign a different license key</action> </cause> </EventLongDescription> EventExLicense User Threshold Exceededwarningcom.vmware.license.LicenseUserThresholdExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the user-defined threshold ({threshold} {costUnitText}) <EventLongDescription id="com.vmware.license.LicenseUserThresholdExceededEvent"> <description> Users can define thresholds to monitor overuse of the product license. This event is logged when the license usage threshold defined by the user for a product edition is exceeded. </description> <cause> <description> License usage of a product edition has exceeded the user-defined threshold </description> <action> Review license assignments and usage </action> </cause> </EventLongDescription> EventExRemoved Licenseinfocom.vmware.license.RemoveLicenseEvent|License {licenseKey} removed from VirtualCenterEventExUnassigned Licenseinfocom.vmware.license.UnassignLicenseEvent|License unassigned from asset {entityName} with id {entityId}ExtendedEventvCenter Server license or evaluation period has expiredwarningcom.vmware.license.VcLicenseExpiredEvent|Expired vCenter Server license or evaluation period. <EventLongDescription id="com.vmware.license.VcLicenseExpiredEvent"> <description> vCenter Server license or evaluation period has expired. </description> <cause> <description>Expired vCenter Server license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventvCenter Server time-limited license has expiredwarningcom.vmware.license.VcSubscriptionLicenseExpiredEvent|Expired vCenter Server time-limited license. <EventLongDescription id="com.vmware.license.VcSubscriptionLicenseExpiredEvent"> <description> vCenter Server time-limited license has expired. </description> <cause> <description>Expired vCenter Server time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventSome in-use features are not supported by current licensewarningcom.vmware.license.vsan.FeatureBeyondCapability|In-use vSAN features {feature} are not supported by current license.ExtendedEventHost flash capacity exceeds the licensed limit for vSANwarningcom.vmware.license.vsan.HostSsdOverUsageEvent|The capacity of the flash disks on the host exceeds the limit of the vSAN license. <EventLongDescription id="com.vmware.license.vsan.HostSsdOverUsageEvent"> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <cause> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> ExtendedEventvSAN license or evaluation period has expiredwarningcom.vmware.license.vsan.LicenseExpiryEvent|Expired vSAN license or evaluation period. <EventLongDescription id="com.vmware.license.vsan.LicenseExpiryEvent"> <description> Expired vSAN license or evaluation period. </description> <cause> <description> Expired vSAN license or evaluation period. </description> <action> Review cluster license assignments. 
</action> </cause> </EventLongDescription> ExtendedEventvSAN time-limited license has expiredwarningcom.vmware.license.vsan.SubscriptionLicenseExpiredEvent|Expired vSAN time-limited license. <EventLongDescription id="com.vmware.license.vsan.SubscriptionLicenseExpiredEvent"> <description> Expired vSAN time-limited license. </description> <cause> <description> Expired vSAN time-limited license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> EventExStorage policy associatedinfoAssociated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}com.vmware.pbm.profile.associate|Associated storage policy: {ProfileId} with entity: {EntityId}EventExStorage policy createdinfoStorage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.create|Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}EventExStorage policy deletedinfoDeleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}com.vmware.pbm.profile.delete|Deleted storage policy: {ProfileId}EventExStorage policy dissociatedinfoDissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}com.vmware.pbm.profile.dissociate|Dissociated storage policy: {ProfileId} from entity: {EntityId}EventExStorage policy updatedinfoStorage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.update|Storage policy updated for {ProfileId}. Policy name: {ProfileName}EventExStorage policy name updatedinfoStorage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}com.vmware.pbm.profile.updateName|Storage policy name updated for {ProfileId}. 
New name: {NewProfileName}EventExCertificate Manager event in SSOinfocom.vmware.sso.CertificateManager|Certificate Manager event by {userName} at {timestamp} : {description}EventExConfiguration Management event in SSOinfocom.vmware.sso.ConfigurationManagement|Configuration Management event by {userName} at {timestamp} : {description}EventExDomain Management event in SSOinfocom.vmware.sso.DomainManagement|Domain Management event by {userName} at {timestamp} : {description}EventExIdentity Source Management event in SSOinfocom.vmware.sso.IdentitySourceManagement|Identity Source Management event by {userName} at {timestamp} : {description}EventExIdentity Source LDAP Certificate is about to expireinfocom.vmware.sso.LDAPCertExpiry|Renew Identity Source LDAP Certificate: {description}EventExLockout Policy event in SSOinfocom.vmware.sso.LockoutPolicy|Lockout Policy event by {userName} at {timestamp} : {description}EventExFailed login attempt event in SSOerrorcom.vmware.sso.LoginFailure|Failed login {userName} from {userIp} at {timestamp} in SSOEventExSuccessful login attempt event in SSOinfocom.vmware.sso.LoginSuccess|Successful login {userName} from {userIp} at {timestamp} in SSOEventExLogout attempt event in SSOinfocom.vmware.sso.Logout|Logout event by {userName} from {userIp} at {timestamp} in SSOEventExPassword Policy event in SSOinfocom.vmware.sso.PasswordPolicy|Password Policy event by {userName} at {timestamp} : {description}EventExPrincipal Management event in SSOinfocom.vmware.sso.PrincipalManagement|Principal Management event by {userName} at {timestamp} : {description}EventExRole Management event in SSOinfocom.vmware.sso.RoleManagement|Role Management event by {userName} at {timestamp} : {description}EventExSTS Signing Certificates are about to expireinfocom.vmware.sso.STSCertExpiry|Renew STS Signing Certificates: {description}EventExSMTP Configuration event in SSOinfocom.vmware.sso.SmtpConfiguration|SMTP Configuration event by {userName} at {timestamp} : {description}EventExSystem Management event in SSOinfocom.vmware.sso.SystemManagement|System Management event by {userName} at {timestamp} : {description}EventExvCenter Identity event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentity|vCenter Identity event by {userName} at {timestamp} : {description}EventExvCenter Identity Providers event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentityProviders|vCenter Identity Providers event by {userName} at {timestamp} : {description}EventExvCenter Trusts event in Trustmanagementinfocom.vmware.trustmanagement.VcTrusts|vCenter Trusts event by {userName} at {timestamp} : {description}EventExIdentity Provider SSL Trust Certificate is about to expireinfocom.vmware.trustmanagement.WS1SSLCertExpiry|Renew Identity Provider SSL Trust Certificate: {description}EventExIdentity Provider Users and Groups token is about to expireinfocom.vmware.trustmanagement.WS1SyncTokenExpiry|Renew Identity Provider Users and Groups token: {description}EventExReports that a stage from autonomous cluster creation has failedwarningcom.vmware.vc.A8sCluster.CreateStageFailedEvent|Autonomous cluster creation stage: {stage} failed: {reason}EventExReports that a stage from autonomous cluster creation has completed successfullyinfocom.vmware.vc.A8sCluster.CreateStageSuccessEvent|Autonomous cluster creation stage: {stage} succeededEventExAutonomous cluster health is degraded.warningcom.vmware.vc.A8sCluster.HealthDegradedEvent|Autonomous cluster health is degraded. 
Reason: {reason}ExtendedEventAutonomous cluster is healthy.infocom.vmware.vc.A8sCluster.HealthHealthyEvent|Autonomous cluster is healthy.EventExAutonomous cluster is unhealthy.warningcom.vmware.vc.A8sCluster.HealthUnhealthyEvent|Autonomous cluster is unhealthy. Reason: {reason}ExtendedEventAuthz service is not running. Authorization data might not be synchronized.errorcom.vmware.vc.AuthzDataNotSynced|Authz service is not running. Authorization data might not be synchronized.ExtendedEventAuthz service is running. Authorization data is being synchronized.infocom.vmware.vc.AuthzDataSynced|Authz service is running. Authorization data is being synchronized.ExtendedEventEvent sequence ID reached its max value and was reset.infocom.vmware.vc.EventIdOverflow|Event sequence ID reached its max value and was reset.ExtendedEventcom.vmware.vc.FailedToApplyPermissionsEvent|ExtendedEventvSphere HA agent can reach all cluster management addressesinfoThe vSphere HA agent on the host {host.name} in cluster {computeResource.name} can reach all the cluster management addressesThe vSphere HA agent on the host {host.name} can reach all the cluster management addressesThe vSphere HA agent on this host can reach all the cluster management addressescom.vmware.vc.HA.AllHostAddrsPingable|The vSphere HA agent on the host {host.name} in cluster {computeResource.name} in {datacenter.name} can reach all the cluster management addresses <EventLongDescription id="com.vmware.vc.HA.AllHostAddrsPingable"> <description> The host is able to ping all of the vSphere HA management addresses of every other cluster host. </description> </EventLongDescription> ExtendedEventvSphere HA agent can reach all isolation addressesinfoAll vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name}All vSphere HA isolation addresses are reachable by this hostAll vSphere HA isolation addresses are reachable by hostcom.vmware.vc.HA.AllIsoAddrsPingable|All vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.AllIsoAddrsPingable"> <description> The host is able to ping all of the vSphere HA isolation addresses. </description> </EventLongDescription> ExtendedEventvSphere HA answered a lock-lost question on a virtual machinewarningvSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name}vSphere HA answered the lock-lost question on virtual machine {vm.name}vSphere HA answered the lock-lost question on this virtual machinecom.vmware.vc.HA.AnsweredVmLockLostQuestionEvent|vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmLockLostQuestionEvent"> <description> The virtual machine running on this host lost the exclusive lock of its files on disk. This will occur if another instance of this virtual machine is running on a different host. This situation can happen if a host loses access to both its storage and management networks but is not configured to shutdown its virtual machines on isolation. The virtual machines on this host will continue to run without access to their disks, while vSphere HA will start a new instance of the virtual machines on another host in the cluster. 
When the isolated host regains access to the storage network, it will try to reacquire the disk locks. This will fail since the disk locks are held by another host. The host will then issue a question on the virtual machine indicating that disk locks have been lost. vSphere HA will automatically answer this question to allow the virtual machine instance without the disk locks to power off. <description> </EventLongDescription> ExtendedEventvSphere HA answered a question from the host about terminating a virtual machinewarningvSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name}vSphere HA answered a question from host {host.name} about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating this virtual machinecom.vmware.vc.HA.AnsweredVmTerminatePDLEvent|vSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmTerminatePDLEvent"> <description> The virtual machine running on this host had a virtual disk which experienced permenant device loss. The host will issue a question if it is configured to terminate the VM automatically under such condition. This event indicates that vSphere HA answered the question. After the VM is terminated, vSphere HA will make a best effort to restart it. <description> </EventLongDescription> ExtendedEventvSphere HA disabled the automatic VM Startup/Shutdown featureinfovSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on the host {host.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature. Automatic VM restarts will interfere with HA when reacting to a host failure.com.vmware.vc.HA.AutoStartDisabled|vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Automatic VM restarts will interfere with HA when reacting to a host failure. <EventLongDescription id="com.vmware.vc.HA.AutoStartDisabled"> <description> Virtual Machine Startup/Shutdown has been disabled by HA. A host which is contained in an vSphere HA cluster is not permitted to have automatic virtual machine startup and shutdown since it may conflict with HA's attempts to relocate the virtual machines if a host fails. 
</description> </EventLongDescription> ExtendedEventvSphere HA did not reset a VM which had files on inaccessible datastore(s)warningvSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on host {host.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on this host because the VM had files on inaccessible datastore(s)vSphere HA did not reset this VM because the VM had file(s) on inaccessible datastore(s)com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore|vSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the VM had files on inaccessible datastore(s) <EventLongDescription id=" com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore"> <description> This event is logged when vSphere HA did not reset a VM affected by an inaccessible datastore. It will attempt to reset the VM after storage failure is cleared. </description> <cause> <description> The VM is affected by an inaccessible datastore due to storage connectivity loss. Resetting such a VM might cause the VM to be powered off and not restarted by vSphere HA. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA cluster contains incompatible hosts.warningvSphere HA Cluster {computeResource.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.vSphere HA Cluster contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.com.vmware.vc.HA.ClusterContainsIncompatibleHosts|vSphere HA Cluster {computeResource.name} in {datacenter.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported. <EventLongDescription id="com.vmware.vc.HA.ClusterContainsIncompatibleHosts"> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions. </description> <cause> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions, which isn't fully supported. Failover of VMs from ESX/ESXi 3.5 hosts to newer hosts is not guaranteed. </description> <action> Place ESX/ESXi 3.5 hosts into a separate vSphere HA cluster from hosts with more recent ESX versions. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA completed a failover actioninfovSphere HA completed a virtual machine failover action in cluster {computeResource.name}vSphere HA completed a virtual machine failover actioncom.vmware.vc.HA.ClusterFailoverActionCompletedEvent|vSphere HA completed a virtual machine failover action in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA initiated a failover actionwarningvSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name}vSphere HA initiated a failover action on {pendingVms} virtual machinescom.vmware.vc.HA.ClusterFailoverActionInitiatedEvent|vSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA failover operation in progressWarningvSphere HA failover operation in progress in cluster {computeResource.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMsvSphere HA failover operation in progress: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMscom.vmware.vc.HA.ClusterFailoverInProgressEvent|vSphere HA failover operation in progress in cluster {computeResource.name} in datacenter {datacenter.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMs <EventLongDescription id="com.vmware.vc.HA.ClusterFailoverInProgressEvent"> <description> This event is logged when a vSphere HA failover operation is in progress for virtual machines in the cluster. It also reports the number of virtual machines that are being restarted. There are four different categories of such VMs. (1) VMs being placed: vSphere HA is in the process of trying to restart these VMs; (2) VMs awaiting retry: a previous restart attempt failed, and vSphere HA is waiting for a timeout to expire before trying again; (3) VMs requiring additional resources: insufficient resources are available to restart these VMs. vSphere HA will retry when more resources become available (such as a host comes back on line); (4) Inaccessible vSAN VMs: vSphere HA cannot restart these vSAN VMs because they are not accessible. It will retry when there is a change in accessibility. </description> <cause> <description> vSphere HA is attempting to restart failed virtual machines in the cluster. It might be that the virtual machine restart is pending and has not yet completed. </description> <action> vSphere HA will retry the failover on another host unless the maximum number of failover attempts has been reached. A subsequent retry may succeed in powering on the virtual machine so allow the vSphere HA failover operation to be declared a success or failure. </action> </cause> <cause> <description> This event might also be generated when a required resource in the cluster becomes temporarily unavailabile due to network reconfiguration, hardware upgrade, software update, host overload, etc. which can cause vSphere HA to lose its network or storage hearbeats to certain hosts or virtual machines and mark them inaccessible. </description> <action> In many cases, this may be a temporary condition. 
If the cluster soon stabilizes to its normal condition vSphere HA will detect the host and virtual machines to be live and discard any failover attempts. In such cases, this event may be treated as a soft alarm caused by such changes. </action> </cause> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It can also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to fail over the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or trying powering on the virtual machine and investigate any returned errors. </action> <action> If the error reports that a file is locked, the VM might be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it might have been powered on by a user on a host. If any hosts have been declared dead, investigate whether a networking or storage issue is the cause. </action> <action> If the error reports that the virtual machine is in an invalid state, there might be an operation in progress that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation, that are taking a long time to complete. </action> </cause> </EventLongDescription> ExtendedEventHost connected to a vSphere HA masterinfovSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}com.vmware.vc.HA.ConnectedToMaster|vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.ConnectedToMaster"> <description> This event is logged whenever a host in a vSphere HA cluster transitions to a slave host state and establishes a connection with a master host. </description> </EventLongDescription> ExtendedEventvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}errorvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}com.vmware.vc.HA.CreateConfigVvolFailedEvent|vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. 
Error: {fault} <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolFailedEvent"> <description> vSphere HA failed to create a config vvol on the datastore </description> <cause> <description>A possible VP, host, network, or lack of resources prevented vSphere HA from creating a config vvol</description> <action>Look for errors in the environment, then re-enable vSphere HA</action> </cause> </EventLongDescription> ExtendedEventvSphere HA successfully created a configuration vVol after the previous failureinfovSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurecom.vmware.vc.HA.CreateConfigVvolSucceededEvent|vSphere HA successfully created a configuration vVol after the previous failure <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolSucceededEvent"> <description> vSphere HA successfully created a config vvol on the datastore. If there was a failed config vvol datastore configuration issue, it is being cleared </description> <cause> <description> There were no errors during creation of the config vvol on the datastore</description> </cause> </EventLongDescription> ExtendedEventvSphere HA agent is runninginfovSphere HA agent on host {host.name} in cluster {computeResource.name} is runningvSphere HA agent on host {host.name} is runningvSphere HA agent is runningcom.vmware.vc.HA.DasAgentRunningEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is running <EventLongDescription id=" com.vmware.vc.HA.DasAgentRunningEvent"> <description> This event is logged when the vSphere HA agent is running on a host. </description> <cause> <description> This event is reported after vSphere HA is configured on a host or after the vSphere HA agent on a host starts, such as after a host reboot. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected an HA cluster state version inconsistencywarningvSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name}vSphere HA detected an HA cluster state version inconsistencycom.vmware.vc.HA.DasClusterVersionInconsistentEvent|vSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasClusterVersionInconsistentEvent"> <description> This event is logged when vSphere HA cluster has a version inconsistency for cluster state(HostList, ClusterConfiguration, VM protection state). </description> <cause> <description> This situation could primarily occur if vCenter has been restored to an older backed up state causing vCenter to rollback to older version for the vSphere HA cluster state (HostList, ClusterConfiguration, VM protection state) while the hosts on the cluster have the latest version for the cluster state. As a result, protection state for VMs will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster, any new cluster configuration state will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster and if hosts were added or removed to/from this vSphere HA cluster after vCenter backup and before vCenter Restore, VMs could potentially failover to hosts not being managed by vCenter but which are still part of the HA cluster. </description> <action> Step 1. 
If hosts were added or removed to/from the vSphere HA cluster after vCenter backup and before vCenter Restore, please add or remove those respective hosts back to the vSphere HA cluster so that the list of hosts in the vSphere HA cluster is identical to the list of hosts in the cluster before vCenter was last restored. If you do not want to add hosts to the cluster, stop the vSphere HA process on the hosts that were added to vCenter after the backup. If this is not done, in case of a failure, VMs could potentially failover to hosts not being managed by vCenter but which are still part of the HA cluster. </action> <action> Step 2. Disable vSphere HA on the cluster and then re-enable vSphere HA on the cluster. This will make sure that vCenter's version for the vSphere HA cluster state(HostList, ClusterConfiguration, VM protection state) is reset with a new fault domain id for the HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a failed failover hosterrorvSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name}vSphere HA detected a possible failure of failover host {host.name}vSphere HA detected a possible failure of this failover hostcom.vmware.vc.HA.DasFailoverHostFailedEvent|vSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostFailedEvent"> <description> This event is logged when vSphere HA has detected the failure of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the failure of a failover host. A host is considered to have failed by a vSphere HA master agent if it looses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the failover host failure, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is not running and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-isolated failover hosterrorvSphere HA detected that failover host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that failover host {host.name} is network isolated from the clustervSphere HA detected that this failover host is network isolated from the clustercom.vmware.vc.HA.DasFailoverHostIsolatedEvent|Host {host.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the network isolation of a failover host. vSphere HA reports a host as isolated if there are no heartbeats received from the HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the the host's datastore heartbeats. 
</description> <action> Determine the cause of the failover host isolation, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is isolated and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-partitioned failover hostwarningvSphere HA detected that failover host {host.name} in {computeResource.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that failover host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this failover host is in a different network partition than the mastercom.vmware.vc.HA.DasFailoverHostPartitionedEvent|Failover Host {host.name} in {computeResource.name} in {datacenter.name} is in a different network partition than the master <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostPartitionedEvent"> <description> This event is logged when vSphere HA has detected a designated failover host is network partitioned. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if a vSphere HA master agent detects a failover host is network partitioned. vSphere HA reports a host as partitioned if it cannot communicate with a subset of hosts in the cluster, yet can determine that the host is alive via its datastore heartbeats. </description> <action> Determine the cause of the partitioned failover host, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if a failover host is partitioned and a host failure occurs. See the prodcut documentation for troubleshooting tips. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA agent on a failover host is unreachableerrorThe vSphere HA agent on the failover host {host.name} in {computeResource.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on the failover host {host.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on this failover host is not reachable but host responds to ICMP pingscom.vmware.vc.HA.DasFailoverHostUnreachableEvent|The vSphere HA agent on the failover host {host.name} in cluster {computeResource.name} in {datacenter.name} is not reachable but host responds to ICMP pingsEventExHost complete datastore failureerrorAll shared datastores failed on the host {hostName} in cluster {computeResource.name}All shared datastores failed on the host {hostName}All shared datastores failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent|All shared datastores failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent"> <description> A host in a Component Protection-enabled cluster has lost connectivity to all shared datastores </description> <cause> <description>Connectivity to all shared datastores has been lost</description> <action>Reconnect at least one shared datastore</action> </cause> </EventLongDescription> EventExHost complete network failureerrorAll VM networks failed on the host {hostName} in cluster {computeResource.name}All VM networks failed on the host {hostName}All VM networks failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent|All VM networks failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent"> <description> A host in a Component Protection enabled cluster has lost connectivity to all virtual machine networks </description> <cause> <description>Connectivity to all virtual machine networks has been lost</description> <action>Reconnect at least one virtual machine network</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a host failureerrorvSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name}vSphere HA detected a possible host failure of host {host.name}vSphere HA detected a possible host failure of this hostcom.vmware.vc.HA.DasHostFailedEvent|vSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostFailedEvent"> <description> This event is logged when vSphere HA detects a possible host failure. </description> <cause> <description> A host is considered to have failed by a vSphere HA master agent if it looses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the host failure, and correct. See the product documentation for troubleshooting tips. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network isolated hosterrorvSphere HA detected that host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that host {host.name} is network isolated from the clustervSphere HA detected that this host is network isolated from the clustercom.vmware.vc.HA.DasHostIsolatedEvent|vSphere HA detected that host {host.name} is isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a host. </description> <cause> <description> This event will be generated if there are no heartbeats received from the vSphere HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the the host's datastore heartbeats. </description> <action> Determine the cause of the host isolation, and correct. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA host monitoring is disabledwarningvSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name}vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabledcom.vmware.vc.HA.DasHostMonitoringDisabledEvent|vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostMonitoringDisabledEvent"> <description> This event is logged when host monitoring has been disabled in a vSphere HA cluster. </description> <cause> <description> Host monitoring is disabled, so vSphere HA will not perform any failover actions. This event is generated to inform the user that their cluster is temporarily not being protected against host or VM failures. If host or VM failures occur while host monitoring is disabled, HA will not attempt to restart the the VMs that were running on the failed hosts. Other vSphere HA features are not impacted by whether host monitoring is disabled. </description> <action> Enable host monitoring to resume hosts monitoring. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA failed to restart a network isolated virtual machineerrorvSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart this virtual machine after it was powered off in response to a network isolation eventcom.vmware.vc.HA.FailedRestartAfterIsolationEvent|vSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} after it was powered off in response to a network isolation event. 
The virtual machine should be manually powered back on.EventExRunning VMs utilization cannot satisfy the configured failover resources on the cluster.warningRunning VMs utilization cannot satisfy the configured failover resources on cluster {computeResource.name}Running VMs utilization cannot satisfy the configured failover resources on the cluster.com.vmware.vc.HA.FailoverResourcesViolationEvent|Running VMs utilization cannot satisfy the configured failover resources on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.FailoverResourcesViolationEvent"> <description> This event is logged when the total utilization of the running VMs cannot satisfy the configured failover resources on a vSphere HA admission controlled cluster. </description> <cause> <description> The total utilization of the running VMs on this cluster is unable to satisfy the configured failover resources in the cluster. This event is generated to inform the user that their cluster will be running in a compromised state during failover and would not have sufficient failover resources to ensure the optimal functioning of the VMs and their workloads. The side-effect of this situation is that VMs won't be working optimally even though we ensure required failover capacity in case of failures. Other vSphere HA features are not impacted by this and this warning doesn't affect any VM related operations like power-on, vmotion etc. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to ensure that there is sufficient failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA changed a host's heartbeat datastoresinfoDatastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on this hostcom.vmware.vc.HA.HeartbeatDatastoreChanged|Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreSelected"> <description> A datastore is selected or deselected for storage heartbeating monitored by the vSphere agent on this host. vSphere HA employs storage heartbeating to detect host failures when there is a network partition. 
</description> </EventLongDescription> EventExvSphere HA heartbeat datastore number for a host is insufficientwarningThe number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for host {host.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for this host is {selectedNum}, which is less than required: {requiredNum}com.vmware.vc.HA.HeartbeatDatastoreNotSufficient|The number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} in {datacenter.name} is {selectedNum}, which is less than required: {requiredNum} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreNotSufficient"> <description> The number of heartbeat datastores used for this host is less than required. Multiple heartbeat datastores are needed to tolerate storage failures. The host summary page will report a configuration issue in this case. To ignore the configuration issue, use the vSphere HA cluster advanced option, das.ignoreInsufficientHbDatastore. </description> <cause> <description> The host does not have sufficient number of accessible datastores that are shared among other hosts in the cluster. </description> <action> Add more shared datastores to the host or check if any of its datastore is currently inaccessible. </action> </cause> </EventLongDescription> EventExvSphere HA agent on a host has an errorwarningvSphere HA agent for host {host.name} has an error in {computeResource.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for this host has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}com.vmware.vc.HA.HostAgentErrorEvent|vSphere HA agent for host {host.name} has an error in {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason} <EventLongDescription id="com.vmware.vc.HA.AgentErrorEvent"> <description> This event is logged when the vSphere HA agent for the host has an error. </description> <action> See product documentation for troubleshooting tips. </action> </EventLongDescription> ExtendedEventvSphere HA agent is healthyinfovSphere HA agent on host {host.name} in cluster {computeResource.name} is healthyvSphere HA agent on host {host.name} is healthyvSphere HA agent is healthycom.vmware.vc.HA.HostDasAgentHealthyEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is healthy <EventLongDescription id=" com.vmware.vc.HA.HostDasAgentHealthyEvent"> <description> This event is logged when the vSphere HA agent on a host transitions to a healthy state. </description> <cause> <description> vSphere HA reports this event when the vSphere HA agent on the host is either a master or a slave that is connected to the master over the management network. </description> </cause> </EventLongDescription> EventExvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error. 
{reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}com.vmware.vc.HA.HostDasErrorEvent|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason} <EventLongDescription id="com.vmware.vc.HA.HostDasErrorEvent"> <description> The vSphere HA agent on this host has an error. The event may provide details with extra information indicating the cause of the error. </description> <cause> <description>There was an error configuring the vSphere HA agent on the host</description> <action> Look at the task details for the configure vSphere HA task that failed. That will provide more details about why the failure occurred. Address the problem and reconfigure vSphere HA on the host. </action> </cause> <cause> <description> There was a timeout while communicating with the vSphere HA agent. This can occur if there is a high rate of operations being performed on virtual machines in the cluster resulting in the vSphere HA agents not being able to process the changes fast enough. </description> <action> Verify that this is a transient problem by stopping operations on virtual machines in the cluster for a few minutes to give time to the vSphere HA agents to process all their pending messages. If this resolves the problem, consider reducing the rate of operations performed on the cluster. </action> </cause> <cause> <description>The vSphere HA agent is in a shutdown or failed state</description> <action>Reconfigure vSphere HA on the host. If this fails, reconfigure vSphere HA on the cluster</action> </cause> </EventLongDescription> EventExvSphere HA detected a datastore failurewarningvSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of datastore {arg1} on host {host.name}vSphere HA detected a failure of datastore {arg1}com.vmware.vc.HA.HostDatastoreFailedEvent|vSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventUnsupported vSphere HA and vCloud Distributed Storage configurationerrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured on host {host.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured because vCloud Distributed Storage is enabled but the host does not support that featurecom.vmware.vc.HA.HostDoesNotSupportVsan|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in {datacenter.name} because vCloud Distributed Storage is enabled but the host does not support that featureExtendedEventHost has no vSphere HA isolation addresseserrorHost {host.name} in cluster {computeResource.name} has no isolation addresses defined as required by vSphere HAHost {host.name} has no isolation addresses defined as required by vSphere HAThis host has no isolation addresses defined as required by vSphere HAcom.vmware.vc.HA.HostHasNoIsolationAddrsDefined|Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no isolation addresses defined as required by vSphere HA. 
<EventLongDescription id="com.vmware.vc.HA.HostHasNoIsolationAddrsDefined"> <description> The host has an vSphere HA configuration issue because there were no IP addresses that vSphere HA could use for detecting network isolation. Without at least one, the host will not take any isolation response. HA, by default, will use the host's default gateway (defined in the host's networking configuration), or use the addresses that were specified in the cluster's advanced settings. </description> <action> Define a default gateway in the host's networking configuration. </action> <action> If the cluster advanced setting das.usedefaultisolationaddress is false, you must define at least one isolation address using the advanced options. </action> <action> Define one or more cluster advanced options, each containing an IP address to be pinged by vSphere HA to detect if it is network-isolated when it no longer receives communication with other hosts in the cluster. The advanced option is das.isolationAddress[n], where 'n' is a number from 1 to 9. You may specify multiple addresses. </action> </EventLongDescription> ExtendedEventvSphere HA cannot be configured on this host because there are no mounted datastores.errorvSphere HA cannot be configured on {host.name} in cluster {computeResource.name} because there are no mounted datastores.vSphere HA cannot be configured on {host.name} because there are no mounted datastores.vSphere HA cannot be configured on this host because there are no mounted datastores.com.vmware.vc.HA.HostHasNoMountedDatastores|vSphere HA cannot be configured on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because there are no mounted datastores.ExtendedEventvSphere HA requires a SSL Thumbprint for hosterrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on {host.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on this host because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for this host has been verified.com.vmware.vc.HA.HostHasNoSslThumbprint|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified. <EventLongDescription id="com.vmware.vc.HA.HostHasNoSslThumbprint"> <description> The host has an vSphere HA configuration issue because it does not have a verified ssl thumbprint. Hosts need verified SSL thumbprints for secure vSphere HA communications. </description> <action> If the host is using self-signed certificates, check that vCenter Server is configured to verify SSL certificates, and verify the thumbprints for the hosts in the vSphere HA cluster. 
</action> </EventLongDescription> ExtendedEventHost is incompatible with vSphere HAerrorThe product version of host {host.name} in cluster {computeResource.name} is incompatible with vSphere HA.The product version of host {host.name} is incompatible with vSphere HA.The product version of this host is incompatible with vSphere HA.com.vmware.vc.HA.HostIncompatibleWithHA|The product version of host {host.name} in cluster {computeResource.name} in {datacenter.name} is incompatible with vSphere HA. <EventLongDescription id="com.vmware.vc.HA.HostIncompatibleWithHA"> <description> The host is in a vSphere HA cluster but its product version is incompatible with HA. </description> <action> To fix the situation the host should either be moved out of the vSphere HA cluster or upgraded to a version supporting HA. </action> </EventLongDescription> EventExvSphere HA detected a network failurewarningvSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of network {network} on host {host.name}vSphere HA detected a failure of network {network}com.vmware.vc.HA.HostNetworkFailedEvent|vSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventvSphere HA detected a network-partitioned hostwarningvSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connected in {computeResource.name}vSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this host is in a different network partition than the master to which vCenter Server is connectedcom.vmware.vc.HA.HostPartitionedFromMasterEvent|vSphere HA detected that host {host.name} is in a different network partition than the master {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HostPartitionedFromMasterEvent"> <description> This event is logged when the host is in a different partition than the master. </description> </EventLongDescription> EventExThe vSphere HA host availability state changedinfoThe vSphere HA availability state of the host {host.name} in cluster {computeResource.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of the host {host.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of this host has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}com.vmware.vc.HA.HostStateChangedEvent|The vSphere HA availability state of the host {host.name} in cluster in {computeResource.name} in {datacenter.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState} <EventLongDescription id="com.vmware.vc.HA.HostStateChangedEvent"> <description> This event is logged when the availability state of a host has changed. </description> </EventLongDescription> ExtendedEventvSphere HA agent unconfigure failed on hostwarningThere was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on host {host.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on this host. 
To solve this problem, reconnect the host to vCenter Server.com.vmware.vc.HA.HostUnconfigureError|There was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name}. To solve this problem, reconnect the host to vCenter Server. <EventLongDescription id="com.vmware.vc.HA.HostUnconfigureError"> <description> There was an error unconfiguring the vSphere HA agent on this host. </description> <cause> <description> The vSphere HA unconfiguration task failed to send the updated hostList to vSphere HA agent on the host. This condition may interfere with the vSphere HA cluster to which the host used to belong and should be corrected. </description> <action> Add the host back to a vCenter Server of version 5.0 or later. </action> </cause> </EventLongDescription> EventExA disconnected host has vSphere HA protected VMserrorHost {host.name} in cluster {computeResource.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)Host {host.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)This host is disconnected from vCenter Server, but contains {protectedVmCount} vSphere HA protected virtual machine(s)com.vmware.vc.HA.HostUnconfiguredWithProtectedVms|Host {host.name} in cluster {computeResource.name} in {datacenter.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s) <EventLongDescription id="com.vmware.vc.HA.HostUnconfiguredWithProtectedVms"> <description> This host is disconnected and contains one or more virtual machine(s) that are still protected by vSphere HA. Consequently, these virtual machines could be failed over to another host if this host should fail. </description> <cause> <description> If a vSphere HA-enabled host is disconnected and is unable to unprotect the virtual machines currently running on it (perhaps due to datastores being unavailable, or not being able to communicate with the vSphere HA master host) then these virtual machines would still be protected, but reside on the disconnected host. Also, if a virtual machine is migrated using vMotion to a vSphere HA-enabled host that is currently in the process of disconnecting, this can lead to the same result. </description> <action> To correct this situation, ensure that the host has access to the datastores used by these virtual machines, and then reconnect the host to an vSphere HA-enabled cluster. The virtual machines should become unprotected shortly after vSphere HA is configured on the host. </action> </cause> </EventLongDescription> EventExvSphere HA configured failover resources are insufficient to satisfy desired failover levelwarningInsufficient configured resources to satisfy the desired vSphere HA failover level on cluster {computeResource.name}Insufficient configured resources to satisfy the desired vSphere HA failover levelcom.vmware.vc.HA.InsufficientFailoverLevelEvent|Insufficient configured resources to satisfy the desired vSphere HA failover level on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.InsufficientFailoverLevelEvent"> <description> The cluster does not have enough failover capacity to satisfy the desired host failures to tolerate for vSphere HA. Failovers may still be performed by vSphere HA but will be on a best effort basis and configured resources may not sufficient to respect the desired host failures to tolerate. 
</description> <cause> <description> The desired host failures to tolerate setting might not be completely respected since the cluster does not have the required failover capacity to satisfy the failover of the largest desired number of hosts. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to reserve more failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA detected an invalid master agentwarningvSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.com.vmware.vc.HA.InvalidMaster|vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised. <EventLongDescription id="com.vmware.vc.HA.InvalidMaster"> <description> A host in a vSphere HA cluster that is claiming to be a master has been determined to be invalid by another master host. This occurs when an existing master gets a message from another master in the same cluster. The existing master verifies that the other master is actually a valid master before it considers abdicating to the other master. An invalid master is an indication that there may be a compromised host on the network that is attempting to disrupt the HA cluster. The offending host should be examined to determine if it has been compromised. It's also possible a compromised host is impersonating a valid host so the reported host may not be the actual host that is compromised. </description> </EventLongDescription> ExtendedEventvSphere HA could not identify lock owner host on VM with duplicatesinfovSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name}vSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on this VM with duplicatescom.vmware.vc.HA.LockOwnerUnKnownForDupVms|vSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.LockOwnerUnKnownForDupVms"> <description> The vSphere HA agent could not identify lock owner host on duplicate VMs. </description> <cause> <description> Instances when vSphere HA failovers the VM to another host, and unable to bring down the VM from the failed host. This results in multiple instances of a VM running in the cluster if the failed host joins back the cluster. </description> <action> Could not determine the lock owner host on duplicate VM. 
</action> </cause> </EventLongDescription> EventExvSphere HA agent cannot reach some cluster management addressesinfovSphere HA agent on {host.name} in cluster {computeResource.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on {host.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on host cannot reach some management network addresses of other hosts: {unpingableAddrs}com.vmware.vc.HA.NotAllHostAddrsPingable|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} cannot reach some management network addresses of other hosts: {unpingableAddrs} <EventLongDescription id="com.vmware.vc.HA.NotAllIsoAddrsPingable"> <description> The vSphere HA agent on host cannot reach some of the management network addresses of other hosts, and vSphere HA may not be able to restart VMs if a host failure occurs. </description> <cause> <description> There is a network issue preventing this host from communicating with some or all of the hosts in the cluster over their vSphere HA management networks. vSphere HA reliability is currently compromised in the cluster and failover may not reliably occur if a host or hosts should fail during this condition. </description> <action> Determine and correct the source of the communication problem. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not terminate the VM that was selected for preemptionerrorvSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name}vSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate this VM that was selected for preemptioncom.vmware.vc.HA.PreemptionFailedWithMaxRetry|vSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.PreemptionFailedWithMaxRetry"> <description> vSphere HA could not terminate the VM that was selected for preemption. </description> <cause> <description> Instances when vSphere HA receives the InsufficientResourcesFault, for any VM with fault reason indicating presence of preemptible VM. vSphere HA terminates the appropriate preemptible VM to free up resources. </description> <action> Terminate the preemptible VM manually to free up resources. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA remediated duplicates of VMinfovSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of this VMcom.vmware.vc.HA.RemediatedDupVMs|vSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediatedDupVMs"> <description> The vSphere HA agent on the host remediated the duplicate VM. </description> <cause> <description> Instances when vSphere HA failovers the VM to another host, and unable to bring down the VM from the failed host. This results in multiple instances of a VM running in the cluster if the failed host joins back the cluster. </description> <action> Kept the VM running on host which holds the lock on datastore, terminated VM on rest of the hosts where VM was running. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not remediate duplicates of VMwarningvSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of this VMcom.vmware.vc.HA.RemediationFailedForDupVMs|vSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediationFailedForDupVMs"> <description> The vSphere HA agent on host could not remediate duplicate VM. </description> <cause> <description> Instances when vSphere HA failovers the VM to another host, and unable to bring down the VM from the failed host. This results in multiple instances of a VM running in the cluster if the failed host joins back the cluster. </description> <action> Duplicates of VM running on multiple hosts could not be terminated. </action> </cause> </EventLongDescription> EventExvSphere HA failed to start a Fault Tolerance secondary VM.errorvSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.com.vmware.vc.HA.StartFTSecondaryFailedEvent|vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name} in {datacenter.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out. <EventLongDescription id="com.vmware.vc.HA.StartFTSecondaryFailedEvent"> <description> vSphere HA agent failed to start a Fault Tolerance secondary VM. vSphere HA will retry until either the operation succeeds or until the maximum number of restart attempts is reached. </description> </EventLongDescription> EventExvSphere HA successfully started a Fault Tolerance secondary VM.infovSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}.com.vmware.vc.HA.StartFTSecondarySucceededEvent|vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. 
<EventLongDescription id="com.vmware.vc.HA.StartFTSecondarySucceededEvent"> <description> vSphere HA agent successfully started a Fault Tolerance secondary virtual machine. </description> </EventLongDescription> EventExvSphere HA removed a datastore from preferred heartbeat datastoreswarningvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} because the datastore is removed from inventoryvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster because the datastore is removed from inventorycom.vmware.vc.HA.UserHeartbeatDatastoreRemoved|vSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} in {datacenter.name} because the datastore is removed from inventory <EventLongDescription id="com.vmware.vc.HA.UserHeartbeatDatastoreRemoved"> <description> The datastore is removed from the set of preferred heartbeat datastores selected for this cluster. </description> <cause> <description> The datastore does not exist in the inventory. This happens when the datastore is removed from a host in the cluster manually or via a rescan. </description> <action> Choose a different datastore by reconfiguring the vSphere HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA did not perform an isolation response for vm because its VM restart priority is DisabledinfovSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} because its VM restart priorirty is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is Disabled"vSphere HA did not perform an isolation response because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledIsolationEvent|vSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledIsolationEvent"> <description> This event is logged when a host in a vSphere HA cluster was isolated and no isolation response was taken. </description> <cause> <description> The VM restart priority setting is set to disabled, so vSphere HA did not perform any action on this VM when the host became isolated. If the restart priority is disabled, HA will not attempt to restart the VM on another host, so HA will take no action for this VM on the isolated host. This event is informational only. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA did not attempt to restart vm because its VM restart priority is DisabledinfovSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is Disabled"vSphere HA did not attempt to restart vm because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledRestartEvent|vSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledRestartEvent"> <description> This event is logged when a failed VM in a vSphere HA cluster will not be restarted because its VM restart priority setting is set to disabled. </description> <cause> <description> The restart priority for the cluster or VM is disabled, so vSphere HA did not perform any action on this VM failed. This event is informational only. </description> </cause> </EventLongDescription> EventExvCenter Server cannot communicate with the master vSphere HA agentwarningvCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name}vCenter Server cannot communicate with the master vSphere HA agent on {hostname}com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent|vCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent"> <description> This event is logged when vCenter Server cannot communicate with a vSphere HA master agent. </description> <cause> <description> This event is reported when vCenter Server is not able to communicate with a vSphere HA master agent on the host, but it can communicate with other vSphere HA agents in the cluster and these are reporting the host is a master. </description> <action> Correct the networking issue that is preventing vCenter Server from communicating with the host listed in the event. This problem can occur, for example, if the physical NIC in use by this network connection has failed. </action> </cause> </EventLongDescription> ExtendedEventvCenter Server is unable to find a master vSphere HA agentwarningvCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name}vCenter Server is unable to find a master vSphere HA agentcom.vmware.vc.HA.VcCannotFindMasterEvent|vCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotFindMasterEvent"> <description> This event is logged when vCenter Server is unable to find a master vSphere HA agent. 
</description> <cause> <description> </description> <action> </action> </cause> </EventLongDescription> EventExvCenter Server connected to a vSphere HA master agentinfovCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is connected to a master HA agent running on host {hostname}com.vmware.vc.HA.VcConnectedToMasterEvent|vCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcConnectedToMasterEvent"> <description> This event is logged when vCenter Server is connected with a master vSphere HA agent. </description> </EventLongDescription> EventExvCenter Server disconnected from a master vSphere HA agentwarningvCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is disconnected from a master HA agent running on host {hostname}com.vmware.vc.HA.VcDisconnectedFromMasterEvent|vCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcDisconnectedFromMasterEvent"> <description> This event is logged when vCenter Server is disconnected from a master vSphere HA agent. </description> </EventLongDescription> ExtendedEventvSphere HA was unable to reset a VM after it exhausted the retrieserrorvSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on host {host.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on this host after {retryTimes} retriesvSphere HA was unable to reset this VM after {retryTimes} retriescom.vmware.vc.HA.VmDasResetAbortedEvent|vSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmDasResetAbortedEvent"> <description> This event is logged when vSphere HA was unable to reset a VM. </description> <cause> <description> The operation to reset the VM continued to fail. vSphere HA stopped resetting the VM after it exhausted the retries. </description> <action>Ensure that the host system is manageable, for example host agent is not hung. Check if there are no other concurrent tasks running for the VM.</action> </cause> </EventLongDescription> ExtendedEventVirtual machine failed to become vSphere HA ProtectederrorVirtual machine {vm.name} in cluster {computeResource.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.This virtual machine failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.com.vmware.vc.HA.VmNotProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure. 
<EventLongDescription id="com.vmware.vc.HA.VmNotProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation but the VM has not transitioned to vSphere HA Protected in the time period expected. This condition exists because the master vSphere HA agent has not yet persisted that the VM successfully powered on or vCenter is unaware that it did. Consequently, vSphere HA may not restart the VM after a failure. </description> <action> There are a number of reasons why a VM may remain not protected for a period of time. First, the system may be heavily loaded, in which case the transition will just take longer. Second, vCenter may be unable to communicate with the vSphere HA master agent. Examine the inventory to see if any hosts in the cluster are not responding. Third, the the management network may be partitioned, which is preventing the master that owns the VM from protecting it or reporting this information to vCenter. The cluster summary page may report a config issue in this case or hosts in the VM inventory will be reported as not responding. Finally, the vSphere HA master election is taking too long to complete. The cluster summary page will report if this situation exists. See the product documentation for additional troubleshooting tips. </action> </EventLongDescription> ExtendedEventVirtual machine is vSphere HA protectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.This virtual machine is vSphere HA Protected and HA will attempt to restart it after a failure.com.vmware.vc.HA.VmProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is vSphere HA Protected and HA will attempt to restart it after a failure. <EventLongDescription id="com.vmware.vc.HA.VmProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation and vSphere HA has persisted this fact. Consequently, vSphere HA will attempt to restart the VM after a failure. </description> </EventLongDescription> ExtendedEventVirtual machine is not vSphere HA ProtectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.This virtual machine is not vSphere HA Protected.com.vmware.vc.HA.VmUnprotectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is not vSphere HA Protected. <EventLongDescription id="com.vmware.vc.HA.VmUnprotectedEvent"> <description> The virtual machine transitioned from the vSphere HA protected to unprotected state. This transition is a result of a user powering off the virtual machine, disabling vSphere HA, disconnecting the host on which the virtual machine is running, or destroying the cluster in which the virtual machine resides. 
</description> </EventLongDescription> ExtendedEventvSphere HA has unprotected out-of-disk-space VMinfovSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected this virtual machine because it ran out of disk spacecom.vmware.vc.HA.VmUnprotectedOnDiskSpaceFull|vSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} because it ran out of disk spaceExtendedEventvSphere HA did not terminate a VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}warningvSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate this VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore|vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore} <EventLongDescription id=" com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore"> <description> This event is logged when a VM affected by an inaccessible datastore in a vSphere HA cluster was not terminated. </description> <cause> <description> VM Component Protection is configured to not terminate the VM, or vSphere HA host monitoring is disabled, or VM restart priority is disabled, or the VM is an agent VM, or there are insufficient resources to fail over the VM. For the case of insufficient resources, vSphere HA will attempt to terminate the VM when resources become available. </description> <action>Select VM Component Protection option to terminate VM</action> <action>Enable host monitoring</action> <action>Enable VM Restart priority</action> <action>Reduce resource reservations of other VMs in the cluster</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition or isolation if one exists</action> <action>If vSphere DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> </EventLongDescription> ExtendedEventDatastore {ds.name} mounted on this host was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleinfoDatastore {ds.name} mounted on host {host.name} in cluster {computeResource.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on host {host.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on this host was inaccessible. 
vSphere HA detected that the condition was cleared and the datastore is now accessiblecom.vmware.vc.HA.VmcpStorageFailureCleared|Datastore {ds.name} mounted on host {host.name} was inaccessible. The condition was cleared and the datastore is now accessible <EventLongDescription id=" com.vmware.vc.HA.VmcpStorageFailureCleared"> <description> This event is logged when a datastore connectivity was restored. The host can have the following storage access failures: All Paths Down (APD) and Permanent Device Loss (PDL). Datastore was shown as unavailable/inaccessible in storage view. </description> <cause> <description> A datastore on this host was inaccessible. The condition was cleared and the datastore is now accessible. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected that a datastore was inaccessible. This affected the VM with files on the datastorewarningvSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on host {host.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on this host was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected the VM with files on the datastorecom.vmware.vc.HA.VmcpStorageFailureDetectedForVm|vSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} in {datacenter.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastore <EventLongDescription id="com.vmware.vc.HA.VmcpStorageFailureDetectedForVm"> <description> This event is logged when a VM's files were not accessible due to a storage connectivity failure. vSphere HA will take action if VM Component Protection is enabled for the VM. </description> <cause> <description> A datastore was inaccessible due to a storage connectivity loss of All Paths Down or Permenant Device Loss. A VM was affected because it had files on the inaccessible datastore. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA was unable to terminate VM affected by an inaccessible datastore after it exhausted the retrieserrorvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on this host after {retryTimes} retriesvSphere HA was unable to terminate this VM affected by an inaccessible datastore after {retryTimes} retriescom.vmware.vc.HA.VmcpTerminateVmAborted|vSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminateVmAborted"> <description> This event is logged when vSphere HA was unable to terminate a VM affected by an inaccessible datastore. </description> <cause> <description> The operation to terminate the VM continued to fail. vSphere HA stopped terminating the VM after it exhausted the retries. </description> <action> Ensure that the host system is manageable, for example host agent is not hung. Check if there are other concurrent tasks running for the VM.</action> <action> Reset the VM if guest application is not operational after the datastore becomes accessible.</action> </cause> </EventLongDescription> ExtendedEventvSphere HA attempted to terminate a VM affected by an inaccessible datastorewarningvSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on host{host.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on this host because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate this VM because the VM was affected by an inaccessible datastorecom.vmware.vc.HA.VmcpTerminatingVm|vSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} in {datacenter.name} because the VM was affected by an inaccessible datastore <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminatingVm"> <description> This event is logged when vSphere HA attempted to terminate a VM affected by an inaccessible datastore. A VM is terminated by issuing a SIGKILL to the vmx process. </description> <cause> <description> The VM was affected by an inaccessible datastore. vSphere HA VM Component Protection attempted to terminate the VM. </description> </cause> </EventLongDescription> EventExHardware Health Status Changedinfocom.vmware.vc.HardwareSensorEvent|Sensor {sensorNumber} type {sensorType}, Description {sensorName} state {status} for {message}. 
Part Name/Number {partName} {partNumber} Manufacturer {manufacturer}EventExStatus of each Hardware Health Sensor Groupinfocom.vmware.vc.HardwareSensorGroupStatus|Hardware Sensor Status: Processor {processor}, Memory {memory}, Fan {fan}, Voltage {voltage}, Temperature {temperature}, Power {power}, System Board {systemBoard}, Battery {battery}, Storage {storage}, Other {other}ExtendedEventHost configuration is TPM encrypted.warningcom.vmware.vc.HostTpmConfigEncryptionEvent|Host configuration is TPM encrypted.EventExOperation cleanup encountered errorsinfoOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup with task {taskId} encountered errorscom.vmware.vc.OperationCleanupErrorsEvent|Operation cleanup for {vm.name} with task {taskId} encountered errorsExtendedEventThe user does not have permission to view the entity associated with this event.infocom.vmware.vc.RestrictedAccess|The user does not have permission to view the entity associated with this event.EventExFailed to register host with Intel® SGX Registration Service.errorFailed to register host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.FailedRegistration|Failed to register host {host.name} with Intel® SGX Registration Service {registrationUrl}. The service responded with {statusCode}, {errorCode}: {errorMessage}.EventExSending registration request to Intel® SGX Registration Service.infoSending registration request to Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.InitiatingRegistration|Sending registration request for host {host.name} to Intel® SGX Registration Service {registrationUrl}.EventExSuccessfully registered host with Intel® SGX Registration Service.infoSuccessfully registered host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.SuccessfulRegistration|Successfully registered host {host.name} with Intel® SGX Registration Service {registrationUrl}.EventExStateless Alarm TriggeredinfoAlarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'com.vmware.vc.StatelessAlarmTriggeredEvent|Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'ExtendedEventTrusted Host attestation failed.errorcom.vmware.vc.TaHostAttestFailEvent|Trusted Host attestation failed.ExtendedEventTrusted Host attestation passed.infocom.vmware.vc.TaHostAttestPassEvent|Trusted Host attestation passed.ExtendedEventTrusted Host attestation status unset.infocom.vmware.vc.TaHostAttestUnsetEvent|Trusted Host attestation status unset.EventExHost Time Synchronization establishedinfocom.vmware.vc.TimeSyncEvent|Time service {serviceName} has synchronized with remote time source, details: {message}.EventExHost Time Synchronization losterrorcom.vmware.vc.TimeSyncFailedEvent|Time service {serviceName} is not synchronized with the remote time source, details: {message}.ExtendedEventHost must be decommissioned when moved out of a Trusted Infrastructure cluster.errorHost {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host 
{host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.com.vmware.vc.TrustAuthority.DecommissionHost|Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.ExtendedEventHost is not configured for vSphere Trust Authority.errorHost {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.com.vmware.vc.TrustAuthority.HostNotConfigured|Host {host.name} is not configured for vSphere Trust Authority.EventExThe client certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSClientCertExpirationEvent|The client certificate for the Key Provider {keyProviderId} in the Trust Authority Host {hostName} will expire in {dayNum} day(s).EventExThe server certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSServerCertExpirationEvent|The server cetificate of key server {serverName} in the Trusted Key Provider {keyProviderId} will expire in {dayNum} day(s).ExtendedEventCertificates have changed. Trust authority cluster needs to be reconfigured.errorcom.vmware.vc.TrustAuthority.StsCertificatesChange|Certificates have changed. Trust authority cluster needs to be reconfigured.EventExvCenter Service Overall Health Changedinfocom.vmware.vc.VCHealthStateChangedEvent|vCenter Service overall health changed from '{oldState}' to '{newState}' <EventLongDescription id="com.vmware.vc.VCHealthStateChangedEvent"> <description> This event is logged when the overall health of vCenter Service has changed or become unavailable. </description> <cause> <description> The vCenter Service overall health state has changed or become unavailable </description> <action> Examine the vCenter Service health state and make sure the VimWebServices service is up and running on the vCenter Server </action> </cause> </EventLongDescription> EventExDatastore is in healthy state within the clusterinfoDatastore {dsName} is in healthy state within the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreHealthy|Datastore {dsName} is in healthy state within the cluster {computeResource.name}EventExDatastore is not accessible on the host(s)warningDatastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreInaccessible|Datastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}EventExDatastore unmount is failederrorUnmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreUnmountFailed|Unmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}EventExDatastore in desired configuration is missing on the host(s)warningDatastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}com.vmware.vc.VMCStorage.DesiredDatastoreMissing|Datastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}EventExHost(s) mounted with the datastore which is not present in desired configurationerrorHost(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired configuration on {computeResource.name}com.vmware.vc.VMCStorage.NotDesiredDatastorePresent|Host(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired 
configuration on {computeResource.name}EventExExecuting VM Instant CloneinfoExecuting Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} to {destVmName}Executing Instant Clone to {destVmName}com.vmware.vc.VmBeginInstantCloneEvent|Executing Instant Clone of {vm.name} on {host.name} to {destVmName}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneFailedInvalidDestinationEvent|Cannot clone {vm.name} as {destVmName} to invalid or non-existent destination with ID {invalidMoRef}: {fault}EventExRestarting VM CloneinfoRestarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} to {destVmName} with task {taskId}Restarting VM Clone to {destVmName} with task {taskId}com.vmware.vc.VmCloneRestartEvent|Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneToResourcePoolFailedEvent|Cannot clone {vm.name} as {destVmName} to resource pool {destResourcePool}: {fault}EventExFailed to create virtual machineerrorFailed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName}Failed to create virtual machine on {host.name}com.vmware.vc.VmCreateFailedEvent|Failed to create virtual machine {vmName} on {host.name}ExtendedEventVirtual machine disks consolidation succeeded.infoVirtual machine {vm.name} disks consolidation succeeded on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation succeeded on {host.name}.Virtual machine {vm.name} disks consolidation succeeded.Virtual machine disks consolidation succeeded.com.vmware.vc.VmDiskConsolidatedEvent|Virtual machine {vm.name} disks consolidated successfully on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation needed.warningVirtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is needed on {host.name}.Virtual machine {vm.name} disks consolidation is needed.Virtual machine disks consolidation is needed.com.vmware.vc.VmDiskConsolidationNeeded|Virtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation no longer needed.infoVirtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is no longer needed on {host.name}.Virtual machine {vm.name} disks consolidation is no longer needed.Virtual machine disks consolidation is no longer needed.com.vmware.vc.VmDiskConsolidationNoLongerNeeded|Virtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation failed.warningVirtual machine {vm.name} disks consolidation failed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation failed on {host.name}.Virtual machine {vm.name} disks consolidation failed.Virtual machine disks consolidation failed.com.vmware.vc.VmDiskFailedToConsolidateEvent|Virtual machine {vm.name} 
disks consolidation failed on {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExcom.vmware.vc.VmForkFailedInvalidDestinationEvent|EventExCannot complete Instant Clone of VMerrorCannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone to {destVmName}. Reason : {fault.msg}com.vmware.vc.VmInstantCloneFailedEvent|Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}EventExInstant Clone WarningwarningInstant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning - {warning}com.vmware.vc.VmInstantCloneWarningEvent|Instant Clone Warning for {vmName} - {warning}EventExInstant Clone of VM has completedinfoInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} has completedInstant Clone of {srcVmName} has completedcom.vmware.vc.VmInstantClonedEvent|Instant Clone of {srcVmName} on {host.name} has completedEventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.infocom.vmware.vc.VpxdMemoryUsageClearEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status}.EventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.errorcom.vmware.vc.VpxdMemoryUsageErrorEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status} (used: {usedMemory}%, soft limit: {limit}%).EventExOperation enabledinfocom.vmware.vc.authorization.MethodEnabled|The operation {MethodName} on the {EntityName} of type {EntityType} is enabled.EventExPrivilege check failedwarningPrivilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}com.vmware.vc.authorization.NoPermission|Privilege check failed for user {User} for missing permission {Permission}. 
Session user performing the check: {SessionUser}ExtendedEventErrors occurred during automatic CPVM certificate rotation.errorcom.vmware.vc.certificatemanagement.CPVMCertificateUpdateFailedEvent|Errors occurred during automatic CPVM certificate rotation.ExtendedEventCPVM successfully performed automatic certificate rotation.infocom.vmware.vc.certificatemanagement.CPVMCertificateUpdateHealthyEvent|CPVM successfully performed automatic certificate rotation.ExtendedEventErrors occurred during automatic Spherelet certificate rotation.errorcom.vmware.vc.certificatemanagement.SphereletCertificateUpdateFailedEvent|Errors occurred during automatic Spherelet certificate rotation.ExtendedEventNo errors found during automatic Spherelet certificate rotation.infocom.vmware.vc.certificatemanagement.SphereletCertificateUpdateHealthyEvent|No errors found during automatic Spherelet certificate rotation.ExtendedEventTRUSTED ROOT certificates imported successfully.infocom.vmware.vc.certificatemanagement.TrustedRootsImportedEvent|TRUSTED ROOT certificates imported successfully.ExtendedEventTRUSTED ROOT certificates imported successfully, but with warnings.warningcom.vmware.vc.certificatemanagement.TrustedRootsImportedWithWarningsEvent|TRUSTED ROOT certificates imported successfully, but with warnings.ExtendedEventvCenter Server TLS certificate replaced successfully.infocom.vmware.vc.certificatemanagement.VcCertificateReplacedEvent|vCenter Server TLS certificate replaced successfully.ExtendedEventvCenter Server TLS certificate replaced successfully, but there are warnings detected.warningcom.vmware.vc.certificatemanagement.VcCertificateReplacedWithWarningsEvent|vCenter Server TLS certificate replaced successfully, but there are warnings detected.EventExFailed to update the vCenter server certificate.warningcom.vmware.vc.certificatemanagement.VcServerCertificateUpdateFailureEvent|{cause} for the {serviceName}. Remediation suggested: {remediation}. For more details, please refer to {kbLink}.EventExCA Certificates were updated on hostinfoCA Certificates were updated on {hostname}com.vmware.vc.certmgr.HostCaCertsAndCrlsUpdatedEvent|CA Certificates were updated on {hostname}EventExHost Certificate expiration is imminentwarningHost Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpirationImminentEvent|Host Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}EventExHost Certificate is nearing expirationwarningHost Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringEvent|Host Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}EventExHost Certificate will expire soonwarningHost Certificate on {hostname} will expire soon. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringShortlyEvent|Host Certificate on {hostname} will expire soon. 
Expiration Date: {expiryDate}ExtendedEventHost Certificate Management Mode changedinfocom.vmware.vc.certmgr.HostCertManagementModeChangedEvent|Host Certificate Management Mode changed from {previousMode} to {presentMode}ExtendedEventHost Certificate Management Metadata changedinfocom.vmware.vc.certmgr.HostCertMetadataChangedEvent|Host Certificate Management Metadata changedEventExHost Certificate revokedwarningHost Certificate on {hostname} is revoked.com.vmware.vc.certmgr.HostCertRevokedEvent|Host Certificate on {hostname} is revoked.EventExHost Certificate was updatedinfoHost Certificate was updated on {hostname}, new thumbprint: {thumbprint}com.vmware.vc.certmgr.HostCertUpdatedEvent|Host Certificate was updated on {hostname}, new thumbprint: {thumbprint}EventExAdding host to cluster store failederrorAdding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.AddHostFailed|Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}EventExInitializing cluster store member cache failederrorInitializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.InitializeMemberCacheFailed|Initializing cluster store member cache failed. Fault Reason : {errorMessage}EventExRemoving host from cluster store failederrorRemoving host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.RemoveHostFailed|Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}EventExUpdating host encryption keyinfocom.vmware.vc.crypto.HostKeyUpdatedEvent|Host encryption key set to {newKey}. 
Old key: {oldKey}EventExcom.vmware.vc.crypto.IntegrityCheckFailed|EventExcom.vmware.vc.crypto.IntegrityCheckPassed|EventExCrypto operation audit eventinfocom.vmware.vc.crypto.Operation|Cryptographic operations during {description}{operation}{diskOperations}EventExFailed to update VM fileserrorFailed to update VM files on datastore {ds.name}com.vmware.vc.datastore.UpdateVmFilesFailedEvent|Failed to update VM files on datastore {ds.name} using host {hostName}EventExUpdated VM filesinfoUpdated VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatedVmFilesEvent|Updated VM files on datastore {ds.name} using host {hostName}EventExUpdating VM FilesinfoUpdating VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatingVmFilesEvent|Updating VM files on datastore {ds.name} using host {hostName}ExtendedEventLink Aggregation Control Protocol configuration is inconsistentinfoSingle Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.com.vmware.vc.dvs.LacpConfigInconsistentEvent|Single Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.ExtendedEventFault Tolerance VM restart disabledwarningvSphere HA has been disabled in cluster {computeResource.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart this VM or its Secondary VM after a failure.com.vmware.vc.ft.VmAffectedByDasDisabledEvent|vSphere HA has been disabled in cluster {computeResource.name} of datacenter {datacenter.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure. <EventLongDescription id="com.vmware.vc.ft.VmAffectedByDasDisabledEvent"> <description> When vSphere HA is disabled in a cluster, you cannot restart a Primary VM or its Secondary VM after a failure. This event is issued when vSphere HA is disabled and a Fault Tolerant virtual machine is powered on. The event alerts you of the risk to the Fault Tolerant virtual machine that results from disabling vSphere HA. 
</description> <cause> <description>vSphere HA was disabled when a Fault Tolerant virtual machine was powered on</description> <action>Re-enable vSphere HA</action> </cause> </EventLongDescription> EventExGuest operationinfoGuest operation {operationName.@enum.com.vmware.vc.guestOp} performed.com.vmware.vc.guestOperations.GuestOperation|Guest operation {operationName.@enum.com.vmware.vc.guestOp} performed on Virtual machine {vm.name}.EventExGuest operation authentication failurewarningGuest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp}.com.vmware.vc.guestOperations.GuestOperationAuthFailure|Guest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp} on Virtual machine {vm.name}.ExtendedEventvSphere HA restarted a virtual machinewarningvSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA restarted virtual machine {vm.name} on host {host.name}vSphere HA restarted virtual machine {vm.name}vSphere HA restarted this virtual machinecom.vmware.vc.ha.VmRestartedByHAEvent|vSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.ha.VmRestartedByHAEvent"> <description> The virtual machine was restarted automatically by vSphere HA on this host. This response may be triggered by a failure of the host the virtual machine was originally running on or by an unclean power-off of the virtual machine (eg. if the vmx process was killed). </description> </EventLongDescription> ExtendedEventAutostart power on failederrorPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on this host failedcom.vmware.vc.host.AutoStartPowerOnFailedEvent|Powering on virtual machines according to autostart rules on host {host.name} in datacenter {datacenter.name} failedExtendedEventAutostart rules reconfigure failederrorReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on this host failedcom.vmware.vc.host.AutoStartReconfigureFailedEvent|Reconfiguring autostart rules for virtual machines on {host.name} in datacenter {datacenter.name} failedEventExEncryption mode is enabled on host.infoEncryption mode is enabled on host.com.vmware.vc.host.Crypto.Enabled|Encryption mode is enabled on host {hostName}.EventExThe operation is not supported on hosts which have encryption disabled.errorcom.vmware.vc.host.Crypto.HostCryptoDisabled|The operation is not supported on host {hostName} because encryption is disabled.EventExHost key is being renewed because an error occurred on the key provider.warningHost key is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KMSClusterError|Host key of {hostName} is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.EventExHost key is being renewed because key was missing on the key provider.warningHost key is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. 
The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KeyMissingOnKMS|Host key of {hostName} is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. The new key is {newKey}.EventExHost requires encryption mode enabled and the key provider is not available.errorHost requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KMSClusterError|Host {hostName} requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExHost requires encryption mode enabled and the key is not available on the key provider.errorHost requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KeyMissingOnKMS|Host {hostName} requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExFailed to send keys to host because of host error.errorcom.vmware.vc.host.Crypto.SendKeyError.HostError|Failed to send keys {keys} to host {hostName}. Please check host connection.EventExHost profile {operation} failed with error: {error}.errorHost profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.com.vmware.vc.host.HPOperationFailed|Host profile {operation} failed with error: {error}.ExtendedEventHost booted from stateless cache.warningHost booted from stateless cache.Host booted from stateless cache.Host booted from stateless cache.com.vmware.vc.host.HostBootedFromStatelessCacheEvent|Host booted from stateless cache.EventExHost IP address conflict detectederrorHost IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}com.vmware.vc.host.HostIpConflictEvent|Host IP address conflict detected. 
{changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}ExtendedEventHost in Memory Mode and active DRAM usage is normalinfo{host.name} is in Memory Mode and its active DRAM usage is normal{host.name} is in Memory Mode and its active DRAM usage is normalThe host is in Memory Mode and its active DRAM usage is normalcom.vmware.vc.host.MemoryModeActiveDRAMGreen|Host {host.name} is in Memory Mode and its active DRAM usage is normalExtendedEventHost in Memory Mode and active DRAM usage is highwarningHost {host.name} is in Memory Mode and its active DRAM usage is highHost {host.name} is in Memory Mode and its active DRAM usage is highThe host is in Memory Mode and its active DRAM usage is highcom.vmware.vc.host.MemoryModeActiveDRAMYellow|Host {host.name} is in Memory Mode and its active DRAM usage is highExtendedEventNSX installation failed on host.errorNSX installation failed on host.NSX installation failed on host.NSX installation failed on host.com.vmware.vc.host.NsxInstallFailed|NSX installation failed on host.ExtendedEventNSX installation successful on host.infoNSX installation successful on host.NSX installation successful on host.NSX installation successful on host.com.vmware.vc.host.NsxInstallSuccess|NSX installation successful on host.ExtendedEventPartial maintenance mode status has changed.infoHost status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.com.vmware.vc.host.PartialMaintenanceModeStatusChanged|Host status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}com.vmware.vc.host.StatelessHPApplyEarlyBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}com.vmware.vc.host.StatelessHPApplyFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}com.vmware.vc.host.StatelessHPApplyPostBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. 
{error}EventExHost TPM attestation failederrorHost TPM attestation failed for host {host.name}: {1}Host TPM attestation failed for host {host.name}: {1}Host TPM attestation failed: {1}com.vmware.vc.host.TPMAttestationFailedEvent|Host TPM attestation failed for host {host.name} in datacenter {datacenter.name}: {1}ExtendedEventActive DRAM usage of the memory tiered host is normalinfoHost {host.name} is a memory tiered host and its active DRAM usage is normalHost {host.name} is a memory tiered host and its active DRAM usage is normalActive DRAM usage of the memory tiered host is normalcom.vmware.vc.host.TieringMemoryActiveDRAMGreen|Host {host.name} is a memory tiered host and its active DRAM usage is normalExtendedEventActive DRAM usage of the memory tiered host is highwarningHost {host.name} is a memory tiered host and its active DRAM usage is highHost {host.name} is a memory tiered host and its active DRAM usage is highActive DRAM usage of the memory tiered host is highcom.vmware.vc.host.TieringMemoryActiveDRAMYellow|Host {host.name} is a memory tiered host and its active DRAM usage is highExtendedEventNew TPM host endorsement key doesn't match the one in the DBerrorThe new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DBcom.vmware.vc.host.TpmEndorsementKeyMismatch|The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name} in datacenter {datacenter.name}ExtendedEventHost's virtual flash resource is accessible.infoHost's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.com.vmware.vc.host.clear.vFlashResource.inaccessible|Host's virtual flash resource is restored to be accessible.EventExHost's virtual flash resource usage dropped below the threshold.infoHost's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.com.vmware.vc.host.clear.vFlashResource.reachthreshold|Host's virtual flash resource usage dropped below {1}%.ExtendedEventDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.warningDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound|Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.ExtendedEventDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostswarningDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. 
Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostscom.vmware.vc.host.problem.DeprecatedVMFSVolumeFoundAfterVMFS3EOL|Deprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsExtendedEventImproved virtual disk infrastructure's catalog management turned unhealthywarningcom.vmware.vc.host.problem.VStorageObjectInfraCatalogUnhealthy|Improved virtual disk infrastructure's catalog management turned unhealthyExtendedEventImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.warningImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent|Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. <EventLongDescription id="com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent"> <description> Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. </description> <cause> <description> This is caused by creating improved virtual disk infrastructure namespaces with empty storage policy. </description> <action> Update infrastructure namespaces storage policy. </action> </cause> </EventLongDescription> ExtendedEventHost's virtual flash resource is inaccessible.warningHost's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.com.vmware.vc.host.problem.vFlashResource.inaccessible|Host's virtual flash resource is inaccessible. <EventLongDescription id="com.vmware.vc.host.problem.vFlashResource.inaccessible"> <description> Inaccessible host virtual flash resource indicates that its backing VFFS volume is inaccessible. Due to inaccessible host virtual flash resource, virtual machines with vSphere Flash Read Cache configured cannot be powered on or might experience unpredicted behavior if powered on. </description> <cause> <description> This might be caused by an unmounted VFFS volume or an APD/PDL on the VFFS volume. </description> <action> Check the backing VFFS volume connection status. For example, mount the unmounted volume or resolve the APD/PDL issues. The host virtual flash resource is accessible as long as the backing VFFS volume is accessible. 
</action> </cause> </EventLongDescription> EventExHost's virtual flash resource usage exceeds the threshold.warningHost's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.com.vmware.vc.host.problem.vFlashResource.reachthreshold|Host's virtual flash resource usage is more than {1}%.ExtendedEventVirtual flash resource is configured on the hostinfoVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostcom.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent|Virtual flash resource is configured on the hostExtendedEventVirtual flash resource is removed from the hostinfoVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostcom.vmware.vc.host.vFlash.VFlashResourceRemovedEvent|Virtual flash resource is removed from the hostEventExDefault virtual flash module is changed to {vFlashModule} on the hostinfoDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostcom.vmware.vc.host.vFlash.defaultModuleChangedEvent|Any new virtual Flash Read Cache configuration request will use {vFlashModule} as default virtual flash module. All existing virtual Flash Read Cache configurations remain unchanged. <EventLongDescription id="com.vmware.vc.host.vFlash.defaultModuleChangedEvent"> <description> The default virtual flash module has been changed. Any new virtual Flash Read Cache configuration uses the new default virtual flash module if undefined in configuration. All existing configurations will remain unchanged. </description> </EventLongDescription> ExtendedEventVirtual flash modules are loaded or reloaded on the hostinfoVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostcom.vmware.vc.host.vFlash.modulesLoadedEvent|Virtual flash modules are loaded or reloaded on the hostEventExEntity became healthyinfo{entityName} became healthycom.vmware.vc.infraUpdateHa.GreenHealthEvent|{entityName} became healthyEventExProvider has posted invalid health updateswarningProvider {providerName} has posted invalid health updatesProvider {providerName} has posted invalid health updatescom.vmware.vc.infraUpdateHa.InvalidUpdatesEvent|Provider {providerName} has posted invalid health updatesEventExProvider reported a healthy statusinfo{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}com.vmware.vc.infraUpdateHa.PostGreenHealthUpdateEvent|{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}EventExProvider reported a severely degraded statuswarning{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostRedHealthUpdateEvent|{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. 
Remediation suggested by {providerName}: {remediation}EventExProvider reported a moderately degraded statuswarning{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostYellowHealthUpdateEvent|{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}EventExEntity has entered quarantine modewarning{entityName} has entered quarantine modecom.vmware.vc.infraUpdateHa.QuarantineEvent|{entityName} has entered quarantine modeEventExEntity has exited quarantine modeinfo{entityName} has exited quarantine modecom.vmware.vc.infraUpdateHa.QuarantineRemovedEvent|{entityName} has exited quarantine modeEventExEntity became severely degradedwarning{entityName} became severely degradedcom.vmware.vc.infraUpdateHa.RedHealthEvent|{entityName} became severely degradedEventExProvider has stale updateswarningProvider {providerName} has not posted an update in {timeout} secondsProvider {providerName} has not posted an update in {timeout} secondscom.vmware.vc.infraUpdateHa.StaleUpdatesEvent|Provider {providerName} has not posted an update in {timeout} secondsEventExEntity has unknown health statewarning{entityName} has unknown health statecom.vmware.vc.infraUpdateHa.UnknownHealthEvent|{entityName} has unknown health stateEventExEntity became moderately degradedwarning{entityName} became moderately degradedcom.vmware.vc.infraUpdateHa.YellowHealthEvent|{entityName} became moderately degradedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters has failederrorvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterInstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterInstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters has failederrorvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUninstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster 
{computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} are successfulcom.vmware.vc.iofilter.FilterUninstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters has failederrorvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} and in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUpgradeFailedEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters is successfulinfovSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterUpgradeSuccessEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has succeededEventExvSphere APIs for I/O Filters (VAIO) host vendor provider registration has failed.errorvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderRegistrationFailedEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully registeredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredcom.vmware.vc.iofilter.HostVendorProviderRegistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredEventExFailed to unregister vSphere APIs for I/O Filters (VAIO) host vendor provider.errorFailed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderUnregistrationFailedEvent|Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. 
Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully unregisteredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredcom.vmware.vc.iofilter.HostVendorProviderUnregistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredExtendedEventIoFilterManager API invoked with untrusted certificate SSL trust policywarningIoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name}com.vmware.vc.iofilter.UntrustedCertificateEvent|IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventKey providers are backed up.infocom.vmware.vc.kms.crypto.AllBackedUp|All key providers are backed up.EventExKey creation failed on key provider.errorcom.vmware.vc.kms.crypto.KeyGenerateFail|Key creation failed on key provider {clusterName} with error code {errorCode}. Check log for details.EventExKey provider(s) are not backed up.errorcom.vmware.vc.kms.crypto.NotBackedUp|Key provider(s) {providerIds} are not backed up.EventExKey provider backup is suggested after it is updated.warningcom.vmware.vc.kms.crypto.NotBackedUpAfterUpdate|Key provider(s) {providerIds} are not backed up. Backup is suggested after updating a provider.EventExFailed to send keys because of key provider error.errorcom.vmware.vc.kms.crypto.SendKeyError.KMSClusterError|Failed to send keys {keys} because of KMS connection error.EventExFailed to send keys because keys are missing on key provider.errorcom.vmware.vc.kms.crypto.SendKeyError.KeyMissingOnKMS|Failed to send keys {keys} because of keys missing on key provider.EventExThe Trusted Key Provider is not available.warningcom.vmware.vc.kms.crypto.TrustAuthority.ClusterNotAvailable|The Trusted Key Provider {keyProviderId} is not available.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.ClusterUnhealthy|The Trusted Key Provider {keyProviderId} is unhealthy. Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.KmsUnhealthy|The key server {serverName} in the Trusted Key Provider {keyProviderId} is unhealthy. 
Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExKey Management Server is unreachableerrorcom.vmware.vc.kms.crypto.Unreachable|Key Management Server {serverName}({address}) is unreachableEventExRetrieved Key Management Server vendor information.infocom.vmware.vc.kms.crypto.Vendor|Key Management Server {serverName}({address}) vendor: {vendor}EventExVirtual NIC entered passthrough modeinfoNetwork passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is active on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent|Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExVirtual NIC exited passthrough modeinfoNetwork passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is inactive on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterExitedPassthroughEvent|Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExFailed to clone state for entity on extensionerrorFailed to clone state on extension {extensionName}com.vmware.vc.ovfconsumers.CloneOvfConsumerStateErrorEvent|Failed to clone state for the entity '{entityName}' on extension {extensionName}EventExFailed to retrieve OVF environment sections for VM on extensionerrorFailed to retrieve OVF environment sections from extension {extensionName}com.vmware.vc.ovfconsumers.GetOvfEnvironmentSectionsErrorEvent|Failed to retrieve OVF environment sections for VM '{vm.name}' from extension {extensionName}EventExUnable to power on VM after cloningerrorPowering on after cloning was blocked by an extension. Message: {description}com.vmware.vc.ovfconsumers.PowerOnAfterCloneErrorEvent|Powering on VM '{vm.name}' after cloning was blocked by an extension. 
Message: {description}EventExFailed to register entity on extensionerrorcom.vmware.vc.ovfconsumers.RegisterEntityErrorEvent|Failed to register entity '{entityName}' on extension {extensionName}EventExFailed to unregister entities on extensionerrorcom.vmware.vc.ovfconsumers.UnregisterEntitiesErrorEvent|Failed to unregister entities on extension {extensionName}EventExFailed to validate OVF descriptor on extensionerrorcom.vmware.vc.ovfconsumers.ValidateOstErrorEvent|Failed to validate OVF descriptor on extension {extensionName}ExtendedEventAnswer file exportedinfoAnswer file for host {host.name} has been exportedAnswer file for host {host.name} has been exportedAnswer file exportedcom.vmware.vc.profile.AnswerFileExportedEvent|Answer file for host {host.name} in datacenter {datacenter.name} has been exportedExtendedEventHost customization settings updatedinfoHost customization settings for host {host.name} has been updatedHost customization settings for host {host.name} has been updatedHost customization settings updatedcom.vmware.vc.profile.AnswerFileUpdatedEvent|Host customization settings for host {host.name} in datacenter {datacenter.name} has been updatedEventExResource pool renamedinfoResource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'com.vmware.vc.rp.ResourcePoolRenamedEvent|Resource pool '{oldName}' has been renamed to '{newName}'ExtendedEventDatastore maintenance mode operation canceledinfoThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledcom.vmware.vc.sdrs.CanceledDatastoreMaintenanceModeEvent|The datastore maintenance mode operation has been canceledExtendedEventDatastore cluster is healthyinfoDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthycom.vmware.vc.sdrs.ClearDatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} is healthyExtendedEventConfigured storage DRSinfoConfigured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.ConfiguredStorageDrsOnPodEvent|Configured storage DRS on datastore cluster {objectName}ExtendedEventDatastore cluster has datastores that belong to different SRM Consistency GroupswarningDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency Groupscom.vmware.vc.sdrs.ConsistencyGroupViolationEvent|Datastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsExtendedEventDatastore entered maintenance modeinfoDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modecom.vmware.vc.sdrs.DatastoreEnteredMaintenanceModeEvent|Datastore {ds.name} 
has entered maintenance modeExtendedEventDatastore is entering maintenance modeinfoDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modecom.vmware.vc.sdrs.DatastoreEnteringMaintenanceModeEvent|Datastore {ds.name} is entering maintenance modeExtendedEventDatastore exited maintenance modeinfoDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modecom.vmware.vc.sdrs.DatastoreExitedMaintenanceModeEvent|Datastore {ds.name} has exited maintenance modeEventExDatastore cluster has datastores shared across multiple datacenterswarningDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacenterscom.vmware.vc.sdrs.DatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersExtendedEventErrors encountered while datastore entering into maintenance modeerrorDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modecom.vmware.vc.sdrs.DatastoreMaintenanceModeErrorsEvent|Datastore {ds.name} encountered errors while entering maintenance modeExtendedEventStorage DRS disabledinfoDisabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsDisabledEvent|Disabled storage DRS on datastore cluster {objectName}EventExStorage DRS enabledinfoEnabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}com.vmware.vc.sdrs.StorageDrsEnabledEvent|Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}ExtendedEventStorage DRS invocation failederrorStorage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsInvocationFailedEvent|Storage DRS invocation failed on datastore cluster {objectName}ExtendedEventNew storage DRS recommendation generatedinfoA new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster 
{objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsNewRecommendationPendingEvent|A new storage DRS recommendation has been generated on datastore cluster {objectName}EventExDatastore cluster connected to host(s) that do not support storage DRSwarningDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRScom.vmware.vc.sdrs.StorageDrsNotSupportedHostConnectedToPodEvent|Datastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSExtendedEventPending storage recommendations were appliedinfoAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedcom.vmware.vc.sdrs.StorageDrsRecommendationApplied|All pending recommendations on datastore cluster {objectName} were appliedEventExStorage DRS migrated VM disksinfoStorage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStorageMigrationEvent|Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}EventExStorage DRS placed VM disksinfoStorage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStoragePlacementEvent|Storage DRS placed disks of VM {vm.name} on datastore {ds.name}EventExDatastore cluster createdinfoCreated datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodCreatedEvent|Created datastore cluster {objectName}EventExDatastore cluster deletedinfoRemoved datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodDestroyedEvent|Removed datastore cluster {objectName}EventExSIOC: pre-4.1 host connected to SIOC-enabled datastorewarningSIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. 
This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.com.vmware.vc.sioc.NotSupportedHostConnectedToDatastoreEvent|SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.ExtendedEventESXi VASA client certificate provision has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateProvisionFailure|ESXi VASA client certificate provision has failedExtendedEventESXi VASA client certificate provision has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateProvisionSuccess|ESXi VASA client certificate provision has succeededExtendedEventESXi VASA client certificate register to some/all VP(s) has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateRegisterFailure|ESXi VASA client certificate register to some/all VP(s) has failedExtendedEventESXi VASA client certificate register to VP(s) has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateRegisterSuccess|ESXi VASA client certificate register to VP(s) has succeededEventExSystem capability warning from storage providerwarningcom.vmware.vc.sms.LunCapabilityInitEvent|Storage provider [{providerName}] : system capability warning for {eventSubjectId} : {msgTxt}EventExSystem capability normal event from storage providerinfocom.vmware.vc.sms.LunCapabilityMetEvent|Storage provider [{providerName}] : system capability normal for {eventSubjectId}EventExSystem capability alert from storage providererrorcom.vmware.vc.sms.LunCapabilityNotMetEvent|Storage provider [{providerName}] : system capability alert for {eventSubjectId} : {msgTxt}EventExA Storage Alarm of type 'Object' cleared by the VASA providerinfocom.vmware.vc.sms.ObjectTypeAlarmClearedEvent|Storage provider [{providerName}] cleared a Storage Alarm of type 'Object' on {eventSubjectId} : {msgTxt}EventExAn alert on an object raised by the VASA providererrorcom.vmware.vc.sms.ObjectTypeAlarmErrorEvent|Storage provider [{providerName}] raised an alert type 'Object' on {eventSubjectId} : {msgTxt}EventExA warning on an object raised by the VASA providerwarningcom.vmware.vc.sms.ObjectTypeAlarmWarningEvent|Storage provider [{providerName}] raised a warning of type 'Object' on {eventSubjectId} : {msgTxt}EventExRegistering renewed VC Client Certificate failed for the VASA provider.errorcom.vmware.vc.sms.RegisterVcClientCertOnRenewalFailure|Registering renewed VC Client Certificate failed for VASA provider with url : {provider}.ExtendedEventRegistering renewed VC Client Certificate succeeded for all the VASA providers.infocom.vmware.vc.sms.RegisterVcClientCertOnRenewalSuccess|Registering renewed VC Client Certificate succeeded for all the VASA providers.EventExThin provisioning capacity threshold normal event from storage providerinfocom.vmware.vc.sms.ThinProvisionedLunThresholdClearedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold normal for {eventSubjectId}EventExThin provisioning capacity threshold alert from storage providererrorcom.vmware.vc.sms.ThinProvisionedLunThresholdCrossedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold alert for {eventSubjectId}EventExThin provisioning capacity threshold warning from storage providerwarningcom.vmware.vc.sms.ThinProvisionedLunThresholdInitEvent|Storage provider [{providerName}] : thin 
provisioning capacity threshold warning for {eventSubjectId}EventExStorage provider certificate will expire very shortlyerrorcom.vmware.vc.sms.VasaProviderCertificateHardLimitReachedEvent|Certificate for storage provider {providerName} will expire very shortly. Expiration date : {expiryDate}EventExVASA Provider certificate is renewedinfocom.vmware.vc.sms.VasaProviderCertificateRenewalEvent|VASA Provider certificate for {providerName} is renewedEventExStorage provider certificate will expire soonwarningcom.vmware.vc.sms.VasaProviderCertificateSoftLimitReachedEvent|Certificate for storage provider {providerName} will expire soon. Expiration date : {expiryDate}EventExStorage provider certificate is validinfocom.vmware.vc.sms.VasaProviderCertificateValidEvent|Certificate for storage provider {providerName} is validEventExStorage provider is connectedinfocom.vmware.vc.sms.VasaProviderConnectedEvent|Storage provider {providerName} is connectedEventExStorage provider is disconnectederrorcom.vmware.vc.sms.VasaProviderDisconnectedEvent|Storage provider {providerName} is disconnectedEventExRefreshing CA certificates and CRLs failed for some VASA providerserrorcom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsFailure|Refreshing CA certificates and CRLs failed for VASA providers with url : {providerUrls}ExtendedEventRefreshing CA certificates and CRLs succeeded for all registered VASA providers.infocom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsSuccess|Refreshing CA certificates and CRLs succeeded for all registered VASA providers.EventExOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate failed for the VASA 5.0 or greater VASA providers.errorcom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationFailure|On VMCA Root Certificate rotation, register and refresh certificates failed for VASA 5.0 or greater VASA provider : {provider}ExtendedEventOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate succeeded for all the VASA 5.0 or greater VASA providers.infocom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationSuccess|On VMCA Root Certificate rotation, register and refresh certificates succeeded for all the VASA 5.0 or greater VASA providers.EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vmName} connected to {datastore.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vmName} connected to datastore {datastore.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vmName} connected to {datastore.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vmName} connected to {datastore.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in 
{datacenter.name} compliance status is unknown from storage provider {providerName}.EventExHealth event from storage providerinfocom.vmware.vc.sms.provider.health.event|Storage provider [{providerName}] : health event for {eventSubjectId} : {msgTxt}EventExSystem event from storage providerinfocom.vmware.vc.sms.provider.system.event|Storage provider [{providerName}] : system event : {msgTxt}EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant {operationalStatus] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} compliance status is unknown from storage provider {providerName}.EventExProfile association/dissociation failederrorProfile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}com.vmware.vc.spbm.ProfileAssociationFailedEvent|Profile association/dissociation failed for {entityName}EventExConfiguring storage policy failed.errorConfiguring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}com.vmware.vc.spbm.ServiceErrorEvent|Configuring storage policy failed for VM {entityName}. 
Verify that SPBM service is healthy. Fault Reason : {errorMessage}ExtendedEventQuick stats is not up-to-dateinfoQuick stats on {host.name} in {computeResource.name} is not up-to-dateQuick stats on {host.name} is not up-to-dateQuick stats on {host.name} is not up-to-datecom.vmware.vc.stats.HostQuickStatesNotUpToDateEvent|Quick stats on {host.name} in {computeResource.name} in {datacenter.name} is not up-to-date <EventLongDescription id="com.vmware.vc.stats.HostQuickStatesNotUpToDateEvent"> <description> Quick stats on the host is not up-to-date. </description> <cause> <description> Quickstats on the host are not up-to-date. This is expected if the host was recently added or reconnected or VC just started up. </description> <action> No specific action needs to be taken. </action> </cause> </EventLongDescription> EventExODBC errorerrorcom.vmware.vc.stats.StatsInsertErrorEvent|Stats insertion failed for entity {entity} due to ODBC error. <EventLongDescription id="com.vmware.vc.stats.StatsInsertErrorEvent"> <description> If a set of performance statistics data insertion fails due to database related issues, this event is logged. </description> <cause> <description>Usually an attempt to insert duplicate entries causes this event</description> <action>Usually it is transient and self-healing. If not then probably the database contains rogue entries. Manually deleting the data for the particular stat provider might fix the issue</action> </cause> </EventLongDescription> EventExRoot user password expired.errorcom.vmware.vc.system.RootPasswordExpiredEvent|Root user password has expired. Log in to https://{pnid}:5480 to update the root password.EventExRoot user password is about to expire.warningcom.vmware.vc.system.RootPasswordExpiryEvent|Root user password expires in {days} days. 
Log in to https://{pnid}:5480 to update the root password.ExtendedEventFT Disabled VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection will protect this virtual machine as non-FT virtual machine because the FT state is disabledcom.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because the FT state is disabledExtendedEventFailover FT VM due to component failureinfoFT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} on host {host.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} is going to fail over to Secondary VM due to component failureFT Primary VM is going to fail over to Secondary VM due to component failurecom.vmware.vc.vcp.FtFailoverEvent|FT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is going to fail over to Secondary VM due to component failure ExtendedEventFT VM failover failederrorFT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} failed to failover to secondaryFT virtual machine {vm.name} on host {host.name} failed to failover to secondaryFT virtual machine {vm.name} failed to failover to secondaryFT virtual machine failed to failover to secondarycom.vmware.vc.vcp.FtFailoverFailedEvent|FT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to failover to secondaryExtendedEventRestarting FT secondary due to component failureinfoHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine due to component failurecom.vmware.vc.vcp.FtSecondaryRestartEvent|HA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to component failureExtendedEventFT secondary VM restart failederrorFT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} failed to restartFT Secondary VM {vm.name} on host {host.name} failed to restartFT Secondary VM {vm.name} failed to restartFT Secondary VM failed to restartcom.vmware.vc.vcp.FtSecondaryRestartFailedEvent|FT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartExtendedEventNeed secondary VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine since it has been in the needSecondary 
state too longHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects this virtual machine as non-FT virtual machine because it has been in the needSecondary state too longcom.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because it has been in the needSecondary state too longEventExVM Component Protection test endsinfoVM Component Protection test ends on host {host.name} in cluster {computeResource.name}VM Component Protection test ends on host {host.name}VM Component Protection test endscom.vmware.vc.vcp.TestEndEvent|VM Component Protection test ends on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}EventExVM Component Protection test startsinfoVM Component Protection test starts on host {host.name} in cluster {computeResource.name}VM Component Protection test starts on host {host.name}VM Component Protection test startscom.vmware.vc.vcp.TestStartEvent|VM Component Protection test starts on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventNo action on VMinfoHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} due to the feature configuration settingHA VM Component Protection did not take action due to the feature configuration settingcom.vmware.vc.vcp.VcpNoActionEvent|HA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to the feature configuration settingEventExVirtual machine lost datastore accesserrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {datastore}Virtual machine {vm.name} on host {host.name} lost access to {datastore}Virtual machine {vm.name} lost access to {datastore}Virtual machine lost access to {datastore}com.vmware.vc.vcp.VmDatastoreFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {datastore}EventExVirtual machine lost VM network accessibilityerrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {network}Virtual machine {vm.name} on host {host.name} lost access to {network}Virtual machine {vm.name} lost access to {network}Virtual machine lost access to {network}com.vmware.vc.vcp.VmNetworkFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {network}EventExVM power off hangerrorHA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} on 
host {host.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine successfully after trying {numTimes} times and will keep tryingcom.vmware.vc.vcp.VmPowerOffHangEvent|HA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} successfully after trying {numTimes} times and will keep tryingExtendedEventRestarting VM due to component failureinfoHA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failureHA VM Component Protection is restarting virtual machine due to component failurecom.vmware.vc.vcp.VmRestartEvent|HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventVirtual machine affected by component failure failed to restarterrorVirtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} failed to restartVirtual machine {vm.name} affected by component failure on host {host.name} failed to restartVirtual machine {vm.name} affected by component failure failed to restartVirtual machine affected by component failure failed to restartcom.vmware.vc.vcp.VmRestartFailedEvent|Virtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartEventExNo candidate host to restarterrorHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for this virtual machine after waiting {numSecWait} seconds and will keep tryingcom.vmware.vc.vcp.VmWaitForCandidateHostEvent|HA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} after waiting {numSecWait} seconds and will keep tryingEventExCertificate will expire soon.warningcom.vmware.vc.vecs.CertExpirationEvent|Certificate '{subject}' from '{store}' expires on {expiryDate}EventExKMS Client Certificate will expire soon.warningcom.vmware.vc.vecs.KMSClientCertExpirationEvent|KMS Client Certificate '{subject}' expires on {expiryDate}EventExKMS Server Certificate will expire soon.warningcom.vmware.vc.vecs.KMSServerCertExpirationEvent|KMS Server Certificate '{subject}' expires on {expiryDate}EventExOperation on the SSD device failederrorConfiguration on disk {disk.path} failed. 
Reason : {fault.msg}com.vmware.vc.vflash.SsdConfigurationFailedEvent|Configuration on disk {disk.path} failed. Reason : {fault.msg}EventExVirtual machine is locked because an error occurred on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KMSClusterError|Virtual machine {vmName} is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the host.errorVirtual machine is locked because keys were missing on the host {host}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnHost|Virtual machine {vmName} is locked because keys were missing on the host {host}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnKMS|Virtual machine {vmName} is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because the required Trusted Key Provider(s) is unavailable.errorVirtual machine is locked. Before unlocking, check the status of Trusted Key Provider(s) {kmsCluster} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).com.vmware.vc.vm.Crypto.VMLocked.TAKMSClusterUnavaliable|Virtual machine {vmName} is locked. 
Before unlocking, check the status of Trusted Key Provider(s) {keyProviderId} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).EventExVirtual machine is locked because Trust Authority managed key(s) are missing on the required host.errorVirtual machine is locked because Trust Authority managed key(s) are missing on host {host}.com.vmware.vc.vm.Crypto.VMLocked.TAKeyMissingOnHost|Virtual machine {vmName} is locked because Trust Authority managed key(s) {missedkeys} are missing on the required host {host}.EventExVirtual machine is unlocked.infoVirtual machine is unlocked.com.vmware.vc.vm.Crypto.VMUnlocked|Virtual machine {vmName} is unlocked.EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}com.vmware.vc.vm.DstVmClonedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was cloned from {oldMoRef}EventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}com.vmware.vc.vm.DstVmMigratedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was migrated from {oldMoRef}ExtendedEventVirtual machine PMem bandwidth usage is normalinfoVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalThe virtual machine's PMem bandwidth usage is normalcom.vmware.vc.vm.PMemBandwidthGreen|Virtual machine {vm.name}'s PMem bandwidth usage is normalExtendedEventVirtual machine PMem bandwidth usage is highwarningVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highThe virtual machine's PMem bandwidth usage is highcom.vmware.vc.vm.PMemBandwidthYellow|Virtual machine {vm.name}'s PMem bandwidth usage is highExtendedEventVirtual machine failed to power on after cloning.errorVirtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after performing cloning operation on this host.Virtual machine failed to power on after cloning.com.vmware.vc.vm.PowerOnAfterCloneErrorEvent|Virtual machine {vm.name} failed to power on after cloning on host {host.name} in datacenter {datacenter.name}EventExVirtual machine clone failederrorVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}"Virtual machine on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in 
{destDatacenter.name}com.vmware.vc.vm.SrcVmCloneFailedEvent|Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {oldMoRef} in {computeResource.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} on host {host.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}com.vmware.vc.vm.SrcVmClonedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} was cloned to {newMoRef}ExtendedEventVirtual machine failed to create instant clone childerrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childcom.vmware.vc.vm.SrcVmForkFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to create instant clone childEventExVirtual machine migration failederrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to migrateVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migratecom.vmware.vc.vm.SrcVmMigrateFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to migrateEventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} on {host.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}com.vmware.vc.vm.SrcVmMigratedEvent|Virtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} in {datacenter.name} was migrated to {newMoRef}ExtendedEventTemplate converted to VMinfoTemplate {vm.name} converted to VM on {host.name}Template {vm.name} converted to VM on {host.name}Template {vm.name} converted to VMConverted to VM on {host.name}com.vmware.vc.vm.TemplateConvertedToVmEvent|Template {vm.name} converted to VM on {host.name} in {datacenter.name}ExtendedEventVirtual machine tier 1 bandwidth usage is normalinfoVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalThe virtual machine's tier 1 bandwidth usage is normalcom.vmware.vc.vm.Tier1BandwidthGreen|Virtual machine {vm.name}'s tier 1 bandwidth usage is normalExtendedEventVirtual machine tier 1 bandwidth usage is highwarningVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highThe virtual machine's tier 1 bandwidth usage is highcom.vmware.vc.vm.Tier1BandwidthYellow|Virtual machine {vm.name}'s tier 1 bandwidth usage is highExtendedEventThe network adapter of VM successfully activate UPTinfoUPT on network adapter is activatedcom.vmware.vc.vm.Uptv2Active|The UPT is successfully activated on the network adapterEventExThe network adapter of VM fails to 
activate UPTwarningUPT on network adapter is not activatedcom.vmware.vc.vm.Uptv2Inactive|The UPT failed to activate on the network adapter.{details}EventExVirtual NIC reservation is not satisfiederrorReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is not satisfiedReservation of Virtual NIC {deviceLabel} is not satisfiedcom.vmware.vc.vm.VmAdapterResvNotSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is not satisfiedEventExVirtual NIC reservation is satisfiedinfoReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is satisfiedReservation of Virtual NIC {deviceLabel} is satisfiedcom.vmware.vc.vm.VmAdapterResvSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is satisfiedExtendedEventVM marked as templateinfoVM {vm.name} marked as template on {host.name}VM {vm.name} marked as template on {host.name}VM {vm.name} marked as templateMarked as template on {host.name}com.vmware.vc.vm.VmConvertedToTemplateEvent|VM {vm.name} marked as template on {host.name} in {datacenter.name}ExtendedEventPromoted disks of virtual machine successfullyinfoPromoted disks of virtual machine {vm.name} in {computeResource.name}Promoted disks of virtual machine {vm.name} on host {host.name}Promoted disks of virtual machine {vm.name}Promoted disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotedEvent|Promoted disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventPromoting disks of virtual machineinfoPromoting disks of virtual machine {vm.name} in {computeResource.name}Promoting disks of virtual machine {vm.name} on host {host.name}Promoting disks of virtual machine {vm.name}Promoting disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotingEvent|Promoting disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}EventExHot migrating virtual machine with encryptioninfoHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating from {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptioncom.vmware.vc.vm.VmHotMigratingWithEncryptionEvent|Hot migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost}, {destDatastore} in {destDatacenter} with encryptionEventExcom.vmware.vc.vm.VmMigratingWithEncryptionEvent|ExtendedEventFailed to promote disks of virtual machineinfoFailed to promote disks of virtual machine {vm.name} in {computeResource.name}Failed to promote disks of virtual machine {vm.name} on host {host.name}Failed to promote disks of virtual machine {vm.name}Failed to promote disks of virtual machine {vm.name}com.vmware.vc.vm.VmPromoteDisksFailedEvent|Failed to promote disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventReconfigure VM failed 
for {VM} on shared diskwarningReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskcom.vmware.vc.vm.VmReconfigureFailedonSharedDiskEvent|Reconfigure VM failed for {VM} on shared diskExtendedEventVirtual machine register failederrorVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on this host failedVirtual machine registration failedcom.vmware.vc.vm.VmRegisterFailedEvent|Virtual machine {vm.name} registration on {host.name} in datacenter {datacenter.name} failedEventExFailed to revert the virtual machine state to a snapshoterrorFailed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} on host {host.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine to snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateFailedToRevertToSnapshot|Failed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}EventExThe virtual machine state has been reverted to a snapshotinfoThe execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} on host {host.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateRevertedToSnapshot|The execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}EventExFault Tolerance virtual machine syncing to secondary with encryptioninfoFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM syncing to secondary on {dstHost} with encryptioncom.vmware.vc.vm.VmSyncingWithEncryptionEvent|Fault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionExtendedEventVirtual machine termination requestedinfoVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine termination requestedcom.vmware.vc.vm.VmTerminateEvent|Virtual machine {vm.name} termination requestedExtendedEventVirtual machine termination failederrorVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine termination failedcom.vmware.vc.vm.VmTerminateFailedEvent|Virtual 
machine {vm.name} termination failedEventExThe disk device is encrypted with mixed keys.warningThe disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.com.vmware.vc.vm.crypto.DiskchainUsingMixedKeys|The disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.EventExCryptographic operation failed due to insufficient disk space on datastoreerrorCryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.com.vmware.vc.vm.crypto.NoDiskSpace|Cryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.EventExcom.vmware.vc.vm.crypto.RekeyFail|ExtendedEventApplication Monitoring Is Not SupportedwarningApplication monitoring is not supported on {host.name} in cluster {computeResource.name}Application monitoring is not supported on {host.name}Application monitoring is not supportedcom.vmware.vc.vmam.AppMonitoringNotSupported|Application monitoring is not supported on {host.name} in cluster {computeResource.name} in {datacenter.name}EventExvSphere HA detected application heartbeat status changewarningvSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent|vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent"> <description> Application monitoring state changes indicate a change in the health of the application being monitored or in the application-monitoring process. A transition from gray to green occurs when application heartbeat is being enabled from within the guest. A transition to red occurs after vSphere HA didn't receive any heartbeats within 30 seconds. A transition from red to green occurs if heartbeats begin again before vSphere HA can react. A transition to gray occurs after application heartbeating is disabled from within the guest. </description> <cause> <description> Either the user initiated action from inside the guest or vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> If the state transitions to red, investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. Frequent state transitions to or from gray may indicate a problem with the application-monitoring agent. If they occur, investigate whether the enabling/disabling of monitoring is expected. 
</action> </cause> </EventLongDescription> EventExvSphere HA detected application state changewarningvSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthStateChangedEvent|vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthStateChangedEvent"> <description> Application state changes indicate that an in-guest application has posted one of the two allowed values - appStateOk or appStateNeedReset. The former indicates that the monitored application is fine, the latter causes an immediate reset if Application Monitoring is enabled for this virtual machine. </description> <cause> <description> This is an in-guest initiated action. </description> <action> If vSphere HA and Application Monitoring are enabled for this virtual machine, it is reset if the state is appStateNeedReset. If the virtual machine is being migrated using vMotion the reset will be delayed until the virtual machine has reached its destination. Also, the reset will be delayed until the datastore connectivity issues are resolved. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected application heartbeat failurewarningvSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected application heartbeat failure for {vm.name} on {host.name}vSphere HA detected application heartbeat failure for {vm.name}vSphere HA detected application heartbeat failure for this virtual machinecom.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent|vSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent"> <description> vSphere HA has detected a heartbeat failure from the application-monitoring agent inside the guest. If application monitoring is enabled in vSphere the virtual machine will be reset. </description> <cause> <description> vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> Investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. </action> </cause> </EventLongDescription> EventExvCenter server replication status has changed.infocom.vmware.vc.vmdir.ReplicationStatusChangeEvent|vCenter Server Replication Status : {replicationStatus} . 
{message}EventExvCenter server replication state has changedinfocom.vmware.vc.vmdir.StateChangeEvent|vCenter Server Replication State changed to '{newState}' from '{oldState}' cause: {reason}EventExvSAN datastore {datastoreName} does not have capacityerrorvSAN datastore {datastoreName} in cluster {computeResource.name} does not have capacityvSAN datastore {datastoreName} does not have capacitycom.vmware.vc.vsan.DatastoreNoCapacityEvent|vSAN datastore {datastoreName} in cluster {computeResource.name} in datacenter {datacenter.name} does not have capacity <EventLongDescription id="com.vmware.vc.vsan.DatastoreNoCapacityEvent"> <description> vSAN datastore does not have capacity. </description> <cause> <description> This might be because no disk is configured for vSAN, local disks configured for vSAN service become inaccessible or flash disks configured for vSAN service become inaccessible. </description> <action> Check if vSAN storage configuration is correct and if the local disks and flash disks configured for vSAN service are accessible. </action> </cause> </EventLongDescription> EventExHost cannot communicate with one or more other nodes in the vSAN enabled clustererrorHost {host.name} in cluster {computeResource.name} cannot communicate with all other nodes in the vSAN enabled clusterHost {host.name} cannot communicate with all other nodes in the vSAN enabled clusterHost cannot communicate with one or more other nodes in the vSAN enabled clustercom.vmware.vc.vsan.HostCommunicationErrorEvent|Host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} cannot communicate with all other nodes in the vSAN enabled cluster <EventLongDescription id="com.vmware.vc.vsan.HostCommunicationErrorEvent"> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. </description> <cause> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. This might be caused by network partition or misconfiguration. Each host needs at least one vmnic with vSAN enabled. Those vmnics need to be on the same physical network. The host should have the vSAN service enabled. </description> <action> Check the host for vSAN service configuration, vSAN network configuration and network connection. </action> </cause> </EventLongDescription> ExtendedEventHost with vSAN service enabled is not in the vCenter clustererror{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name}{host.name} with vSAN service enabled is not in the vCenter clusterHost with vSAN service enabled is not in the vCenter clustercom.vmware.vc.vsan.HostNotInClusterEvent|{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.HostNotInClusterEvent"> <description> Host with the vSAN service enabled is not in the vCenter cluster. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN service is not enabled with the recommended interface. </description> <action> Add the host into the cluster or disable vSAN on the host. </action> </cause> </EventLongDescription> ExtendedEventHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. 
Please check recent tasks for more detailserror{host.name} is in a vSAN cluster {computeResource.name} but does not have vSAN service enabled{host.name} is in a vSAN cluster but does not have vSAN service enabledHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. Please check recent tasks for more detailscom.vmware.vc.vsan.HostNotInVsanClusterEvent|{host.name} is in a vSAN enabled cluster {computeResource.name} in datacenter {datacenter.name} but does not have vSAN service enabled <EventLongDescription id="com.vmware.vc.vsan.HostNotInVsanClusterEvent"> <description> Host is in a vSAN enabled cluster but does not have vSAN service enabled. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN is not enabled with the recommended interface or the vSAN configuration is not set up appropriately. </description> <action> Re-enable vSAN or check the vSAN configuration. </action> </cause> </EventLongDescription> EventExvSAN host vendor provider registration has failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent"> <description> Cannot deregister host vendor provider in Storage management service </description> <cause> <description>Host vendor provider deregistration failed</description> <action>Check if Storage management service is running</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider has been successfully unregisteredinfovSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredcom.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully unregistered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent"> <description> Deregistered host vendor provider from Storage management service </description> </EventLongDescription> EventExvSAN host vendor provider registration failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. 
<EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent"> <description> Cannot register host vendor provider in Storage management service </description> <cause> <description>Host vendor provider registration failed</description> <action>Check if Storage management service is running</action> <action>Check if the vendor provider on host is running</action> <action>Check if there are network connectivity issues between host and VC</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider registration succeededinfovSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredcom.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully registered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent"> <description> Registered host vendor provider in Storage management service </description> </EventLongDescription> ExtendedEventvSAN network is not configurederrorvSAN network is not configured on {host.name} in cluster {computeResource.name}vSAN network is not configured on {host.name}vSAN network is not configuredcom.vmware.vc.vsan.NetworkMisConfiguredEvent|vSAN network is not configured on {host.name}, in cluster {computeResource.name}, and in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.NetworkMisConfiguredEvent"> <description> vSAN network is not configured. </description> <cause> <description> vSAN network is not set up appropriately. vSAN datastore will not be formed as expected. </description> <action> Create at least one vmnic with vSAN enabled on the host. </action> </cause> </EventLongDescription> EventExFound another host participating in the vSAN service which is not a member of this host's vCenter clustererrorFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter cluster {computeResource.name}Found host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clusterFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clustercom.vmware.vc.vsan.RogueHostFoundEvent|Found host(s) {hostString} participating in the vSAN service in cluster {computeResource.name} in datacenter {datacenter.name} is not a member of this host's vCenter cluster <EventLongDescription id="com.vmware.vc.vsan.RogueHostFoundEvent"> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. </description> <cause> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. This might be caused by misconfiguration. </description> <action> Add the rogue host into the cluster or disable vSAN on the rogue host. </action> </cause> </EventLongDescription> EventExFailed to turn off the disk locator LEDerrorFailed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOffFailedEvent|Failed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}EventExFailed to turn on the disk locator LEDerrorFailed to turn on the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOnFailedEvent|Failed to turn on the locator LED of disk {disk.path}. 
Reason : {fault.msg}EventExvSAN cluster needs disk format upgradewarningvSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationvSAN cluster has one or more hosts for which disk format upgrade is recommended: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationcom.vmware.vc.vsan.VsanHostNeedsUpgradeEvent|vSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationEventExUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}errorUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}com.vmware.vc.vtpm.FailedProcessingVTpmCertsEvent|Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}ExtendedEventA compute policy has been createdinfocom.vmware.vcenter.compute.policies.createEvent|Compute policy {policyName} has been createdExtendedEventA compute policy has been deletedinfocom.vmware.vcenter.compute.policies.deleteEvent|Compute policy {policyName} has been deletedEventExDatabase replication state changed: sync, async or no replicationinfocom.vmware.vcha.DB.replication.state.changed|Database replication mode changed to {newState}EventExThe management interface (NIC0) IP address you specified as for the Passive node is different than the original IP address used to configure vCenter HA. You must use the same IP address.errorcom.vmware.vcha.cluster.differentFailoverIp|The management interface (NIC0) IP address you specified as {given} for the Passive node is different than the original IP address {original} used to configure vCenter HA. You must use the same IP address.EventExvCenter HA cluster mode changedinfocom.vmware.vcha.cluster.mode.changed|vCenter HA cluster mode changed to {clusterMode}ExtendedEventUnable to enable mode.errorcom.vmware.vcha.cluster.modeEnableFailed|Unable to enable mode.EventExThe hostname for a node does not map to the vCenter Server PNID.errorcom.vmware.vcha.cluster.pnidHostnameMismatch|The hostname for {nodeIp} does not map to the vCenter Server PNID. 
Review the hostname you specified during the VM clone customization step.ExtendedEventVerify if the Passive and the Witness nodes are up and reachable.errorcom.vmware.vcha.cluster.quorumNotCloned|Verify if the Passive and the Witness nodes are up and reachable.EventExUnable to SSH to a node.errorcom.vmware.vcha.cluster.sshConnectFailed|Unable to SSH to {nodeIp}.ExtendedEventvCenter HA cluster state is currently degradedwarningcom.vmware.vcha.cluster.state.degraded|vCenter HA cluster state is currently degradedExtendedEventvCenter HA cluster is destroyedinfocom.vmware.vcha.cluster.state.destroyed|vCenter HA cluster is destroyedExtendedEventvCenter HA cluster state is currently healthyinfocom.vmware.vcha.cluster.state.healthy|vCenter HA cluster state is currently healthyExtendedEventvCenter HA cluster state is currently isolatederrorcom.vmware.vcha.cluster.state.isolated|vCenter HA cluster state is currently isolatedExtendedEventUnable to get vpxd hostname.errorcom.vmware.vcha.cluster.vcFqdnUnavailable|Unable to get vpxd hostname.ExtendedEventFailover cannot proceed when cluster is in disabled modewarningcom.vmware.vcha.failover.failed.disabled.mode|Failover cannot proceed when cluster is in disabled modeExtendedEventFailover cannot proceed when cluster does not have all three nodes connectedwarningcom.vmware.vcha.failover.failed.node.lost|Failover cannot proceed when cluster does not have all three nodes connectedExtendedEventFailover cannot proceed when Passive node is not ready to takeoverwarningcom.vmware.vcha.failover.failed.passive.not.ready|Failover cannot proceed when vPostgres on Passive node is not ready to takeoverExtendedEventFailover did not succeed. Failed to flush the data to the Passive nodewarningcom.vmware.vcha.failover.flush.failed.degraded|Failover did not succeed. 
Failed to flush the data to the Passive nodeExtendedEventFailover failure is acknowledgedinfocom.vmware.vcha.failover.flush.failed.healthy|Failover failure is acknowledgedExtendedEventFailover status is unknowninfocom.vmware.vcha.failover.flush.failed.unknown|Failover status is unknownExtendedEventFailover succeededinfocom.vmware.vcha.failover.succeeded|Failover succeededEventExAppliance File replication state changedinfocom.vmware.vcha.file.replication.state.changed|Appliance {fileProviderType} is {state}EventExThis node was forcefully converted to the Active nodeinfocom.vmware.vcha.force.reset.active|Node {nodename} was forcefully converted to the Active nodeEventExOne node joined back to the clusterinfocom.vmware.vcha.node.joined|Node {nodeName} joined back to the clusterEventExOne node left the clusterwarningcom.vmware.vcha.node.left|Node {nodeName} left the clusterExtendedEventPSC HA state is currently degradedinfocom.vmware.vcha.psc.ha.health.degraded|PSC HA state is currently degradedExtendedEventPSC HA state is currently healthyinfocom.vmware.vcha.psc.ha.health.healthy|PSC HA state is currently healthyExtendedEventPSC HA state is not being monitoredinfocom.vmware.vcha.psc.ha.health.unknown|PSC HA is not monitored after vCenter HA cluster is destroyedExtendedEventVMware Directory Service health is currently degradedwarningcom.vmware.vcha.vmdir.health.degraded|VMware Directory Service health is currently degradedExtendedEventVMware Directory Service is currently healthyinfocom.vmware.vcha.vmdir.health.healthy|VMware Directory Service is currently healthyExtendedEventVMware Directory Service health is not being monitoredinfocom.vmware.vcha.vmdir.health.unknown|VMware Directory Service health is not being monitoredExtendedEventvSphere Cluster Services mode is system managed on cluster.infocom.vmware.vcls.cluster.DeploymentModeSystemManagedEvent|vSphere Cluster Services mode is system managed on cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.infocom.vmware.vcls.cluster.DrsDisabledHaDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-enabled cluster.errorcom.vmware.vcls.cluster.DrsEnabledDeployModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-enabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.DrsEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.ExtendedEventvSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.warningcom.vmware.vcls.cluster.HaEnabledDrsDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.HaEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. 
HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.ExtendedEventVSAN VASA provider became available.infocom.vmware.vcls.cluster.VsanProviderAvailableEvent|VSAN VASA provider became available.ExtendedEventTimed out waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.VsanProviderTimedoutEvent|Timed out waiting for VSAN VASA provider availability.EventExA Data Processing Unit is down.infoA Data Processing Unit is down.com.vmware.vim.dpu.down|The Data Processing Unit with id '{dpuId}' is down.EventExA Data Processing Unit has been removed from the system.infoA Data Processing Unit has been removed from the system.com.vmware.vim.dpu.removed|The Data Processing Unit with id '{dpuId}' has been removed from the system.EventExThe management state for a Data Processing Unit has changed.infoThe management state for a Data Processing Unit has changed.com.vmware.vim.dpu.state.changed|The management state for the Data Processing Unit with id '{dpuId}' has changed to '{state}'.EventExThe dpu failover ended on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.com.vmware.vim.dpuFailover.end|DPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.EventExThe dpu failover started on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.com.vmware.vim.dpuFailover.start|DPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.ExtendedEventInvalid UTF-8 string encountered.warningInvalid UTF-8 string encountered.com.vmware.vim.utf8filter.badvalue|Invalid UTF-8 string encountered.ExtendedEventSome of the disks of the virtual machine failed to load. The information present for them in the virtual machine configuration may be incompletewarningSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} failed to load. The information present for them in the virtual machine configuration may be incompletecom.vmware.vim.vm.DisksNotLoaded|Some of the disks of the virtual machine {vm.name} on host {host.name} failed to load. 
The information present for them in the virtual machine configuration may be incompleteExtendedEventSnapshot operations are not allowed due to some of the snapshot related objects failed to load.warningSnapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.com.vmware.vim.vm.SnapshotNotAllowed|Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.ExtendedEventVirtual machine reboot converted to power off because the rebootPowerOff option is enabledinfoReboot converted to power off on virtual machine {vm.name}.Reboot converted to power off.com.vmware.vim.vm.reboot.powerOff|Reboot converted to power off on virtual machine {vm.name} on {host.name} because the rebootPowerOff option is enabled.EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' bound to provider '{providerName}'EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vm|vService dependency '{dependencyName}' on '{vm.name}' bound to provider '{providerName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vApp|Created vService dependency '{dependencyName}' with type '{dependencyType}' on vApp '{targetName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vm|Created vService dependency '{dependencyName}' with type '{dependencyType}' on '{vm.name}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vApp|Destroyed vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vm|Destroyed vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vApp|Reconfigured vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vm|Reconfigured vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' unbound from provider '{providerName}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vm|vService dependency '{dependencyName}' on '{vm.name}' unbound from provider '{providerName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vApp|Updated vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vm|Updated vService dependency '{dependencyName}' on '{vm.name}'EventExvService provider registeredinfocom.vmware.vim.vsm.provider.register|vService provider '{providerName}' with type '{providerType}' registered for extension '{extensionKey}'EventExvService provider unregisteredinfocom.vmware.vim.vsm.provider.unregister|vService provider '{providerName}' with type '{providerType}' unregistered for extension '{extensionKey}'EventExvService provider 
updatedinfocom.vmware.vim.vsm.provider.update|Updating vService provider '{providerName}' registered for extension '{extensionKey}'EventExDeleting stale vdisks generated by FCD migration failed.errorcom.vmware.vslm.DeleteStaleDiskFailureEvent|Deleting stale vdisk {diskPath} and related files generated as part of FCD migration failed. Try to delete them manually.EventExRegistering vdisk as FCD at source failed during rollback of unsuccessful migration.errorcom.vmware.vslm.RegisterDiskFailed|Registering {fcdPath} with name {fcdName} as FCD at source failed during rollback of unsuccessful migration. Try to register it manually using RegisterDisk API.EventExUnregistering of vdisk at destination failed during rollback of unsuccessful migration.errorcom.vmware.vslm.UnRegisterDiskFailed|Unregistering of FCD {fcdId} failed at destination during rollback of unsuccessful migration. Reconcile of datastore {datastore} should fix inconsistencies if any.EventExConnectivity check completedinfocom.vmware.vsphere.client.security.ConnectivityCheckEvent|Connectivity check completed. Operation: {Operation}. Subscription status: {SubscriptionCheckResult}. Connectivity status: {ConnectivityCheckResult}. Access type: {AccessType}. User: {Username}ExtendedEventDatastore is accessible to all hosts under the cluster.infocom.vmware.wcp.Datastore.accessible|Datastore is accessible to all hosts under the clusterExtendedEventDatastore not accessible to all hosts under the cluster.warningcom.vmware.wcp.Datastore.inaccessible|Datastore not accessible to all hosts under the cluster.EventExRemote access for an ESXi local user account has been locked temporarily due to multiple failed login attempts.warningesx.audit.account.locked|Remote access for ESXi local user account '{1}' has been locked for {2} seconds after {3} failed login attempts.EventExMultiple remote login failures detected for an ESXi local user account.warningesx.audit.account.loginfailures|Multiple remote login failures detected for ESXi local user account '{1}'.ExtendedEventRestoring factory defaults through DCUI.warningesx.audit.dcui.defaults.factoryrestore|The host has been restored to default factory settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventThe DCUI has been disabled.infoesx.audit.dcui.disabled|The DCUI has been disabled.ExtendedEventThe DCUI has been enabled.infoesx.audit.dcui.enabled|The DCUI has been enabled.ExtendedEventRebooting host through DCUI.warningesx.audit.dcui.host.reboot|The host is being rebooted through the Direct Console User Interface (DCUI).ExtendedEventShutting down host through DCUI.warningesx.audit.dcui.host.shutdown|The host is being shut down through the Direct Console User Interface (DCUI).ExtendedEventRestarting host agents through DCUI.infoesx.audit.dcui.hostagents.restart|The management agents on the host are being restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExLogin authentication on DCUI failederroresx.audit.dcui.login.failed|Authentication of user {1} has failed. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExDCUI login password changed.infoesx.audit.dcui.login.passwd.changed|Login password for user {1} has been changed. 
Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventFactory network settings restored through DCUI.warningesx.audit.dcui.network.factoryrestore|The host has been restored to factory network settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExRestarting network through DCUI.infoesx.audit.dcui.network.restart|A management interface {1} has been restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.esx.audit.entropy.available.low|Host is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.esx.audit.entropy.external.source.disconnected|Host is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.EventExPowering off host through esxcliwarningesx.audit.esxcli.host.poweroff.reason|The host is being powered off through esxcli. Reason for powering off: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.reboot.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.restart.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExHost acceptance level changedinfoesx.audit.esximage.hostacceptance.changed|Host acceptance level changed from {1} to {2}ExtendedEventUEFI Secure Boot enabled: Cannot skip signature checks.warningesx.audit.esximage.install.nobypasssigcheck|UEFI Secure Boot enabled: Cannot skip signature checks. Installing unsigned VIBs will prevent the system from booting. So the vib signature check will be enforced.ExtendedEventAttempting to install an image profile bypassing signing and acceptance level verification.warningesx.audit.esximage.install.nosigcheck|Attempting to install an image profile bypassing signing and acceptance level verification. This may pose a large security risk.ExtendedEventAttempting to install an image profile with validation disabled.warningesx.audit.esximage.install.novalidation|Attempting to install an image profile with validation disabled. This may result in an image with unsatisfied dependencies, file or package conflicts, and potential security violations.EventExSECURITY ALERT: Installing image profile.warningesx.audit.esximage.install.securityalert|SECURITY ALERT: Installing image profile '{1}' with {2}.EventExSuccessfully installed image profile.infoesx.audit.esximage.profile.install.successful|Successfully installed image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). 
Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully updated host to new image profile.infoesx.audit.esximage.profile.update.successful|Successfully updated host to image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully changed software on host.infoesx.audit.esximage.software.apply.succeeded|Successfully installed {1} component(s) and removed {2} component(s) on host. To see more details about the transaction, run 'esxcli software profile get'.EventExSuccessfully installed VIBs.infoesx.audit.esximage.vib.install.successful|Successfully installed {1} VIB(s), removed {2} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully removed VIBsinfoesx.audit.esximage.vib.remove.successful|Successfully removed {1} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExDPU trust validation failederroresx.audit.esxtokend.dputrust.failed|DPU: {1} trust validation failedEventExDPU was removedwarningesx.audit.esxtokend.dputrust.removed|DPU:{1} was removed.EventExDPU trust validation succeededinfoesx.audit.esxtokend.dputrust.succeeded|DPU: {1} trust validation succeeded.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Warning tripped.EventExNVDIMM: Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.lifetime.warning|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Warning tripped.EventExNVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.spareblocks|NVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.temperature|NVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).warningesx.audit.hardware.nvd.health.life.pctused|NVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).EventExNVDIMM Count of DRAM correctable ECC errors above threshold.infoesx.audit.hardware.nvd.health.module.ce|NVDIMM (handle {1}, idString {2}): Count of DRAM correctable ECC errors above threshold.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Warning tripped.EventExNVDIMM: Module Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.lifetime.warning|NVDIMM (handle {1}, idString {2}): Module Lifetime Warning tripped.EventExNVDIMM: Module Temperature Warning 
tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.temperature.warning|NVDIMM (handle {1}, idString {2}): Module Temperature Warning tripped.EventExNVDIMM: Maintenance needed.warningesx.audit.hardware.nvd.health.vmw.statusflags.maintNeeded|NVDIMM (handle {1}, idString {2}): Maintenance needed.EventExA physical disk has been inserted.infoA physical disk has been insertedesx.audit.hcm.event.disk.insertion|A physical disk has been inserted ({1}).EventExA physical disk has been removed.infoA physical disk has been removed.esx.audit.hcm.event.disk.removal|A physical disk has been removed ({1}).ExtendedEventHost has booted.infoesx.audit.host.boot|Host has booted.EventExHost experienced a crashinfoesx.audit.host.crash.reason|The crash at {1} occurred due to: {2}. More details will be available in the generated vmkernel-zdump.EventExThe host experienced a crashinfoesx.audit.host.crash.reason.available|The host experienced a crash. Reason: {1}.ExtendedEventHost experienced a crashinfoesx.audit.host.crash.reason.unavailable|Host experienced a crash. More details will be available in the generated vmkernel-zdump.EventExThe number of virtual machines registered on the host exceeded limit.warningThe number of virtual machines registered on host {host.name} in cluster {computeResource.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered on host {host.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered exceeded limit: {current} registered, {limit} is the maximum supported.esx.audit.host.maxRegisteredVMsExceeded|The number of virtual machines registered on host {host.name} in cluster {computeResource.name} in {datacenter.name} exceeded limit: {current} registered, {limit} is the maximum supported.EventExThe host has been powered offinfoesx.audit.host.poweroff.reason.available|The host has been powered off. Reason for powering off: {1}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.management|The power off at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.subsystem|The power off at {1} was requested by {2} due to: {3}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.timestamp|The power off at {1} was requested due to: {2}.ExtendedEventHost had been powered offinfoesx.audit.host.poweroff.reason.unavailable|Host had been powered off. The poweroff was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.user|The power off at {1} was requested by user/entity {2} due to: {3}.EventExThe host experienced Quick Bootinfoesx.audit.host.quickboot.reason.available|The host experienced Quick Boot. 
Reason for reboot: {1}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.management|The Quick Boot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.subsystem|The Quick Boot at {1} was requested by {2} due to: {3}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.timestamp|The Quick Boot at {1} was requested due to: {2}.ExtendedEventHost experienced Quick Bootinfoesx.audit.host.quickboot.reason.unavailable|Host experienced Quick Boot. The Quick Boot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.user|The Quick Boot at {1} was requested by user/entity {2} due to: {3}.EventExThe host has been rebootedinfoesx.audit.host.reboot.reason.available|The host has been rebooted. Reason for reboot: {1}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.management|The reboot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.subsystem|The reboot at {1} was requested by {2} due to: {3}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.timestamp|The reboot at {1} was requested due to: {2}.ExtendedEventHost had been rebootedinfoesx.audit.host.reboot.reason.unavailable|Host had been rebooted. The reboot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.user|The reboot at {1} was requested by user/entity {2} due to: {3}.ExtendedEventHost is rebooting.infoesx.audit.host.stop.reboot|Host is rebooting.ExtendedEventHost is shutting down.infoesx.audit.host.stop.shutdown|Host is shutting down.EventExPowering off host through hostdwarningesx.audit.hostd.host.poweroff.reason|The host is being powered off through hostd. Reason for powering off: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.reboot.reason|The host is being rebooted through hostd. Reason for reboot: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.restart.reason|The host is being rebooted through hostd. 
Reason for reboot: {1}, User: {2}.ExtendedEventAdministrator access to the host has been enabled.infoesx.audit.lockdownmode.disabled|Administrator access to the host has been enabled.ExtendedEventAdministrator access to the host has been disabled.infoesx.audit.lockdownmode.enabled|Administrator access to the host has been disabled.ExtendedEventList of lockdown exception users has been changed.infoesx.audit.lockdownmode.exceptions.changed|List of lockdown exception users has been changed.ExtendedEventThe host has canceled entering maintenance mode.infoesx.audit.maintenancemode.canceled|The host has canceled entering maintenance mode.ExtendedEventThe host has entered maintenance mode.infoesx.audit.maintenancemode.entered|The host has entered maintenance mode.ExtendedEventThe host has begun entering maintenance mode.infoesx.audit.maintenancemode.entering|The host has begun entering maintenance mode.ExtendedEventThe host has exited maintenance mode.infoesx.audit.maintenancemode.exited|The host has exited maintenance mode.ExtendedEventThe host has failed entering maintenance mode.erroresx.audit.maintenancemode.failed|The host has failed entering maintenance mode.EventExFirewall configuration has changed.infoesx.audit.net.firewall.config.changed|Firewall configuration has changed. Operation '{1}' for rule set {2} succeeded.ExtendedEventFirewall has been disabled.warningesx.audit.net.firewall.disabled|Firewall has been disabled.EventExFirewall has been enabled for port.infoesx.audit.net.firewall.enabled|Firewall has been enabled for port {1}.EventExPort is now protected by Firewall.infoesx.audit.net.firewall.port.hooked|Port {1} is now protected by Firewall.EventExPort is no longer protected with Firewall.warningesx.audit.net.firewall.port.removed|Port {1} is no longer protected with Firewall.EventExLACP disabledinfoesx.audit.net.lacp.disable|LACP for VDS {1} is disabled.EventExLACP enabledinfoesx.audit.net.lacp.enable|LACP for VDS {1} is enabled.EventExuplink is connectedinfoesx.audit.net.lacp.uplink.connected|LACP info: uplink {1} on VDS {2} got connected.EventExThe host has canceled entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.canceled|The host has canceled entering '{1}'.EventExThe host has entered a partial maintenance mode.infoesx.audit.partialmaintenancemode.entered|The host has entered '{1}'.EventExThe host has begun entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.entering|The host has begun entering '{1}'.EventExThe host has exited a partial maintenance mode.infoesx.audit.partialmaintenancemode.exited|The host has exited '{1}'.EventExThe host has failed entering a partial maintenance mode.erroresx.audit.partialmaintenancemode.failed|The host has failed entering '{1}'.ExtendedEventThe ESXi command line shell has been disabled.infoesx.audit.shell.disabled|The ESXi command line shell has been disabled.ExtendedEventThe ESXi command line shell has been enabled.infoesx.audit.shell.enabled|The ESXi command line shell has been enabled.ExtendedEventSSH access has been disabled.infoesx.audit.ssh.disabled|SSH access has been disabled.ExtendedEventSSH access has been enabled.infoesx.audit.ssh.enabled|SSH access has been enabled.EventExSSH session was closed.infoesx.audit.ssh.session.closed|SSH session was closed for '{1}@{2}'.EventExSSH login has failed.infoesx.audit.ssh.session.failed|SSH login has failed for '{1}@{2}'.EventExSSH session was opened.infoesx.audit.ssh.session.opened|SSH session was opened for '{1}@{2}'.EventExPowering off 
hostwarningesx.audit.subsystem.host.poweroff.reason|The host is being powered off. Reason for powering off: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.reboot.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.restart.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.ExtendedEventSupershell session has been started by a user.warningSupershell session has been started by a user.esx.audit.supershell.access|Supershell session has been started by a user.EventExTest with an int argumenterroresx.audit.test.test1d|Test with {1}EventExTest with a string argumenterroresx.audit.test.test1s|Test with {1}ExtendedEventUSB configuration has changed.infoUSB configuration has changed on host {host.name} in cluster {computeResource.name}.USB configuration has changed on host {host.name}.USB configuration has changed.esx.audit.usb.config.changed|USB configuration has changed on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExEnforcement level changed for all security domains.warningesx.audit.uw.secpolicy.alldomains.level.changed|The enforcement level for all security domains has been changed to {1}. The enforcement level must always be set to enforcing.EventExEnforcement level changed for security domain.warningesx.audit.uw.secpolicy.domain.level.changed|The enforcement level for security domain {1} has been changed to {2}. The enforcement level must always be set to enforcing.ExtendedEventExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.warningesx.audit.uw.security.User.ExecInstalledOnly.disabled|ExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.infoesx.audit.uw.security.User.ExecInstalledOnly.enabled|ExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.EventExExecution of non-installed file prevented.warningesx.audit.uw.security.execInstalledOnly.violation|Execution of unknown (non VIB installed) binary '{1}' prevented. Unknown content can cause malware attacks similar to Ransomware.EventExExecution of non-installed file detected.warningesx.audit.uw.security.execInstalledOnly.warning|Execution of unknown (non VIB installed) binary '{1}'. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventLVM device discovered.infoesx.audit.vmfs.lvm.device.discovered|One or more LVM devices have been discovered on this host.EventExRead IO performance may be impacted for diskinfoRead IO performance may be impacted for disk {1}: {2}Read IO performance may be impacted for disk {1}: {2}esx.audit.vmfs.sesparse.bloomfilter.disabled|Read IO performance may be impacted for disk {1}: {2}EventExFile system mounted.infoesx.audit.vmfs.volume.mounted|File system {1} on volume {2} has been mounted in {3} mode on this host.EventExLVM volume un-mounted.infoesx.audit.vmfs.volume.umounted|The volume {1} has been safely un-mounted. The datastore is no longer accessible on this host.EventExvSAN device is added back successfully after MEDIUM error.infovSAN device {1} is added back successfully after MEDIUM error. 
Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.devicerebuild|vSAN device {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExvSAN diskgroup is rebuilt successfully after MEDIUM error.infovSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.diskgrouprebuild|vSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExFound components with invalid metadatawarning{1} components found with invalid metadata on disk {2} {3}esx.audit.vob.vsan.lsom.foundInvalidMetadataComp|{1} components found with invalid metadata on disk {2} {3}EventExvSAN storagepool is added back successfully after MEDIUM error.infovSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.storagepoolrebuild|vSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExTest with both int and string arguments.infoesx.audit.vobdtestcorrelator.test|Test with both string: {2} and int: {1}.ExtendedEventvSAN clustering services have been enabled.infovSAN clustering and directory services have been enabled.esx.audit.vsan.clustering.enabled|vSAN clustering and directory services have been enabled.ExtendedEventvSAN virtual NIC has been added.infovSAN virtual NIC has been added.esx.audit.vsan.net.vnic.added|vSAN virtual NIC has been added.ExtendedEventvSAN network configuration has been removed.errorvSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.esx.audit.vsan.net.vnic.deleted|vSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.EventExvSAN RDMA changed for vmknic.infovSAN RDMA changed for vmknic {1}.esx.audit.vsan.rdma.changed|vSAN RDMA changed for vmknic {1}.ExtendedEventHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445warningHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445esx.audit.weak.ssl.protocol|Weak SSL protocols found and disabled. Please refer to KB article: KB 2151445ExtendedEventA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.infoA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.esx.clear.coredump.configured|A vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.ExtendedEventAt least one coredump target has been configured. Host core dumps will be saved.infoAt least one coredump target has been configured. Host core dumps will be saved.esx.clear.coredump.configured2|At least one coredump target has been configured. Host core dumps will be saved.EventExNVDIMM Energy Source is sufficiently charged.infoesx.clear.hardware.nvd.health.module.es.charged|NVDIMM (handle {1}, idString {2}): Energy Source is sufficiently charged.EventExRestored network connectivity to portgroupsinfoesx.clear.net.connectivity.restored|Network connectivity restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExRestored Network Connectivity to DVPortsinfoesx.clear.net.dvport.connectivity.restored|Network connectivity restored on DVPorts: {1}. 
Physical NIC {2} is up.EventExRestored Network Redundancy to DVPortsinfoesx.clear.net.dvport.redundancy.restored|Uplink redundancy restored on DVPorts: {1}. Physical NIC {2} is up recently.EventExlag transition upinfoesx.clear.net.lacp.lag.transition.up|LACP info: LAG {1} on VDS {2} is up.EventExuplink transition upinfoesx.clear.net.lacp.uplink.transition.up|LACP info: uplink {1} on VDS {2} is moved into link aggregation group.EventExuplink is unblockedinfoesx.clear.net.lacp.uplink.unblocked|LACP info: uplink {1} on VDS {2} is unblocked.EventExRestored uplink redundancy to portgroupsinfoesx.clear.net.redundancy.restored|Uplink redundancy restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExLink state upinfoesx.clear.net.vmnic.linkstate.up|Physical NIC {1} linkstate is up.EventExStorage Device I/O Latency has improvedinfoesx.clear.psastor.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.psastor.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.psastor.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExScsi Device I/O Latency has improvedinfoesx.clear.scsi.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.scsi.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.scsi.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExExited the All Paths Down stateinfoesx.clear.storage.apd.exit|Device or filesystem with identifier {1} has exited the All Paths Down state.EventExRestored connectivity to storage deviceinfoesx.clear.storage.connectivity.restored|Connectivity to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored path redundancy to storage deviceinfoesx.clear.storage.redundancy.restored|Path redundancy to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored connection to NFS serverinfoesx.clear.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExNFS volume I/O Latency has improvedinfoesx.clear.vmfs.nfs.volume.io.latency.improved|NFS volume {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExvSAN device has come online.infovSAN device {1} has come online.esx.clear.vob.vsan.pdl.online|vSAN device {1} has come online.EventExTest with both int and string arguments.infoesx.clear.vobdtestcorrelator.test|Test with both string: {1} {3} and int: {2}.ExtendedEventvSAN clustering services have now been enabled.infovSAN clustering and directory services have now been enabled.esx.clear.vsan.clustering.enabled|vSAN clustering and directory services have now been enabled.ExtendedEventvSAN now has at least one active network configuration.infovSAN now has a usable network configuration. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.network.available|vSAN now has a usable network configuration. 
Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExA previously reported vmknic now has a valid IP.infovmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.vmknic.ready|vmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExVVol container has come online.infoesx.clear.vvol.container.online|VVol container {1} has come online.EventExA 3rd party component on ESXi has reported an error.erroresx.problem.3rdParty.error|A 3rd party component, {1}, running on ESXi has reported an error. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.info|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.information|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported a warning.warningesx.problem.3rdParty.warning|A 3rd party component, {1}, running on ESXi has reported a warning related to a problem. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA corrected memory error occurrederroresx.problem.apei.bert.memory.error.corrected|A corrected memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA fatal memory error occurrederroresx.problem.apei.bert.memory.error.fatal|A fatal memory error occurred in the last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA recoverable memory error occurrederroresx.problem.apei.bert.memory.error.recoverable|A recoverable memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA corrected PCIe error occurrederroresx.problem.apei.bert.pcie.error.corrected|A corrected PCIe error occurred in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA fatal PCIe error occurrederroresx.problem.apei.bert.pcie.error.fatal|Platform encountered a fatal PCIe error in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA recoverable PCIe error occurrederroresx.problem.apei.bert.pcie.error.recoverable|A recoverable PCIe error occurred in last boot. 
The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExAn application running on ESXi host has crashed and core file creation failed.warningesx.problem.application.core.dumpFailed|An application ({1}) running on ESXi host has crashed ({2} time(s) so far), but core dump creation failed.EventExAn application running on ESXi host has crashed and a core file was created.warningesx.problem.application.core.dumped|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). A core file might have been created at {3}.EventExAn application running on ESXi host has crashed and an encrypted core file was created.warningesx.problem.application.core.dumped.encrypted|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). An encrypted core file using keyId {3} might have been created at {4}.ExtendedEventCritical failure detected during boot, please refer to KB 93107.errorA critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.esx.problem.boot.failure.detected|A critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.ExtendedEventSystem clock no longer synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.lostsync|system clock no longer synchronized to upstream time serversExtendedEventSystem clock synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.sync|system clock synchronized to upstream time serversExtendedEventSystem clock lost synchronization to upstream time serverswarningesx.problem.clock.correction.adjtime.unsync|system clock lost synchronization to upstream time serversEventExApplication system changed clock, synchronization lostwarningesx.problem.clock.correction.changed|{1} stepped system clock to {2}.{3}, synchronization lostEventExAllowed system clock update with large time changewarningesx.problem.clock.correction.delta.allowed|Clock stepped to {1}.{2}, but delta {3} > {4} secondsEventExFailed system clock update with large time changeerroresx.problem.clock.correction.delta.failed|Clock step to {1}.{2} failed, delta {3} > {4} seconds, number of large corrections > {5}EventExAllowed system clock update with large time change, but number of future updates limitedwarningesx.problem.clock.correction.delta.warning|Clock stepped to {1}.{2}, but delta {3} > {4} seconds, {5}/{6} large correctionsEventExSystem clock stepped, lost synchronizationwarningesx.problem.clock.correction.step.unsync|system clock stepped to {1}.{2}, lost synchronizationEventExSystem clock maximum number of large corrections changedwarningesx.problem.clock.parameter.set.maxLargeCorrections|system clock max number of correction set to {1}EventExSystem clock maximum negative phase correction changedwarningesx.problem.clock.parameter.set.maxNegPhaseCorrection|system clock max negative phase correction set to {1}EventExSystem clock maximum positive phase correction changedwarningesx.problem.clock.parameter.set.maxPosPhaseCorrection|system clock max positive phase correction set to {1}EventExSystem clock count of number of large corrections changedwarningesx.problem.clock.parameter.set.numLargeCorrections|system clock number of large correction set to {1}EventExSystem clock VOB report interval 
changedwarningesx.problem.clock.parameter.set.vobReportInterval|system clock max number of correction set to {1}ExtendedEventSystem clock state has been resetwarningesx.problem.clock.state.reset|system clock state has been resetEventExThe storage capacity of the coredump targets is insufficient to capture a complete coredump.warningThe storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.esx.problem.coredump.capacity.insufficient|The storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.EventExThe free space available in default coredump copy location is insufficient to copy new coredumps.warningThe free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.esx.problem.coredump.copyspace|The free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.EventExThe given partition has insufficient amount of free space to extract the coredump.warningThe given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.esx.problem.coredump.extraction.failed.nospace|The given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.ExtendedEventNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.warningNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured|No vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.ExtendedEventNo coredump target has been configured. Host core dumps cannot be saved.warningNo coredump target has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured2|No coredump target has been configured. Host core dumps cannot be saved.ExtendedEventDRAM ECC not enabled. Please enable it in BIOS.erroresx.problem.cpu.amd.mce.dram.disabled|DRAM ECC not enabled. Please enable it in BIOS.ExtendedEventNot all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. erroresx.problem.cpu.intel.ioapic.listing.error|Not all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. ExtendedEventMCE monitoring will be disabled as an unsupported CPU was detected. Please consult the ESX HCL for information on supported hardware.erroresx.problem.cpu.mce.invalid|MCE monitoring will be disabled as an unsupported CPU was detected. 
Please consult the ESX HCL for information on supported hardware.EventExHigh number of corrected errors on a page.infoesx.problem.cpu.page.correctederrors.high|High number of corrected errors on host physical page number {1}EventExDisabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.erroresx.problem.cpu.smp.ht.invalid|Disabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.EventExFound {1} PCPUs, but only using {2} of them due to specified limit.erroresx.problem.cpu.smp.ht.numpcpus.max|Found {1} PCPUs, but only using {2} of them due to specified limit.EventExDisabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.erroresx.problem.cpu.smp.ht.partner.missing|Disabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.EventExError copying ConfigStore from backup.errorError copying ConfigStore from backup.esx.problem.cs.createstore.copy.backup.error|Error copying ConfigStore from backup {1}.ExtendedEventFailed an operation on the ConfigStore database.errorFailed an operation on the ConfigStore database.esx.problem.cs.db.operation.error|Failed an operation on the ConfigStore database.ExtendedEventFailed to setup desired configuration.errorFailed to setup desired configuration.esx.problem.cs.desired.config.error|Failed to setup desired configuration.ExtendedEventError cleaning up Datafile store.errorError cleaning up Datafile store.esx.problem.cs.dfs.cleanup.error|Error cleaning up Datafile store.ExtendedEventDataFile store cannot be restored.errorDataFile store cannot be restored.esx.problem.cs.dfs.restore.error|DataFile store cannot be restored.EventExError processing schema file.errorError processing schema file.esx.problem.cs.schema.file.error|Error processing schema file {1}.EventExInvalid metadata in schema file.errorInvalid metadata in schema file.esx.problem.cs.schema.metadata.error|Invalid metadata in schema file {1}.EventExVibId validation failed for schema file.errorVibId validation failed for schema file.esx.problem.cs.schema.validation.error|VibId validation failed for schema file {1}.EventExError in upgrading config.errorError in upgrading config.esx.problem.cs.upgrade.config.error|Error in upgrading config {1}.EventExUnable to obtain a DHCP lease.erroresx.problem.dhclient.lease.none|Unable to obtain a DHCP lease on interface {1}.EventExNo expiry time on offered DHCP lease.erroresx.problem.dhclient.lease.offered.noexpiry|No expiry time on offered DHCP lease from {1}.EventExThe maintenance mode state for some Data Processing Units may be out of sync with the host.warningThe maintenance mode state for some Data Processing Units may be out of sync with the host.esx.problem.dpu.maintenance.sync.failed|The maintenance mode state for Data Processing Units with ids '{dpus}' may be out of sync with the host.EventExSome drivers need special notice.warningDriver for device {1} is {2}. Please refer to KB article: {3}.esx.problem.driver.abnormal|Driver for device {1} is {2}. Please refer to KB article: {3}.EventExHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.esx.problem.entropy.config.error|Host is configured with external entropy source. 
Entropy daemon has become non functional because of an {1} change. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.empty|Host is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.inmemory.empty|Host is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.EventExCould not install image profile.erroresx.problem.esximage.install.error|Could not install image profile: {1}EventExHost doesn't meet image profile hardware requirements.erroresx.problem.esximage.install.invalidhardware|Host doesn't meet image profile '{1}' hardware requirements: {2}EventExCould not stage image profile.erroresx.problem.esximage.install.stage.error|Could not stage image profile '{1}': {2}ExtendedEventThe host can not support the applied EVC mode.warningesx.problem.evc.incompatible|The host can not support the applied EVC mode.EventExSkipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.device.invalid|Skipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.EventExSkipping interrupt routing entry with bad device pin: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.pin.invalid|Skipping interrupt routing entry with bad device pin: {1}. 
This is a BIOS bug.EventExFPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.clear|FPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.creditstall|FPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.devicespecific|FPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.lostcredit|FPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.oversubscription|FPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.devicespecific|FPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.timeout|FPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.unabletoroute|FPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.unknown|FPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.linkintegrity.devicespecific|FPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidCRC|FPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidtransmissionword|FPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link failure: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.linkfailure|FPIN FC link failure: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsignal|FPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsynchronization|FPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.primitivesequenceprotocolerror|FPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.uncorrectableFECerror|FPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN 
{2}.infoesx.problem.hardware.fpin.fc.linkintegrity.unknown|FPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.clear|FPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.creditstall|FPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.devicespecific|FPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.lostcredit|FPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.oversubscription|FPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.EventExIOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.erroresx.problem.hardware.ioapic.missing|IOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.ExtendedEventFailed to communicate with the BMC. IPMI functionality will be unavailable on this system.erroresx.problem.hardware.ipmi.bmc.bad|Failed to communicate with the BMC. IPMI functionality will be unavailable on this system.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Error tripped.EventExNVDIMM: Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.lifetime.error|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Error tripped.EventExNVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.erroresx.problem.hardware.nvd.health.lastshutdownstatus|NVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.EventExNVDIMM Configuration error detected.erroresx.problem.hardware.nvd.health.module.config.error|NVDIMM (handle {1}, idString {2}): Configuration error detected.EventExNVDIMM Controller failure detected.erroresx.problem.hardware.nvd.health.module.ctlr.fail|NVDIMM (handle {1}, idString {2}): Controller failure detected. Access to the device and its capabilities are lost.EventExNVDIMM Controller firmware error detected.erroresx.problem.hardware.nvd.health.module.ctlr.fw.error|NVDIMM (handle {1}, idString {2}): Controller firmware error detected.EventExNVDIMM Energy Source still charging.warningesx.problem.hardware.nvd.health.module.es.charging|NVDIMM (handle {1}, idString {2}): Energy Source still charging but does not have sufficient charge to support a backup. 
Persistency is temporarily lost for the device.EventExNVDIMM Energy Source failure detected.erroresx.problem.hardware.nvd.health.module.es.fail|NVDIMM (handle {1}, idString {2}): Energy Source failure detected. Persistency is lost for the device.EventExNVDIMM Previous ARM operation failed.warningesx.problem.hardware.nvd.health.module.ops.arm.fail|NVDIMM (handle {1}, idString {2}): Previous ARM operation failed.EventExNVDIMM Previous ERASE operation failed.warningesx.problem.hardware.nvd.health.module.ops.erase.fail|NVDIMM (handle {1}, idString {2}): Previous ERASE operation failed.EventExThe Platform flush failed. The restored data may be inconsistent.erroresx.problem.hardware.nvd.health.module.ops.flush.fail|NVDIMM (handle {1}, idString {2}): The Platform flush failed. The restored data may be inconsistent.EventExNVDIMM Last RESTORE operation failed.erroresx.problem.hardware.nvd.health.module.ops.restore.fail|NVDIMM (handle {1}, idString {2}): Last RESTORE operation failed.EventExNVDIMM Previous SAVE operation failed.erroresx.problem.hardware.nvd.health.module.ops.save.fail|NVDIMM (handle {1}, idString {2}): Previous SAVE operation failed.EventExNVDIMM Count of DRAM uncorrectable ECC errors above threshold.warningesx.problem.hardware.nvd.health.module.uce|NVDIMM (handle {1}, idString {2}): Count of DRAM uncorrectable ECC errors above threshold.EventExNVDIMM Vendor specific error.erroresx.problem.hardware.nvd.health.module.vendor.error|NVDIMM (handle {1}, idString {2}): Vendor specific error.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Error tripped.EventExNVDIMM: Module Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.lifetime.error|NVDIMM (handle {1}, idString {2}): Module Lifetime Error tripped.EventExNVDIMM: Module Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.temperature.error|NVDIMM (handle {1}, idString {2}): Module Temperature Error tripped.EventExNVDIMM: All data may be lost in the event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInPowerLoss|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of power loss.EventExNVDIMM: All data may be lost in the event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInShutdown|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of shutdown.EventExNVDIMM: Subsequent reads may fail or return invalid data and subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossNow|NVDIMM (handle {1}, idString {2}): Subsequent reads may fail or return invalid data and subsequent writes may not persist.EventExNVDIMM: Performance degraded.erroresx.problem.hardware.nvd.health.vmw.statusflags.perfDegraded|NVDIMM (handle {1}, idString {2}): Performance degraded.EventExNVDIMM: Write persistency loss may happen in event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInPowerLoss|NVDIMM (handle {1}, idString {2}): Write persistency loss may happen in event of power loss.EventExNVDIMM: Write persistency loss may happen in event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInShutdown|NVDIMM (handle 
{1}, idString {2}): Write persistency loss may happen in event of shutdown.EventExNVDIMM: Subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossNow|NVDIMM (handle {1}, idString {2}): Subsequent writes may not persist.ExtendedEventTPM 2.0 device detected but a connection cannot be established.warningesx.problem.hardware.tpm2.connection|TPM 2.0 device detected but a connection cannot be established.ExtendedEventTPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.nosha256|TPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.ExtendedEventTPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.notis|TPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.ExtendedEventUnable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.warningesx.problem.hardware.tpm2.ownership|Unable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.ExtendedEventesx.problem.hardware.tpm2.provisioning|EventExA physical disk has a predictive failure.warningA physical disk has a predictive failure.esx.problem.hcm.event.disk.predictive.failure|A physical disk has a predictive failure ({1}).ExtendedEventAn unread host kernel core dump has been found.warningesx.problem.host.coredump|An unread host kernel core dump has been found.EventExHostd crashed and a core file was created.warningesx.problem.hostd.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. This might have caused connections to the host to be dropped.EventExHostd crashed and an encrypted core file was created.warningesx.problem.hostd.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventThis host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.infoesx.problem.hyperthreading.unmitigated|This host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.ExtendedEventSome of the config entries in the VM inventory were skipped because they are invalid.warningesx.problem.inventory.invalidConfigEntries|Some of the config entries in the VM inventory were skipped because they are invalid.EventExAn iofilter installed on the host has stopped functioning.errorIOFilter {1} has stopped functioning due to an unrecoverable error. Reason: {2}esx.problem.iofilter.disabled|IOFilter {1} has stopped functioning due to an unrecoverable error. 
Reason: {2}EventExStorage I/O Control version mismatchinfoesx.problem.iorm.badversion|Host {1} cannot participate in Storage I/O Control(SIOC) on datastore {2} because the version number {3} of the SIOC agent on this host is incompatible with number {4} of its counterparts on other hosts connected to this datastore.EventExUnmanaged workload detected on SIOC-enabled datastoreinfoesx.problem.iorm.nonviworkload|An unmanaged I/O workload is detected on a SIOC-enabled datastore: {1}.EventExThe metadata store has degraded on one of the hosts in the cluster.errorThe metadata store has degraded on host {1}.esx.problem.metadatastore.degraded|The metadata store has degraded on host {1}.ExtendedEventThe metadata store is healthy.infoThe metadata store is healthy.esx.problem.metadatastore.healthy|The metadata store is healthy.ExtendedEventFailed to create default migration heapwarningesx.problem.migrate.vmotion.default.heap.create.failed|Failed to create default migration heap. This might be the result of severe host memory pressure or virtual address space exhaustion. Migration might still be possible, but will be unreliable in cases of extreme host memory pressure.EventExError with migration listen socketerroresx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown|The ESXi host's vMotion network server encountered an error while monitoring incoming network connections. Shutting down listener socket. vMotion might not be possible with this host until vMotion is manually re-enabled. Failure status: {1}EventExThe max_vfs module option has been set for at least one module.warningSetting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.esx.problem.module.maxvfs.set|Setting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.EventExLost Network Connectivityerroresx.problem.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Connectivity to DVPortserroresx.problem.net.dvport.connectivity.lost|Lost network connectivity on DVPorts: {1}. Physical NIC {2} is down.EventExNetwork Redundancy Degraded on DVPortswarningesx.problem.net.dvport.redundancy.degraded|Uplink redundancy degraded on DVPorts: {1}. Physical NIC {2} is down.EventExLost Network Redundancy on DVPortswarningesx.problem.net.dvport.redundancy.lost|Lost uplink redundancy on DVPorts: {1}. Physical NIC {2} is down.EventExNo IPv6 TSO supporterroresx.problem.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid fenceId configuration on dvPorterroresx.problem.net.fence.port.badfenceid|VMkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: invalid fenceId.EventExMaximum number of fence networks or portserroresx.problem.net.fence.resource.limited|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: maximum number of fence networks or ports have been reached.EventExSwitch fence property is not seterroresx.problem.net.fence.switch.unavailable|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: dvSwitch fence property is not set.EventExFirewall configuration operation failed. 
The changes were not applied.erroresx.problem.net.firewall.config.failed|Firewall configuration operation '{1}' failed. The changes were not applied to rule set {2}.EventExAdding port to Firewall failed.erroresx.problem.net.firewall.port.hookfailed|Adding port {1} to Firewall failed.EventExFailed to set gatewayerroresx.problem.net.gateway.set.failed|Cannot connect to the specified gateway {1}. Failed to set it.EventExNetwork memory pool thresholdwarningesx.problem.net.heap.belowthreshold|{1} free size dropped below {2} percent.EventExlag transition downwarningesx.problem.net.lacp.lag.transition.down|LACP warning: LAG {1} on VDS {2} is down.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse|LACP error: No peer response on uplink {1} for VDS {2}.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse.2|LACP error: No peer response on VDS {1}.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.incompatible|LACP error: Current teaming policy on VDS {1} is incompatible, supported is IP hash only.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.linkstatus|LACP error: Current teaming policy on VDS {1} is incompatible, supported link failover detection is link status only.EventExuplink is blockedwarningesx.problem.net.lacp.uplink.blocked|LACP warning: uplink {1} on VDS {2} is blocked.EventExuplink is disconnectedwarningesx.problem.net.lacp.uplink.disconnected|LACP warning: uplink {1} on VDS {2} got disconnected.EventExuplink duplex mode is differenterroresx.problem.net.lacp.uplink.fail.duplex|LACP error: Duplex mode across all uplink ports must be full, VDS {1} uplink {2} has different mode.EventExuplink speed is differenterroresx.problem.net.lacp.uplink.fail.speed|LACP error: Speed across all uplink ports must be same, VDS {1} uplink {2} has different speed.EventExAll uplinks must be activeerroresx.problem.net.lacp.uplink.inactive|LACP error: All uplinks on VDS {1} must be active.EventExuplink transition downwarningesx.problem.net.lacp.uplink.transition.down|LACP warning: uplink {1} on VDS {2} is moved out of link aggregation group.EventExInvalid vmknic specified in /Migrate/Vmknicwarningesx.problem.net.migrate.bindtovmk|The ESX advanced configuration option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Update the configuration option with a valid vmknic. Alternatively, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExUnsupported vMotion network latency detectedwarningesx.problem.net.migrate.unsupported.latency|ESXi has detected {1}ms round-trip vMotion network latency between host {2} and {3}. High latency vMotion networks are supported only if both ESXi hosts have been configured for vMotion latency tolerance.EventExFailed to apply for free portserroresx.problem.net.portset.port.full|Portset {1} has reached the maximum number of ports ({2}). Cannot apply for any more free ports.EventExVlan ID of the port is invaliderroresx.problem.net.portset.port.vlan.invalidid|{1} VLANID {2} is invalid. 
VLAN ID must be between 0 and 4095.EventExTry to register an unsupported portset classwarningesx.problem.net.portset.unsupported.psclass|{1} is not a VMware supported portset class, the relevant module must be unloaded.EventExVirtual NIC connection to switch failedwarningesx.problem.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningesx.problem.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Redundancywarningesx.problem.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExRSPAN src session conflict with teamingerroresx.problem.net.rspan.teaming.uplink.io.conflict|Failed to set RSPAN src session {1} on portset {2} due to it disallows uplink I/O which conflicts with {3} teaming policy {4}.EventExThe teaming policy has an invalid uplinkerroresx.problem.net.teaming.policy.invalid.uplink|Failed to update teaming policy {1} on portset {2} due to an invalid uplink {3} which disallows normal I/O.EventExFailed to set MTU on an uplinkwarningesx.problem.net.uplink.mtu.failed|VMkernel failed to set the MTU value {1} on the uplink {2}.EventExA duplicate IP address was detected on a vmknic interfacewarningesx.problem.net.vmknic.ip.duplicate|A duplicate IP address was detected for {1} on the interface {2}. The current owner is {3}.EventExLink state downwarningesx.problem.net.vmnic.linkstate.down|Physical NIC {1} linkstate is down.EventExLink state unstablewarningesx.problem.net.vmnic.linkstate.flapping|Taking down physical NIC {1} because the link is unstable.EventExNic Watchdog Resetwarningesx.problem.net.vmnic.watchdog.reset|Uplink {1} has recovered from a transient failure due to watchdog timeoutEventExNTP daemon stopped. Time correction out of bounds.erroresx.problem.ntpd.clock.correction.error|NTP daemon stopped. Time correction {1} > {2} seconds. Manually set the time and restart ntpd.EventExOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.osdata.partition.full|OSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212ExtendedEventConfigured OSData cannot be found. Please refer to KB article: KB 87212.warningConfigured OSData cannot be found. Please refer to KB article: KB 87212.esx.problem.osdata.path.notfound|Configured OSData cannot be found. 
Please refer to KB article: KB 87212.EventExVirtual machine killed as it kept using a corrupted memory page.erroresx.problem.pageretire.mce.injected|Killing virtual machine with config path {1} because at least {2} uncorrectable memory error machine check exceptions were injected for guest physical page {3} but the virtual machine's operating system kept using the page.EventExA virtual machine was killed as it kept using a corrupted memory page.errorThe virtual machine was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.esx.problem.pageretire.mce.injected.2|{1} was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.EventExMemory page retirement requested by platform firmware.infoesx.problem.pageretire.platform.retire.request|Memory page retirement requested by platform firmware. FRU ID: {1}. Refer to System Hardware Log: {2}EventExNumber of host physical memory pages that have been selected for retirement but could not yet be retired is high.warningesx.problem.pageretire.selectedbutnotretired.high|Number of host physical memory pages that have been selected for retirement but could not yet be retired is high: ({1})EventExNumber of host physical memory pages selected for retirement exceeds threshold.warningesx.problem.pageretire.selectedmpnthreshold.host.exceeded|Number of host physical memory pages that have been selected for retirement ({1}) exceeds threshold ({2}).ExtendedEventNo memory to allocate APD Eventwarningesx.problem.psastor.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExStorage Device close failed.warningesx.problem.psastor.device.close.failed|"Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.psastor.device.detach.failed|Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.psastor.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExStorage Device I/O Latency going highwarningesx.problem.psastor.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExPlugin's isLocal entry point failedwarningesx.problem.psastor.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.psastor.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.psastor.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.psastor.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExDevice has been turned off administratively.infoesx.problem.psastor.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. 
Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.psastor.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.psastor.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExMaximum number of storage pathserroresx.problem.psastor.psastorpath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.psastor.unsupported.plugin.type|Storage Device Allocation not supported for plugin type {1}EventExFailed to delete resource group.warningFailed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.esx.problem.resourcegroup.delete.failed|Failed to delete resource groups with names '{rgnames}'.EventExFailed to Set the Virtual Machine's Latency Sensitivitywarningesx.problem.sched.latency.abort|Unable to apply latency-sensitivity setting to virtual machine {1}. No valid placement on the host.EventExNo Cache Allocation Resourcewarningesx.problem.sched.qos.cat.noresource|Unable to support cache allocation for virtual machine {1}. Out of resources.EventExNo Cache Allocation Supportwarningesx.problem.sched.qos.cat.notsupported|Unable to support L3 cache allocation for virtual machine {1}. No processor capabilities.EventExNo Cache Monitoring Resourcewarningesx.problem.sched.qos.cmt.noresource|Unable to support cache monitoring for virtual machine {1}. Out of resources.EventExNo Cache Monitoring Supportwarningesx.problem.sched.qos.cmt.notsupported|Unable to support L3 cache monitoring for virtual machine {1}. No processor capabilities.ExtendedEventScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.warningScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.esx.problem.scratch.on.usb|Scratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.EventExScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.scratch.partition.full|Scratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212EventExSize of scratch partition is too small.warningSize of scratch partition {1} is too small. 
Recommended scratch partition size is {2} MiB.esx.problem.scratch.partition.size.small|Size of scratch partition {1} is too small. Recommended scratch partition size is {2} MiB.EventExNo scratch partition has been configured.warningNo scratch partition has been configured. Recommended scratch partition size is {} MiB.esx.problem.scratch.partition.unconfigured|No scratch partition has been configured. Recommended scratch partition size is {} MiB.ExtendedEventNo memory to allocate APD Eventwarningesx.problem.scsi.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExScsi Device close failed.warningesx.problem.scsi.device.close.failed|"Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.scsi.device.detach.failed|Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExFailed to attach filter to device.warningesx.problem.scsi.device.filter.attach.failed|Failed to attach filters to device '%s' during registration. Plugin load failed or the filter rules are incorrect.EventExInvalid XCOPY request for devicewarningesx.problem.scsi.device.invalid.xcopy.request|Invalid XCOPY request for device {1}. Host {2}, Device {3}, Plugin {4}, {5} sense, sense.key = {6}, sense.asc = {7}, sense.ascq = {8}: {9}EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.scsi.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExFailed to obtain INQUIRY data from the devicewarningesx.problem.scsi.device.io.inquiry.failed|Failed to get standard inquiry for device {1} from Plugin {2}.ExtendedEventScsi device queue parameters incorrectly set.warningesx.problem.scsi.device.io.invalid.disk.qfull.value|QFullSampleSize should be bigger than QFullThreshold. LUN queue depth throttling algorithm will not function as expected. Please set the QFullSampleSize and QFullThreshold disk configuration values in ESX correctly.EventExScsi Device I/O Latency going highwarningesx.problem.scsi.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExQErr cannot be changed on device. Please change it manually on the device if possible.warningesx.problem.scsi.device.io.qerr.change.config|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. The system is not configured to change the QErr setting of device. The QErr value supported by system is 0x{3}. Please check the SCSI ChangeQErrSetting configuration value for ESX.EventExScsi Device QErr setting changedwarningesx.problem.scsi.device.io.qerr.changed|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. 
The device was originally configured to the supported QErr setting of 0x{3}, but this has been changed and could not be changed back.EventExPlugin's isLocal entry point failedwarningesx.problem.scsi.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.scsi.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.scsi.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.scsi.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExFailed to apply NMP SATP option during device discovery.warningesx.problem.scsi.device.nmp.satp.option.failed|Invalid config parameter: \"{1}\" provided in the nmp satp claimrule, this setting was not applied while claiming the path {2}EventExDevice has been turned off administratively.infoesx.problem.scsi.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.scsi.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExThin Provisioned Device Nearing Capacitywarningesx.problem.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.scsi.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExvVol PE path going out of vVol-incapable adaptererroresx.problem.scsi.scsipath.badpath.unreachpe|Sanity check failed for path {1}. The path is to a vVol PE, but it goes out of adapter {2} which is not PE capable. Path dropped.EventExCannot safely determine vVol PEerroresx.problem.scsi.scsipath.badpath.unsafepe|Sanity check failed for path {1}. Could not safely determine if the path is to a vVol PE. Path dropped.EventExMaximum number of storage pathserroresx.problem.scsi.scsipath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.scsi.unsupported.plugin.type|Scsi Device Allocation not supported for plugin type {1}ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. 
Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.infoesx.problem.sgx.addpackage|Support for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.infoesx.problem.sgx.htenabled|Support for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.esx.problem.slp.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.EventExAll paths are downwarningesx.problem.storage.apd.start|Device or filesystem with identifier {1} has entered the All Paths Down state.EventExAll Paths Down timed out, I/Os will be fast failedwarningesx.problem.storage.apd.timeout|Device or filesystem with identifier {1} has entered the All Paths Down Timeout state after being in the All Paths Down state for {2} seconds. I/Os will now be fast failed.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.devicepor|Frequent PowerOn Reset Unit Attentions are occurring on device {1}. This might indicate a storage problem. Affected datastores: {2}EventExLost Storage Connectivityerroresx.problem.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.pathpor|Frequent PowerOn Reset Unit Attentions are occurring on path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExFrequent State Changes of Storage Pathinfoesx.problem.storage.connectivity.pathstatechanges|Frequent path state changes are occurring for path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExiSCSI discovery target login connection problemerroresx.problem.storage.iscsi.discovery.connect.error|iSCSI discovery to {1} on {2} failed. The iSCSI Initiator could not establish a network connection to the discovery address.EventExiSCSI Discovery target login errorerroresx.problem.storage.iscsi.discovery.login.error|iSCSI discovery to {1} on {2} failed. The Discovery target returned a login error of: {3}.EventExiSCSI iSns Discovery errorerroresx.problem.storage.iscsi.isns.discovery.error|iSCSI iSns discovery to {1} on {2} failed. 
({3} : {4}).EventExiSCSI Target login connection problemerroresx.problem.storage.iscsi.target.connect.error|Login to iSCSI target {1} on {2} failed. The iSCSI initiator could not establish a network connection to the target.EventExiSCSI Target login errorerroresx.problem.storage.iscsi.target.login.error|Login to iSCSI target {1} on {2} failed. Target returned login error of: {3}.EventExiSCSI target permanently removederroresx.problem.storage.iscsi.target.permanently.lost|The iSCSI target {2} was permanently removed from {1}.EventExiSCSI target was permanently removederroresx.problem.storage.iscsi.target.permanently.removed|The iSCSI target {1} was permanently removed from {2}.EventExDegraded Storage Path Redundancywarningesx.problem.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. Affected datastores: {3}.EventExLost Storage Path Redundancywarningesx.problem.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.esx.problem.swap.systemSwap.isPDL.cannot.remove|System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.EventExSystem swap was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningesx.problem.swap.systemSwap.isPDL.cannot.remove.2|System swap was affected by the PDL of {1} and was removed. System swap has been reconfigured.EventExSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure|System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.EventExSystem swap was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningesx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2|System swap was affected by the PDL of {1}. It was removed but the subsequent reconfiguration failed.ExtendedEventSystem logging is not configured.warningSystem logging is not configured on host {host.name}.esx.problem.syslog.config|System logging is not configured on host {host.name}. Please check Syslog options for the host under Configuration -> Software -> Advanced Settings in vSphere client.ExtendedEventSystem logs are stored on non-persistent storage.warningSystem logs on host {host.name} are stored on non-persistent storage.esx.problem.syslog.nonpersistent|System logs on host {host.name} are stored on non-persistent storage. 
Consult product documentation to configure a syslog server or a scratch partition.ExtendedEventTest with no argumentserroresx.problem.test.test0|Test with no argumentsEventExTest with both int and string argumentserroresx.problem.test.test2|Test with both {1} and {2}ExtendedEventUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.warningUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.esx.problem.unsupported.tls.protocols|Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.EventExA VFAT filesystem is full.erroresx.problem.vfat.filesystem.full.other|The VFAT filesystem {1} (UUID {2}) is full.EventExA VFAT filesystem, being used as the host's scratch partition, is full.erroresx.problem.vfat.filesystem.full.scratch|The host's scratch partition, which is the VFAT filesystem {1} (UUID {2}), is full.EventExConfigstore is reaching its critical size limit. Please refer to the KB 93362 for more details.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.EventExA ramdisk has a very high usage. Please refer to the KB 93362 for more details.warningRamdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.ExtendedEventAn operation on the root filesystem has failed.erroresx.problem.visorfs.failure|An operation on the root filesystem has failed.EventExThe root filesystem's file table is full.erroresx.problem.visorfs.inodetable.full|The root filesystem's file table is full. As a result, the file {1} could not be created by the application '{2}'.EventExA ramdisk is full.erroresx.problem.visorfs.ramdisk.full|The ramdisk '{1}' is full. 
As a result, the file {2} could not be written.EventExA ramdisk's file table is full.erroresx.problem.visorfs.ramdisk.inodetable.full|The file table of the ramdisk '{1}' is full. As a result, the file {2} could not be created by the application '{3}'.EventExConfig store is reaching its critical size limit.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.EventExA ramdisk has a very high usage.warningRamdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left.EventExA VM could not fault in a page. The VM is terminated as further progress is impossible.erroresx.problem.vm.kill.unexpected.fault.failure|The VM using the config file {1} could not fault in a guest physical page from the hypervisor level swap file at {2}. The VM is terminated as further progress is impossible.EventExA virtual machine could not fault in a page. It is terminated as further progress is impossible.errorThe virtual machine could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleesx.problem.vm.kill.unexpected.fault.failure.2|{1} could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleEventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire.64|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA virtual machine contained a host physical page that was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.errorThe virtual machine contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2|{1} contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.noSwapResponse|The VM using the config file {1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA virtual machine did not respond to swap actions. 
It is terminated as further progress is impossible.errorThe virtual machine did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.esx.problem.vm.kill.unexpected.noSwapResponse.2|{1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA VM is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.erroresx.problem.vm.kill.unexpected.vmtrack|The VM using the config file {1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.errorThe virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.esx.problem.vm.kill.unexpected.vmtrack.2|{1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA user world daemon of a virtual machine could not fault in a page. The VM is terminated as further progress is impossible.errorThe user world daemon of this virtual machine could not fault in a page. The virtual machine is terminated as further progress is impossible.esx.problem.vm.kill.unexpected.vmx.fault.failure.2|The user world daemon of {1} could not fault in a page. The virtual machine is terminated as further progress is impossible.EventExMulti-extent ATS-only VMFS Volume unable to use ATSerroresx.problem.vmfs.ats.incompatibility.detected|Multi-extent ATS-only volume '{1}' ({2}) is unable to use ATS because HardwareAcceleratedLocking is disabled on this host: potential for introducing filesystem corruption. Volume should not be used from other hosts.EventExDevice Backing VMFS has lost ATS Supporterroresx.problem.vmfs.ats.support.lost|ATS-Only VMFS volume '{1}' not mounted. Host does not support ATS or ATS initialization has failed.EventExVMFS Locked By Remote Hosterroresx.problem.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.erroresx.problem.vmfs.extent.offline|An attached device {1} may be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system came onlineinfoesx.problem.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Heartbeat Corruption Detected.erroresx.problem.vmfs.heartbeat.corruptondisk|At least one corrupt on-disk heartbeat region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Volume Connectivity Restoredinfoesx.problem.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfoesx.problem.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. 
Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterroresx.problem.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerroresx.problem.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Volume will remain in read-only metadata mode with limited write support until journal can be created.EventExTrying to acquire lock on an already locked file. - File descriptionerror{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}esx.problem.vmfs.lock.busy.filedesc|{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}EventExTrying to acquire lock on an already locked file. FilenameerrorLock(s) held on file {1} by other host(s).esx.problem.vmfs.lock.busy.filename|Lock(s) held on file {1} by other host(s).EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk.v2|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected.errorInconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.lockmode.inconsistency.detected|Inconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExFailed to mount NFS volumeerroresx.problem.vmfs.nfs.mount.failed|NFS mount failed for {1}:{2} volume {3}. Status: {4}EventExLost connection to NFS servererroresx.problem.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExvmknic configured for NFS has been removedwarningesx.problem.vmfs.nfs.vmknic.removed|vmknic {1} removed, NFS{2} datastore {3} configured with the vmknic will be inaccessible.EventExNFS volume average I/O Latency has exceeded configured threshold for the current configured periodwarningesx.problem.vmfs.nfs.volume.io.latency.exceed.threshold.period|NFS volume {1} average I/O latency {2}(us) has exceeded threshold {3}(us) for last {4} minutesEventExNFS volume I/O Latency going highwarningesx.problem.vmfs.nfs.volume.io.latency.high|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExNFS volume I/O Latency exceeding thresholdwarningesx.problem.vmfs.nfs.volume.io.latency.high.exceed.threshold|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds. 
Exceeded threshold {4} microsecondsEventExNo space on NFS volume.warningesx.problem.vmfs.nfs.volume.no.space|{1}: No space on NFS volume.EventExVMFS Resource Corruption Detectederroresx.problem.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected on spanned volume.errorInconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanned.lockmode.inconsistency.detected|Inconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.EventExIncompatible VMFS span state detected.errorIncompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanstate.incompatibility.detected|Incompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExRemote logging host has become unreachable.erroresx.problem.vmsyslogd.remote.failure|The host "{1}" has become unreachable. Remote logging to this host has stopped.ExtendedEventLogging to storage has failed.erroresx.problem.vmsyslogd.storage.failure|Logging to storage has failed. Logs are no longer being stored locally on this host.EventExThe configured log directory cannot be used. The default directory will be used instead.erroresx.problem.vmsyslogd.storage.logdir.invalid|The configured log directory {1} cannot be used. The default directory {2} will be used instead.EventExLog daemon has failed for an unexpected reason.erroresx.problem.vmsyslogd.unexpected|Log daemon has failed for an unexpected reason: {1}EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.errorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.EventExvSAN detected LSN mismatch in mirrorswarningvSAN detected LSN mismatch in mirrors for object {1}.esx.problem.vob.vsan.dom.lsnmismatcherror|vSAN detected LSN mismatch in mirrors for object {1}.EventExResync encountered no space errorwarningResync encountered no space error for component {1} on disk {2}.esx.problem.vob.vsan.dom.nospaceduringresync|Resync encountered no space error for component {1} on disk {2}. Resync will resume once space is freed up on this disk. 
Need around {3}MB to resync the component on this diskEventExResync is delayed.warningResync is delayed for component {1} on disk {2} for object {3}.esx.problem.vob.vsan.dom.resyncdecisiondelayed|Resync is delayed for component {1} on disk {2} until data availability is regained for object {3} on the remote site.EventExResync timed outwarningResync timed out for component {2} on disk {3}.esx.problem.vob.vsan.dom.resynctimeout|Resync timed out as no progress was made in {1} minute(s) for component {2} on disk {3}. Resync will be tried again for this component. The remaining resync is around {4}MB.EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskerrorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskunrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.unrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.EventExNVMe critical health warning for disk. The disk's backup device has failed.errorNVMe critical health warning for disk {1}. The disk's backup device has failed.esx.problem.vob.vsan.lsom.backupfailednvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's backup device has failed.EventExOffline event on component.warningOffline event issued for component: {1}, flag: {2}, reason: {3}.esx.problem.vob.vsan.lsom.componentoffline|Offline event issued for component: {1}, flag: {2}, reason: {3}.EventExvSAN Node: Near node component count limit.warningvSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).esx.problem.vob.vsan.lsom.componentthreshold|vSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).EventExEvacuation has failed for device and it will be retried by DDH.errorEvacuation has failed for device {1} and it will be retried by DDH.esx.problem.vob.vsan.lsom.ddhEvacFailed|Evacuation has failed for device {1} and it will be retried by DDH.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.devicerepair|vSAN device {1} is being repaired due to I/O failures, and will be out of service until the repair is complete. If the device is part of a dedup disk group, the entire disk group will be out of service until the repair is complete.EventExvSAN device has high latency. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithhighlatency|vSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.EventExvSAN device smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} smart health status is impending failure. 
It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithsmartfailure|vSAN device {1} smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.EventExvSAN device is under permanent failure.errorvSAN device {1} is under permanent failure.esx.problem.vob.vsan.lsom.diskerror|vSAN device {1} is under permanent failure.EventExFailed to create a new disk group.errorFailed to create new disk group {1}. The system has reached the maximum amount of disk groups allowed {2} for the current amount of memory {3}. Add more memory.esx.problem.vob.vsan.lsom.diskgrouplimit|Failed to create new disk group {1}. The system has reached the maximum amount of disk groups allowed {2} for the current amount of memory {3}. Add more memory.EventExvSAN diskgroup log is congested.errorvSAN diskgroup {1} log is congestedesx.problem.vob.vsan.lsom.diskgrouplogcongested|vSAN diskgroup {1} log is congested.EventExvSAN disk group is under congestion. It will be remediated. No action is needed.warningvSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.esx.problem.vob.vsan.lsom.diskgroupundercongestion|vSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.EventExFailed to add disk to disk group.errorFailed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.esx.problem.vob.vsan.lsom.disklimit2|Failed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.EventExvSAN device is under propagated error.errorvSAN device {1} is under propagated erroresx.problem.vob.vsan.lsom.diskpropagatederror|vSAN device {1} is under propagated error.EventExvSAN device is under propagated permanent error.errorvSAN device {1} is under propagated permanent erroresx.problem.vob.vsan.lsom.diskpropagatedpermerror|vSAN device {1} is under propagated permanent error.EventExvSAN device is unhealthy.errorvSAN device {1} is unhealthyesx.problem.vob.vsan.lsom.diskunhealthy|vSAN device {1} is unhealthy.EventExEvacuation failed for device due to insufficient resources and it will be retried.errorEvacuation failed for device {1} due to insufficient resources and it will be retried.esx.problem.vob.vsan.lsom.evacFailedInsufficientResources|Evacuation failed for device {1} due to insufficient resources and it will be retried. Please make resources available for evacuation.EventExDeleted invalid metadata component.warningDeleted invalid metadata component: {1}.esx.problem.vob.vsan.lsom.invalidMetadataComponent|Deleted invalid metadata component: {1}.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.metadataURE|vSAN device {1} encountered an unrecoverable read error. This disk will be evacuated and rebuilt. If the device is part of a dedup disk group, the entire disk group will be evacuated and rebuilt.EventExNVMe disk critical health warning for disk. Disk is now read only.errorNVMe critical health warning for disk {1}. Disk is now read only.esx.problem.vob.vsan.lsom.readonlynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1} is: The NVMe disk has become read only.EventExNVMe critical health warning for disk. 
The disk has become unreliable.errorNVMe critical health warning for disk {1}. The disk has become unreliable.esx.problem.vob.vsan.lsom.reliabilitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk has become unreliable.EventExNVMe critical health warning for disk. The disk's spare capacity is below threshold.errorNVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.esx.problem.vob.vsan.lsom.sparecapacitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.storagepoolURE|vSAN device {1} encountered an unrecoverable read error. This disk will be rebuilt.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.storagepoolrepair|vSAN device {1} is being repaired due to I/O failures and will be out of service until the repair is complete.EventExNo response for I/O on vSAN device.errorNo response for I/O on vSAN device {1}.esx.problem.vob.vsan.lsom.storagepoolstuckio|No response for I/O on vSAN device {1}.EventExvSAN device detected suspended I/Os.errorvSAN device {1} detected suspended I/Os.esx.problem.vob.vsan.lsom.stuckio|vSAN device {1} detected suspended I/Os. Taking the host out of service to avoid affecting the vSAN cluster.EventExvSAN device detected stuck I/O error.errorvSAN device {1} detected stuck I/O error.esx.problem.vob.vsan.lsom.stuckiooffline|vSAN device {1} detected stuck I/O error. Marking the device as offline.EventExvSAN device is under propagated stuck I/O error.errorvSAN device {1} is under propagated stuck I/O error.esx.problem.vob.vsan.lsom.stuckiopropagated|vSAN device {1} is under propagated stuck I/O error. Marking the device as offline.EventExvSAN device detected I/O timeout error.errorvSAN device {1} detected I/O timeout error.esx.problem.vob.vsan.lsom.stuckiotimeout|vSAN device {1} detected I/O timeout error. This may lead to stuck I/O.EventExNVMe critical health warning for disk. The disk's temperature is beyond threshold.errorNVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.esx.problem.vob.vsan.lsom.temperaturenvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.EventExvSAN device has gone offline.errorvSAN device {1} has gone offline.esx.problem.vob.vsan.pdl.offline|vSAN device {1} has gone offline.EventExA ZDOM object is paused due to continuous fail-stops.warningZDOM object {1} is paused on host {2}, numFailStops={3}.esx.problem.vob.vsan.zdom.failstoppaused|ZDOM object {1} is paused on host {2}, numFailStops={3}.ExtendedEventTest with no arguments.infoesx.problem.vobdtestcorrelator.test.0|Test with no argumentsEventExTest with int argument.infoesx.problem.vobdtestcorrelator.test.1d|Test with int argument: {1}EventExTest with string argument.infoesx.problem.vobdtestcorrelator.test.1s|Test with string argument: {1}EventExTest with huge string argument.infoesx.problem.vobdtestcorrelator.test.hugestr|Test with huge string argument: {1}EventExVpxa crashed and a core file was created.warningesx.problem.vpxa.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. 
This might have caused connections to the host to be dropped.EventExVpxa crashed and an encrypted core file was created.warningesx.problem.vpxa.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventvSAN clustering services have been disabled.warningvSAN clustering and directory services have been disabled thus will be no longer available.esx.problem.vsan.clustering.disabled|vSAN clustering and directory services have been disabled thus will be no longer available.EventExData component found on witness host.warningData component {1} found on witness host is ignored.esx.problem.vsan.dom.component.datacomponent.on.witness.host|Data component {1} found on witness host is ignored.EventExvSAN Distributed Object Manager failed to initializewarningvSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.esx.problem.vsan.dom.init.failed.status|vSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.EventExOne or more disks exceed its/their warning usage of estimated endurance threshold.infoOne or more disks exceed its/their warning usage of estimated endurance threshold.esx.problem.vsan.health.ssd.endurance|Disks {Disk Name} in Cluster {Cluster Name} have exceeded warning usage of their estimated endurance threshold {Disk Percentage Threshold}, currently at {Disk Percentage Used} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds the estimated endurance threshold.errorOne of the disks exceeds the estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.error|Disks {1} have exceeded their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds 90% of its estimated endurance threshold.warningOne of the disks exceeds 90% of its estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.warning|Disks {1} have exceeded 90 percent usage of their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.errorOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.esx.problem.vsan.health.vsanesa.pdl|Disk {1} is detected with PDL in vSAN ESA Cluster. Please check the host for further details.EventExvSAN device Memory/SSD congestion has changed.infoLSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.esx.problem.vsan.lsom.congestionthreshold|LSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.EventExA vmknic added to vSAN network configuration doesn't have valid IP. 
Network is not ready.errorvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There is no other active network configuration and therefore the vSAN node doesn't have network connectivity.esx.problem.vsan.net.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There is no other active network configuration and therefore the vSAN node doesn't have network connectivity.ExtendedEventvSAN doesn't have any redundancy in its network configuration.warningvSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.lost|vSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.ExtendedEventvSAN is operating on reduced network redundancy.warningvSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.reduced|vSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.ExtendedEventvSAN doesn't have any network configuration for use.errorvSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.esx.problem.vsan.no.network.connectivity|vSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.EventExA vmknic added to vSAN network configuration doesn't have valid IP. It will not be in use.warningvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed that may cause problems.esx.problem.vsan.vmknic.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed that may cause problems.EventExFailed to add shared virtual disk. Maximum count reachederroresx.problem.vscsi.shared.vmdk.add.failure.max.count|Failed to add shared virtual disk. Maximum number of shared vmdks supported per ESX host is {1}EventExNo free slots availableerroresx.problem.vscsi.shared.vmdk.no.free.slot.available|No free slot available. Maximum number of virtual machines supported in MSCS cluster is {1}EventExFailed to power on virtual machines on shared VMDK with running virtual machineerroresx.problem.vscsi.shared.vmdk.virtual.machine.power.on.failed|Two or more virtual machines (\"{1}\" and \"{2}\") sharing the same virtual disk are not allowed to be Powered-On on the same host.EventExVVol container has gone offline.erroresx.problem.vvol.container.offline|VVol container {1} has gone offline: isPEAccessible {2}, isVPAccessible {3}.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. 
Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.EventExCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated.thirdPartyProv|CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. 
Please refer to KB 95798 for more details.EventExApplication consistent sync completed.infoApplication consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Application consistent sync completed for virtual machine {vm.name} on host {host.name}.Application consistent sync completed for virtual machine {vm.name}.Application consistent sync completed.hbr.primary.AppQuiescedDeltaCompletedEvent|Application consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)ExtendedEventConnection to VR Server restored.infoConnection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Connection to VR Server restored for virtual machine {vm.name} on host {host.name}.Connection to VR Server restored for virtual machine {vm.name}.Connection to VR Server restored.hbr.primary.ConnectionRestoredToHbrServerEvent|Connection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExSync stopped.warningSync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped: {reason.@enum.hbr.primary.ReasonForDeltaAbort}hbr.primary.DeltaAbortedEvent|Sync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}EventExSync completed.infoSync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync completed for virtual machine {vm.name} on host {host.name}.Sync completed for virtual machine {vm.name}.Sync completed.hbr.primary.DeltaCompletedEvent|Sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventSync started.infoSync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync started by {userName} for virtual machine {vm.name} on host {host.name}.Sync started by {userName} for virtual machine {vm.name}.Sync started by {userName}.hbr.primary.DeltaStartedEvent|Sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFile system consistent sync completed.infoFile system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.File system consistent sync completed for virtual machine {vm.name} on host {host.name}.File system consistent sync completed for virtual machine {vm.name}.File system consistent sync completed.hbr.primary.FSQuiescedDeltaCompletedEvent|File system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)EventExFailed to start sync.errorFailed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name} on host {host.name}: 
{reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartDeltaEvent|Failed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExFailed to start full sync.errorFailed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name} on host {host.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartSyncEvent|Failed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExDisk replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}hbr.primary.InvalidDiskReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}EventExVirtual machine replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}hbr.primary.InvalidVmReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}ExtendedEventVR Server does not support network compression.warningVR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support network compression for virtual machine {vm.name} on host {host.name}.VR Server does not support network compression for virtual machine {vm.name}.VR Server does not support network 
compression.hbr.primary.NetCompressionNotOkForServerEvent|VR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server supports network compression.infoVR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server supports network compression for virtual machine {vm.name} on host {host.name}.VR Server supports network compression for virtual machine {vm.name}.VR Server supports network compression.hbr.primary.NetCompressionOkForServerEvent|VR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExNo connection to VR Server.warningNo connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server: {reason.@enum.hbr.primary.ReasonForNoServerConnection}hbr.primary.NoConnectionToHbrServerEvent|No connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}EventExVR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}errorVR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}hbr.primary.NoProgressWithHbrServerEvent|VR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}ExtendedEventPrepare Delta Time exceeds configured RPO.warningPrepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name}.Prepare Delta Time exceeds configured RPO.hbr.primary.PrepareDeltaTimeExceedsRpoEvent|Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventQuiescing is not supported for this virtual machine.warningQuiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing is not supported for virtual machine {vm.name} on host {host.name}.Quiescing is not supported for virtual machine {vm.name}.Quiescing is not supported for this virtual machine.hbr.primary.QuiesceNotSupported|Quiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server is compatible with the configured RPO.infoVR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in 
cluster {computeResource.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name}.VR Server is compatible with the configured RPO.hbr.primary.RpoOkForServerEvent|VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server does not support the configured RPO.warningVR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server does not support the configured RPO for virtual machine {vm.name}.VR Server does not support the configured RPO.hbr.primary.RpoTooLowForServerEvent|VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFull sync completed.infoFull sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync completed for virtual machine {vm.name} on host {host.name}.Full sync completed for virtual machine {vm.name}.Full sync completed.hbr.primary.SyncCompletedEvent|Full sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventFull sync started.infoFull sync started for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync started for virtual machine {vm.name} on host {host.name}.Full sync started for virtual machine {vm.name}.Full sync started.hbr.primary.SyncStartedEvent|Full sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventReplication paused.infoReplication paused for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Replication paused for virtual machine {vm.name} on host {host.name}.Replication paused for virtual machine {vm.name}.Replication paused.hbr.primary.SystemPausedReplication|Replication paused by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.warningQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.hbr.primary.UnquiescedDeltaCompletedEvent|Quiescing failed or the virtual machine is powered off. 
Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).EventExReplication configuration changed.infoReplication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} on host {host.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).hbr.primary.VmReplicationConfigurationChangedEvent|Replication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).AccountCreatedEventAccount createdinfoAn account was createdAccount {spec.id} was created on host {host.name} <EventLongDescription id="vim.event.AccountCreatedEvent"> <description> An account has been created on the host </description> </EventLongDescription> AccountRemovedEventAccount removedinfoAccount {account} was removedAccount {account} was removed on host {host.name} <EventLongDescription id="vim.event.AccountRemovedEvent"> <description> An account has been removed from the host </description> </EventLongDescription> AccountUpdatedEventAccount updatedinfoAccount {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}' <EventLongDescription id="vim.event.AccountUpdatedEvent"> <description> An account has been updated on the host </description> </EventLongDescription> AdminPasswordNotChangedEventAdministrator password not changedinfoThe default password for the root user has not been changedThe default password for the root user on the host {host.name} has not been changed <EventLongDescription id="vim.event.AdminPasswordNotChangedEvent"> <description> The default password for the Administrator user on the host has not been changed </description> <cause> <description> You have not changed the password for the Administrator user on the host so the default password is still active </description> <action> Change the password for the Administrator user on the host </action> </cause> </EventLongDescription> AlarmAcknowledgedEventAlarm acknowledgedinfoAcknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}'Acknowledged alarm '{alarm.name}' on {entity.name}AlarmActionTriggeredEventAlarm action triggeredinfoAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarmClearedEventAlarm clearedinfoManually cleared alarm 
'{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}AlarmCreatedEventAlarm createdinfoCreated alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}'Created alarm '{alarm.name}' on {entity.name}AlarmEmailCompletedEventAlarm email sentinfoAlarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}AlarmEmailFailedEventCannot send alarm emailerrorAlarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to} <EventLongDescription id="vim.event.AlarmEmailFailedEvent"> <description> An error occurred while sending email notification of a triggered alarm </description> <cause> <description>Failed to send email for a triggered alarm</description> <action>Check the vCenter Server SMTP settings for sending email notifications</action> </cause> </EventLongDescription> AlarmEvent<Alarm Event>info<internal>AlarmReconfiguredEventAlarm reconfiguredinfoReconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}'Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}. <EventLongDescription id="vim.event.AlarmReconfiguredEvent"> <description> An alarm has been reconfigured </description> <cause> <description>A user has reconfigured an alarm</description> </cause> </EventLongDescription> AlarmRemovedEventAlarm removedinfoRemoved alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}'Removed alarm '{alarm.name}' on {entity.name}AlarmScriptCompleteEventAlarm script completedinfoAlarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}AlarmScriptFailedEventAlarm script not completederrorAlarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg} <EventLongDescription id="vim.event.AlarmScriptFailedEvent"> <description> The vCenter Server logs this event if an error occurs while running a script after an alarm triggers. </description> <cause> <description>There was an error running the script</description> <action>Fix the script or failure condition</action> </cause> </EventLongDescription> AlarmSnmpCompletedEventAlarm SNMP trap sentinfoAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarmSnmpFailedEventAlarm SNMP trap not senterrorAlarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg} <EventLongDescription id="vim.event.AlarmSnmpFailedEvent"> <description> The vCenter Server logs this event if an error occurs while sending an SNMP trap when an alarm triggers. </description> <cause> <description>An SNMP trap could not be sent for a triggered alarm</description> <action>Check the vCenter Server SNMP settings. 
Make sure that the vCenter Server network can handle SNMP packets.</action> </cause> </EventLongDescription> AlarmStatusChangedEventAlarm status changedinfoAlarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}AllVirtualMachinesLicensedEventAll virtual machines are licensedinfoAll running virtual machines are licensedAlreadyAuthenticatedSessionEventAlready authenticatedinfoUser cannot logon since the user is already logged onAuthorizationEvent<Authorization Event>info<internal>BadUsernameSessionEventInvalid user nameerrorCannot login {userName}@{ipAddress} <EventLongDescription id="vim.event.BadUsernameSessionEvent"> <description> A user attempted to log in with an unknown or invalid username </description> <cause> <description> The username is unknown to the system </description> <action> Use a username that is included in the system user directory </action> <action> On Linux, verify that the user directory is correctly configured </action> <action> If you are using Active Directory, check the health of the domain controller </action> </cause> <cause> <description> The user provided an invalid password </description> <action> Supply the correct password </action> </cause> </EventLongDescription> CanceledHostOperationEventCanceled host operationinfoThe operation performed on host {host.name} was canceledThe operation performed on host {host.name} was canceledThe operation was canceledThe operation performed on host {host.name} in {datacenter.name} was canceled <EventLongDescription id="vim.event.CanceledHostOperationEvent"> <description> An operation performed on the host was canceled </description> <cause> <description> A previous event in the sequence of events will provide more information about the cause of this cancellation </description> </cause> </EventLongDescription> ClusterComplianceCheckedEventChecked cluster for complianceinfoChecked cluster {computeResource.name} for complianceCluster was checked for compliance with profile {profile.name}Checked cluster for compliance <EventLongDescription id="vim.event.ClusterComplianceCheckedEvent"> <description> The cluster was checked for compliance with a cluster profile </description> <cause> <description> The user initiated a compliance check on the cluster against a cluster profile </description> </cause> <cause> <description> A scheduled task has initiated a compliance check for the cluster against a cluster profile </description> </cause> </EventLongDescription> ClusterCreatedEventCluster createdinfoCreated cluster {computeResource.name}Created in folder {parent.name}Created cluster {computeResource.name} in {datacenter.name}ClusterDestroyedEventCluster deletedinfoRemoved cluster {computeResource.name}Removed clusterRemoved cluster {computeResource.name} in datacenter {datacenter.name}ClusterEvent<Cluster Event>info<internal>ClusterOvercommittedEventCluster overcommittederrorInsufficient capacity in cluster {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource 
configurationInsufficient capacity in cluster {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.ClusterOvercommittedEvent"> <description> The cumulative CPU and/or memory resources of all hosts in the cluster are not adequate to satisfy the resource reservations of all virtual machines in the cluster </description> <cause> <description>You attempted to power on a virtual machine bypassing vCenter Server. This condition occurs when you attempt the power on using the vSphere Client directly connected to the host.</description> <action>In a DRS cluster, do not power on virtual machines bypassing vCenter Server</action> </cause> <cause> <description>A host was placed in Maintenance, Standby, or Disconnected Mode</description> <action>Bring any host in Maintenance, Standby, or Disconnected mode out of these modes</action> </cause> </EventLongDescription> ClusterReconfiguredEventCluster reconfiguredinfoReconfigured cluster {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Cluster reconfiguredReconfigured cluster {computeResource.name} in datacenter {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ClusterReconfiguredEvent"> <description> The cluster configuration was changed. The cluster configuration includes information about the DRS, DPM, EVC and vSphere HA settings of the cluster. All DRS rules are also stored in the cluster configuration. Editing the cluster configuration may trigger an invocation of DRS and/or enabling/disabling of vSphere HA on each host in the cluster. </description> </EventLongDescription> ClusterStatusChangedEventCluster status changedinfoConfiguration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.ClusterStatusChangedEvent"> <description> The cluster status has changed. This status is the status of the root resource pool that encompasses the entire cluster. A cluster status change may be accompanied by the removal of a configuration issue if one was previously detected. A cluster status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The cluster status changed to yellow</description> <action>Add more resources (more hosts), or reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The cluster status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> CustomFieldDefAddedEventCustom field definition addedinfoCreated new custom field definition {name}CustomFieldDefEvent<Custom Field Definition Event>info<internal>CustomFieldDefRemovedEventCustom field definition removedinfoRemoved field definition {name}CustomFieldDefRenamedEventCustom field definition renamedinfoRenamed field definition from {name} to {newName}CustomFieldEvent<Custom Field Event>info<internal>CustomFieldValueChangedEventCustom field value changedinfoChanged custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} in {datacenter.name} from '{prevState}' to '{value}'CustomizationEvent<Customization Event>info<internal>CustomizationFailed<An error occurred during customization>infoAn error occurred during customization, Reason: {reason.@enum.CustomizationFailed.ReasonCode}An error occurred during customization on VM {vm.name}, Reason: {reason.@enum.CustomizationFailed.ReasonCode}. 
See customization log at {logLocation} on the guest OS for details.CustomizationLinuxIdentityFailedCustomization Linux Identity FailederrorAn error occurred while setting up Linux identity. See log file '{logLocation}' on guest OS for details. <EventLongDescription id="vim.event.CustomizationLinuxIdentityFailed"> <description> The guest operating system Linux distribution is not supported by the customization scripts. Please refer to the VMware vSphere Compatibility Matrix for the list of the supported Linux distributions. </description> <cause> <description> Customization of the target guest operating system Linux distribution is not supported. </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> CustomizationNetworkSetupFailedCannot complete customization network setuperrorAn error occurred while setting up network properties of the guest OS. See the log file {logLocation} in the guest OS for details. <EventLongDescription id="vim.event.CustomizationNetworkSetupFailed"> <description> The customization scripts failed to set the parameters in the corresponding configuration files for Linux or in the Windows registry </description> <cause> <description> The Customization Specification contains an invalid host name or domain name </description> <action> Review the guest operating system log files for this event for more details </action> <action> Provide a valid host name for the target guest operating system. The name must comply with the host name and domain name definitions in RFC 952, 1035, 1123, 2181. </action> </cause> <cause> <description> Could not find a NIC with the MAC address specified in the Customization Package </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that there was no change in the virtual NIC MAC address between the creation of the Customization Package and its deployment. Deployment occurs during the first boot of the virtual machine after customization has been scheduled. </action> </cause> <cause> <description> The customization code needs read/write permissions for certain configuration files. These permissions were not granted to the 'root' account on Linux or to the account used by the VMware Tools Service on the Windows guest operating system. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Grant read/write permissions to the 'root' account for Linux or to the account used by the VMware Tools Service on the Windows guest operating system and the registry keys that need to be modified by the customization code </action> </cause> </EventLongDescription> CustomizationStartedEventStarted customizationinfoStarted customization of VM {vm.name}. Customization log located at {logLocation} in the guest OS.CustomizationSucceededCustomization succeededinfoCustomization of VM {vm.name} succeeded. Customization log located at {logLocation} in the guest OS.CustomizationSysprepFailedCannot complete customization SyspreperrorThe version of Sysprep {sysprepVersion} provided for customizing VM {vm.name} does not match the version of guest OS {systemVersion}. See the log file {logLocation} in the guest OS for more information. 
<EventLongDescription id="vim.event.CustomizationSysprepFailed"> <description> The sysprep files in the folder corresponding to the selected target guest operating system are not compatible with the actual version of the guest operating system </description> <cause> <description> The sysprep files in the folder corresponding to the target guest operating system (for example Windows XP) are for a different guest operating system (for example Windows 2003) </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> <cause> <description> The sysprep files in the folder corresponding to the guest operating system are for a different Service Pack, for example the guest operating system is Windows XP SP2 but the sysprep files are for Windows XP SP1. </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> </EventLongDescription> CustomizationUnknownFailureUnknown customization errorerrorAn error occurred while customizing VM {vm.name}. For details reference the log file {logLocation} in the guest OS. <EventLongDescription id="vim.event.CustomizationUnknownFailure"> <description> The customization component failed to set the required parameters inside the guest operating system </description> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the registry keys used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on registry keys used by the customization code </action> </cause> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the files and folders used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on the files and folders used by the customization code </action> </cause> <cause> <description> On Linux, an invalid or unsupported time zone is passed to the customization scripts and the time zone configuration failed as a result </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that a supported time zone is passed in Customization Specification. 
</action> </cause> <cause> <description> On Linux, the guest operating system 'root' account does not have read/write permissions for the configuration files that the customization scripts need to modify ('/etc/hosts') </description> <action> Grant read/write permissions for the configuration files to the guest operating system 'root' account </action> </cause> <cause> <description> To enable guest customization on Linux, in case open-vm-tools are used, you must also install the deployPkg plug-in. </description> <action> Follow kb.vmware.com/kb/2075048 to install the open-vm-tools deployPkg plug-in. </action> </cause> <cause> <description> Customization of the target guest operating system is not supported </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> DVPortgroupCreatedEventdvPort group createdinfodvPort group {net.name} was added to switch {dvs}.dvPort group {net.name} in {datacenter.name} was added to switch {dvs.name}.DVPortgroupDestroyedEventdvPort group deletedinfodvPort group {net.name} was deleted.dvPort group {net.name} in {datacenter.name} was deleted.DVPortgroupEventdvPort group eventinfodvPort group eventdvPort group eventDVPortgroupReconfiguredEventdvPort group reconfiguredinfodvPort group {net.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}dvPort group {net.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DVPortgroupRenamedEventdvPort group renamedinfodvPort group {oldName} was renamed to {newName}.dvPort group {oldName} in {datacenter.name} was renamed to {newName}DasAdmissionControlDisabledEventvSphere HA admission control disabledinfovSphere HA admission control disabled for cluster {computeResource.name}vSphere HA admission control disabledvSphere HA admission control disabled for cluster {computeResource.name} in {datacenter.name}DasAdmissionControlEnabledEventvSphere HA admission control enabledinfovSphere HA admission control enabled for cluster {computeResource.name}vSphere HA admission control enabledvSphere HA admission control enabled for cluster {computeResource.name} in {datacenter.name}DasAgentFoundEventvSphere HA agent foundinfoRe-established contact with a primary host in this vSphere HA clusterDasAgentUnavailableEventvSphere HA agent unavailableerrorUnable to contact a primary vSphere HA agent in cluster {computeResource.name}Unable to contact a primary vSphere HA agentUnable to contact a primary vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasAgentUnavailableEvent"> <description> vCenter Server is not able to contact any good primary hosts in the vSphere HA cluster. vSphere HA protection may not be available for virtual machines running in the cluster. In addition, you cannot enable or reconfigure vSphere HA on hosts in the cluster until contact between vCenter Server and a good primary host is restored. </description> <cause> <description> There was a network outage, and all hosts show up in the inventory as "not responding" </description> <action>Restore the network</action> </cause> <cause> <description>All the primary hosts in the cluster failed</description> <action> If the failed primary hosts cannot be restored, disable vSphere HA on the cluster, wait for the Unconfigure vSphere HA tasks to complete on all hosts, and re-enable vSphere HA on the cluster </action> </cause> </EventLongDescription> DasClusterIsolatedEventAll vSphere HA hosts isolatederrorAll hosts in the vSphere HA cluster {computeResource.name} were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster {computeResource.name} in {datacenter.name} were isolated from the network. Check the network configuration for proper network redundancy in the management network.DasDisabledEventvSphere HA disabled for clusterinfovSphere HA disabled for cluster {computeResource.name}vSphere HA disabled for this clustervSphere HA disabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasDisabledEvent"> <description> vSphere HA has been disabled on this host due to a user action. vSphere HA is disabled when a host is disconnected from vCenter Server or placed into maintenance or standby mode. Virtual machines on other hosts in the cluster will not be failed over to this host in the event of a host failure. In addition, if the host is disconnected, any virtual machines running on this host will not be failed if the host fails. 
Further, no attempt will be made by vSphere HA VM and Application Monitoring to reset VMs. </description> </EventLongDescription> DasEnabledEventvSphere HA enabled for clusterinfovSphere HA enabled for cluster {computeResource.name}vSphere HA enabled for this clustervSphere HA enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasEnabledEvent"> <description> vSphere HA has been enabled on this host due to a user action. vSphere HA is enabled when a host is added to or moved into a vSphere HA cluster or when vSphere HA is enabled on a cluster. If the host was already in a vSphere HA cluster, vSphere HA will be enabled when the host is reconnected to vCenter Server or brought out of maintenance or standby mode. vSphere HA will attempt to protect any VMs that are running on the host at the time that HA is enabled on it. </description> </EventLongDescription> DasHostFailedEventvSphere HA host failederrorA possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name} in cluster {computeResource.name} in {datacenter.name}DasHostIsolatedEventvSphere HA host isolatedwarningHost {isolatedHost.name} has been isolated from cluster {computeResource.name}Host {isolatedHost.name} has been isolatedHost has been isolated from clusterHost {isolatedHost.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasHostIsolatedEvent"> <description> vSphere HA detected that the host is network isolated. When a host is in this state, vSphere HA applies the power-off or shutdown host isolation response to virtual machines running on the host, and continues to monitor the virtual machines that are left powered on. While a host is in this state, vSphere HA's ability to restart virtual machines after a failure is impacted. vSphere HA only powers off or shuts down a virtual machine if the agent on the host determines that a master agent is responsible for the virtual machine. </description> <cause> <description> A host is network isolated if both of the following conditions are met: (1) isolation addresses have been configured and the host is unable to ping them; (2) the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <action> Resolve the networking problem that is preventing the host from pinging its isolation addresses and communicating with other hosts. Ensure that there is redundancy in the management networks used by vSphere HA. With redundancy, vSphere HA is able to communicate over more than one path thus reducing the chance of a host becoming isolated. 
</action> </cause> </EventLongDescription> DatacenterCreatedEventDatacenter createdinfoCreated in folder {parent.name}Created datacenter {datacenter.name}Created datacenter {datacenter.name} in folder {parent.name}DatacenterEvent<Datacenter Event>info<internal>DatacenterRenamedEventDatacenter renamedinfoRenamed datacenterRenamed datacenter from {oldName} to {newName}Renamed datacenter from {oldName} to {newName}DatastoreCapacityIncreasedEventDatastore capacity increasedinfoDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytesDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytes in {datacenter.name}DatastoreDestroyedEventDatastore deletedinfoRemoved unconfigured datastore {datastore.name}Removed unconfigured datastore {datastore.name}DatastoreDiscoveredEventDatastore discoveredinfoDiscovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name}Discovered datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreDiscoveredEvent"> <description> A datastore was discovered on a host </description> <cause> <description> A host that has access to this datastore was added to the datacenter </description> </cause> <cause> <description> The storage backing this datastore was unmasked to a host in the datacenter </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host and the datastore was visible on at least one other host in the datacenter prior to this operation. </description> </cause> </EventLongDescription> DatastoreDuplicatedEventDatastore duplicatederrorMultiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detectedMultiple datastores named {datastore} detected on host {host.name} in {datacenter.name}DatastoreEvent<Datastore Event>info<internal>DatastoreFileCopiedEventFile or directory copied to datastoreinfoCopy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Copy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileDeletedEventFile or directory deletedinfoDeletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Deletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileEvent<Datastore File Event>info<internal>DatastoreFileMovedEventFile or directory moved to datastoreinfoMove of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Move of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from 
'{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreIORMReconfiguredEventReconfigured Storage I/O Control on datastoreinfoReconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}DatastorePrincipalConfiguredDatastore principal configuredinfoConfigured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal}Configured datastore principal {datastorePrincipal} on host {host.name} in {datacenter.name}DatastoreRemovedOnHostEventDatastore removed from hostinfoRemoved datastore {datastore.name} from {host.name}Removed datastore {datastore.name}Removed datastore {datastore.name} from {host.name} in {datacenter.name}DatastoreRenamedEventDatastore renamedinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name}DatastoreRenamedOnHostEventDatastore renamed from hostinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreRenamedOnHostEvent"> <description> A datastore was renamed on a host managed by vCenter Server </description> <cause> <description> vCenter Server discovered datastore on a host and renamed the datastore because it already exists in the vCenter Server inventory under a different name. vCenter Server might also have renamed the datastore because the name conflicts with another datastore in the same datacenter. </description> </cause> </EventLongDescription> DrsDisabledEventDRS disabledinfoDisabled DRS on cluster {computeResource.name}Disabled DRSDisabled DRS on cluster {computeResource.name} in datacenter {datacenter.name}DrsEnabledEventDRS enabledinfoEnabled DRS on cluster {computeResource.name} with automation level {behavior}Enabled DRS with automation level {behavior}Enabled DRS on {computeResource.name} with automation level {behavior} in {datacenter.name}DrsEnteredStandbyModeEventDRS entered standby modeinfoDRS put {host.name} into standby modeDRS put {host.name} into standby modeDRS put the host into standby modeDRS put {host.name} into standby modeDrsEnteringStandbyModeEventDRS entering standby modeinfoDRS is putting {host.name} into standby modeDRS is putting {host.name} into standby modeDRS is putting the host into standby modeDRS is putting {host.name} into standby modeDrsExitStandbyModeFailedEventDRS cannot exit the host out of standby modeerrorDRS cannot move {host.name} out of standby modeDRS cannot move {host.name} out of standby modeDRS cannot move the host out of standby modeDRS cannot move {host.name} out of standby mode <EventLongDescription id="vim.event.DrsExitStandbyModeFailedEvent"> <description> DPM failed to power on a host in standby mode. DPM tried to power on a host using IPMI, iLO or Wake-on-LAN protocol, but the host did not power on. 
</description> <cause> <description>DPM could not communicate with the BMC on the host</description> <action>Verify the IPMI/iLO credentials entered in vCenter Server</action> <action>Verify that LAN access is enabled in the BMC</action> </cause> <cause> <description>The vMotion NIC on the host does not support Wake-on-LAN</description> <action>Select a vMotion NIC that supports Wake-on-LAN</action> </cause> </EventLongDescription> DrsExitedStandbyModeEventDRS exited standby modeinfoDRS moved {host.name} out of standby modeDRS moved {host.name} out of standby modeDRS moved the host out of standby modeDRS moved {host.name} out of standby modeDrsExitingStandbyModeEventDRS exiting standby modeinfoDRS is moving {host.name} out of standby modeDRS is moving {host.name} out of standby modeDRS is moving the host out of standby modeDRS is moving {host.name} out of standby modeDrsInvocationFailedEventDRS invocation not completederrorDRS invocation not completedDRS invocation not completedDRS invocation not completed <EventLongDescription id="vim.event.DrsInvocationFailedEvent"> <description> A DRS invocation failed to complete successfully. This condition can occur for a variety of reasons, some of which may be transient. </description> <cause> <description>An error was encountered during a DRS invocation</description> <action>Disable and re-enable DRS</action> </cause> </EventLongDescription> DrsRecoveredFromFailureEventDRS has recovered from the failureinfoDRS has recovered from the failureDRS has recovered from the failureDRS has recovered from the failureDrsResourceConfigureFailedEventCannot complete DRS resource configurationerrorUnable to apply DRS resource settings on host. {reason.msg}. This can significantly reduce the effectiveness of DRS.Unable to apply DRS resource settings on host {host.name} in {datacenter.name}. {reason.msg}. This can significantly reduce the effectiveness of DRS. <EventLongDescription id="vim.event.DrsResourceConfigureFailedEvent"> <description> The DRS resource settings could not be successfully applied to a host in the cluster. This condition is typically transient. </description> <cause> <description>DRS resource settings could not be applied to a host.</description> <action>DRS generates resource settings that map the cluster values to the host. However, in this case, the values could not be successfully applied to the host. This is typically a transient error caused by delayed synchronization from DRS to the host. If this condition persists, enable debug logging in vpxa and contact VMware Support. 
</action> </cause> </EventLongDescription> DrsResourceConfigureSyncedEventDRS resource configuration synchronizedinfoResource configuration specification returns to synchronization from previous failureResource configuration specification returns to synchronization from previous failure on host '{host.name}' in {datacenter.name}DrsRuleComplianceEventVM is now compliant with DRS VM-Host affinity rulesinfo{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} is now compliant with DRS VM-Host affinity rulesvirtual machine on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} in {datacenter.name} is now compliant with DRS VM-Host affinity rulesDrsRuleViolationEventVM is violating a DRS VM-Host affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} is violating a DRS VM-Host affinity rulevirtual machine on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host affinity ruleDrsSoftRuleViolationEventThe VM is violating a DRS VM-Host soft affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} is violating a DRS VM-Host soft affinity rulevirtual machine on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host soft affinity ruleDrsVmMigratedEventDRS VM migratedinfoDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name}DRS migrated {vm.name} from {sourceHost.name} to {host.name}DRS migrated {vm.name} from {sourceHost.name}Migrated from {sourceHost.name} to {host.name} by DRSDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmMigratedEvent"> <description> A virtual machine was migrated based on a DRS recommendation. The recommendation might have been made to achieve better load balancing in the cluster or to evacuate a host in the cluster that is being put into Standby or Maintenance Mode. 
</description> <cause> <description>DRS recommended the migration of a virtual machine</description> </cause> </EventLongDescription> DrsVmPoweredOnEventDRS VM powered oninfoDRS powered on {vm.name} on {host.name}DRS powered on {vm.name} on {host.name}DRS powered on {vm.name}DRS powered on the virtual machine on {host.name}DRS powered on {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmPoweredOnEvent"> <description> A virtual machine was powered on by the user and DRS chose a host for the virtual machine based on the current cluster load distribution combined with the virtual machine's resource requirements </description> <cause> <description>DRS chose a host for a virtual machine that was being powered on</description> </cause> </EventLongDescription> DuplicateIpDetectedEventDuplicate IP detectedinfoVirtual machine {macAddress} has a duplicate IP {duplicateIP}Virtual machine {macAddress} on host {host.name} has a duplicate IP {duplicateIP}DvpgImportEventImport Operation eventinfoImport operation with type {importType} was performed on {net.name}Import operation with type {importType} was performed on {net.name}DvpgRestoreEventRestore Operation eventinfoRestore operation was performed on {net.name}Restore operation was performed on {net.name}DvsCreatedEventvSphere Distributed Switch createdinfoA vSphere Distributed Switch {dvs.name} was createdA vSphere Distributed Switch {dvs.name} was created in {datacenter.name}.DvsDestroyedEventvSphere Distributed Switch deletedinfovSphere Distributed Switch {dvs.name} was deleted.vSphere Distributed Switch {dvs.name} in {datacenter.name} was deleted.DvsEventvSphere Distributed Switch eventinfovSphere Distributed Switch eventvSphere Distributed Switch eventDvsHealthStatusChangeEventHealth check status of the switch changed.infoHealth check status changed in vSphere Distributed Switch {dvs.name} on host {host.name}Health check status changed in vSphere Distributed Switch {dvs.name}Health check status was changed in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}DvsHostBackInSyncEventThe vSphere Distributed Switch configuration on the host was synchronized with that of the vCenter Server.infoThe vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.DvsHostJoinedEventHost joined the vSphere Distributed SwitchinfoThe host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name}.The host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostLeftEvent Host left vSphere Distributed SwitchinfoThe host {hostLeft.name} left the vSphere Distributed Switch {dvs.name}.The host {hostLeft.name} left the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostStatusUpdatedHost status changed on the vSphere Distributed SwitchinfoThe host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name}.The host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostWentOutOfSyncEventThe vSphere Distributed Switch configuration on the host differed from that of the vCenter Server.warningThe vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server. 
<EventLongDescription id="vim.event.DvsHostWentOutOfSyncEvent"> <description> The vSphere Distributed Switch configuration on the host differed from that of the vCenter Server </description> <cause> <description> The host was not connected to the vCenter Server when updates were sent </description> </cause> <cause> <description> vCenter Server failed to push the vSphere Distributed Switch configuration to the host in the past</description> </cause> </EventLongDescription> DvsImportEventImport Operation eventinfoImport operation with type {importType} was performed on {dvs.name}Import operation with type {importType} was performed on {dvs.name}DvsMergedEventvSphere Distributed Switch mergedinfovSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name}.vSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name} in {datacenter.name}.DvsPortBlockedEventdvPort blockedinfoThe dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortConnectedEventdvPort connectedinfoThe dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortCreatedEventdvPort createdinfoNew ports were created in the vSphere Distributed Switch {dvs.name}.New ports were created in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDeletedEventdvPort deletedinfoPorts were deleted in the vSphere Distributed Switch {dvs.name}.Deleted ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDisconnectedEventdvPort disconnectedinfoThe dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortEnteredPassthruEventdvPort in passthrough modeinfoThe dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortExitedPassthruEventdvPort not in passthrough modeinfoThe dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortJoinPortgroupEventA dvPort was moved into the dvPort group.infoThe dvPort {portKey} was moved into the dvPort group {portgroupName}.The dvPort {portKey} was moved into the dvPort group {portgroupName} in {datacenter.name}.DvsPortLeavePortgroupEventA dvPort was moved out of the dvPort group.infoThe dvPort {portKey} was moved out of the dvPort group {portgroupName}.The dvPort {portKey} was moved out of the dvPort group {portgroupName} in {datacenter.name}.DvsPortLinkDownEventdvPort link was downinfoThe dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortLinkUpEventdvPort link was upinfoThe dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortReconfiguredEventdvPort reconfiguredinfoPorts were reconfigured in the vSphere Distributed Switch {dvs.name}.
Ports changed {portKey}.
Changes are {configChanges}Reconfigured ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.
Ports changed {portKey}.
Changes are {configChanges}DvsPortRuntimeChangeEventdvPort runtime information changed.infoThe dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortUnblockedEventdvPort unblockedinfoThe dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortVendorSpecificStateChangeEventdvPort vendor specific state changed.infoThe dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsReconfiguredEventvSphere Distributed Switch reconfiguredinfoThe vSphere Distributed Switch {dvs.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}The vSphere Distributed Switch {dvs.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DvsRenamedEventvSphere Distributed Switch renamedinfoThe vSphere Distributed Switch {oldName} was renamed to {newName}.The vSphere Distributed Switch {oldName} in {datacenter.name} was renamed to {newName}.DvsRestoreEventRestore Operation eventinfoRestore operation was performed on {dvs.name}Restore operation was performed on {dvs.name}DvsUpgradeAvailableEventAn upgrade for the vSphere Distributed Switch is available.infoAn upgrade for vSphere Distributed Switch {dvs.name} is available. An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is available.DvsUpgradeInProgressEventAn upgrade for the vSphere Distributed Switch is in progress.infoAn upgrade for vSphere Distributed Switch {dvs.name} is in progress.An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is in progress.DvsUpgradeRejectedEventCannot complete the upgrade for the vSphere Distributed SwitchinfoAn upgrade for vSphere Distributed Switch {dvs.name} was rejected.Cannot complete an upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name}DvsUpgradedEventThe vSphere Distributed Switch was upgraded.infovSphere Distributed Switch {dvs.name} was upgraded.vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} was upgraded.EnteredMaintenanceModeEventEntered maintenance modeinfoHost {host.name} in {datacenter.name} has entered maintenance modeHost {host.name} in {datacenter.name} has entered maintenance modeEnter maintenance mode completed. All virtual machine operations are disabledHost {host.name} in {datacenter.name} has entered maintenance modeEnteredStandbyModeEventEntered standby modeinfoEntered standby modeThe host {host.name} is in standby modeEnteringMaintenanceModeEventEntering maintenance modeinfoHost {host.name} has started to enter maintenance modeHost {host.name} has started to enter maintenance modeStarted to enter maintenance mode. 
Waiting for virtual machines to shut down, suspend, or migrateHost {host.name} in {datacenter.name} has started to enter maintenance modeEnteringStandbyModeEventEntering standby modeinfoEntering standby modeThe host {host.name} is entering standby modeErrorUpgradeEventUpgrade errorerror{message} <EventLongDescription id="vim.event.ErrorUpgradeEvent"> <description> An error occurred during agent upgrade </description> </EventLongDescription> Event<Event>info<internal>ExitMaintenanceModeEventExit maintenance modeinfoHost {host.name} has exited maintenance modeHost {host.name} has exited maintenance modeExited maintenance modeHost {host.name} in {datacenter.name} has exited maintenance modeExitStandbyModeFailedEventCannot exit standby modeerrorCould not exit standby modeThe host {host.name} could not exit standby modeExitedStandbyModeEventExited standby modeinfoExited standby modeThe host {host.name} is no longer in standby modeExitingStandbyModeEventExiting standby modeinfoExiting standby modeThe host {host.name} is exiting standby modeFailoverLevelRestoredvSphere HA failover resources are sufficientinfoSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name}Sufficient resources are available to satisfy vSphere HA failover levelSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name} in {datacenter.name}GeneralEventGeneral eventinfoGeneral event: {message}GeneralHostErrorEventHost errorerrorError detected on {host.name}: {message}Error detected on {host.name}: {message}{message}Error detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostErrorEvent"> <description> An error occurred on the host </description> <cause> <description> The agent cannot send heartbeats because of a networking related failure on host </description> </cause> <cause> <description> The agent failed to update the configuration file on host </description> </cause> <cause> <description> The agent failed to save the configuration file to disk on host </description> </cause> <cause> <description> The provisioning module failed to load. As a result, all provisioning operations will fail on host. 
</description> </cause> </EventLongDescription> GeneralHostInfoEventHost informationinfoIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostInfoEvent"> <description> A general information event occurred on the host </description> </EventLongDescription> GeneralHostWarningEventHost warningwarningIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostWarningEvent"> <description> A general warning event occurred on the host </description> <cause> <description> Virtual machine creation might fail because the agent was unable to retrieve virtual machine creation options from the host </description> </cause> </EventLongDescription> GeneralUserEventUser eventuserUser logged event: {message} <EventLongDescription id="vim.event.GeneralUserEvent"> <description> A general user event occurred on the host </description> <cause> <description> A user initiated an action on the host </description> </cause> </EventLongDescription> GeneralVmErrorEventVM errorerrorError detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name}: {message}{message} on {host.name}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmErrorEvent"> <description> An error occurred on the virtual machine </description> </EventLongDescription> GeneralVmInfoEventVM informationinfoIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmInfoEvent"> <description> A general information event occurred on the virtual machine </description> </EventLongDescription> GeneralVmWarningEventVM warningwarningIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmWarningEvent"> <description> A general warning event occurred on the virtual machine </description> </EventLongDescription> GhostDvsProxySwitchDetectedEventThe vSphere Distributed Switch corresponding to the proxy switches on the host does not exist in vCenter Server or does not contain this host.infoThe vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host does not exist in vCenter Server or does not contain this host.The vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host {host.name} does not exist in vCenter Server or does not contain this host. 
<EventLongDescription id="vim.event.GhostDvsProxySwitchDetectedEvent"> <description> vCenter Server found a vSphere Distributed Switch proxy switch on the host that does not match any vSphere Distributed Switch in vCenter Server </description> <cause> <description> The vSphere Distributed Switch corresponding to the vSphere Distributed Switch proxy switch on the host was deleted while host was disconnected from the vCenter Server </description> </cause> <cause> <description> The host is no longer a member of the vSphere Distributed Switch that the proxy switch in the host corresponds to </description> </cause> </EventLongDescription> GhostDvsProxySwitchRemovedEventA ghost proxy switch on the host was resolved.infoA ghost proxy switch {switchUuid} on the host was resolved.A ghost proxy switch {switchUuid} on the host {host.name} was resolved.GlobalMessageChangedEventMessage changedinfoThe message changed: from '{prevMessage}' to '{message}'HealthStatusChangedEventStatus changeinfo{componentName} status changed from {oldStatus} to {newStatus}HostAddFailedEventCannot add hosterrorCannot add host {hostname}Cannot add host {hostname}Cannot add host {hostname} to datacenter {datacenter.name} <EventLongDescription id="vim.event.HostAddFailedEvent"> <description> Adding a host failed </description> </EventLongDescription> HostAddedEventHost AddedinfoAdded host {host.name}Added host {host.name}Added host {host.name} to datacenter {datacenter.name}HostAdminDisableEventHost administrator access disabledwarningAdministrator access to the host is disabledAdministrator access to the host {host.name} is disabled <EventLongDescription id="vim.event.HostAdminDisableEvent"> <description> Host permissions have been changed so that only the account used for vCenter Server operations has Administrator permissions </description> <cause> <description> This condition occurs when vCenter Server removes all other Administrator access to the host because the host has been placed in Lockdown Mode. The host can be managed by vCenter Server only and Only vCenter Server can re-enable Administrator access for other accounts. 
</description> </cause> </EventLongDescription> HostAdminEnableEventHost administrator access enabledwarningAdministrator access to the host has been restoredAdministrator access to the host {host.name} has been restored <EventLongDescription id="vim.event.HostAdminEnableEvent"> <description> vCenter Server has restored Administrator permissions for host user accounts whose permissions were disabled by Lockdown Mode </description> <cause> <description> This condition occurs when vCenter Server restores Administrator access to host user accounts that lost their Administrator permissions when the host was placed in Lockdown Mode </description> </cause> </EventLongDescription> HostCnxFailedAccountFailedEventCannot connect host and configure management accounterrorCannot connect {host.name}: cannot configure management accountCannot connect {host.name}: cannot configure management accountCannot connect: cannot configure management accountCannot connect {host.name} in {datacenter.name}: cannot configure management account <EventLongDescription id="vim.event.HostCnxFailedAccountFailedEvent"> <description> Could not connect to the host because setting up a management account failed </description> <cause> <description> The account used by vCenter Server to manage the host could not be configured </description> </cause> </EventLongDescription> HostCnxFailedAlreadyManagedEventCannot connect host - already managederrorCannot connect {host.name}: already managed by {serverName}Cannot connect {host.name}: already managed by {serverName}Cannot connect: already managed by {serverName}Cannot connect {host.name} in {datacenter.name}: already managed by {serverName} <EventLongDescription id="vim.event.HostCnxFailedAlreadyManagedEvent"> <description> Could not connect to the host because it is already being managed by a different vCenter Server instance. 
</description> <cause> <description> The host is already being managed by a different vCenter Server instance </description> <action> Remove the host from the inventory for the other vCenter Server instance </action> <action> Force the addition of the host to the current vCenter Server instance </action> </cause> </EventLongDescription> HostCnxFailedBadCcagentEventCannot connect host - incorrect CcagenterrorCannot connect {host.name} : server agent is not respondingCannot connect {host.name} : server agent is not respondingCannot connect: server agent is not respondingCannot connect host {host.name} in {datacenter.name} : server agent is not responding <EventLongDescription id="vim.event.HostCnxFailedBadCcagentEvent"> <description> Could not connect to the host because the host agent did not respond </description> <cause> <description> No response was received from the host agent </description> <action> Restart the host agent on the ESX/ESXi host </action> </cause> </EventLongDescription> HostCnxFailedBadUsernameEventCannot connect host - incorrect user nameerrorCannot connect {host.name}: incorrect user name or passwordCannot connect {host.name}: incorrect user name or passwordCannot connect: incorrect user name or passwordCannot connect {host.name} in {datacenter.name}: incorrect user name or password <EventLongDescription id="vim.event.HostCnxFailedBadUsernameEvent"> <description> Could not connect to the host due to an invalid username and password combination </description> <cause> <description> Invalid username and password combination </description> <action> Use the correct username and password </action> </cause> </EventLongDescription> HostCnxFailedBadVersionEventCannot connect host - incompatible versionerrorCannot connect {host.name}: incompatible versionCannot connect {host.name}: incompatible versionCannot connect: incompatible versionCannot connect {host.name} in {datacenter.name}: incompatible version <EventLongDescription id="vim.event.HostCnxFailedBadVersionEvent"> <description> Could not connect to the host due to an incompatible vSphere Client version </description> <cause> <description> The version of the vSphere Client is incompatible with the ESX/ESXi host so the connection attempt failed </description> <action> Download and use a compatible vSphere Client version to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedCcagentUpgradeEventCannot connect host - Ccagent upgradeerrorCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect: did not install or upgrade vCenter agent serviceCannot connect host {host.name} in {datacenter.name}. Did not install or upgrade vCenter agent service. 
<EventLongDescription id="vim.event.HostCnxFailedCcagentUpgradeEvent"> <description> Could not connect to the host because a host agent upgrade or installation is in process </description> <cause> <description> The host agent is being upgraded or installed on the host </description> <action> Wait for the host agent upgrade or installation to complete </action> </cause> </EventLongDescription> HostCnxFailedEventCannot connect hosterrorCannot connect host {host.name}: error connecting to hostCannot connect host {host.name}: error connecting to hostCannot connect: error connecting to hostCannot connect {host.name} in {datacenter.name}: error connecting to host <EventLongDescription id="vim.event.HostCnxFailedEvent"> <description> Could not connect to the host due to an unspecified condition </description> <cause> <description> Unknown cause of failure </description> </cause> </EventLongDescription> HostCnxFailedNetworkErrorEventCannot connect host - network errorerrorCannot connect {host.name}: network errorCannot connect {host.name}: network errorCannot connect: network errorCannot connect {host.name} in {datacenter.name}: network error <EventLongDescription id="vim.event.HostCnxFailedNetworkErrorEvent"> <description> Could not connect to the host due to a network error </description> <cause> <description> A Network error occurred while connecting to the host </description> <action> Verify that host networking is configured correctly </action> </cause> </EventLongDescription> HostCnxFailedNoAccessEventCannot connect host - no accesserrorCannot connect {host.name}: account has insufficient privilegesCannot connect {host.name}: account has insufficient privilegesCannot connect: account has insufficient privilegesCannot connect host {host.name} in {datacenter.name}: account has insufficient privileges <EventLongDescription id="vim.event.HostCnxFailedNoAccessEvent"> <description> Could not connect to the host due to insufficient account privileges </description> <cause> <description> The account used to connect to host does not have host access privileges </description> <action> Use an account that has sufficient privileges to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedNoConnectionEventCannot connect host - no connectionerrorCannot connect {host.name}Cannot connect {host.name}Cannot connect to hostCannot connect host {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostCnxFailedNoConnectionEvent"> <description> Could not connect to the host because the host is not in the network </description> <cause> <description> The host that you are attempting to connect to is not present in the network </description> <action> Verify that host networking is configured correctly and the host is connected to the same network as vCenter Server </action> </cause> </EventLongDescription> HostCnxFailedNoLicenseEventCannot connect host - no licenseerrorCannot connect {host.name}: not enough CPU licensesCannot connect {host.name}: not enough CPU licensesCannot connect: not enough CPU licensesCannot connect {host.name} in {datacenter.name}: not enough CPU licenses <EventLongDescription id="vim.event.HostCnxFailedNoLicenseEvent"> <description> Could not connect to the host due to a licensing issue </description> <cause> <description> There are not enough licenses to add the host to the vCenter Server inventory. This event is accompanied by a fault that specifies the missing licenses required to add the host successfully. 
</description> <action> Add the necessary licenses to vCenter Server and try adding the host again </action> </cause> </EventLongDescription> HostCnxFailedNotFoundEventCannot connect host - host not founderrorCannot connect {host.name}: incorrect host nameCannot connect {host.name}: incorrect host nameCannot connect: incorrect host nameCannot connect {host.name} in {datacenter.name}: incorrect host name <EventLongDescription id="vim.event.HostCnxFailedNotFoundEvent"> <description> Could not connect to the host because vCenter Server could not resolve the host name </description> <cause> <description> Unable to resolve the host name of the host </description> <action> Verify that the correct host name has been supplied for the host </action> <action> Configure the host to use a known-good (resolvable) host name </action> <action> Add the host name to the DNS server </action> </cause> </EventLongDescription> HostCnxFailedTimeoutEventCannot connect host - time-outerrorCannot connect {host.name}: time-out waiting for host responseCannot connect {host.name}: time-out waiting for host responseCannot connect: time-out waiting for host responseCannot connect {host.name} in {datacenter.name}: time-out waiting for host response <EventLongDescription id="vim.event.HostCnxFailedTimeoutEvent"> <description> Could not connect to the host because the connection attempt timed out </description> <cause> <description> A timeout occurred while attempting to connect to the host </description> </cause> </EventLongDescription> HostComplianceCheckedEventChecked host for complianceinfoHost {host.name} checked for compliance with profile {profile.name}Host {host.name} checked for compliance with profile {profile.name}Checked host for compliance with profile {profile.name}Host {host.name} checked for compliance. 
<EventLongDescription id="vim.event.HostComplianceCheckedEvent"> <description> The host was checked for compliance with a host profile </description> <cause> <description> The user initiated a compliance check on the host against a host profile </description> </cause> <cause> <description> A scheduled task initiated a compliance check for the host against a host profile </description> </cause> </EventLongDescription> HostCompliantEventHost compliant with profileinfoHost is in compliance with the attached profile.Host {host.name} is in compliance with the attached profileHostConfigAppliedEventHost configuration changes applied to hostinfoHost configuration changes applied to {host.name}Host configuration changes applied to {host.name}Host configuration changes applied.Host configuration changes applied.HostConnectedEventHost connectedinfoConnected to {host.name}Connected to {host.name}Established a connectionConnected to {host.name} in {datacenter.name}HostConnectionLostEventHost connection losterrorHost {host.name} is not respondingHost {host.name} is not respondingHost is not respondingHost {host.name} in {datacenter.name} is not responding <EventLongDescription id="vim.event.HostConnectionLostEvent"> <description> Connection to the host has been lost </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostDasDisabledEventvSphere HA agent disabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is disabledvSphere HA agent on {host.name} is disabledvSphere HA agent on this host is disabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is disabledHostDasDisablingEventDisabling vSphere HAinfovSphere HA is being disabled on {host.name}vSphere HA is being disabled on {host.name}Disabling vSphere HAvSphere HA is being disabled on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}HostDasEnabledEventvSphere HA agent enabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is enabledvSphere HA agent on {host.name} is enabledvSphere HA agent on this host is enabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is enabledHostDasEnablingEventEnabling host vSphere HA agentwarningEnabling vSphere HA agent on {host.name}Enabling vSphere HA agent on {host.name}Enabling vSphere HA agentEnabling vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.HostDasEnablingEvent"> <description> vSphere HA is being enabled on this host. 
</description> </EventLongDescription> HostDasErrorEventvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error {message}: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}HostDasEvent<Host vSphere HA Event>info<internal>HostDasOkEventvSphere HA agent configuredinfovSphere HA agent on host {host.name} is configured correctlyvSphere HA agent on host {host.name} is configured correctlyvSphere HA agent is configured correctlyvSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} is configured correctlyHostDisconnectedEventHost disconnectedinfoDisconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from host. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name} in {datacenter.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}HostEnableAdminFailedEventCannot restore administrator permissions to hosterrorCannot restore some administrator permissions to the hostCannot restore some administrator permissions to the host {host.name}HostEvent<Host Event>info<internal>HostExtraNetworksEventHost has extra vSphere HA networkserrorHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostExtraNetworksEvent"> <description> The host being added to the vSphere HA cluster has more management networks than existing hosts in the cluster. When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, be on the same subnets. This helps to facilitate the pairing up of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnet of the joining member that are not present on the existing member. </description> <cause> <description> The host has extra networks missing on an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. 
For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for the cluster. </action> </cause> </EventLongDescription> HostGetShortNameFailedEventCannot get short host nameerrorCannot complete command 'hostname -s' or returned incorrect name formatCannot complete command 'hostname -s' on host {host.name} or returned incorrect name format <EventLongDescription id="vim.event.HostGetShortNameFailedEvent"> <description> The hostname -s command has failed on the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostInAuditModeEventHost is in audit mode.infoHost is running in audit mode.Host {host.name} is running in audit mode. The host's configuration will not be persistent across reboots.HostInventoryFullEventHost inventory fullerrorMaximum ({capacity}) number of hosts allowed for this edition of vCenter Server has been reached <EventLongDescription id="vim.event.HostInventoryFullEvent"> <description> The vCenter Server Foundation license key currently allows only three hosts to be added to the inventory. Adding extra hosts results in errors and the logging of this event. </description> <cause> <description>Attempting to add more hosts than the number allowed by the license key assigned to vCenter Server</description> <action>Assign vCenter Server a license key that allows more hosts or has no host limit</action> </cause> </EventLongDescription> HostInventoryUnreadableEventHost Inventory UnreadableinfoThe virtual machine inventory file is damaged or unreadable.The virtual machine inventory file on host {host.name} is damaged or unreadable.HostIpChangedEventHost IP changedinfoIP address changed from {oldIP} to {newIP}IP address of the host {host.name} changed from {oldIP} to {newIP} <EventLongDescription id="vim.event.HostIpChangedEvent"> <description> The IP address of the host was changed </description> <cause> <description> The IP address of the host was changed through vCenter Server </description> </cause> <cause> <description> The IP address of the host was changed through the host </description> </cause> </EventLongDescription> HostIpInconsistentEventHost IP inconsistenterrorConfiguration of host IP address is inconsistent: address resolved to {ipAddress} and {ipAddress2}Configuration of host IP address is inconsistent on host {host.name}: address resolved to {ipAddress} and {ipAddress2}HostIpToShortNameFailedEventHost IP to short name not completederrorCannot resolve IP address to short nameCannot resolve IP address to short name on host {host.name} <EventLongDescription id="vim.event.HostIpToShortNameFailedEvent"> <description> The host's IP address could not be resolved to a short name </description> <cause> <description>The host or DNS records are improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostIsolationIpPingFailedEventvSphere HA isolation address unreachableerrorvSphere HA agent on host {host.name} in cluster {computeResource.name} could not reach isolation address: 
{isolationIp}vSphere HA agent on host {host.name} could not reach isolation address: {isolationIp}vSphere HA agent on this host could not reach isolation address: {isolationIp}vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} could not reach isolation address: {isolationIp} <EventLongDescription id="vim.event.HostIsolationIpPingFailedEvent"> <description> vSphere HA was unable to ping one or more of the isolation IP addresses. The inability to ping the addresses may cause HA to incorrectly declare the host as network isolated. A host is declared as isolated if it cannot ping the configured isolation addresses and the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <cause> <description>Could not ping the isolation address</description> <action>Correct the cause of the failure to ping the address</action> <action> Use advanced options to change the addresses used by vSphere HA for determining if a host is network isolated. By default, the isolation address is the default gateway of the management network. You can override the default using advanced options, or specify additional addresses to use for determining if a host is network isolated. Set the das.useDefaultIsolationAddress advanced option to "false" if you prefer that vSphere HA not use the default gateway as the isolation address. Specify the das.isolationAddress[X] advanced option for each isolation address that you want to specify. The new values take effect when vSphere HA is reconfigured for each host. </action> </cause> </EventLongDescription> HostLicenseExpiredEventHost license expirederrorA host license for {host.name} has expired <EventLongDescription id="vim.event.HostLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of host licenses on the license server and uses this event to notify you of any host licenses that are about to expire </description> <cause> <description>Host licenses on the license server are about to expire</description> <action>Update the license server to get a new version of the host license</action> </cause> </EventLongDescription> HostLocalPortCreatedEventA host local port is created to recover from management network connectivity loss.infoA host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic}.A host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic} on the host {host.name}.HostMissingNetworksEventHost is missing vSphere HA networkserrorHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostMissingNetworksEvent"> <description> The host being added to the vSphere HA cluster has fewer management networks than existing hosts in the cluster. 
When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, have common subnets. This helps facilitate the pairing of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnets of the existing member that are not present on the joining member. </description> <cause> <description> The host does not have networks compatible with an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. After you change the host's network configuration, reconfigure vSphere HA for this host. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for this cluster. </action> </cause> </EventLongDescription> HostMonitoringStateChangedEventvSphere HA host monitoring state changedinfovSphere HA host monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'HostNoAvailableNetworksEventHost has no available networks for vSphere HA communicationerrorHost {host.name} in cluster {computeResource.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}This host currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips} <EventLongDescription id="vim.event.HostNoAvailableNetworksEvent"> <description> The host being added to the vSphere HA cluster has no management networks available for vSphere HA cluster communication. The advanced option das.allowNetwork[X] is set, but no port group names match the advanced option for this host. 
</description> <cause> <description> The host has no port groups that match the names used by the advanced options to control which port groups vSphere HA uses </description> <action> Delete the advanced options das.allowNetwork[X] to allow vSphere HA to select the default management port groups </action> <action> Correct the names of the port groups specified in the advanced options to match those to be used by vSphere HA for this host </action> <action> Specify additional das.allowNetwork[X] advanced options to match the port group names for this host </action> </cause> </EventLongDescription> HostNoHAEnabledPortGroupsEventHost has no port groups enabled for vSphere HAerrorHost {host.name} in cluster {computeResource.name} has no port groups enabled for vSphere HA communication.Host {host.name} has no port groups enabled for vSphere HA communication.This host has no port groups enabled for vSphere HA communication.Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no port groups enabled for vSphere HA communication. <EventLongDescription id="vim.event.HostNoHAEnabledPortGroupsEvent"> <description> vSphere HA has determined that there are no management networks available on the host for vSphere HA inter-agent communication. </description> <cause> <description> The host has no vSphere HA management networks available </description> <action> If this event is observed when the host is being added to a vSphere HA cluster, change the host's network configuration to enable vSphere HA traffic on one or more port groups. By default, vSphere HA will use the Service Console port groups on ESX and ESXi hosts, the port groups with the Management Traffic checkbox selected. If vSphere HA was already configured on the host, it is possible that the host's network settings have changed and invalidated the management network configuration. Review the settings to make sure the port groups configured for management network still exist on the host and for ESXi the Management Traffic option is enabled. Reconfigure vSphere HA on the host after fixing any configuration issues. </action> </cause> </EventLongDescription> HostNoRedundantManagementNetworkEventNo redundant management network for hostwarningHost {host.name} in cluster {computeResource.name} currently has no management network redundancyHost {host.name} currently has no management network redundancyThis host currently has no management network redundancyHost {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no management network redundancy <EventLongDescription id="vim.event.HostNoRedundantManagementNetworkEvent"> <description> vSphere HA has determined that there is only one path for vSphere HA management traffic, resulting in a single point of failure. Best practices require more than one path for vSphere HA to use for heartbeats and cluster communication. A host with a single path is more likely to be declared dead, network partitioned or isolated after a network failure. If declared dead, vSphere HA will not respond if the host subsequently actually fails, while if declared isolated, vSphere HA may apply the isolation response thus impacting the uptime of the virtual machines running on it. 
</description> <cause> <description>There is only one port group available for vSphere HA communication</description> <action>Configure another Service Console port group on the ESX host</action> <action> Configure another port group on the ESXi host by selecting the "Management Traffic" check box </action> <action> Use NIC teaming on the management port group to allow ESX or ESXi to direct management traffic out of more than one physical NIC in case of a path failure </action> <action> If you accept the risk of not having redundancy for vSphere HA communication, you can eliminate the configuration issue by setting the das.ignoreRedundantNetWarning advanced option to "true" </action> </cause> </EventLongDescription> HostNonCompliantEventHost non-compliant with profileerrorHost is not in compliance with the attached profile.Host {host.name} is not in compliance with the attached profile <EventLongDescription id="vim.event.HostNonCompliantEvent"> <description> The host does not comply with the host profile </description> <cause> <description> The host is not in compliance with the attached profile </description> <action> Check the Summary tab for the host in the vSphere Client to determine the possible cause(s) of noncompliance </action> </cause></EventLongDescription> HostNotInClusterEventHost not in clustererrorNot a cluster member in {datacenter.name}Host {host.name} is not a cluster member in {datacenter.name}HostOvercommittedEventHost resource overcommittederrorInsufficient capacity in host {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource configurationInsufficient capacity in host {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.HostOvercommittedEvent"> <description> A host does not have sufficient CPU and/or memory capacity to satisfy its resource configuration. The host has its own admission control, so this condition should never occur. 
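The preceding event descriptions point to the das.allowNetwork[X] and das.ignoreRedundantNetWarning advanced options as the way to control which port groups vSphere HA uses and to suppress the redundancy warning. As a rough illustration only, the following pyVmomi sketch shows how such cluster-level HA advanced options could be set; the vCenter address, credentials, cluster name and port group name are hypothetical placeholders, not values taken from this log.

# Minimal pyVmomi sketch (assumption: a reachable vCenter and a cluster named
# "Cluster01"; all names and credentials below are hypothetical placeholders).
# It sets the das.allowNetwork[X] / das.ignoreRedundantNetWarning advanced
# options described in the event text above.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.com", user="administrator@vsphere.local",
                  pwd="secret", sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

# Locate the cluster through a container view.
view = content.viewManager.CreateContainerView(
    content.rootFolder, [vim.ClusterComputeResource], True)
cluster = next(c for c in view.view if c.name == "Cluster01")
view.DestroyView()

# Only dasConfig is filled in; modify=True leaves the rest of the cluster
# configuration untouched.
spec = vim.cluster.ConfigSpecEx()
spec.dasConfig = vim.cluster.DasConfigInfo()
spec.dasConfig.option = [
    # "das.allowNetwork0" follows the das.allowNetwork[X] naming pattern above.
    vim.option.OptionValue(key="das.allowNetwork0", value="Management Network"),
    vim.option.OptionValue(key="das.ignoreRedundantNetWarning", value="true"),
]
cluster.ReconfigureComputeResource_Task(spec, modify=True)
Disconnect(si)

As the event text recommends, reconfiguring vSphere HA on the affected hosts afterwards makes the agents pick up the new option values.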
</description> <cause> <description>A host has insufficient capacity for its resource configuration</description> <action>If you encounter this condition, contact VMware Support </action> </cause> </EventLongDescription> HostPrimaryAgentNotShortNameEventHost primary agent not specified as short nameerrorPrimary agent {primaryAgent} was not specified as a short namePrimary agent {primaryAgent} was not specified as a short name to host {host.name} <EventLongDescription id="vim.event.HostPrimaryAgentNotShortNameEvent"> <description> The primary agent is not specified in short name format </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostProfileAppliedEventHost profile appliedinfoProfile configuration applied to the hostProfile is applied on the host {host.name}HostReconnectionFailedEventCannot reconnect hosterrorCannot reconnect to {host.name}Cannot reconnect to {host.name}Cannot reconnectCannot reconnect to {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostReconnectionFailedEvent"> <description> Could not reestablish a connection to the host </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostRemovedEventHost removedinfoRemoved host {host.name}Removed host {host.name}Removed from inventoryRemoved host {host.name} in {datacenter.name}HostShortNameInconsistentEventHost short name inconsistenterrorHost names {shortName} and {shortName2} both resolved to the same IP address. Check the host's network configuration and DNS entries <EventLongDescription id="vim.event.HostShortNameInconsistentEvent"> <description> The name resolution check on the host returns different names for the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShortNameToIpFailedEventHost short name to IP not completederrorCannot resolve short name {shortName} to IP addressCannot resolve short name {shortName} to IP address on host {host.name} <EventLongDescription id="vim.event.HostShortNameToIpFailedEvent"> <description> The short name of the host can not be resolved to an IP address </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShutdownEventHost shut downinfoShut down of {host.name}: {reason}Shut down of {host.name}: {reason}Shut down of host: {reason}Shut down of {host.name} in {datacenter.name}: {reason}HostSpecificationChangedEventHost specification is changed on vCenterinfoHost specification of host {host.name} is changed on vCenter.Host specification of host {host.name} is changed on vCenter.Host specification is changed.Host specification of host {host.name} is changed on vCenter.HostSpecificationRequireEventPull host specification from host to vCenterinfoPull host specification of host {host.name} to vCenter.Pull host specification of host {host.name} to vCenter.Pull host specification to vCenter.Pull host specification of host {host.name} to vCenter.HostSpecificationUpdateEventHost specification is changed on hostinfoHost specification is changed on host {host.name}.Host specification is changed on host {host.name}.Host 
specification is changed.Host specification is changed on host {host.name}.HostStatusChangedEventHost status changedinfoConfiguration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.HostStatusChangedEvent"> <description> The host status has changed. This status is the status of the root resource pool that encompasses the entire host. A host status change may be accompanied by the removal of a configuration issue if one was previously detected. A host status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The host status changed to yellow</description> <action>Reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The host status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> HostSubSpecificationDeleteEventDelete host sub specification {subSpecName}infoDelete host sub specification {subSpecName} of host {host.name}.Delete host sub specification {subSpecName} of host {host.name}.Delete host sub specification.Delete host sub specification {subSpecName} of host {host.name}.HostSubSpecificationUpdateEventHost sub specification {hostSubSpec.name} is changed on hostinfoHost sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed.Host sub specification {hostSubSpec.name} is changed on host {host.name}.HostSyncFailedEventCannot synchronize hosterrorCannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. 
{reason.msg} <EventLongDescription id="vim.event.HostSyncFailedEvent"> <description> Failed to sync with the vCenter Agent on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> HostUpgradeFailedEventHost upgrade failederrorCannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostUpgradeFailedEvent"> <description> Failed to connect to the host due to an installation or upgrade issue </description> </EventLongDescription> HostUserWorldSwapNotEnabledEventThe userworld swap is not enabled on the hostwarningThe userworld swap is not enabled on the hostThe userworld swap is not enabled on the host {host.name}HostVnicConnectedToCustomizedDVPortEventSome host vNICs were reconfigured to use dvPorts with port level configuration, which might be different from the dvPort group.infoHost vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.Host {host.name} vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.HostWwnChangedEventHost WWN changedwarningWWNs are changedWWNs are changed for {host.name}HostWwnConflictEventHost WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {host.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.HostWwnConflictEvent"> <description> The WWN (World Wide Name) of this host conflicts with the WWN of another host or virtual machine </description> <cause> <description> The WWN of this host conflicts with WWN of another host </description> </cause> <cause> <description> The WWN of this host conflicts with WWN of another virtual machine</description> </cause> </EventLongDescription> IncorrectHostInformationEventIncorrect host informationerrorInformation needed to acquire the correct set of licenses not providedHost {host.name} did not provide the information needed to acquire the correct set of licenses <EventLongDescription id="vim.event.IncorrectHostInformationEvent"> <description> The host did not provide the information needed to acquire the correct set of licenses </description> <cause> <description> The cpuCores, cpuPackages or hostType information on the host is not valid </description> </cause> <cause> <description> The host information is not available because host was added as disconnected </description> </cause> </EventLongDescription> InfoUpgradeEventInformation upgradeinfo{message}InsufficientFailoverResourcesEventvSphere HA failover resources are insufficienterrorInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name}Insufficient resources to satisfy vSphere HA failover levelInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.InsufficientFailoverResourcesEvent"> <description> The cluster does not have enough unreserved capacity to satisfy the level configured for vSphere HA admission control. 
Failovers may still be performed by vSphere HA but will be on a best effort basis. </description> <cause> <description> If the "number of host failures to tolerate" policy is configured and a few virtual machines have a much higher CPU or memory reservation than the other virtual machines, vSphere HA admission control can be excessively conservative to ensure that there are enough unfragmented resources if a host fails. </description> <action> Use similar CPU and memory reservations for all virtual machines in the cluster. If this is not possible, consider using a different vSphere HA admission control policy, such as reserving a percentage of cluster resources for failover. Alternatively, you can use advanced options to specify a cap for the slot size. See the vSphere Availability Guide for details. </action> </cause> <cause> <description> Hosts with vSphere HA agent errors are not good candidates for providing failover capacity in the cluster, and their resources are not considered for vSphere HA admission control purposes. If many hosts have a vSphere HA agent error, vCenter Server generates this event. </description> <action> Check the event log of the hosts to determine the cause of the vSphere HA agent errors. After addressing any configuration issues, reconfigure vSphere HA on the affected hosts or on the cluster. </action> </cause> </EventLongDescription> InvalidEditionEventInvalid editionerrorThe license edition '{feature}' is invalid <EventLongDescription id="vim.event.InvalidEditionEvent"> <description> vCenter Server attempted to acquire an undefined feature from the license server </description> <cause> <description>Any operation that requires a feature license such as vMotion, DRS, vSphere HA might result in this event if that feature is not defined on the license server</description> <action>Verify that the feature in question is present on the license server</action> </cause> </EventLongDescription> EventExLicense downgradewarningLicense downgradeLicense downgradeLicense downgradevim.event.LicenseDowngradedEvent|License downgrade: {licenseKey} removes the following features: {lostFeatures} <EventLongDescription id="vim.event.LicenseDowngradedEvent"> <description> The installed license reduces the set of available features. Some features that were previously available will not be accessible with the new license. </description> <cause> <description>The license has been replaced.</description> <action>Revert to the license previously installed if it is not already expired.</action> <action>Contact VMware in order to obtain a new license with the required features.</action> </cause> </EventLongDescription> LicenseEvent<License Event>info<internal>LicenseExpiredEventLicense expirederrorLicense {feature.featureName} has expiredLicenseNonComplianceEventInsufficient licenses.errorLicense inventory is not compliant. Licenses are overused <EventLongDescription id="vim.event.LicenseNonComplianceEvent"> <description> vCenter Server does not strictly enforce license usage. Instead, it checks for license overuse periodically. If vCenter Server detects overuse, it logs this event and triggers an alarm. 
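The InsufficientFailoverResourcesEvent text above recommends switching to an admission control policy that reserves a percentage of cluster resources. As a hedged sketch only, reusing the hypothetical connection and cluster lookup from the earlier example, that policy could be applied like this:

from pyVmomi import vim

# `cluster` is assumed to be the vim.ClusterComputeResource obtained as in the
# earlier sketch (hypothetical cluster "Cluster01").
policy = vim.cluster.FailoverResourcesAdmissionControlPolicy()
policy.cpuFailoverResourcesPercent = 25      # reserve 25% of CPU capacity
policy.memoryFailoverResourcesPercent = 25   # reserve 25% of memory capacity

spec = vim.cluster.ConfigSpecEx()
spec.dasConfig = vim.cluster.DasConfigInfo()
spec.dasConfig.admissionControlEnabled = True
spec.dasConfig.admissionControlPolicy = policy
cluster.ReconfigureComputeResource_Task(spec, modify=True)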
</description> <cause> <description>Overuse of licenses</description> <action>Check the license reports through the vSphere Client and reduce the number of entities using the license key or add a new license key with a greater capacity</action> </cause> </EventLongDescription> LicenseRestrictedEventUnable to acquire licenses due to a restriction on the license servererrorUnable to acquire licenses due to a restriction in the option file on the license server. <EventLongDescription id="vim.event.LicenseRestrictedEvent"> <description> vCenter Server logs this event if it is unable to check out a license from the license server due to restrictions in the license file </description> <cause> <description>License file in the license server has restrictions that prevent check out</description> <action>Check the license file and remove any restrictions that you can</action> </cause> </EventLongDescription> LicenseServerAvailableEventLicense server availableinfoLicense server {licenseServer} is availableLicenseServerUnavailableEventLicense server unavailableerrorLicense server {licenseServer} is unavailable <EventLongDescription id="vim.event.LicenseServerUnavailableEvent"> <description> vCenter Server tracks the license server state and logs this event if the license server has stopped responding. </description> <cause> <description>License server is not responding and not available to vCenter Server</description> <action>Verify that the license server is running. If it is, check the connectivity between vCenter Server and the license server.</action> </cause> </EventLongDescription> LocalDatastoreCreatedEventLocal datastore createdinfoCreated local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl})Created local datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}LocalTSMEnabledEventESXi Shell is enabledinfoESXi Shell for the host has been enabledESXi Shell for the host {host.name} has been enabledLockerMisconfiguredEventLocker misconfiguredwarningDatastore {datastore} which is configured to back the locker does not existLockerReconfiguredEventLocker reconfiguredinfoLocker was reconfigured from {oldDatastore} to {newDatastore} datastoreMigrationErrorEventMigration errorerrorUnable to migrate {vm.name} from {host.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate from {host.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine from the source host failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationEvent<Migration Event>info<internal>MigrationHostErrorEventMigration host errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to host {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} to 
{dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine to the destination host or datastore failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationHostWarningEventMigration host warningwarningMigration of {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination host or datastore is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceErrorEventMigration resource errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Cannot migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceErrorEvent"> <description> A virtual machine failed to migrate due to incompatibilities with target resource pool </description> <cause> <description>Migrating a virtual machine to the destination host or datastore is not possible due to incompatibilities with the target resource pool. 
</description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceWarningEventMigration resource warningwarningMigration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination resource pool is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationWarningEventMigration warningwarningMigration of {vm.name} from {host.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration from {host.name}: {fault.msg}Migration of {vm.name} from {host.name} in {datacenter.name}: {fault.msg}MtuMatchEventThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to the physical NIC.infoThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}MtuMismatchEventThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to the physical NIC.errorThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}NASDatastoreCreatedEventNAS datastore createdinfoCreated NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl})Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}NetworkRollbackEventNetwork configuration on the host {host.name} is rolled back as it disconnects the 
host from vCenter server.errorNetwork configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.NoAccessUserEventNo access for usererrorCannot login user {userName}@{ipAddress}: no permission <EventLongDescription id="vim.event.NoAccessUserEvent"> <description> A user could not log in due to insufficient access permission </description> <cause> <description> The user account has insufficient access permission </description> <action> Log in with a user account that has the necessary access permissions or grant additional access permissions to the current user </action> </cause> </EventLongDescription> NoDatastoresConfiguredEventNo datastores configuredinfoNo datastores have been configuredNo datastores have been configured on the host {host.name}NoLicenseEventNo licenseerrorA required license {feature.featureName} is not reserved <EventLongDescription id="vim.event.NoLicenseEvent"> <description> vCenter Server logs this event if it fails to acquire a feature from the license server for an unknown reason. </description> <cause> <description>Acquiring a feature license fails for an unknown reason</description> <action>Verify that the license server has the license for the feature</action> </cause> </EventLongDescription> NoMaintenanceModeDrsRecommendationForVMNo maintenance mode DRS recommendation for the VMinfoUnable to automatically migrate {vm.name}Unable to automatically migrate from {host.name}Unable to automatically migrate {vm.name} from {host.name} <EventLongDescription id="vim.event.NoMaintenanceModeDrsRecommendationForVM"> <description> DRS failed to generate a vMotion recommendation for a virtual machine on a host entering Maintenance Mode. This condition typically occurs because no other host in the DRS cluster is compatible with the virtual machine. Unless you manually migrate or power off this virtual machine, the host will be unable to enter Maintenance Mode. </description> <cause> <description>DRS failed to evacuate a powered on virtual machine</description> <action>Manually migrate the virtual machine to another host in the cluster</action> <action>Power off the virtual machine</action> <action>Bring any hosts in Maintenance Mode out of that mode</action> <action>Cancel the task that is making the host enter Maintenance Mode </action> </cause> </EventLongDescription> NonVIWorkloadDetectedOnDatastoreEventUnmanaged workload detected on SIOC-enabled datastoreinfoAn unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.NotEnoughResourcesToStartVmEventInsufficient resources for vSphere HA to start the VM. Reason: {reason.@enum.fdm.placementFault}warningInsufficient resources to fail over {vm.name} in {computeResource.name}. vSphere HA will retry the fail over when enough resources are available. 
Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over this virtual machine. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name} in {computeResource.name} that resides in {datacenter.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault} <EventLongDescription id="vim.event.NotEnoughResourcesToStartVmEvent"> <description> This event is issued by vSphere HA when the master agent was not able to fail over a virtual machine to one of its compatible hosts. This condition is due to one or more of the causes listed below. When this condition occurs, vSphere HA will report a cause for it in the event summary, but note that additional causes might exist. It is more likely to occur if vSphere HA admission control is disabled or more hosts fail than were provisioned for. When a virtual machine cannot be placed, vSphere HA will retry placing it when the cluster state changes. Also, if vSphere DRS is enabled, it will be invoked to try to defragment the cluster or bring hosts out of Standby Mode. </description> <cause> <description> A virtual machine has bandwidth reservations for CPU, memory, vFlash cache, or virtual NICs. There was no compatible host that had enough resources to satisfy the reservations. </description> <action>Decrease the virtual machine resource reservation</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition if one exists</action> <action>If DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> <cause> <description> The cluster has vSAN enabled, and one or more hosts that contribute storage to the cluster is inaccessible, preventing vSphere HA from powering on the virtual machine. This applies to virtual machines that have one or more files on a vSAN datastore. </description> <action>Bring online any failed hosts or resolve a network partition if one exists that involves hosts that contribute storage to the vSAN cluster</action> </cause> <cause> <description>One or more datastores that are associated with a virtual machine are inaccessible by any compatible host in the cluster.</description> <action>Bring online any non-responding host that mounts the virtual machine datastores</action> <action>Fix the all-paths-down (APD) or permanent-device-loss (PDL) issues.</action> </cause> <cause> <description>vSphere HA is enforcing virtual machine to virtual machine anti-affinity rules, and the rule cannot be satisfied. </description> <action>Add more hosts to cluster</action> <action>Bring online any non-responding host or resolve a network partition if one exists</action> <action>Remove any anti-affinity rules that are restricting the placement</action> </cause> <cause> <description>The number of VMs that can run on each host is limited. 
There is no host that can power on the VM without exceeding the limit.</description> <action>Increase the limit if you have set the limitVmsPerESXHost HA advanced option.</action> <action>Bring online any non-responding host or add new hosts to the cluster</action> </cause> </EventLongDescription> OutOfSyncDvsHostThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.warningThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.The vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.PermissionAddedEventPermission addedinfoPermission created for {principal} on {entity.name}, role is {role.name}, propagation is {propagate.@enum.auth.Permission.propagate}PermissionEvent<Permission Event>info<internal>PermissionRemovedEventPermission removedinfoPermission rule removed for {principal} on {entity.name}PermissionUpdatedEventPermission updatedinfoPermission changed for '{principal}' on '{entity.name}'.
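One of the remedies listed for NotEnoughResourcesToStartVmEvent above is to approve pending DRS recommendations so that a vSphere HA failover can proceed when DRS runs in manual mode. A small illustrative sketch, again assuming the hypothetical `cluster` object from the first example:

# Apply any pending DRS recommendations on the cluster. Each recommendation is
# printed before it is applied so the operator can see what is being approved.
for rec in cluster.recommendation or []:
    print(rec.key, rec.reasonText, [a.target for a in rec.action or []])
    cluster.ApplyRecommendation(rec.key)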
Role changed from '{prevRole.name}' to role '{role.name}'. Propagate changed from '{prevPropagate.@enum.auth.Permission.propagate}' to '{propagate.@enum.auth.Permission.propagate}'.ProfileAssociatedEventProfile attached to hostinfoProfile {profile.name} has been attached.Profile {profile.name} has been attached.Profile {profile.name} has been attached with the host.Profile {profile.name} attached.ProfileChangedEventProfile was changedinfoProfile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.ProfileCreatedEventProfile createdinfoProfile is created.ProfileDissociatedEventProfile detached from hostinfoProfile {profile.name} has been detached.Profile {profile.name} has been detached. Profile {profile.name} has been detached from the host.Profile {profile.name} detached.ProfileEventinfo<internal>ProfileReferenceHostChangedEventThe profile reference host was changedinfoProfile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.ProfileRemovedEventProfile removedinfoProfile {profile.name} was removed.Profile {profile.name} was removed.Profile was removed.RecoveryEventRecovery completed on the host.infoThe host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the management virtual NIC {vnic} by connecting to a new port {portKey} on the vSphere Distributed Switch {dvsUuid}.RemoteTSMEnabledEventSSH is enabledinfoSSH for the host has been enabledSSH for the host {host.name} has been enabledResourcePoolCreatedEventResource pool createdinfoCreated resource pool {resourcePool.name} in compute-resource {computeResource.name}Created resource pool {resourcePool.name}Created resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}ResourcePoolDestroyedEventResource pool deletedinfoRemoved resource pool {resourcePool.name} on {computeResource.name}Removed resource pool {resourcePool.name}Removed resource pool {resourcePool.name} on {computeResource.name} in {datacenter.name}ResourcePoolEvent<Resource Pool Event>info<internal>ResourcePoolMovedEventResource pool movedinfoMoved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name} in {datacenter.name}ResourcePoolReconfiguredEventResource pool reconfiguredinfoUpdated configuration for {resourcePool.name} in compute-resource {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration on {resourcePool.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration for {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ResourcePoolReconfiguredEvent"> <description> The resource pool configuration changed. The resource pool configuration includes information about the resource reservations of the resource pool and the resource reservations of its children. </description> </EventLongDescription> ResourceViolatedEventResource usage exceeds configurationerrorResource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name}'Resource usage exceeds configuration on resource pool {resourcePool.name}Resource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.ResourceViolatedEvent"> <description> The cumulative CPU and/or memory resource consumption of all virtual machines in the resource pool exceeds the resource pool configuration </description> <cause> <description>You attempted to move a virtual machine from one resource pool into another bypassing vCenter Server. This condition occurs when you attempt the move using the vSphere Client directly connected to the host. </description> <action>In a DRS cluster, do not move and power on a virtual machine bypassing vCenter Server</action> </cause> </EventLongDescription> RoleAddedEventRole addedinfoNew role {role.name} createdRoleEvent<Role Event>info<internal>RoleRemovedEventRole removedinfoRole {role.name} removedRoleUpdatedEventRole updatedinfoRole modified.
Previous name: {prevRoleName}, new name: {role.name}.
Added privileges: {privilegesAdded}.
Removed privileges: {privilegesRemoved}.RollbackEventHost Network operation rolled backinfoThe Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.The operation {methodName} on the host {hostName} disconnected the host and was rolled back .The Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.ScheduledTaskCompletedEventScheduled task completedinfoTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} completed successfullyTask {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} completed successfullyScheduledTaskCreatedEventScheduled task createdinfoCreated task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name}Created task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskEmailCompletedEventSent scheduled task emailinfoTask {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} sent email to {to}ScheduledTaskEmailFailedEventScheduled task email not senterrorTask {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot send email to {to}: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskEmailFailedEvent"> <description> An error occurred while sending email notification that a scheduled task is running </description> <cause> <description>Failed to send email for the scheduled task</description> <action>Check the vCenter Server SMTP settings for sending emails</action> </cause> </EventLongDescription> ScheduledTaskEvent<Scheduled Task Event>info<internal>ScheduledTaskFailedEventCannot complete scheduled taskerrorTask {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot be completed: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskFailedEvent"> <description> An error occurred while running a scheduled task </description> <cause> <description>Failed to run a scheduled task</description> <action>Correct the failure condition</action> </cause> </EventLongDescription> ScheduledTaskReconfiguredEventScheduled task reconfiguredinfoReconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name}Reconfigured task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.ScheduledTaskRemovedEventScheduled task removedinfoRemoved task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name}Removed task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskStartedEventScheduled task startedinfoRunning task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name}Running task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ServerLicenseExpiredEventServer license expirederrorA vCenter Server license has expiredServerStartedSessionEventServer started sessioninfovCenter startedSessionEvent<Session Event>info<internal>SessionTerminatedEventSession stoppedinfoA session for user '{terminatedUsername}' has stopped <EventLongDescription id="vim.event.SessionTerminatedEvent"> <description> A session has been terminated </description> </EventLongDescription> ExtendedEventThe time-limited license on the host has expired.warningThe time-limited license on host {host.name} has expired.The time-limited license on host {host.name} has expired.The time-limited license on the host has expired.vim.event.SubscriptionLicenseExpiredEvent|The time-limited license on host {host.name} has expired. To comply with the EULA, renew the license at http://my.vmware.comTaskEventTask eventinfoTask: {info.descriptionId}TaskTimeoutEventTask time-outinfoTask: {info.descriptionId} time-out <EventLongDescription id="vim.event.TaskTimeoutEvent"> <description> A task has been cleaned up because it timed out </description> </EventLongDescription> TeamingMatchEventTeaming configuration in the vSphere Distributed Switch matches the physical switch configurationinfoTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration in {datacenter.name}. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TeamingMisMatchEventTeaming configuration in the vSphere Distributed Switch does not match the physical switch configurationerrorTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration in {datacenter.name}. 
Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TemplateBeingUpgradedEventUpgrading templateinfoUpgrading template {legacyTemplate}TemplateUpgradeEvent<Template Upgrade Event>info<internal>TemplateUpgradeFailedEventCannot upgrade templateinfoCannot upgrade template {legacyTemplate} due to: {reason.msg}TemplateUpgradedEventTemplate upgradedinfoTemplate {legacyTemplate} upgrade completedTimedOutHostOperationEventHost operation timed outwarningThe operation performed on host {host.name} timed outThe operation performed on host {host.name} timed outThe operation timed outThe operation performed on {host.name} in {datacenter.name} timed out <EventLongDescription id="vim.event.TimedOutHostOperationEvent"> <description> An operation performed on the host has timed out </description> <cause> <description> A previous event in the sequence of events will provide information on the reason for the timeout </description> </cause> </EventLongDescription> UnlicensedVirtualMachinesEventUnlicensed virtual machinesinfoThere are {unlicensed} unlicensed virtual machines on host {host} - there are only {available} licenses availableUnlicensedVirtualMachinesFoundEventUnlicensed virtual machines foundinfo{unlicensed} unlicensed virtual machines found on host {host}UpdatedAgentBeingRestartedEventRestarting updated agentinfoThe agent is updated and will soon restartThe agent on host {host.name} is updated and will soon restartUpgradeEvent<Upgrade Event>info<internal>UplinkPortMtuNotSupportEventNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.errorNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortMtuSupportEventAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.infoAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.All VLAN MTU setting on the external physical switch allows the vSphere Distributed Switch max MTU size packets passing on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}All VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanTrunkedEventThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch.infoThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.The 
configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.The configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanUntrunkedEventNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch.errorNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UserAssignedToGroupUser assigned to groupinfoUser {userLogin} was added to group {group}UserLoginSessionEventUser logininfoUser {userName}@{ipAddress} logged in as {userAgent}UserLogoutSessionEventUser logoutinfoUser {userName}@{ipAddress} logged out (login time: {loginTime}, number of API invocations: {callCount}, user agent: {userAgent})UserPasswordChangedUser password changedinfoPassword was changed for account {userLogin}Password was changed for account {userLogin} on host {host.name}UserUnassignedFromGroupUser removed from groupinfoUser {userLogin} removed from group {group}UserUpgradeEventUser upgradeuser{message} <EventLongDescription id="vim.event.UserUpgradeEvent"> <description> A general user event occurred due to an upgrade </description> </EventLongDescription> VMFSDatastoreCreatedEventVMFS datastore createdinfoCreated VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl})Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}VMFSDatastoreExpandedEventVMFS datastore expandedinfoExpanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name}Expanded VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExpandedEvent"> <description> An existing extent in a VMFS volume was grown to increase its capacity </description> <cause> <description> A user or system action caused an extent of an existing VMFS datastore to be grown. Only extents with free space immediately after them are expandable. As a result, the action filled the available adjacent capacity on the LUN. 
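The VMFSDatastoreExpandedEvent above reports the result of growing an extent into free space immediately after it on the LUN. A hedged sketch of the corresponding API call, assuming the `content` object from the first example and a hypothetical host and datastore name:

from pyVmomi import vim

# Locate a host and one of its datastores (names are hypothetical).
view = content.viewManager.CreateContainerView(
    content.rootFolder, [vim.HostSystem], True)
host = next(h for h in view.view if h.name == "esx01.example.com")
view.DestroyView()
ds = next(d for d in host.datastore if d.name == "datastore1")

# Ask the host which extents, if any, can be grown in place, then expand.
ds_system = host.configManager.datastoreSystem
options = ds_system.QueryVmfsDatastoreExpandOptions(datastore=ds)
if options:
    ds_system.ExpandVmfsDatastore(datastore=ds, spec=options[0].spec)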
</description> </cause> </EventLongDescription> VMFSDatastoreExtendedEventVMFS datastore extendedinfoExtended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name}Extended VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExtendedEvent"> <description> An existing VMFS volume was extended to increase its capacity </description> <cause> <description> A user or system action caused the datastore to be extended with a partition on a LUN to increase its capacity. </description> </cause> </EventLongDescription> VMotionLicenseExpiredEventvMotion license expirederrorA vMotion license for {host.name} has expired <EventLongDescription id="vim.event.VMotionLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of vMotion licenses on the license server and uses this event to notify you of any vMotion licenses that are about to expire </description> <cause> <description>vMotion licenses on the license server are about to expire</description> <action>Update the license server to get a fresher version of the vMotion license</action> </cause> </EventLongDescription> VcAgentUninstallFailedEventCannot uninstall vCenter agenterrorCannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name} in {datacenter.name}. {reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUninstallFailedEvent"> <description> An attempt to uninstall the vCenter Agent failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUninstalledEventvCenter agent uninstalledinfovCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalledvCenter agent has been uninstalled from {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUninstalledEvent"> <description> The vCenter Agent has been uninstalled from host </description> </EventLongDescription> VcAgentUpgradeFailedEventCannot complete vCenter agent upgradeerrorCannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name} in {datacenter.name}. 
{reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUpgradeFailedEvent"> <description> A vCenter Agent upgrade attempt failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUpgradedEventvCenter agent upgradedinfovCenter agent has been upgraded on {host.name}vCenter agent has been upgraded on {host.name}vCenter agent has been upgradedvCenter agent has been upgraded on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUpgradedEvent"> <description> The vCenter Agent has been upgraded on the host </description> </EventLongDescription> VimAccountPasswordChangedEventVIM account password changedinfoVIM account password changedVIM account password was changed on host {host.name} <EventLongDescription id="vim.event.VimAccountPasswordChangedEvent"> <description> The password for the Vim account user on the host has been changed. This account is created by vCenter Server and used to manage the host. </description> <cause> <description> vCenter Server periodically changes the password of the Vim account that it uses to manage the host </description> </cause> </EventLongDescription> VmAcquiredMksTicketEventVM acquired MKS ticketinfoRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} has been openedRemote console has been opened for this virtual machine on {host.name}Remote console to {vm.name} on {host.name} in {datacenter.name} has been opened <EventLongDescription id="vim.event.VmAcquiredMksTicketEvent"> <description> Successfully acquired MKS Ticket for the virtual machine </description> <cause> <description> The MKS Ticket used to connect to the virtual machine remote console has been successfully acquired. </description> </cause> </EventLongDescription> VmAcquiredTicketEventVM acquired ticketinfoA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket of type {ticketType.@enum.VirtualMachine.TicketType} has been acquired.A ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} on {host.name} in {datacenter.name} has been acquiredVmAutoRenameEventVM auto renameinfoInvalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name}. Renamed from {oldName} to {newName}Conflicting or invalid virtual machine name detected. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name} in {datacenter.name}. Renamed from {oldName} to {newName} <EventLongDescription id="vim.event.VmAutoRenameEvent"> <description> The virtual machine was renamed because of possible name conflicts with another virtual machine </description> <cause> <description>The virtual machine might have been added to the vCenter Server inventory while scanning the datastores of hosts added to the inventory. During such an action, the newly-added virtual machine's name might have been found to be in conflict with a virtual machine name already in the inventory. To resolve this, vCenter Server renames the newly-added virtual machine. 
</description> </cause> </EventLongDescription> VmBeingClonedEventVM being clonedinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}VmBeingClonedNoFolderEventVM being cloned to a vAppinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on host {host.name}, {ds.name} in {datacenter.name} to {destName} on host {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}VmBeingCreatedEventCreating VMinfoCreating {vm.name} on {host.name}, {ds.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {ds.name} in {datacenter.name}Creating VM on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmBeingDeployedEventDeploying VMinfoDeploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} from template {srcTemplate.name}Deploying VM on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} in {datacenter.name} from template {srcTemplate.name} <EventLongDescription id="vim.event.VmBeingDeployedEvent"> <description> A virtual machine is being created from a template </description> <cause> <description> A user action prompted a virtual machine to be created from this template. 
</description> </cause> </EventLongDescription> VmBeingHotMigratedEventVM is hot migratinginfoMigrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingHotMigratedEvent"> <description> A powered-on virtual machine is being migrated with vMotion </description> <cause> <description> A user action might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> <cause> <description> A DRS recommendation might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> </EventLongDescription> VmBeingMigratedEventVM migratinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingMigratedEvent"> <description> Changing the host on which the virtual machine is executing </description> <cause> <description> A user action caused the virtual machine to be migrated to a different host </description> </cause> </EventLongDescription> VmBeingRelocatedEventVM relocatinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingRelocatedEvent"> <description> The virtual machine execution and/or storage is being relocated </description> <cause> <description> A user action might have caused the virtual machine's execution and/or storage to be changed </description> </cause> </EventLongDescription> VmCloneEvent<VM Clone Event>info<internal><internal><internal><internal><internal>VmCloneFailedEventCannot complete VM cloneerrorFailed to clone {vm.name} on {host.name}, {ds.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in 
{destFolder.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmCloneFailedEvent"> <description> Cloning a virtual machine failed </description> <cause> <description> An error prevented the virtual machine from being cloned </description> </cause> </EventLongDescription> VmClonedEventVM clonedinfo{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {ds.name} in {datacenter.name}{sourceVm.name} cloned to {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmConfigMissingEventVM configuration missinginfoConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} cannot be foundConfiguration file cannot be foundConfiguration file for {vm.name} on {host.name} in {datacenter.name} cannot be found <EventLongDescription id="vim.event.VmConfigMissingEvent"> <description> One or more configuration files for the virtual machine cannot be found </description> <cause> <description> The datastore on which this virtual machine resides may be inaccessible </description> <action> Check the connectivity of the datastore on which this virtual machine resides. If the datastore has a backing LUN, check to see if there are any transient disk failures. </action> </cause> </EventLongDescription> VmConnectedEventVM connectedinfoHost is connectedVirtual machine {vm.name} is connected <EventLongDescription id="vim.event.VmConnectedEvent"> <description> The virtual machine is in a connected state in the inventory and vCenter Server can access it </description> <cause> <description> A user or system action that resulted in operations such as creating, registering, cloning or deploying a virtual machine gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> A user or system action that resulted in operations such as adding or reconnecting a host gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> The state of the virtual machine's host changed from Not Responding to Connected and the host gave vCenter Server access to the virtual machine </description> </cause> </EventLongDescription> VmCreatedEventVM createdinfoNew virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {ds.name} in {datacenter.name}Virtual machine created on {host.name}, {ds.name} in {datacenter.name}Created virtual machine {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmDasBeingResetEventvSphere HA is resetting VMinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. 
Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode} <EventLongDescription id="vim.event.VmDasBeingResetEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA has been configured, the virtual machine might be reset because the VMware Tools heartbeat or application heartbeat status turned red. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasBeingResetWithScreenshotEventvSphere HA enabled VM reset with screenshotinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}{vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}. <EventLongDescription id="vim.event.VmDasBeingResetWithScreenshotEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA is configured, this condition can occur because the VMware Tools heartbeat or the application heartbeat status turned red. The event contains the location of the screenshot taken of the guest console before it was reset. You can use this information to determine the cause of the heartbeat failure. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> Check the screenshot image to see if the cause was a guest operating system failure. If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. 
You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasResetFailedEventvSphere HA cannot reset VMwarningvSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA cannot reset {vm.name} on {host.name}vSphere HA cannot reset {vm.name}vSphere HA cannot reset this virtual machinevSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmDasResetFailedEvent"> <description> vSphere HA attempted to reset the virtual machine because of a heartbeat failure from VMware Tools or a guest application, depending on how vSphere HA was configured. However, the reset operation failed. </description> <cause> <description> The most likely reason for the reset failure is that the virtual machine was running another task at the time the reset was initiated. </description> <action>Check to see whether the virtual machine requires attention and reset it manually if necessary.</action> </cause> </EventLongDescription> VmDasUpdateErrorEventVM vSphere HA update errorerrorUnable to update vSphere HA agents given the state of {vm.name}VmDasUpdateOkEventCompleted VM DAS updateinfovSphere HA agents have been updated with the current state of the virtual machineVmDateRolledBackEventVM date rolled backerrorDisconnecting all hosts as the date of virtual machine {vm.name} has been rolled backVmDeployFailedEventCannot deploy VM from templateerrorFailed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmDeployFailedEvent"> <description> Failed to deploy a virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine failed to deploy. This condition can occur if there is not enough disk space, the host or virtual machine loses its network connection, the host is disconnected, and so on. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. 
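The failure events above all point at the reason text carried in {reason.msg} ("Check the reason in the event message to find the cause of the failure"). As a rough illustration only (this is not part of the logged vCenter catalog), a pyVmomi query against the vCenter EventManager can pull recent occurrences of such failure event types so that reason text can be read directly; the vCenter address, credentials, and the chosen event types below are placeholders, not values taken from this log.

# Illustrative sketch only: pull recent occurrences of selected failure events from
# vCenter so their formatted message (including the {reason.msg} text) can be read.
# The vCenter address, credentials, and event types are placeholders.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()          # lab convenience; verify certificates in production
si = SmartConnect(host="vcenter.example.com",   # placeholder vCenter
                  user="readonly@vsphere.local",
                  pwd="********",
                  sslContext=ctx)
try:
    # For core events, eventTypeId is simply the event class name from the catalog above.
    spec = vim.event.EventFilterSpec(
        eventTypeId=["VmDeployFailedEvent", "VmCloneFailedEvent", "VmFailedMigrateEvent"])
    for ev in si.content.eventManager.QueryEvents(spec):
        print(ev.createdTime, type(ev).__name__, ev.fullFormattedMessage)
finally:
    Disconnect(si)

QueryEvents only returns the most recent matching events; paging through older history would need an event history collector created via CreateCollectorForEvents.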
</action> </cause> </EventLongDescription> VmDeployedEventVM deployedinfoTemplate {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name} <EventLongDescription id="vim.event.VmDeployedEvent"> <description> A virtual machine has been created from the specified template </description> <cause> <description> A user action caused a virtual machine to be created from the template </description> </cause> <cause> <description> A scheduled task caused a virtual machine to be created from the template </description> </cause> </EventLongDescription> VmDisconnectedEventVM disconnectedinfo{vm.name} on host {host.name} is disconnected{vm.name} on host {host.name} is disconnected{vm.name} is disconnected{host.name} is disconnected{vm.name} on host {host.name} in {datacenter.name} is disconnectedVmDiscoveredEventVM discoveredinfoDiscovered {vm.name} on {host.name}Discovered {vm.name} on {host.name}Discovered {vm.name}Discovered on {host.name}Discovered {vm.name} on {host.name} in {datacenter.name}VmDiskFailedEventCannot create VM diskerrorCannot create virtual disk {disk} <EventLongDescription id="vim.event.VmDiskFailedEvent"> <description> Failed to create a virtual disk for the virtual machine for reasons described in the event message </description> <cause> <description> A virtual disk was not created for the virtual machine. This condition can occur if the operation failed to access the disk, the disk did not have enough space, you do not have permission for the operation, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that disk is accessible, has enough space, and that the permission settings allow the operation. </action> </cause> </EventLongDescription> VmEmigratingEventVM emigratinginfoMigrating {vm.name} off host {host.name}Migrating {vm.name} off host {host.name}Migrating {vm.name} off hostMigrating off host {host.name}Migrating {vm.name} off host {host.name} in {datacenter.name}VmEndRecordingEventEnd a recording sessioninfoEnd a recording sessionEnd a recording session on {vm.name}VmEndReplayingEventEnd a replay sessioninfoEnd a replay sessionEnd a replay session on {vm.name}VmEvent<VM Event>info<internal>VmFailedMigrateEventCannot migrate VMerrorCannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} to {destHost.name}, {destDatastore.name}Cannot migrate to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmFailedMigrateEvent"> <description> Failed to migrate the virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine did not migrate. This condition can occur if vMotion IPs are not configured, the source and destination hosts are not accessible, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that the vMotion IPs are configured on source and destination hosts, the hosts are accessible, and so on. 
</action> </cause> </EventLongDescription> VmFailedRelayoutEventCannot complete VM relayout.errorCannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name}: {reason.msg}Cannot complete relayout for this virtual machine on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedRelayoutEvent"> <description> Failed to lay out a virtual machine </description> <cause> <description> An attempt to lay out a virtual machine on disk failed for reasons described in the event message. This condition can occur for any of several reasons, for example inability to access the disk. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. </action> </cause> </EventLongDescription> VmFailedRelayoutOnVmfs2DatastoreEventCannot complete VM relayout on Vmfs2 datastoreerrorCannot complete relayout due to disks on a VMFS2 volumeCannot complete relayout for virtual machine {vm.name} which has disks on a VMFS2 volume. <EventLongDescription id="vim.event.VmFailedRelayoutOnVmfs2DatastoreEvent"> <description> Failed to migrate a virtual machine on VMFS2 datastore </description> <cause> <description> An attempt to migrate a virtual machine failed because the virtual machine still has disk(s) on a VMFS2 datastore. VMFS2 datastores are read-only for ESX 3.0 and later hosts. </description> <action> Upgrade the datastore(s) from VMFS2 to VMFS3 </action> </cause> </EventLongDescription> VmFailedStartingSecondaryEventvCenter cannot start the Fault Tolerance secondary VMerrorvCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason} <EventLongDescription id="vim.event.VmFailedStartingSecondaryEvent"> <description> vCenter Server could not start the Secondary VM because of an error </description> <cause> <description> The remote host is incompatible for Secondary VM. For instance, this condition can occur when the host does not have access to the virtual machine's network or datastore. </description> <action>Ensure that the hosts in the cluster are compatible for FT</action> </cause> <cause> <description>Login to a remote host failed. If the host has been newly added to the inventory or just rebooted, it might take some time for SSL thumbprints to be propagated to the hosts. 
</description> <action>If the problem persists, disconnect and re-connect the host.</action> </cause> <cause> <description>Registration of the Secondary VM on the remote host failed</description> <action>Determine whether the remote host has access to the datastore that the FT virtual machine resides on</action> </cause> <cause> <description>An error occurred while starting the Secondary VM</description> <action>Determine the cause of the migration error. vCenter Server will try to restart the Secondary VM if it can.</action> </cause> </EventLongDescription> VmFailedToPowerOffEventCannot power off the VM.errorCannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name}. {reason.msg}Cannot power off: {reason.msg}Cannot power off {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOffEvent"> <description>The virtual machine failed to power off</description> <cause> <description> The virtual machine might be performing concurrent operations </description> <action>Complete the concurrent operations and retry the power-off operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility. </description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToPowerOnEventCannot power on the VM.errorCannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOnEvent"> <description> The virtual machine failed to power on </description> <cause> <description> Virtual machine power-on attempts can fail because the virtual machine is already in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the power-on failure and fix the problem. </action> </cause> </EventLongDescription> VmFailedToRebootGuestEventVM cannot reboot the guest OS.errorCannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot the guest OS for {vm.name} on {host.name} in {datacenter.name}. {reason.msg} <EventLongDescription id="vim.event.VmFailedToRebootGuestEvent"> <description> The guest operating system on the virtual machine failed to reboot. </description> <cause> <description> Guest operating system reboot failures can occur because the virtual machine is not in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the reboot failure and fix the problem. 
</action> </cause> </EventLongDescription> VmFailedToResetEventCannot reset VMerrorCannot reset {vm.name} on {host.name}: {reason.msg}Cannot reset {vm.name} on {host.name}: {reason.msg}Cannot reset {vm.name}: {reason.msg}Cannot reset: {reason.msg}Cannot reset {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToResetEvent"> <description>The virtual machine failed to reset</description> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere client and respond to the question or prompt </action> </cause> <cause> <description>There might not be enough available licenses to perform this operation.</description> <action> Obtain the required licenses and retry the reset operation </action> </cause> <cause> <description> Concurrent operations might be executing on the virtual machine </description> <action>Complete the concurrent operations and retry the reset operation</action> </cause> <cause> <description> The host on which the virtual machine is running is entering maintenance mode </description> <action> Wait until the host exits maintenance mode and retry the operation </action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToShutdownGuestEventCannot shut down the guest OSerrorCannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}{vm.name} cannot shut down the guest OS on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToShutdownGuestEvent"> <description> Guest operating system shutdown failed for the virtual machine </description> <cause> <description> Guest operating system shutdown can fail if VMware Tools is not installed in the virtual machine. </description> <action>Install VMware Tools.</action> </cause> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere Client and respond to the question or prompt </action> </cause> <cause> <description> Concurrent operations might be running on the virtual machine </description> <action>Complete the concurrent operations and retry the shutdown operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToStandbyGuestEventVM cannot standby the guest OSerrorCannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. 
{reason.msg}{vm.name} cannot standby the guest OS on {host.name} in {datacenter.name}: {reason.msg}VmFailedToSuspendEventCannot suspend VMerrorCannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name}: {reason.msg}Cannot suspend: {reason.msg}Cannot suspend {vm.name} on {host.name} in {datacenter.name}: {reason.msg}VmFailedUpdatingSecondaryConfigvCenter cannot update the Fault Tolerance secondary VM configurationerrorvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name}vCenter cannot update the Fault Tolerance secondary VM configurationvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFailedUpdatingSecondaryConfig"> <description> After a failover, the new Primary VM failed to update the configuration of the Secondary VM </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFailoverFailedvSphere HA virtual machine failover unsuccessfulwarningvSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over this virtual machine. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg} <EventLongDescription id="vim.event.VmFailoverFailed"> <description> vSphere HA did not failover this virtual machine. The event includes the details of the fault that was generated when vSphere HA attempted the failover. vSphere HA will retry the failover on another host unless the maximum number of failover attempts have been exceeded. In many cases, the retry will succeed. </description> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It may also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to failover the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or trying powering on the virtual machine and investigate any returned errors. 
</action> <action> If the error reports that a file is locked, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. If any hosts have been declared dead, investigate whether a networking/storage issue may be the cause. </action> <action> If, however, the error reports that the virtual machine is in an invalid state, there may be an in-progress operation that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation that is taking a long time to complete. </action> </cause> </EventLongDescription> VmFaultToleranceStateChangedEventVM Fault Tolerance state changedinfoFault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state on {vm.name} on host {host.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState} <EventLongDescription id="vim.event.VmFaultToleranceStateChangedEvent"> <description> The Fault Tolerance state of the virtual machine changed </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceTurnedOffEventVM Fault Tolerance turned offinfoFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name}Fault Tolerance protection has been turned off for {vm.name} on host {host.name}Fault Tolerance protection has been turned off for {vm.name}Fault Tolerance protection has been turned off for this virtual machineFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFaultToleranceTurnedOffEvent"> <description> All Secondary VMs have been removed and Fault Tolerance protection is turned off for this virtual machine. </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceVmTerminatedEventFault Tolerance VM terminatedinfoThe Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} has been terminated. 
{reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason} <EventLongDescription id="vim.event.VmFaultToleranceVmTerminatedEvent"> <description> A Primary VM or Secondary VM became inactive </description> <cause> <description> The Secondary VM became inactive because its operations are no longer synchronized with those of the Primary VM</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Secondary VM became inactive because a hardware or network failure caused the Primary VM to lose the Primary-to-Secondary connection</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Fault Tolerant VM became inactive due to a partial hardware failure on the physical host</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> A user stopped the Fault Tolerant VM</description> <action>The remaining Fault Tolerant VM takes over as the Primary VM. vSphere HA will attempt to restart the Secondary VM.</action> </cause> </EventLongDescription> VmGuestOSCrashedEventGuest operating system crashederror{vm.name} on {host.name}: Guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.{vm.name}: Guest operating system has crashed.This virtual machine's guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.VmGuestRebootEventGuest rebootinfoGuest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name}Guest OS rebootGuest OS reboot for {vm.name} on {host.name} in {datacenter.name}VmGuestShutdownEventGuest OS shut downinfoGuest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name}Guest OS shut downGuest OS shut down for {vm.name} on {host.name} in {datacenter.name}VmGuestStandbyEventGuest standbyinfoGuest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name}Guest OS standbyGuest OS standby for {vm.name} on {host.name} in {datacenter.name}VmHealthMonitoringStateChangedEventvSphere HA VM monitoring state changedinfovSphere HA VM monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'VmInstanceUuidAssignedEventAssign a new instance UUIDinfoAssign a new instance UUID ({instanceUuid})Assign a new instance UUID ({instanceUuid}) to {vm.name} <EventLongDescription id="vim.event.VmInstanceUuidAssignedEvent"> <description>The virtual machine was assigned a new vCenter Server-specific instance UUID </description> <cause> <description> The user who created the virtual machine did not specify a vCenter Server-specific instance UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmInstanceUuidChangedEventInstance UUID ChangedinfoThe instance UUID has been changed from ({oldInstanceUuid}) to ({newInstanceUuid})The instance UUID of {vm.name} has been changed from ({oldInstanceUuid}) to ({newInstanceUuid}) <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine has changed </description> <cause> <description> A user action resulted in a change to the vCenter Server-specific instance UUID of the virtual machine </description> </cause> <cause> <description> vCenter Server changed the instance UUID of the virtual machine because it detected a conflict </description> </cause> </EventLongDescription> VmInstanceUuidConflictEventInstance UUIDs conflicterrorThe instance UUID ({instanceUuid}) conflicts with the instance UUID assigned to {conflictedVm.name}The instance UUID ({instanceUuid}) of {vm.name} conflicts with the instance UUID assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine conflicted with that of another virtual machine. </description> <cause> <description> Virtual machine instance UUID conflicts can occur if you copy virtual machine files manually without using vCenter Server. </description> </cause> </EventLongDescription> VmMacAssignedEventVM MAC assignedinfoNew MAC address ({mac}) assigned to adapter {adapter}New MAC address ({mac}) assigned to adapter {adapter} for {vm.name}VmMacChangedEventVM MAC changedwarningChanged MAC address from {oldMac} to {newMac} for adapter {adapter}Changed MAC address from {oldMac} to {newMac} for adapter {adapter} for {vm.name} <EventLongDescription id="vim.event.VmMacChangedEvent"> <description> The virtual machine MAC address has changed </description> <cause> <description> A user action changed the virtual machine MAC address </description> </cause> <cause> <description> vCenter changed the virtual machine MAC address because it detected a MAC address conflict </description> </cause> </EventLongDescription> VmMacConflictEventVM MAC conflicterrorThe MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmMacConflictEvent"> <description> The virtual machine MAC address conflicts with that of another virtual machine </description> <cause> <description> This virtual machine's MAC address is the same as that of another virtual machine. Refer to the event details for more information on the virtual machine that caused the conflict. 
</description> </cause> </EventLongDescription> VmMaxFTRestartCountReachedvSphere HA reached maximum Secondary VM restart count.warningvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxFTRestartCountReached"> <description> The system reached the maximum restart limit in its attempt to restart a Secondary VM </description> <cause> <description>The system exceeded the number of allowed restart attempts for the Secondary VM when it tried to reestablish Fault Tolerance</description> <action>Check the causes for the restart failures and fix them. Then disable and re-enable Fault Tolerance protection.</action> </cause> </EventLongDescription> VmMaxRestartCountReachedvSphere HA reached maximum VM restart countwarningvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name}because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart this VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxRestartCountReached"> <description> vSphere HA has reached the maximum number of failover attempts for this virtual machine and has not been able to restart it. No further failover attempts will be made. By default vSphere HA attempts to failover a virtual machine 5 times. </description> <cause> <description> Failover can fail for a number of reasons including that the configuration file of the virtual machine is corrupt or one or more of the virtual machines datastores are not accessible by any host in the cluster due to an all paths down condition. In addition, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. </description> <action> To determine why previous failover attempts failed, search the events that are logged for the VM for occurrences of the event vSphere HA reports when a failover fails. These events will report the reason for the failed failover. vSphere HA events can be located by searching for the phrase 'vSphere HA'. To determine whether any issues still exist, try to manually power on the virtual machine. If power-on fails, investigate the error that is returned. But, if the power-on remains pending for a long time, investigate whether an all paths down condition exists. Also, if any hosts have been declared dead, investigate whether a networking or storage issue may be the cause. 
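The action text above recommends searching the virtual machine's logged events for the phrase 'vSphere HA' to find the reasons for the failed failover attempts. Purely as an illustration (again, not part of the logged catalog payload), that search can be scripted with pyVmomi by scoping an event query to the affected VM; the vCenter address, credentials, and VM name below are placeholders.

# Illustrative sketch only: list a VM's events whose formatted message mentions
# 'vSphere HA', as the action text above suggests. The vCenter address, credentials,
# and the VM name are placeholders.
import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

ctx = ssl._create_unverified_context()
si = SmartConnect(host="vcenter.example.com", user="readonly@vsphere.local",
                  pwd="********", sslContext=ctx)
try:
    content = si.content
    view = content.viewManager.CreateContainerView(content.rootFolder,
                                                   [vim.VirtualMachine], True)
    vm = next(v for v in view.view if v.name == "app-vm-01")   # placeholder VM name
    view.DestroyView()

    by_entity = vim.event.EventFilterSpec.ByEntity(
        entity=vm, recursion=vim.event.EventFilterSpec.RecursionOption.self)
    for ev in content.eventManager.QueryEvents(vim.event.EventFilterSpec(entity=by_entity)):
        if "vSphere HA" in (ev.fullFormattedMessage or ""):
            print(ev.createdTime, type(ev).__name__, ev.fullFormattedMessage)
finally:
    Disconnect(si)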
</action> </cause> </EventLongDescription> VmMessageErrorEventVM error messageerrorError message on {vm.name} on {host.name}: {message}Error message on {vm.name} on {host.name}: {message}Error message on {vm.name}: {message}Error message from {host.name}: {message}Error message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageErrorEvent"> <description> An error message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this error occurred </description> </cause> </EventLongDescription> VmMessageEventVM information messageinfoMessage on {vm.name} on {host.name}: {message}Message on {vm.name} on {host.name}: {message}Message on {vm.name}: {message}Message from {host.name}: {message}Message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageEvent"> <description> An information message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on the messages from the virtual machine </description> </cause> </EventLongDescription> VmMessageWarningEventVM warning messagewarningWarning message on {vm.name} on {host.name}: {message}Warning message on {vm.name} on {host.name}: {message}Warning message on {vm.name}: {message}Warning message from {host.name}: {message}Warning message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageWarningEvent"> <description> A warning message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this warning was issued </description> </cause> </EventLongDescription> VmMigratedEventVM migratedinfoVirtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmMigratedEvent"> <description> The virtual machine's host was changed successfully </description> <cause> <description> A user action caused the virtual machine to be successfully migrated to a different host </description> </cause> </EventLongDescription> VmNoCompatibleHostForSecondaryEventNo compatible host for the Fault Tolerance secondary VMerrorNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name}No compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name}No compatible host for the Fault Tolerance secondary VM {vm.name}No compatible host for the Fault Tolerance secondary VMNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription 
id="vim.event.VmNoCompatibleHostForSecondaryEvent"> <description> No compatible host was found when trying to place a Secondary VM </description> <cause> <description>There was no compatible host available to place a Secondary VM</description> <action>Resolve the incompatibilities and retry the operation</action> </cause> </EventLongDescription> VmNoNetworkAccessEventVM No Network AccesswarningNot all networks are accessible by {destHost.name}Not all networks for {vm.name} are accessible by {destHost.name}VmOrphanedEventVM orphanedwarning{vm.name} does not exist on {host.name}{vm.name} does not exist on {host.name}{vm.name} does not existVirtual machine does not exist on {host.name}{vm.name} does not exist on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmOrphanedEvent"> <description> The virtual machine does not exist on the host with which it is associated </description> <cause> <description> The virtual machine was deleted while its host was disconnected from vCenter Server. </description> </cause> </EventLongDescription> VmPowerOffOnIsolationEventvSphere HA powered off VM on isolated hostinfovSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off this virtual machine on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmPowerOffOnIsolationEvent"> <description> vSphere HA powered off this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmPoweredOffEventVM powered offinfo{vm.name} on {host.name} is powered off{vm.name} on {host.name} is powered off{vm.name} is powered offVirtual machine on {host.name} is powered off{vm.name} on {host.name} in {datacenter.name} is powered offVmPoweredOnEventVM powered oninfo{vm.name} on {host.name} has powered on{vm.name} on {host.name} has powered on{vm.name} has powered onVirtual machine on {host.name} has powered on{vm.name} on {host.name} in {datacenter.name} has powered onVmPoweringOnWithCustomizedDVPortEventVirtual machine powered on with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.infoVirtual machine powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.Virtual machine {vm.name} powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.VmPrimaryFailoverEventFault Tolerance VM failovererrorFault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name} in {datacenter.name}. 
{reason.@enum.VirtualMachine.NeedSecondaryReason}VmReconfiguredEventVM reconfiguredinfoReconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured virtual machine.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name} in {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}VmRegisteredEventVM registeredinfoRegistered {vm.name} on {host.name}Registered {vm.name} on {host.name} in {datacenter.name}Registered {vm.name}Registered VM on {host.name} in {datacenter.name}Registered {vm.name} on {host.name} in {datacenter.name}VmRelayoutSuccessfulEventVM relayout completedinfoRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} completedRelayout of the virtual machine completedRelayout of {vm.name} on {host.name} in {datacenter.name} completedVmRelayoutUpToDateEventVM relayout up-to-dateinfo{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} is in the correct format and relayout is not necessaryIn the correct format and relayout is not necessary{vm.name} on {host.name} in {datacenter.name} is in the correct format and relayout is not necessaryVmReloadFromPathEventVirtual machine reloaded from pathinfo{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} reloaded from new configuration {configPath}.Virtual machine on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.VmReloadFromPathFailedEventVirtual machine not reloaded from patherror{vm.name} on {host.name} could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from path {configPath}.{vm.name} could not be reloaded from {configPath}.This virtual machine could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from {configPath}. <EventLongDescription id="vim.event.VmReloadFromPathFailedEvent"> <description> Reloading the virtual machine from a new datastore path failed </description> <cause> <description>The destination datastore path was inaccessible or invalid </description> <action>Use a valid destination datastore path </action> </cause> <cause> <description>The virtual machine is in an invalid state </description> <action>Check the virtual machine power state. 
If the virtual machine is powered on, power it off </action> </cause> <cause> <description>The virtual machine is enabled for Fault Tolerance </description> <action>Disable Fault Tolerance for the virtual machine and retry the operation </action> </cause> </EventLongDescription> VmRelocateFailedEventFailed to relocate VMerrorFailed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmRelocateFailedEvent"> <description> Virtual machine relocation to a different host or datastore failed </description> <cause> <description> Virtual machine relocation can fail for a number of reasons, including network outages, insufficient disk space, and so on </description> <action> Consider the task related to this event, evaluate the failure reason, and take action accordingly </action> </cause> </EventLongDescription> VmRelocateSpecEvent<VM Relocate Spec Event>info<internal><internal><internal><internal><internal>VmRelocatedEventVM relocatedinfoVirtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmRelocatedEvent"> <description> The virtual machine execution and/or storage was successfully relocated </description> <cause> <description> A user action caused the virtual machine's execution and/or storage to be successfully changed </description> </cause> </EventLongDescription> VmRemoteConsoleConnectedEventVM remote console connectedinfoRemote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name}Remote console connectedRemote console connected to {vm.name} on host {host.name}VmRemoteConsoleDisconnectedEventVM remote console disconnectedinfoRemote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name}Remote console disconnectedRemote console disconnected from {vm.name} on host {host.name}VmRemovedEventVM removedinfoRemoved {vm.name} on {host.name}Removed {vm.name} on {host.name}Removed {vm.name}RemovedRemoved {vm.name} on {host.name} from {datacenter.name}VmRenamedEventVM renamedwarningRenamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName} in 
{datacenter.name}VmRequirementsExceedCurrentEVCModeEventVirtual machine is using features that exceed the capabilities of the host's current EVC mode.warningFeature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of this host's current EVC mode.Feature requirements of this virtual machine exceed capabilities of this host's current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.VmResettingEventVM resettinginfo{vm.name} on {host.name} is reset{vm.name} on {host.name} is reset{vm.name} is resetVirtual machine on {host.name} is reset{vm.name} on {host.name} in {datacenter.name} is resetVmResourcePoolMovedEventVM resource pool movedinfoMoved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name} in {datacenter.name}VmResourceReallocatedEventVM resource reallocatedinfoResource allocation changed
Modified:
{configChanges.modified}Changed resource allocation for {vm.name}
Modified:
{configChanges.modified}VmRestartedOnAlternateHostEventVM restarted on alternate hostinfoVirtual machine {vm.name} was restarted on this host since {sourceHost.name} failedVirtual machine was restarted on {host.name} since {sourceHost.name} failedVirtual machine {vm.name} was restarted on {host.name} since {sourceHost.name} failedVmResumingEventVM resuminginfo{vm.name} on {host.name} is resuming{vm.name} on {host.name} is resuming{vm.name} is resumingVirtual machine on {host.name} is resuming{vm.name} on {host.name} in {datacenter.name} is resumingVmSecondaryAddedEventFault Tolerance secondary VM addedinfoA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name}A Fault Tolerance secondary VM has been added for {vm.name} on host {host.name}A Fault Tolerance secondary VM has been added for {vm.name}A Fault Tolerance secondary VM has been added for this VMA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryDisabledBySystemEventvCenter disabled Fault ToleranceerrorvCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the Secondary VM could not be powered On. <EventLongDescription id="vim.event.VmSecondaryDisabledBySystemEvent"> <description> vCenter Server disabled a Secondary VM because it could not power on the Secondary VM </description> <cause> <description>vCenter Server failed to power on the Secondary VM </description> <action>Check the reason in the event message for more details, fix the failure, and re-enable Fault Tolerance protection to power on the Secondary VM.</action> </cause> </EventLongDescription> VmSecondaryDisabledEventDisabled Fault Tolerance secondary VMinfoDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Disabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Disabled Fault Tolerance secondary VM for {vm.name}Disabled Fault Tolerance secondary VM for this virtual machineDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryEnabledEventEnabled Fault Tolerance secondary VMinfoEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Enabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Enabled Fault Tolerance secondary VM for {vm.name}Enabled Fault Tolerance secondary VM for this VMEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryStartedEventStarted Fault Tolerance secondary VMinfoStarted Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Started Fault Tolerance secondary VM for {vm.name} on host {host.name}Started Fault Tolerance secondary VM for {vm.name}Started Fault Tolerance secondary VM for this virtual machineStarted Fault Tolerance secondary VM for {vm.name} 
on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmShutdownOnIsolationEventvSphere HA shut down VM on isolated hostinfovSphere HA shut down {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down this virtual machine on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} was shut down on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation} <EventLongDescription id="vim.event.VmShutdownOnIsolationEvent"> <description> vSphere HA shut down this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmStartRecordingEventStart a recording sessioninfoStart a recording sessionStart a recording session on {vm.name}VmStartReplayingEventStart a replay sessioninfoStart a replay sessionStart a replay session on {vm.name}VmStartingEventVM startinginfo{vm.name} on {host.name} is starting{vm.name} on {host.name} is starting{vm.name} is startingVirtual machine is starting{vm.name} on {host.name} in {datacenter.name} is startingVmStartingSecondaryEventStarting Fault Tolerance secondary VMinfoStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Starting Fault Tolerance secondary VM for {vm.name} on host {host.name} in clusterStarting Fault Tolerance secondary VM for {vm.name}Starting Fault Tolerance secondary VM for this virtual machineStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmStaticMacConflictEventVM static MAC conflicterrorThe static MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The static MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name}VmStoppingEventVM stoppinginfo{vm.name} on {host.name} is stopping{vm.name} on {host.name} is stopping{vm.name} is stoppingVirtual machine is stopping{vm.name} on {host.name} in {datacenter.name} is stoppingVmSuspendedEventVM suspendedinfo{vm.name} on {host.name} is suspended{vm.name} on {host.name} is suspended{vm.name} is suspendedVirtual machine is suspended{vm.name} on {host.name} in {datacenter.name} is suspendedVmSuspendingEventVM being suspendedinfo{vm.name} on {host.name} is being suspended{vm.name} on {host.name} is being suspended{vm.name} is being suspendedVirtual machine is being suspended{vm.name} on {host.name} in {datacenter.name} is being suspendedVmTimedoutStartingSecondaryEventStarting the Fault Tolerance secondary VM timed outerrorStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in 
{datacenter.name} timed out within {timeout} ms <EventLongDescription id="vim.event.VmTimedoutStartingSecondaryEvent"> <description> An attempt to start a Secondary VM timed out. </description> <cause> <description>A user attempted to turn on or enable Fault Tolerance, triggering the start of the Secondary VM. The start operation timed out and, as a result, vCenter Server disables Fault Tolerance. </description> <action>Fix any problems and re-enable Fault Tolerance protection</action> </cause> <cause> <description>The secondary VM was started in response to a failure, but the start attempt timed out</description> <action> vSphere HA will attempt to power on the Secondary VM</action> </cause> </EventLongDescription> VmUnsupportedStartingEventVM unsupported guest OS is startingwarningUnsupported guest OS {guestId} for {vm.name}Unsupported guest OS {guestId} for {vm.name} on {host.name}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name}Unsupported guest OS {guestId}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUnsupportedStartingEvent"> <description> Attempting to power on a virtual machine that has an unsupported guest operating system </description> <cause> <description> A user action initiated a virtual machine power-on operation, but the virtual machine has an unsupported guest operating system. </description> </cause> </EventLongDescription> VmUpgradeCompleteEventVM upgrade completeinfoVirtual machine compatibility upgraded to {version.@enum.vm.hwVersion}VmUpgradeFailedEventCannot upgrade VMerrorCannot upgrade virtual machine compatibility.VmUpgradingEventUpgrading VMinfoUpgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} in {datacenter.name} to {version.@enum.vm.hwVersion} <EventLongDescription id="vim.event.VmUpgradingEvent"> <description>The virtual hardware on this virtual machine is being upgraded</description> <cause> <description>A user-initiated action triggered an upgrade of the virtual machine hardware</description> </cause> <cause> <description>A scheduled task started an upgrade of the virtual machine hardware</description> </cause> </EventLongDescription> VmUuidAssignedEventVM UUID assignedinfoAssigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name}Assigned new BIOS UUID ({uuid})Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidAssignedEvent"> <description>The virtual machine was assigned a new BIOS UUID</description> <cause> <description>The user who created the virtual machine did not specify a BIOS UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmUuidChangedEventVM UUID ChangedwarningChanged BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name}BIOS UUID was changed from {oldUuid} to {newUuid}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidChangedEvent"> <description>The virtual machine BIOS UUID has changed</description> <cause> <description> A user changed the virtual machine BIOS UUID directly on the host </description> </cause> </EventLongDescription> VmUuidConflictEventVM UUID ConflicterrorBIOS ID ({uuid}) conflicts with that of {conflictedVm.name}BIOS ID ({uuid}) of {vm.name} conflicts with that of {conflictedVm.name}VmVnicPoolReservationViolationClearEventVirtual NIC Network Resource Pool Reservation Violation Clear eventinfoThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedVmVnicPoolReservationViolationRaiseEventVirtual NIC Network Resource Pool Reservation Violation eventinfoThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedVmWwnAssignedEventVM WWN assignedinfoNew WWNs assignedNew WWNs assigned to {vm.name} <EventLongDescription id="vim.event.VmWwnAssignedEvent"> <description> The virtual machine was assigned a WWN (World Wide Name) </description> <cause> <description>The virtual machine was assigned a WWN because it was created with an RDM (Raw Device Mappings) disk or was reconfigured to access an RDM disk </description> </cause> </EventLongDescription> VmWwnChangedEventVM WWN changedwarningWWNs are changedWWNs are changed for {vm.name} <EventLongDescription id="vim.event.VmWwnChangedEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine was changed </description> <cause> <description>The virtual machine was assigned a new WWN, possibly due to a conflict caused by another virtual machine being assigned the same WWN </description> </cause> </EventLongDescription> VmWwnConflictEventVM WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {vm.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.VmWwnConflictEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine has a conflict </description> <cause> <description>The WWN assigned to this virtual machine was the same as that of a different virtual machine. </description> <action> Check the event details for more information on the conflict and correct the problem. </action> </cause> </EventLongDescription> WarningUpgradeEventWarning upgradewarning{message}IScsiBootFailureEventBoot from iSCSI failed.warningBooting from iSCSI failed.Booting from iSCSI failed with an error. See the VMware Knowledge Base for information on configuring iBFT networking.EventExLost Network Connectivityerrorvprob.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. 
Affected portgroups:{3}.EventExNo IPv6 TSO supporterrorvprob.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid vmknic specified in /Migrate/Vmknicwarningvprob.net.migrate.bindtovmk|The ESX advanced config option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Please update the config option with a valid vmknic or, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExVirtual NIC connection to switch failedwarningvprob.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningvprob.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. {3} uplinks still up. Affected portgroups:{4}.EventExLost Network Redundancywarningvprob.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExThin Provisioned Device Nearing Capacitywarningvprob.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold.EventExLost Storage Connectivityerrorvprob.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExDegraded Storage Path Redundancywarningvprob.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. {3} remaining active paths. Affected datastores: {4}.EventExLost Storage Path Redundancywarningvprob.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExVMFS Locked By Remote Hosterrorvprob.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.errorvprob.vmfs.extent.offline|An attached device {1} might be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system is online.infovprob.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Volume Connectivity Restoredinfovprob.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfovprob.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterrorvprob.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerrorvprob.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Opening volume in read-only metadata mode with limited write support.EventExVMFS Lock Corruption Detectederrorvprob.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). 
Other regions of the volume may be damaged too.EventExLost connection to NFS servererrorvprob.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExRestored connection to NFS serverinfovprob.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExVMFS Resource Corruption Detectederrorvprob.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExCopied Library Iteminfocom.vmware.cl.CopyLibraryItemEvent|Copied Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}({sourceLibraryItemId}), source Library {sourceLibraryName}.EventExFailed to copy Library Itemerrorcom.vmware.cl.CopyLibraryItemFailEvent|Failed to copy Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}, source Library {sourceLibraryName}.EventExCreated Libraryinfocom.vmware.cl.CreateLibraryEvent|Created Library {libraryName}EventExFailed to create Libraryerrorcom.vmware.cl.CreateLibraryFailEvent|Failed to create Library {libraryName}EventExCreated Library Iteminfocom.vmware.cl.CreateLibraryItemEvent|Created Library Item {libraryItemName} in Library {libraryName}.EventExFailed to create Library Itemerrorcom.vmware.cl.CreateLibraryItemFailEvent|Failed to create Library Item {libraryItemName} in Library {libraryName}.EventExDeleted Libraryinfocom.vmware.cl.DeleteLibraryEvent|Deleted Library {libraryName}EventExFailed to delete Libraryerrorcom.vmware.cl.DeleteLibraryFailEvent|Failed to delete Library {libraryName}EventExDeleted Library Iteminfocom.vmware.cl.DeleteLibraryItemEvent|Deleted Library Item {libraryItemName} in Library {libraryName}.EventExFailed to delete Library Itemerrorcom.vmware.cl.DeleteLibraryItemFailEvent|Failed to delete Library Item {libraryItemName} in Library {libraryName}.EventExPublished Libraryinfocom.vmware.cl.PublishLibraryEvent|Published Library {libraryName}EventExFailed to publish Libraryerrorcom.vmware.cl.PublishLibraryFailEvent|Failed to publish Library {libraryName}EventExPublished Library Iteminfocom.vmware.cl.PublishLibraryItemEvent|Published Library Item {libraryItemName} in Library {libraryName}EventExFailed to publish Library Itemerrorcom.vmware.cl.PublishLibraryItemFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName}EventExPublished Library Item to Subscriptioninfocom.vmware.cl.PublishLibraryItemSubscriptionEvent|Published Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library Item to Subscriptionerrorcom.vmware.cl.PublishLibraryItemSubscriptionFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExPublished Library to Subscriptioninfocom.vmware.cl.PublishLibrarySubscriptionEvent|Published Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library to Subscriptionerrorcom.vmware.cl.PublishLibrarySubscriptionFailEvent|Failed to publish Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExCreated 
Subscriptioninfocom.vmware.cl.SubscriptionCreateEvent|Created subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to create Subscriptionerrorcom.vmware.cl.SubscriptionCreateFailEvent|Failed to create subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExDeleted Subscriptioninfocom.vmware.cl.SubscriptionDeleteEvent|Deleted subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to delete Subscriptionerrorcom.vmware.cl.SubscriptionDeleteFailEvent|Failed to delete subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExUpdated Subscriptioninfocom.vmware.cl.SubscriptionUpdateEvent|Updated subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to update Subscriptionerrorcom.vmware.cl.SubscriptionUpdateFailEvent|Failed to update subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExSynchronized Libraryinfocom.vmware.cl.SyncLibraryEvent|Synchronized Library {libraryName}EventExFailed to Synchronize Libraryerrorcom.vmware.cl.SyncLibraryFailEvent|Failed to Synchronize Library {libraryName}EventExSynchronized Library Iteminfocom.vmware.cl.SyncLibraryItemEvent|Synchronized Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Itemerrorcom.vmware.cl.SyncLibraryItemFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Iteminfocom.vmware.cl.SyncNfcFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}. 
Failure may be due to a network error or a host entering maintenance mode.EventExUpdated Libraryinfocom.vmware.cl.UpdateLibraryEvent|Updated Library {libraryName}EventExFailed to update Libraryerrorcom.vmware.cl.UpdateLibraryFailEvent|Failed to update Library {libraryName}EventExUpdated Library Iteminfocom.vmware.cl.UpdateLibraryItemEvent|Updated Library Item {libraryItemName} in Library {libraryName}.EventExFailed to update Library Itemerrorcom.vmware.cl.UpdateLibraryItemFailEvent|Failed to update Library Item {libraryItemName} in Library {libraryName}.EventExCould not locate Library Item file on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryItemFileOnRestoreEvent|File '{fileName}' in Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library Item folder on the storage backing after restorecom.vmware.cl.restore.DeletedLibraryItemOnRestoreEvent|Folder for Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library folder on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryOnRestoreEvent|Library '{libraryName}' folder could not be located on the storage backing after restoreEventExCould not locate Library Item content after restorecom.vmware.cl.restore.MissingLibraryItemContentOnRestoreEvent|The content of Library Item '{libraryItemName}' could not be located on storage after restoreEventExNew Library Item file found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemFileOnRestoreEvent|New Library Item file '{fileName}' found on the storage backing for Library Item '{libraryItemName}' after restore. Path to the file on storage: '{filePath}'EventExNew Library Item folder found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemOnRestoreEvent|New Library Item folder '{itemFolderName}' found on the storage backing for Library '{libraryName}' after restore. 
Path to the item folder on storage: '{itemFolderPath}'ExtendedEventCancel LWD snapshotinfoCancelling LWD snapshotcom.vmware.dp.events.cancelsnapshot|Cancelling LWD snapshotExtendedEventLWD snapshot is cancelledinfoLWD snapshot is cancelledcom.vmware.dp.events.cancelsnapshotdone|LWD snapshot is cancelledExtendedEventFailed to cancel LWD snapshoterrorFailed to cancel LWD snapshotcom.vmware.dp.events.cancelsnapshotfailed|Failed to cancel LWD snapshotExtendedEventPerform 'commit' phase of LWD-based restoreinfoPerforming 'commit' phase of LWD-based restorecom.vmware.dp.events.commitrestore|Performing 'commit' phase of LWD-based restoreExtendedEvent'commit' phase of LWD-based restore is completedinfo'commit' phase of LWD-based restore is completedcom.vmware.dp.events.commitrestoredone|'commit' phase of LWD-based restore is completedExtendedEvent'commit' phase of LWD-based restore failederror'commit' phase of LWD-based restore failedcom.vmware.dp.events.commitrestorefailed|'commit' phase of LWD-based restore failedExtendedEventEnabling protection services on hosts in the clusterinfoEnabling protection services on hosts in the clusterEnabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservices|Enabling protection services on hosts in the clusterExtendedEventFinished enabling protection services on hosts in the clusterinfoFinished enabling protection services on hosts in the clusterFinished enabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesdone|Finished enabling protection services on hosts in the clusterExtendedEventFailed to enable protection services on hosts in the clustererrorFailed to enable protection services on hosts in the clusterFailed to enable protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesfailed|Failed to enable protection services on hosts in the clusterExtendedEventPerform 'prepare' phase of LWD-based restoreinfoPerforming 'prepare' phase of LWD-based restorecom.vmware.dp.events.preparerestore|Perform 'prepare' phase of LWD restoreExtendedEvent'prepare' phase of LWD-based restore is completedinfo'prepare' phase of LWD-based restore is completedcom.vmware.dp.events.preparerestoredone|'prepare' phase of LWD-based restore is completedExtendedEvent'prepare' phase of LWD-based restore failederror'prepare' phase of LWD-based restore failedcom.vmware.dp.events.preparerestorefailed|'prepare' phase of LWD-based restore failedExtendedEventEnable LWD data protectioninfoEnabling LWD data protectioncom.vmware.dp.events.protect|Enabling LWD data protectionExtendedEventLWD data protection enabledinfoLWD data protection enabledcom.vmware.dp.events.protectdone|LWD data protection enabledExtendedEventFailed to enable LWD data protectionerrorFailed to enable LWD data protectioncom.vmware.dp.events.protectfailed|Failed to enable LWD data protectionExtendedEventQuerying entity for protection infoinfoQuerying entity for protection infocom.vmware.dp.events.queryprotectedentityinfo|Querying entity for protection infoExtendedEventFinished querying entity for protection infoinfoFinished querying entity for protection infocom.vmware.dp.events.queryprotectedentityinfodone|Finished querying entity for protection infoExtendedEventFailed to query entity for protection infoerrorFailed to query entity for protection infocom.vmware.dp.events.queryprotectedentityinfofailed|Failed to query entity for protection infoExtendedEventRetire LWD snapshotinfoRetiring LWD 
snapshotcom.vmware.dp.events.retiresnapshot|Retiring LWD snapshotExtendedEventLWD snapshot is retiredinfoLWD snapshot is retiredcom.vmware.dp.events.retiresnapshotdone|LWD snapshot is retiredExtendedEventFailed to retire LWD snapshoterrorFailed to retire LWD snapshotcom.vmware.dp.events.retiresnapshotfailed|Failed to retire LWD snapshotExtendedEventTake LWD application-consistent snapshotinfoTaking LWD application-consistent snapshotcom.vmware.dp.events.snapshot.applicationconsistent|Taking LWD application-consistent snapshotExtendedEventTake LWD crash-consistent snapshotinfoTaking LWD crash-consistent snapshotcom.vmware.dp.events.snapshot.crashconsistent|Taking LWD crash-consistent snapshotExtendedEventTake LWD metadata-only snapshotinfoTaking LWD metadata-only snapshotcom.vmware.dp.events.snapshot.metadataonly|Taking LWD metadata-only snapshotExtendedEventTake LWD VSS application-consistent snapshotinfoTaking LWD VSS application-consistent snapshotcom.vmware.dp.events.snapshot.vssappconsistent|Taking LWD VSS application-consistent snapshotExtendedEventLWD application-consistent snapshot takeninfoLWD application-consistent snapshot takencom.vmware.dp.events.snapshotdone.applicationconsistent|LWD application-consistent snapshot takenExtendedEventLWD crash-consistent snapshot takeninfoLWD crash-consistent snapshot takencom.vmware.dp.events.snapshotdone.crashconsistent|LWD crash-consistent snapshot takenExtendedEventLWD metadata-only snapshot takeninfoLWD metadata-only snapshot takencom.vmware.dp.events.snapshotdone.metadataonly|LWD metadata-only snapshot takenExtendedEventLWD VSS application-consistent snapshot takeninfoLWD VSS application-consistent snapshot takencom.vmware.dp.events.snapshotdone.vssappconsistent|LWD VSS application-consistent snapshot takenExtendedEventLWD application-consistent snapshot failederrorLWD application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.applicationconsistent|LWD application-consistent snapshot failedExtendedEventLWD crash-consistent snapshot failederrorLWD crash-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.crashconsistent|LWD crash-consistent snapshot failedExtendedEventLWD metadata-only snapshot failederrorLWD metadata-only snapshot failedcom.vmware.dp.events.snapshotfailed.metadataonly|LWD metadata-only snapshot failedExtendedEventLWD VSS application-consistent snapshot failederrorLWD VSS application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.vssappconsistent|LWD VSS application-consistent snapshot failedExtendedEventPerform LWD snapshot syncinfoPerforming LWD snapshot synccom.vmware.dp.events.sync|Performing LWD snapshot syncExtendedEventLWD snapshot sync is completedinfoLWD snapshot sync is completedcom.vmware.dp.events.syncdone|LWD snapshot sync is completedExtendedEventLWD snapshot sync failederrorLWD snapshot sync failedcom.vmware.dp.events.syncfailed|LWD snapshot sync failedExtendedEventDisable LWD data protectioninfoDisabling LWD data protectioncom.vmware.dp.events.unprotect|Disabling LWD data protectionExtendedEventLWD data protection disabledinfoLWD data protection disabledcom.vmware.dp.events.unprotectdone|LWD data protection disabledExtendedEventFailed to disable LWD data protectionerrorFailed to disable LWD data protectioncom.vmware.dp.events.unprotectfailed|Failed to disable LWD data protectionEventExDeployed entity from Content Libraryinfocom.vmware.ovfs.DeployEvent|Deployed entity from Library Item {libraryItemName} in Library {libraryName}EventExFailed to deploy entity from 
Content Libraryerrorcom.vmware.ovfs.DeployFailEvent|Failed to deploy entity from Library Item {libraryItemName} in Library {libraryName}EventExCloned entity to Content Libraryinfocom.vmware.ovfs.ExportEvent|Cloned entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExFailed to clone entity to Content Libraryerrorcom.vmware.ovfs.ExportFailEvent|Failed to clone entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExinfocom.vmware.rbd.activateRuleSet|Activate Rule SetEventExwarningcom.vmware.rbd.fdmPackageMissing|A host in a HA cluster does not have the 'vmware-fdm' package in its image profileEventExwarningcom.vmware.rbd.hostProfileRuleAssocEvent|A host profile associated with one or more active rules was deleted.EventExerrorcom.vmware.rbd.hostScriptFailure|An error encountered while running a user defined script: {scriptName} on the host: {ip}. Status: {status}EventExwarningcom.vmware.rbd.ignoreMachineIdentity|Ignoring the AutoDeploy.MachineIdentity event, since the host is already provisioned through Auto DeployEventExinfocom.vmware.rbd.pxeBootNoImageRule|Unable to PXE boot host since it does not match any rulesEventExinfocom.vmware.rbd.pxeBootUnknownHost|PXE Booting unknown hostEventExinfocom.vmware.rbd.pxeProfileAssoc|Attach PXE ProfileEventExinfocom.vmware.rbd.scriptBundleAssoc|Script Bundle Name: {name} attached to moref {moref}, entity-id {entity-id}EventExerrorcom.vmware.rbd.vmcaCertGenerationFailureEvent|Failed to generate host certificates using VMCAEventExCreated Harbor registryinfocom.vmware.registry.HarborCreateEvent|Created Harbor registry {registryName} on cluster {clusterId}.EventExFailed to create Harbor registryerrorcom.vmware.registry.HarborCreateFailEvent|Failed to create Harbor registry {registryName} on cluster {clusterId}.EventExDeleted Harbor registryinfocom.vmware.registry.HarborDeleteEvent|Deleted Harbor registry {registryName} on cluster {clusterId}.EventExFailed to delete Harbor registryerrorcom.vmware.registry.HarborDeleteFailEvent|Failed to delete Harbor registry {registryName} on cluster {clusterId}.EventExCreated Harbor projectinfocom.vmware.registry.HarborProjectCreateEvent|Created Harbor project {projectName} for registry {registryId}.EventExFailed to create Harbor projecterrorcom.vmware.registry.HarborProjectCreateFailEvent|Failed to create Harbor project {projectName} for registry {registryId}.EventExDeleted Harbor projectinfocom.vmware.registry.HarborProjectDeleteEvent|Deleted Harbor project {projectName} for registry {registryId}.EventExFailed to delete Harbor projecterrorcom.vmware.registry.HarborProjectDeleteFailEvent|Failed to delete Harbor project {projectName} for registry {registryId}.EventExCreated Harbor project memberinfocom.vmware.registry.HarborProjectMemberCreateEvent|Created Harbor project member {memberName} for project {projectName}.EventExFailed to create Harbor project membererrorcom.vmware.registry.HarborProjectMemberCreateFailEvent|Failed to create Harbor project member {memberName} for project {projectName}.EventExDeleted Harbor project memberinfocom.vmware.registry.HarborProjectMemberDeleteEvent|Deleted Harbor project member {memberName} from project {projectName}.EventExFailed to delete Harbor project membererrorcom.vmware.registry.HarborProjectMemberDeleteFailEvent|Failed to delete Harbor project member {memberName} from project {projectName}.EventExUpdated Harbor project memberinfocom.vmware.registry.HarborProjectMemberUpdateEvent|Updated Harbor project member 
{memberName} for project {projectName}.EventExFailed to update Harbor project membererrorcom.vmware.registry.HarborProjectMemberUpdateFailEvent|Failed to update Harbor project member {memberName} for project {projectName}.EventExPurged Harbor projectinfocom.vmware.registry.HarborProjectPurgeEvent|Purged Harbor project {projectName} for registry {registryId}.EventExFailed to purge Harbor projecterrorcom.vmware.registry.HarborProjectPurgeFailEvent|Failed to purge Harbor project {projectName} for registry {registryId}.EventExRestoring Harbor registryinfocom.vmware.registry.HarborRestoreEvent|Restoring Harbor registry {registryName} on cluster {clusterId}.EventExFailed to restore Harbor registryerrorcom.vmware.registry.HarborRestoreFailEvent|Failed to restore Harbor registry {registryName} on cluster {clusterId}.EventExRestored Harbor registryinfocom.vmware.registry.HarborRestoreSuccessEvent|Restored Harbor registry {registryName} on cluster {clusterId}.ExtendedEventProactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!errorcom.vmware.vc.proactivehdw.DbError|Proactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!EventExProactive hardware management: Host is disabled with proactive hardware management.warningcom.vmware.vc.proactivehdw.Disabled|Host is disabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: Host is enabled with proactive hardware management.infocom.vmware.vc.proactivehdw.Enabled|Host is enabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: received a failure health update from vendor.errorcom.vmware.vc.proactivehdw.Failure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: Polled health updates from HSM are discarded due to health update response content size limit being exceeded.warningcom.vmware.vc.proactivehdw.HealthUpdatesResponseLimitExceed|Proactive hardware management: Polled health updates from HSM {VendorDisplayName} are discarded due to health update response content size limit being exceeded. Refer to vSAN health logs for more details.EventExProactive hardware management: Some health updates from HSM are discarded due to validation failures.warningcom.vmware.vc.proactivehdw.HealthUpdatesValidationFail|Proactive hardware management: Some health updates from HSM {VendorDisplayName} are discarded due to validation failures. Refer to vSAN health logs for more details.EventExProactive hardware management: Error occurred when posting host-level event for unregistration of HSMerrorcom.vmware.vc.proactivehdw.HostEventPostFailed|Proactive hardware management: After HSM {VendorDisplayName} was unregistered an internal error prevented a host event from posting. 
The following hosts are affected: {AffectedHosts}.EventExProactive hardware management: Failed to contact an HSMerrorcom.vmware.vc.proactivehdw.HsmCommunicationError|Proactive hardware management: Failed to contact HSM with vendor: {VendorDisplayName}.EventExProactive hardware management: Error occurred in poll HSM requesterrorcom.vmware.vc.proactivehdw.HsmRequestError|Proactive hardware management: Internal error occurred during polling HSM from vendor {VendorDisplayName}.EventExProactive hardware management: HSM is unregistered.infocom.vmware.vc.proactivehdw.HsmUnregistration|Proactive hardware management: HSM is unregistered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: received a predictive failure health update from vendor.warningcom.vmware.vc.proactivehdw.PredictiveFailure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: HSM is unregistered but with a failure in removing resource bundle.errorcom.vmware.vc.proactivehdw.ResourceBundleCleanupError|Proactive hardware management: HSM from {VendorDisplayName} is unregistered but with a failure in removing resource bundle - likely the resource bundle is currently in use. Please refer to vSAN health logs for the underlying cause and perform manual clean up on the resource bundle.EventExProactive hardware management: Failed to create/update subscription for HSM due to a communication error with HSMerrorcom.vmware.vc.proactivehdw.SubscriptionHsmCommError|Proactive hardware management: Failed to create/update subscription for HSM {VendorDisplayName} due to a communication error with HSM.EventExProactive hardware management: Failed to create/update subscription for HSM due to internal errorerrorcom.vmware.vc.proactivehdw.SubscriptionInternalError|Proactive hardware management: Failed to perform subscription create/update for HSM {VendorDisplayName} due to an internal error. Please refer to the vSAN health logs for more details.EventExProactive hardware management: A new HSM is registered.infocom.vmware.vc.proactivehdw.registration.NewRegistration|Proactive hardware management: A new HSM is registered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: HSM registration is updated.infocom.vmware.vc.proactivehdw.registration.UpdateSuccess|Proactive hardware management: The registration information on the following HSM: '{VendorDisplayName}' has been updated. Here are its supported health update infos: '{EnabledHealthUpdateInfos}'ExtendedEventinfocom.vmware.vcIntegrity.CancelTask|Canceling task on [data.name].ExtendedEventinfocom.vmware.vcIntegrity.CheckNotification|Successfully downloaded notifications. New notifications: [data.Notifications]ExtendedEventerrorcom.vmware.vcIntegrity.CheckNotificationFailed|Could not download notifications.ExtendedEventerrorcom.vmware.vcIntegrity.CheckPXEBootHostFailure|Cannot determine whether host {host.name} is PXE booted. 
The host will be excluded for the current operation.ExtendedEventwarningcom.vmware.vcIntegrity.ClusterConfigurationOutOfCompliance|Hosts in Cluster [data.resource] are out of compliance.ExtendedEventerrorcom.vmware.vcIntegrity.ClusterOperationCancelledDueToCertRefresh|In-flight VUM task on Cluster [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventwarningcom.vmware.vcIntegrity.CriticallyLowDiskSpace|VMware vSphere Lifecycle Manager is critically low on storage space! Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.DisableToolsRemediateOnReboot|Successfully disabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.DisableToolsRemediateOnRebootFailed|Could not disable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.DownloadAlert|VMware vSphere Lifecycle Manager download alert (critical/total): ESX [data.esxCritical]/[data.esxTotal]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadFailedPatchBinary|Could not download patch packages for following patches: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestPackage|Successfully downloaded guest patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestPackageFailed|Could not download guest patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixPackage|Successfully downloaded guest patch packages for UNIX. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixPackageFailed|Could not download guest patch packages for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixUpdate|Successfully downloaded guest patch definitions for UNIX. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixUpdateFailed|Could not download guest patch definitions for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUpdate|Successfully downloaded guest patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUpdateFailed|Could not download guest patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostPackage|Successfully downloaded host patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostPackageFailed|Could not download host patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostUpdate|Successfully downloaded host patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostUpdateFailed|Could not download host patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.EnableToolsRemediateOnReboot|Successfully enabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.EnableToolsRemediateOnRebootFailed|Could not enable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.FailToLock|There are running tasks for the entity [data.name] that cannot finish within a specific time. The operation will stop.ExtendedEventcom.vmware.vcIntegrity.FtFailedEvent|ExtendedEventerrorcom.vmware.vcIntegrity.GADvdMountError|VMware vSphere Lifecycle Manager Guest Agent could not access the DVD drive on {vm.name}. 
Verify that a DVD drive is available and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAError|An internal error occurred in communication with VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Verify that the VM is powered on and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAInstallFailed|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Make sure that the VM is powered on.ExtendedEventinfocom.vmware.vcIntegrity.GAInstalled|VMware vSphere Lifecycle Manager Guest Agent successfully installed on {vm.name}.ExtendedEventerrorcom.vmware.vcIntegrity.GARuntimeError|An unknown internal error occurred during the required operation on {vm.name}. Check the logs for more details and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GATimeout|VMware vSphere Lifecycle Manager Guest Agent could not respond in time on {vm.name}. Verify that the VM is powered on and that the Guest Agent is running.ExtendedEventwarningcom.vmware.vcIntegrity.HostConfigurationOutOfCompliance|Configuration of Host [data.resource] is out of compliance.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallClose|Close [data.name] firewall ports.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallOpen|Open [data.name] firewall ports.ExtendedEventerrorcom.vmware.vcIntegrity.HostOperationCancelledDueToCertRefresh|In-flight VUM task on Host [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportCancelled|Host patch offline bundle upload is canceled by user.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportSuccess|[data.numBulletins] new bulletins uploaded successfully through offline bundle.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchBundleImportUnknownError|Host patch offline bundle upload did not succeed.ExtendedEventcom.vmware.vcIntegrity.HostPatchInputRecalledFailure|ExtendedEventcom.vmware.vcIntegrity.HostPatchPrerequisiteRecalledFailure|ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be remediated. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry remediation operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be remediated. Remove either of the patch from the baseline and retry the remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be staged. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be staged. 
Remove either of the patch from the baseline and retry the stage operation.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmFtEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster and contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmScanEvent|Cannot scan host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmStageEvent|Cannot stage host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtDiffPatchesEvent|Host {host.name} has FT enabled VMs. If you apply different patches to hosts in a cluster, FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtEvent|Cannot remediate host {host.name} because it contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtPairEvent|Host {host.name} has FT enabled VMs. The host on which the Secondary VMs reside is not selected for remediation. As a result FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedHacEvent|Cannot remediate host {host.name} because it is a part of a HA admission control enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedPxeUpgradeUnsupported|Upgrade operations are not supported on host {host.name} because it is PXE booted.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedRemovableDeviceEvent|Cannot remediate host {host.name} because it has VMs with a connected removable device. Disconnect all removable devices before remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorEsxFileDownload|Host [data.name] cannot download files from the VMware vSphere Lifecycle Manager patch store. Check the network connectivity and firewall setup, and verify that the host can access the configured patch store.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorNotInstallable|The selected patches [data.arg1] cannot be installed on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateConflictDependencies|The patches selected for remediation on the host [data.name] depend on other patches that have conflicts.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDefault|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDeviceAttached|Remediation did not succeed for [data.name]. The host has virtual machines [data.arg1] with connected removable media devices. This prevents the host from entering maintenance mode. Disconnect the removable devices and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateEnterMmode|Remediation did not succeed for [data.name]. The host could not enter maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateExitMmode|Remediation did not succeed for [data.name]. The host could not exit maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReboot|Remediation did not succeed for [data.name]. The host did not reboot after remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRebootReconnect|Remediation did not succeed for [data.name]. 
VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect after a reboot.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReconnect|Remediation did not succeed for [data.name]. VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRestoreVm|Remediation did not succeed for [data.name]. Restoring the power state or device connection state for one or more virtual machines on the host did not succeed.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateMetadataCorrupt|Remediation did not succeed for [data.name]. The patch metadata is corrupted. This might be caused by an invalid format of metadata content. You can try to re-download the patches.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateVibDownload|Remediation did not succeed for [data.name]. There were errors while downloading one or more software packages. Check the VMware vSphere Lifecycle Manager network connectivity settings.ExtendedEventcom.vmware.vcIntegrity.HostUpdateErrorVsanHealthCheckFailed|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeAgentDeployFailure|Cannot deploy upgrade agent on host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailBootDiskSize|The boot disk has a size of [data.found] MiB, the minimum requirement of the upgrade image is [data.expected] MiB.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailConflictingVibs|The upgrade contains conflicting VIBs. Remove the conflicting VIBs or use Image Builder to create a custom upgrade ISO image that contains the newer versions of the conflicting VIBs, and try to upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreakageUnsure|Cannot determine whether the upgrade breaks Cisco Nexus 1000V virtual network switch feature on the host. If the host does not have the feature, you can ignore this warning.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreaks|Cisco Nexus 1000V virtual network switch feature installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailEESXInsufficientSpaceForImage|Cannot create a ramdisk of size [data.expected]MB to store the upgrade image. Check if the host has sufficient memory.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailESXInsufficientSpaceForImage|Upgrade requires at least [data.expected]MB free space on boot partition to store the upgrade image, only [data.found]MB found. Retry after freeing up sufficient space or perform a CD-based installation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailHostHardwareMismatch|The upgrade is not supported on the host hardware. The upgrade ISO image contains VIBs that failed the host hardware compatibility check.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPInImage|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V software package [data.expected] installed on the host. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPUSupportedHost|The host is managing a DPU(s) and is a part of vLCM baselines-managed cluster, which is not supported. 
Move the host to vLCM image-managed cluster and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleHostAcceptance|The upgrade package is not compatible with the host. Use an upgrade package that meets the host's acceptance level or change the host's acceptance level to match that of the upgrade package.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePartitionLayout|The host cannot be upgraded due to incompatible partition layout.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePasswords|The passwords cannot be migrated because the password encryption scheme is incompatible.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleSphereletVersion|Spherelet version [data.found] is not compatible with ESXi 8.0 and later version. Please upgrade your WCP cluster to install a compatible Spherelet version, or remove Spherelet if the host is not in a WCP cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleWithDvsCP|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientEntropyCache|Storage entropy cache is not full. A full entropy cache is required for upgrade. Refer to KB 89854 for steps on how to refill the cache.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientMemory|Insufficient memory found on the host: [data.expected]MB required, [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientSpaceForConfig|Upgrade requires at least [data.expected]MB free space on a local VMFS datastore, only [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailLockerSpaceAvail|The system has insufficient locker space for the image profile.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPBreaksDvsCP|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image that is compatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPInImage|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image [data.found]. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingGunzipChecksumVibs|These VIB(s) on the host do not have the required sha-256 gunzip checksum for their payloads: [data.found]. This will prevent VIB security verification and secure boot from functioning properly. Please remove these VIBs and check with your vendor for a replacement of these VIBs.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeBootBank|The system image on the attached iso lacks a storage driver for the installed bootbank.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeNic|The system image on the attached iso lacks a NIC driver for the management network traffic.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMD5RootPassword|The root password is not using MD5 hashing, causing it to be authenticated up to only 8 characters. 
For instructions on how to correct this, see VMware KB 1024500 at http://kb.vmware.com/kb/1024500.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMinCpuCores|New ESXi version requires a minimum of [data.expected] processor cores.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoVt|Processor does not support hardware virtualization or it is disabled in BIOS. Virtual machine performance may be slow.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNonVmwareSoftware|The software modules [data.found] found on the host are not part of the upgrade image. These modules will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNvdsToCvdsMigration|ESXi host is not ready for NSX-T vSphere Distributed Switch (VDS) migration included with this ESXi upgrade. Please run Upgrade Readiness Tool (URT) from the NSX-T Manager managing this host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNxbitEnabled|No eXecute (NX) bit is not enabled on the host. New ESXi version requires a CPU with NX/XD bit supported and enabled.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPendingReboot|Host software configuration requires a reboot. Reboot the host and try upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPowerPathBreaks|EMC PowerPath module [data.found] installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailRRFTVMsPresent|Legacy FT is not compatible with upgraded version. Disable legacy FT.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailScriptInitFailed|Host upgrade validity checks are not successful.ExtendedEventcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailTbootRequired|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnknown|The upgrade precheck script returned unknown error.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedConfig|Error in ESX configuration file (esx.conf).ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedDevices|Unsupported devices [data.found] found on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedHostVersion|Host version [data.found] is not supported for upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedLongMode|Host CPU is unsupported. New ESXi version requires a 64-bit CPU with support for LAHF/SAHF instructions in long mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedSHA1Cert|SHA-1 signature found in host certificate {data.cert} with subject {data.subject}. Support for certificates with weak signature algorithm SHA-1 has been removed in ESXi 8.0. To proceed with upgrade, replace it with a SHA-2 signature based certificate. Refer to release notes and KB 89424 for more details.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedTPMVersion|TPM 1.2 device detected. Support for TPM version 1.2 is discontinued. Installation may proceed, but may cause the system to behave unexpectedly.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailVFATCorruption|A problem with one or more vFAT bootbank partitions was detected. 
Please refer to KB 91136 and run dosfsck on bootbank partitions.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeProgressAborted|Host upgrade installer stopped.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressAuth|Host upgrade in progress: Configuring authentication.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressBootloader|Host upgrade in progress: Boot setup.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressClearpart|Host upgrade in progress: Clearing partitions.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressComplete|Host upgrade installer completed.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressKeyboard|Host upgrade in progress: Setting keyboard.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressLanguage|Host upgrade in progress: Setting language.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMigrating|Host upgrade in progress: Migrating ESX v3 configuration to ESX v4.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMount|Host upgrade in progress: Mounting file systems.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressNetworking|Host upgrade in progress: Installing network configuration.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPackages|Host upgrade in progress: Installing packages.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartphys|Host upgrade in progress: Partitioning physical hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartvirt|Host upgrade in progress: Partitioning virtual hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPostscript|Host upgrade in progress: Running postinstallation script.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressRootpass|Host upgrade in progress: Setting root passwordExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressTimezone|Host upgrade in progress: Setting timezone.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressUnknown|Host upgrade in progress.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeRunScriptFailure|Cannot run upgrade script on host.ExtendedEventerrorcom.vmware.vcIntegrity.ImageRecommendationGenerationError|The image recommendation generation failed.ExtendedEventinfocom.vmware.vcIntegrity.ImageRecommendationGenerationFinished|The image recommendation generation finished.ExtendedEventerrorcom.vmware.vcIntegrity.IncompatibleTools|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name} because VMware Tools is not installed or is of an incompatible version. 
The required version is [data.requiredVersion] and the installed version is [data.installedVersion].ExtendedEventinfocom.vmware.vcIntegrity.InstallAddOnUpdate|The following additional patches are included to resolve a conflict for installation on [data.entityName]: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestion|To resolve a conflict for installation on [data.entityName], the following additional patches might need to be included in the baseline: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestionNotFound|VMware vSphere Lifecycle Manager could not find patches to resolve the conflict for installation on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdate|Installation of patches [data.updateId] started on host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdateComplete|Installation of patches succeeded on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.InstallUpdateError|Could not install patches on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.LinuxOffLineScanNotSupported|Cannot scan [data.name] for patches. Scan of powered off or suspended Linux VMs is not supported.ExtendedEventwarningcom.vmware.vcIntegrity.LowDiskSpace|VMware vSphere Lifecycle Manager is running out of storage space. Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.MetadataCorrupted|Patch definition for [data.name] is corrupt. Check the logs for more details. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.MetadataNotFound|Patch definitions for [data.name] are missing. Download patch definitions first.ExtendedEventerrorcom.vmware.vcIntegrity.NoRequiredLicense|There is no VMware vSphere Lifecycle Manager license for [data.name] for the required operation.ExtendedEventinfocom.vmware.vcIntegrity.NotificationCriticalInfoAlert|VMware vSphere Lifecycle Manager informative notification (critical) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationDownloadAlert|VMware vSphere Lifecycle Manager notification download alertExtendedEventinfocom.vmware.vcIntegrity.NotificationImportantInfoAlert|VMware vSphere Lifecycle Manager informative notification (important) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationModerateInfoAlert|VMware vSphere Lifecycle Manager informative notification (moderate) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallAlert|VMware vSphere Lifecycle Manager recall alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallFixAlert|VMware vSphere Lifecycle Manager recall fix alertExtendedEventerrorcom.vmware.vcIntegrity.OperationCancelledDueToCertRefresh|In-flight VUM task on [data.name] is cancelled due to VC TLS certificate replacement. 
For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventcom.vmware.vcIntegrity.PXEBootedHostEvent|ExtendedEventinfocom.vmware.vcIntegrity.PackageImport|Package [data.name] is successfully imported.ExtendedEventerrorcom.vmware.vcIntegrity.PackageImportFailure|Import of package: [data.name] did not succeed.ExtendedEventinfocom.vmware.vcIntegrity.RebootHostComplete|Host [data.entityName] is successfully rebooted.ExtendedEventerrorcom.vmware.vcIntegrity.RebootHostError|Cannot reboot host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostStart|Start rebooting host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostWait|Waiting for host [data.entityName] to reboot.ExtendedEventerrorcom.vmware.vcIntegrity.ReconfigureClusterFailedEvent|VMware vSphere Lifecycle Manager could not restore HA admission control/DPM settings for cluster {computeResource.name} to their original values. These settings have been changed for patch installation. Check the cluster settings and restore them manually.ExtendedEventinfocom.vmware.vcIntegrity.Remediate|Remediation succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedHost|Could not remediate {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedVm|Could not remediate {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDpmDisableHost|Could not remediate host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.RemediateFailed|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostInvalidPowerState|Cannot remediate the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostOnUnsupportedHost|Could not remediate {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediateOrphanedVm|Could not remediate orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.RemediateStart|Remediating object [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateVmOnUnsupportedHost|Could not remediate {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediationStatusEvent|Current progress of remediation: [data.noOfSucceededHosts] hosts completed successfully, [data.noOfFailedHosts] hosts completed with errors, [data.noOfHostsBeingRemediatedCurrently] hosts are being remediated, [data.noOfWaitingHosts] hosts are waiting to start remediation, and [data.noOfRetryHosts] hosts could not enter maintenance mode and are waiting to retry.ExtendedEventinfocom.vmware.vcIntegrity.Scan|Successfully scanned [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanCancelled|Scanning of [data.name] is canceled by user.ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedHost|Could not scan {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedVm|Could not scan {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDpmDisableHost|Could not scan host {host.name} because its power state is invalid. 
The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.ScanFailed|Could not scan [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostInvalidPowerState|Cannot scan the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostOnUnsupportedHost|Could not scan {host.name} for patches because it is of unsupported version [data.version].ExtendedEventwarningcom.vmware.vcIntegrity.ScanMissingUpdate|Found a missing patch: [data.message] when scanning [data.name]. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.ScanOrphanedVm|Could not scan orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.ScanStart|Scanning object [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.ScanUnsupportedVolume|{vm.name} contains an unsupported volume [data.volumeLabel]. Scan results for this VM might be incomplete.ExtendedEventerrorcom.vmware.vcIntegrity.ScanVmOnUnsupportedHost|Could not scan {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventerrorcom.vmware.vcIntegrity.SequentialRemediateFailedEvent|An error occurred during the sequential remediation of hosts in cluster {computeResource.name}. Check the related events for more details.ExtendedEventinfocom.vmware.vcIntegrity.SkipSuspendedVm|Suspended VM {vm.name} has been skipped.ExtendedEventinfocom.vmware.vcIntegrity.Stage|Staging succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.StageDisconnectedHost|Could not stage patches to {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageDpmDisableHost|Could not stage patches to host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.StageFailed|Staging did not succeed for [data.name][data.message].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostInvalidPowerState|Cannot stage patches to the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostOnUnsupportedHost|Could not stage patches to {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.StageStart|Staging patches to host [data.name].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdate|Started staging of patches [data.updateId] on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdateComplete|Staging of patch to [data.entityName] succeeded.ExtendedEventerrorcom.vmware.vcIntegrity.StageUpdateError|Cannot stage patch [data.updateId] to [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.SysprepDisabled|Sysprep is disabled during the remediation.ExtendedEventinfocom.vmware.vcIntegrity.SysprepEnabled|Sysprep settings are restored.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepHandleFailure|Cannot access the sysprep settings for VM {vm.name}. Retry the operation after disabling sysprep for the VM.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepNotFound|Cannot locate the sysprep settings for VM {vm.name}. For Windows 7 and Windows 2008 R2, offline VM remediation is supported only if the system volume is present in the primary disk partition. 
Retry the operation after disabling sysprep for the VM.ExtendedEventinfocom.vmware.vcIntegrity.ToolsRemediate|VMware Tools upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ToolsRemediateFailed|VMware Tools upgrade did not succeed for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.ToolsScan|Successfully scanned [data.name] for VMware Tools upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.ToolsScanFailed|Could not scan [data.name] for VMware Tools upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsScanInstallNotSupported|VMware Tools is not installed on [data.name]. VMware vSphere Lifecycle Manager supports upgrading only an existing VMware Tools installation.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeRemediateSkippedOnHost|VMware Tools upgrade was not performed on {vm.name}. VMware Tools upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade is not supported for virtual appliances.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeScanSkippedOnHost|VMware Tools upgrade scan was not performed on {vm.name}. VMware Tools upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedHostRemediateSpecialVMEvent|The host [data.name] has a VM [data.vm] with VMware vSphere Lifecycle Manager or VMware vCenter Server installed. The VM must be moved to another host for the remediation to proceed.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedLinuxAction|Action is not supported for Linux VM/VA {vm.name}. VMware Tools is not installed or the machine cannot start.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedOs|Scan or remediation is not supported on [data.name] because of unsupported OS [data.os].ExtendedEventinfocom.vmware.vcIntegrity.UnsupportedPXEBootHost|Scanning, remediation, and staging are not supported on PXE booted ESXi hosts.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedSpecialVMEvent|VM [data.name] has either VMware vSphere Lifecycle Manager or VMware vCenter Server installed. This VM will be ignored for scan and remediation.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedVaAction|Action is not supported for offline or suspended virtual appliance {vm.name}. ExtendedEventerrorcom.vmware.vcIntegrity.VAAutoUpdateOn|Auto update is set to ON for virtual appliance [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADiscovery|Successfully discovered virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADiscoveryFailed|Could not discover virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadGenericFailure|Could not download virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadInvalidUrl|[data.name] is not a valid virtual appliance download URL.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadMetadataFailure|Could not download virtual appliance upgrade metadata for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADownloadSuccess|Successfully downloaded virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VARepositoryAddressNotSet|No repository address is set for virtual appliance [data.name]. 
The appliance does not support updates by vCenter Server.ExtendedEventinfocom.vmware.vcIntegrity.VAScan|Successfully scanned [data.name] for VA upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VAScanFailed|Could not scan [data.name] for VA upgrades.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeRemediate|Virtual Hardware upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateFailed|Could not perform Virtual Hardware upgrade on [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateSkippedOnHost|Virtual Hardware upgrade was not performed for {vm.name}. Virtual Hardware upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade is not supported for virtual appliances.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeScan|Successfully scanned [data.name] for Virtual Hardware upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeScanFailed|Could not scan [data.name] for Virtual Hardware upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeScanSkippedOnHost|Virtual Hardware upgrade scan was not performed for {vm.name}. Virtual Hardware upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotInstalled|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not installed. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotLatest|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not the latest version supported by the host. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnknown|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is unknown. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnmanaged|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is not managed by VMware vSphere. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsAutoUpgradeUnsupported|The version of VMware Tools installed in {vm.name} does not support automatic upgrade. Upgrade VMware Tools manually.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsNotRunning|Error while waiting for VMware Tools to respond. Verify that VMware Tools is running in VM {vm.name}.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForInstall|Patch [data.inputBulletin] was excluded from the remediation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the remediation. You can also add the baselines to a baseline group for convenience and perform the remediation.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForStage|Patch [data.inputBulletin] was excluded from the stage operation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the stage operation. 
You can also add the baselines to a baseline group for convenience and perform the stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.VmDevicesRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original removable device connection settings for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the settings for the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmMigrationFailedEvent|Cannot migrate VM {vm.name} from [data.srcHost] to [data.destHost].ExtendedEventerrorcom.vmware.vcIntegrity.VmPowerRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original power state for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the original power state of the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmotionCompatibilityCheckFailedEvent|Cannot check compatibility of the VM {vm.name} for migration with vMotion to host [data.hostName].EventExAgency createdinfocom.vmware.vim.eam.agency.create|{agencyName} created by {ownerName}EventExAgency destroyedinfocom.vmware.vim.eam.agency.destroyed|{agencyName} removed from the vSphere ESX Agent ManagerEventExAgency state changedinfocom.vmware.vim.eam.agency.goalstate|{agencyName} changed goal state from {oldGoalState} to {newGoalState}EventExAgency status changedinfocom.vmware.vim.eam.agency.statusChanged|Agency status changed from {oldStatus} to {newStatus}EventExAgency reconfiguredinfocom.vmware.vim.eam.agency.updated|Configuration updated {agencyName}EventExCluster Agent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterPowerOn|Cluster Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterProvisioning|Cluster Agent VM {vm.name} has been provisioned. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM is about to be powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailablePrePowerOn|Cluster Agent VM {vm.name} is about to be powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent added to hostinfocom.vmware.vim.eam.agent.created|Agent added to host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyed|Agent removed from host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyedNoHost|Agent removed from host ({agencyName})EventExAgent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterPowerOn|Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterProvisioning|Agent VM {vm.name} has been provisioned. 
Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent status changedinfocom.vmware.vim.eam.agent.statusChanged|Agent status changed from {oldStatus} to {newStatus}EventExAgent VM is deletedinfocom.vmware.vim.eam.agent.task.deleteVm|Agent VM {vmName} is deleted on host {host.name} ({agencyName})EventExAgent VM is provisionedinfocom.vmware.vim.eam.agent.task.deployVm|Agent VM {vm.name} is provisioned on host {host.name} ({agencyName})EventExAgent VM powered offinfocom.vmware.vim.eam.agent.task.powerOffVm|Agent VM {vm.name} powered off, on host {host.name} ({agencyName})EventExAgent VM powered oninfocom.vmware.vim.eam.agent.task.powerOnVm|Agent VM {vm.name} powered on, on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibInstalled|Agent installed VIB {vib} on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibUninstalled|Agent uninstalled VIB {vib} on host {host.name} ({agencyName})EventExwarningcom.vmware.vim.eam.issue.agencyDisabled|Agency is disabledEventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentOVF|Unable to access agent OVF package at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentVib|Unable to access agent VIB module at {url} ({agencyName})EventExcom.vmware.vim.eam.issue.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInMaintenanceMode|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterResources|Cluster Agent VM cannot be powered on due to insufficient resources on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterSpace|Cluster Agent VM on cluster {computeResource.name} cannot be provisioned due to insufficient space on cluster datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.invalidConfig|Cluster Agent VM {vm.name} on cluster {computeResource.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmDatastore|Cluster Agent VM datastore(s) {customAgentVmDatastoreName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmNetwork|Cluster Agent VM network(s) {customAgentVmNetworkName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.ovfInvalidProperty|OVF environment used to provision cluster Agent VM on cluster {computeResource.name} has one or more invalid properties ({agencyName})EventExcom.vmware.vim.eam.issue.cluster.agent.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotDeployed|Cluster Agent VM is missing on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotRemoved|Cluster Agent VM {vm.name} is provisioned when it should be removed ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOff|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOn|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmSuspended|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be 
powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostInMaintenanceMode|Agent cannot complete an operation since the host {host.name} is in maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.hostInStandbyMode|Agent cannot complete an operation since the host {host.name} is in standby mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostNotReachable|Host {host.name} must be powered on and connected to complete agent operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.immediateHostRebootRequired|Host {host.name} must be rebooted immediately to unblock agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.incompatibleHostVersion|Agent is not deployed due to incompatible host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientResources|Agent cannot be provisioned due to insufficient resources on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientSpace|Agent on {host.name} cannot be provisioned due to insufficient space on datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotDeleteSoftware|Cannot remove the Baseline associated with agency {agencyName} from VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotStageSoftware|The software defined by agency {agencyName} cannot be staged in VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.vUMUnavailable|VMware Update Manager was unavailable during agency {agencyName} operationsEventExerrorcom.vmware.vim.eam.issue.invalidConfig|Agent VM {vm.name} on host {host.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmDatastore|No agent datastore configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmNetwork|No agent network configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmDatastore|Agent datastore(s) {customAgentVmDatastoreName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmNetwork|Agent network(s) {customAgentVmNetworkName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmDatastore|Agent datastore cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmNetwork|Agent network(s) cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidFormat|OVF used to provision agent on host {host.name} has invalid format ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidProperty|OVF environment used to provision agent on host {host.name} has one or more invalid properties ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotConfigureSolutions|The required solutions defined by agency {agencyName} cannot be configured in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotUploadDepot|Software defined by agency {agencyName} cannot be uploaded in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.inaccessibleDepot|Unable to access software defined by agency {agencyName}EventExerrorcom.vmware.vim.eam.issue.personality.agency.invalidDepot|Software defined by agency {agencyName} contains invalid vSphere Lifecycle Manager related 
metadataEventExerrorcom.vmware.vim.eam.issue.personality.agency.pMUnavailable|vSphere Lifecycle Manager was unavailable during agency {agencyName} operationsEventExinfocom.vmware.vim.eam.issue.personality.agent.awaitingPMRemediation|Agent requires application of configured solutions through vSphere Lifecycle Manager on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agent.blockedByAgencyOperation|Agency issues related to vSphere Lifecycle Manager require resolution to unblock host {host.name} ({agencyName})EventExinfocom.vmware.vim.eam.issue.resolved|Issue {type} resolved (key {key})EventExerrorcom.vmware.vim.eam.issue.vibCannotPutHostInMaintenanceMode|Cannot put host into maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.vibCannotPutHostOutOfMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.vibDependenciesNotMetByHost|VIB module dependencies for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibInvalidFormat|Invalid format for VIB module at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibNotInstalled|VIB module for agent is not installed/removed on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequirementsNotMetByHost|VIB system requirements for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostInMaintenanceMode|Host must be put into maintenance mode to complete agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostReboot|Host {host.name} must be rebooted to complete agent VIB installation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualInstallation|VIB {vib} requires manual installation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualUninstallation|VIB {vib} requires manual uninstallation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmCorrupted|Agent VM {vm.name} on host {host.name} is corrupted ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmDeployed|Agent VM {vm.name} is provisioned on host {host.name} when it should be removed ({agencyName})EventExcom.vmware.vim.eam.issue.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.vmNotDeployed|Agent VM is missing on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmOrphaned|Orphaned agent VM {vm.name} on host {host.name} detected ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOff|Agent VM {vm.name} on host {host.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOn|Agent VM {vm.name} on host {host.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmRequiresHostOutOfMaintenanceMode|Agent cannot deploy Agent VM since the host {host.name} is in maintenance mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmSuspended|Agent VM {vm.name} on host {host.name} is expected to be powered on but is suspended ({agencyName})ExtendedEventInvalid loginwarningcom.vmware.vim.eam.login.invalid|Failed login to vSphere ESX Agent ManagerEventExSuccessful login to vSphere ESX Agent Managerinfocom.vmware.vim.eam.login.succeeded|Successful login by {user} into vSphere ESX Agent ManagerEventExUser logged out of vSphere ESX Agent Managerinfocom.vmware.vim.eam.logout|User {user} logged out of vSphere ESX Agent Manager by logging out of the vCenter serverEventExUnauthorized access in vSphere ESX Agent 
Managerwarningcom.vmware.vim.eam.unauthorized.access|Unauthorized access by {user} in vSphere ESX Agent ManagerEventExChecked in virtual machine into a virtual machine template iteminfocom.vmware.vmtx.LibraryItemCheckInEvent|Checked in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to check in virtual machine into a virtual machine template itemerrorcom.vmware.vmtx.LibraryItemCheckInFailEvent|Failed to check in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'EventExDeleted the virtual machine checked out from the VM template iteminfocom.vmware.vmtx.LibraryItemCheckOutDeleteEvent|Deleted the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExFailed to delete the virtual machine checked out from the VM template itemerrorcom.vmware.vmtx.LibraryItemCheckOutDeleteFailEvent|Failed to delete the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExChecked out virtual machine template item as a virtual machineinfocom.vmware.vmtx.LibraryItemCheckOutEvent|Checked out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExFailed to check out virtual machine template item as a virtual machineerrorcom.vmware.vmtx.LibraryItemCheckOutFailEvent|Failed to check out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExA virtual machine checked out from the VM template item was orphaned after restorewarningcom.vmware.vmtx.LibraryItemCheckoutOrphanedOnRestoreEvent|A virtual machine (ID: {vmId}) checked out from the VM template item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was orphaned after restoreEventExCloned virtual machine to Content Library as VM templateinfocom.vmware.vmtx.LibraryItemCreateEvent|Cloned virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExFailed to clone virtual machine to Content Library as VM templateerrorcom.vmware.vmtx.LibraryItemCreateFailEvent|Failed to clone virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExDeleted a version of the virtual machine template iteminfocom.vmware.vmtx.LibraryItemDeleteVersionEvent|Deleted VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to delete a version of the virtual machine template itemerrorcom.vmware.vmtx.LibraryItemDeleteVersionFailEvent|Failed to delete VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'EventExDeployed virtual machine from Content Libraryinfocom.vmware.vmtx.LibraryItemDeployEvent|Deployed virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExFailed to deploy virtual machine from Content Libraryerrorcom.vmware.vmtx.LibraryItemDeployFailEvent|Failed to deploy virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExRolled back virtual machine template item to a previous versioninfocom.vmware.vmtx.LibraryItemRollbackEvent|Rolled back library item '{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'ExtendedEventFailed to roll back virtual machine template item to a previous versionerrorcom.vmware.vmtx.LibraryItemRollbackFailEvent|Failed to roll back library item 
'{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'EventExA virtual machine template managed by Content Library was converted to a virtual machineerrorcom.vmware.vmtx.LibraryItemTemplateConvertedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was converted to a virtual machineEventExA virtual machine template managed by Content Library was converted to a virtual machine after restorewarningcom.vmware.vmtx.LibraryItemTemplateConvertedOnRestoreEvent|The virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was found converted to a virtual machine after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was deletedEventExCould not locate a virtual machine template managed by Content Library after restorewarningcom.vmware.vmtx.LibraryItemTemplateDeletedOnRestoreEvent|Could not locate the virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateLatestVersionDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) is rolled back to the previous version because the latest VM template (ID: {vmId}) was deletedEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplatePreviousVersionDeletedEvent|Previous VM template (ID: {vmId}) of the library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was deletedEventExA virtual machine template managed by Content Library was renamedwarningcom.vmware.vmtx.LibraryItemTemplateRenamedEvent|The name of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will change to '{newItemName}' because the virtual machine template (ID: {vmId}) that the item manages was renamedExtendedEventAdded witness host to the cluster.infoAdded witness host to the cluster.com.vmware.vsan.clusterconfig.events.witnessadditiondone|Added witness host to the cluster.ExtendedEventRemoved witness host from the cluster.infoRemoved witness host from the cluster.com.vmware.vsan.clusterconfig.events.witnessremovaldone|Removed witness host from the cluster.ExtendedEventAdd disk group back to the vSAN cluster.infoAdd disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.adddisks|Add disk group back to the vSAN cluster on host {host.name}.ExtendedEventFailed to add disk group back to the vSAN cluster.errorFailed to add disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.addfail|Failed to add disk group back to the vSAN cluster on host {host.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is done.com.vmware.vsan.diskconversion.events.formatdone|Disk format conversion is done on cluster {computeResource.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is 
done.com.vmware.vsan.diskconversion.events.formathostdone|Disk format conversion is done on host {host.name}.ExtendedEventFailed to migrate vsanSparse objects.errorFailed to migrate vsanSparse objects.com.vmware.vsan.diskconversion.events.migrationfail|Failed to migrate vsanSparse objects on cluster {computeResource.name}.ExtendedEventNo disk conversion performed, all mounted disk groups on host are compliantinfoNo disk conversion performed, all mounted disk groups on host are compliant.com.vmware.vsan.diskconversion.events.noneed|No disk conversion performed, all mounted disk groups on host {host.name} are already compliant.ExtendedEventCheck existing objects on the vSAN cluster.infoCheck existing objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objectcheck|Check existing objects on the vSAN cluster.ExtendedEventObject conversion is done.infoObject conversion is done.com.vmware.vsan.diskconversion.events.objectdone|Object conversion is done.ExtendedEventFailed to convert objects on the vSAN cluster.errorFailed to convert objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objecterror|Failed to convert objects on the vSAN cluster.ExtendedEventRemove disk group from the vSAN cluster.infoRemove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removedisks|Remove disk group from the vSAN cluster on host {host.name}.ExtendedEventFailed to remove disk group from the vSAN cluster.errorFailed to remove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removefail|Failed to remove disk group on host {host.name} from the vSAN cluster.ExtendedEventRestore disk group from last break point.infoRestore disk group from last break point.com.vmware.vsan.diskconversion.events.restore|Restore disk group from last break point.ExtendedEventNo disk conversion performed, host has no mounted disk groups.infoNo disk conversion performed, host has no mounted disk groups.com.vmware.vsan.diskconversion.events.skiphost|No disk conversion performed, host {host.name} has no mounted disk groups.ExtendedEventCheck cluster status for disk format conversion.infoCheck cluster status for disk format conversion.com.vmware.vsan.diskconversion.events.statuscheck|Check status of cluster {computeResource.name} status for disk format conversion.ExtendedEventcom.vmware.vsan.diskconversion.events.syncingtimeout|ExtendedEventUpdate the vSAN cluster system settings.infoUpdate the vSAN cluster system settings.com.vmware.vsan.diskconversion.events.updatesetting|Update the vSAN cluster system settings on host {host.name}.ExtendedEventDisk format conversion failed in what if upgrade.infoDisk format conversion failed in what if upgrade check.com.vmware.vsan.diskconversion.events.whatifupgradefailed|Disk format conversion failed in what if upgrade check.EventExMark ssd(s) as capacity flash.infoMark {disks} as capacity flash.com.vmware.vsan.diskmgmt.events.tagcapacityflash|Mark {disks} as capacity flash.EventExMark ssd as hdd.infoMark ssd {disk} as hdd.com.vmware.vsan.diskmgmt.events.taghdd|Mark ssd {disk} as hdd.EventExMark remote disk as local disk.infoMark remote disk {disk} as local disk.com.vmware.vsan.diskmgmt.events.taglocal|Mark remote disk {disk} as local disk.EventExMark hdd as ssd.infoMark hdd {disk} as ssd.com.vmware.vsan.diskmgmt.events.tagssd|Mark hdd {disk} as ssd.EventExRemove capacity flash mark from ssd(s).infoRemove capacity flash mark from {disks}.com.vmware.vsan.diskmgmt.events.untagcapacityflash|Remove capacity flash mark from 
{disks}.EventExAdvisorvSAN Health Test 'Advisor' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.advisor.event|vSAN Health Test 'Advisor' changed from '{prestatus}' to '{curstatus}'EventExAudit CEIP Collected DatavSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.auditceip.event|vSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'EventExCNS Critical Alert - Patch available with important fixesvSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.cnspatchalert.event|vSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.controllercacheconfig.event|vSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExCoredump partition size checkvSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.coredumpartitionsize.event|vSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'EventExUpgrade vSphere CSI driver with cautionvSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.csidriver.event|vSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.diskusage.event|vSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dualencryption.event|vSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExProper vSAN network traffic shaping policy is configuredvSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dvsportspeedlimit.event|vSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'EventExEnd of general support for lower vSphere versionvSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.eoscheck.event|vSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'EventExImportant patch available for vSAN issuevSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.fsvlcmpatchalert.event|vSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based 
controllervSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.h730.event|vSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExHPE SAS Solid State DrivevSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.hpesasssd.event|vSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration check for large scale clustervSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.largescalecluster.event|vSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lavenderalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExvSAN critical alert regarding a potential data inconsistencyvSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilacdeltacomponenttest.event|vSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Critical Alert - Patch available for critical vSAN issuevSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilypatchalert.event|vSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.marigoldalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmode.event|vSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmodeh730.event|vSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvSAN storage policy compliance up-to-datevSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.objspbm.event|vSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN online health test 'vSAN Hosts with new patch 
available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.patchalert.event|vSAN online health test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.pnicconsistent.event|vSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExVM storage policy is not-recommendedvSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.policyupdate.event|vSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'EventExMaximum host number in vSAN over RDMAvSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.rdmanodesalert.event|vSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExESXi system logs stored outside vSAN datastorevSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.scratchconfig.event|vSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.smalldiskstest.event|vSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExThick-provisioned VMs on vSANvSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.thickprovision.event|vSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExFix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabledvSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.unmaptest.event|vSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'EventExvSAN v1 disk in usevSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.v1diskcheck.event|vSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'EventExvCenter Server up to datevSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vcuptodate.event|vSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'EventExMultiple VMs share the same vSAN home namespacevSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vmns.event|vSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Support InsightvSAN Support Insight's 
status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanenablesupportinsight.event|vSAN Support Insight's status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpefwtest.event|vSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExCustomer advisory for HPE Smart ArrayvSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpesmartarraytest.event|vSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'EventExvSAN management service resource checkvSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanmgmtresource.event|vSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.witnesshw.event|vSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Advanced Configuration Check for Urgent vSAN ESA PatchvSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.zdomadvcfgenabled.event|vSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all of the hosts in a vSAN cluster have consistent advanced configuration options.vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.advcfgsync.event|vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN host daemon liveness.vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clomdliveness.event|vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSphere cluster members match vSAN cluster members.vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clustermembership.event|vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster configuration consistencyvSAN Health Test 'vSAN cluster configuration consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.consistentconfig.event|vSAN Health Test 'vSAN configuration consistency' status changed from '{prestatus}' to '{curstatus}'EventExESA prescriptive disk claimvSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.ddsconfig.event|vSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to 
'{curstatus}'EventExvSAN disk group layoutvSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.dglayout.event|vSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN disk balance statusvSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.diskbalance.event|vSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Conversion HealthvSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.esaconversionhealth.event|vSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN extended configuration in syncvSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.extendedconfig.event|vSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Managed disk claimvSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.hcldiskclaimcheck.event|vSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'EventExCheck host maintenance mode is in sync with vSAN node decommission state.vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.mmdecominsync.event|vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'EventExvSAN optimal datastore default policy configurationvSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.optimaldsdefaultpolicy.event|vSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN with RDMA supports up to 32 hosts.vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.rdmanodes.event|vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExResync operations throttlingvSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.resynclimit.event|vSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN Cluster time sync status among hosts and VCvSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.timedrift.event|vSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'EventExvSAN disk format statusvSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradelowerhosts.event|vSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'EventExSoftware version compatibilityvSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradesoftware.event|vSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to 
'{curstatus}'EventExVMware vCenter state is authoritativevSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vcauthoritative.event|vSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Direct homogeneous disk claimingvSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsandconfigconsistency.event|vSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'EventExvSphere Lifecycle Manager (vLCM) configurationvSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsanesavlcmcheck.event|vSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks the object format status of all vSAN objects.vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objectformat.event|vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health status of all vSAN objects.vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objecthealth.event|vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'EventExpNic RX/TX PauseRX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.pausecount.event|RX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX CRC ErrorRX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxcrcerr.event|RX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Generic ErrorRX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxerr.event|RX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX FIFO ErrorRX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxfifoerr.event|RX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Missed ErrorRX missed error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxmisserr.event|RX missed error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Buffer Overflow ErrorRX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxoverr.event|RX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Carrier ErrorTX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txcarerr.event|TX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Generic ErrorTX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txerr.event|TX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExRDT Checksum Mismatch ErrorRDT Checksum Mismatch count reaches {value}. (warning threshold: {yellowThreshold}, critical threshold: {redThreshold})vsan.health.test.diagnostics.rdt.checksummismatchcount.event|RDT Checksum Mismatch count reaches {value}. 
(warning threshold: {yellowThreshold}, critical threshold: {redThreshold})EventExData-in-transit encryption configuration checkvSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.ditencryption.ditconfig.event|vSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.dualencryption.event|vSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExChecks if CPU AES-NI is disabled on hostsvSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.hostcpuaesni.event|vSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VMware vCenter or any hosts are not connected to Key Management ServersvSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.kmsconnection.event|vSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Prescriptive Disk Claim ConfigurationsHost {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. Please add host with relevant disks or update disk claim specsvsan.health.test.esaprescriptivediskclaim.noeligibledisk|Host {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. 
Please add host with relevant disks or update disk claim specsEventExCheck vSAN File Service host file server agent vm state.vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.fileserver.event|vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'EventExInfrastructure HealthvSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.host.event|vSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'EventExFile Share HealthvSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.sharehealth.event|vSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'EventExVDS compliance check for hyperconverged cluster configurationvSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.dvshciconfig.event|vSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExHost compliance check for hyperconverged cluster configurationvSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.hosthciconfig.event|vSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN health alarm enablement statusvSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hciskip.event|vSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'EventExvSAN HCL DB Auto UpdatevSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.autohclupdate.event|vSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllercacheconfig.event|vSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN disk group type (All-Flash or Hybrid) is VMware certified for the used SCSI controllervSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdiskmode.event|vSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller driver is VMware certified.vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdriver.event|vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller firmware is VMware certified.vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerfirmware.event|vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller is compatible with the VMWARE Compatibility GuidevSAN Health Test 'SCSI 
controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controlleronhcl.event|vSAN Health Test 'SCSI controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExDisplays information about whether there is any driver supported for a given controller in the release of ESXi installed.vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerreleasesupport.event|vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based controllervSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.h730.event|vSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks the age of the VMware Hardware Compatibility Guid database.vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hcldbuptodate.event|vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any host failed to return its hardware information.vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hclhostbadstate.event|vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'EventExHost physical memory compliance checkvSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hostmemcheck.event|vSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmode.event|vSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmodeh730.event|vSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvsan.health.test.hcl.nvmeonhcl.event|EventExNetwork Interface Cards (NICs) used in vSAN hosts must meet certain requirements. These NIC requirements assume that the packet loss is not more than 0.0001% in the hyper-converged environments. It's recommended to use NIC which link speed can meet the minimum requirement. 
Otherwise, there can be a drastic impact on the vSAN performance.vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.pniclinkspeed.event|vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NICs used in this RDMA enabled vSAN cluster are certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmaniciscertified.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NIC's driver and firmware combination is certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportdriverfirmware.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the current ESXi release is certified for the RDMA NIC by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportesxrelease.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.vsanhpefwtest.event|vSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExHome objectvSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsihomeobjectstatustest.event|vSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExLUN runtime healthvSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsilunruntimetest.event|vSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExNetwork configurationvSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicenetworktest.event|vSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExService runtime statusvSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicerunningtest.event|vSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster claimed capacity is more than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.error.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. 
Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExvSAN cluster claimed capacity is less than the entitled capacity.vSAN cluster claimed capacity is less than the entitled capacity.vsan.health.test.licensecapacityusage.green.event|vSAN cluster claimed capacity is less than the entitled capacity.EventExvSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.warn.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExChecks the vSAN cluster storage space utilizationvSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.diskspace.event|vSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN component limits, disk space and RC reservations assuming one host failure.vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.limit1hf.event|vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'EventExChecks the component utilization for the vSAN cluster and each host in the cluster.vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.nodecomponentlimit.event|vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN cluster read cache utilizationvSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.rcreservation.event|vSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN cluster is partitioned due to a network issue.vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.clusterpartition.event|vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExCheck if there are duplicate IP addresses configured for vmknic interfaces.vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.duplicateip.event|vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a connectivity check for vSAN Max Client Network by checking the heartbeats from each host to all other hosts in server clustervSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.externalconnectivity.event|vSAN Health Test 'vSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if API calls from VC to a host are failing while the host is in vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostconnectivity.event|vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VC has an active 
connection to all hosts in the cluster.vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostdisconnected.event|vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a network latency check via ping small packet size ping test from all hosts to all other hostsvSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostlatencycheck.event|vSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN API calls from each host can reach to other peer hosts in the clustervSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.interhostconnectivity.event|vSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExCheck if LACP is working properly.vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.lacpstatus.event|vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hostsvSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.largeping.event|vSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster receive the multicast heartbeat of the vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastdeepdive.event|vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have matching IP multicast configuration.vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsettings.event|vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any of the hosts in the vSAN cluster have IP multicast connectivity issue.vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsuspected.event|vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'EventExCheck if any host in remote vSAN client or server cluster has more than one vSAN vmknic configured.vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multiplevsanvmknic.event|vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.pnicconsistent.event|vSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExCheck if TSO is enabled for pNIC.vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.network.pnictso.event|vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to '{curstatus}'EventExCheck if the vSAN RDMA enabled physical NIC is configured for lossless traffic.vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.rdmaconfig.event|vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all hosts in client cluster have been in a single partition with all hosts in server vSAN cluster.vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.serverpartition.event|vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hostsvSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.smallping.event|vSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpinglarge.event|vSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpingsmall.event|vSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExCheck if all hosts in server cluster have a dedicated vSAN external vmknic configured.vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanexternalvmknic.event|vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have a configured vmknic with vSAN traffic enabled.vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanvmknic.event|vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'EventExCheck all remote VMware vCenter network connectivity.vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.xvcconnectivity.event|vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'EventExvSAN overall health statusvSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.overallsummary.event|vSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service data collectionvSAN Health Test 'Checks the statistics collection of the vSAN performance service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.collection.event|vSAN 
Health Test 'Checks statistics collection of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service network diagnostic mode statusvSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.diagmode.event|vSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'EventExNot all hosts are contributing stats to vSAN Performance ServicevSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.hostsmissing.event|vSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats primary electionvSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.masterexist.event|vSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service statusvSAN Health Test 'Checks status of vSAN Performance Service changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.perfsvcstatus.event|vSAN Health Test 'Checks status of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats DB object conflictsvSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.renameddirs.event|vSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the vSAN performance service statistics database objectvSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.statsdb.event|vSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service verbose mode statusvSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.verbosemode.event|vSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN has encountered an integrity issue of the metadata of a component on this disk.vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.componentmetadata.event|vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.diskusage.event|vSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on vital memory pools, needed for the correct operation of physical disks.vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.lsomheap.event|vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on the vital memory pool, needed for the operation of physical disks.vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.physicaldisks.lsomslab.event|vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to '{curstatus}'EventExStorage Vendor Reported Drive HealthvSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.phmhealth.event|vSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the free space on physical disks in the vSAN cluster.vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcapacity.event|vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the number of components on the physical disk reaches the maximum limitationvSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcomplimithealth.event|vSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN is using the disk with reduced performance.vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcongestion.event|vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'EventExChecks if there is an issue retrieving the physical disk information from hosts in the vSAN cluster.vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskhostissues.event|vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the physical disks for all hosts in the vSAN cluster.vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskoverall.event|vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.smalldiskstest.event|vSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExCluster Name is not found in ssd endurance alarmClusters - {clustername} is/are not found in alarm - vSAN Health Alarm for disk endurance check.vsan.health.test.ssdendurance.clusternotfound.event|Clusters - {clustername} is/are not found. Please edit alarm - 'vSAN Health Alarm for disk endurance check' and correct the cluster name.EventExThe stretched cluster contains multiple unicast agents. 
This means multiple unicast agents were set on non-witness hostsvSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithmultipleunicastagents.event|vSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain a valid witness hostvSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithoutonewitnesshost.event|vSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain two valid fault domainsvSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithouttwodatafaultdomains.event|vSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'EventExHost should setup unicast agent so that they are able to communicate with the witness nodevSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostunicastagentunset.event|vSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'EventExHost with an invalid unicast agentvsan.health.test.stretchedcluster.hostwithinvalidunicastagent.event|vSAN Health Test 'Invalid unicast agent' status changed from '{prestatus}' to '{curstatus}'EventExCluster contains hosts that do not support stretched clustervSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostwithnostretchedclustersupport.event|vSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'EventExUnexpected number of data hosts in shared witness cluster. 
This means more than 2 data hosts in one shared witness cluster.vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnessclusterdatahostnumexceed.event|vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'EventExPer cluster component limit scaled down for shared witness host because of insufficient memoryvSAN Health Test 'Shared witness per cluster component limit scaled down' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnesscomponentlimitscaleddown.event|vSAN Health Test 'Shared witness per-cluster component limit inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExChecks the network latency between the two fault domains and the witness hostvSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.siteconnectivity.event|vSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'EventExWitness node is managed by vSphere Lifecycle ManagervSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.vlcmwitnessconfig.event|vSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'EventExThe following witness node resides in one of the data fault domainsvSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessfaultdomaininvalid.event|vSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster incorporates a witness host inside VMware vCenter clustervSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessinsidevccluster.event|vSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'EventExThe following (witness) hosts have invalid preferred fault domainsvSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomaininvalid.event|vSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'EventExThe preferred fault domain does not exist in the cluster for the following witness hostvSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomainnotexist.event|vSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevsan.health.test.stretchedcluster.witnessupgissue.event|vSAN Health Test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExWitness appliance upgrade to vSphere 7.0 or higher with cautionvsan.health.test.stretchedcluster.witnessupgrade.event|vSAN Health Test 'Witness appliance upgrade to vSphere 7.0 or higher with caution' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster contains witness hosts with no disk claimedvSAN Health Test 'No disk 
claimed on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesswithnodiskmapping.event|vSAN Health Test 'No disk claimed on witness host' status changed from '{prestatus}' to '{curstatus}'EventExVMware Certified vSAN HardwarevSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vsanhardwarecert.event|vSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.patchalert.event|vSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExvSAN release catalog up-to-datevSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.releasecataloguptodate.event|vSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'EventExCheck configuration issues for vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumconfig.event|vSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'EventExESXi build recommended by vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumrecommendation.event|vSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'EventExThis object has the risk of PSOD issue due to improper DOM object flag leakThis object has the risk of PSOD issue due to improper DOM object flag leakvsan.health.test.zdom.leak|Objects {1} have the risk of PSOD issue due to improper DOM object flag leak. Please refer KB https://kb.vmware.com/s/article/89564VirtualMachineFaultToleranceStateFault Tolerance has not been configured for this virtual machinenotConfiguredFault Tolerance is disableddisabledFault Tolerance is enabledenabledFault Tolerant Secondary VM is not runningneedSecondaryFault Tolerance is startingstartingFault Tolerance is runningrunning
12855:20241101:185744.066 End of vmware_service_get_evt_severity() evt_severities:1989
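The line above closes vmware_service_get_evt_severity() with 1989 known event severities; the tag-stripped payload preceding it lists, among others, the vSAN health event types (keys such as vsan.health.test.cloudhealth.advisor.event) together with their message templates. Below is a minimal sketch for pulling those keys out of a saved copy of such a dump, assuming the excerpt is stored as plain text; the file name, regex and variable names are illustrative and not part of Zabbix.

```python
import re
from collections import Counter

# Illustrative path to a saved copy of the trace excerpt above.
LOG_DUMP = "vmware_trace_excerpt.log"

# vSAN health event keys as they appear in the tag-stripped payload,
# e.g. "vsan.health.test.cloudhealth.advisor.event".
EVENT_KEY = re.compile(r"vsan\.health\.test\.[a-z0-9.]+?\.event")

with open(LOG_DUMP, encoding="utf-8") as fh:
    text = fh.read()

keys = EVENT_KEY.findall(text)
print(f"distinct vSAN health event keys: {len(set(keys))}")
for key, count in Counter(keys).most_common(5):
    print(f"{count:3d}  {key}")
```

Note that evt_severities:1989 appears to cover all event types returned by vCenter, not only the vSAN health subset visible in this excerpt.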
12855:20241101:185744.067 In vmware_service_get_hv_ds_dc_dvs_list()
12855:20241101:185744.072 vmware_service_get_hv_ds_dc_dvs_list() SOAP response:
group-d1triggeredAlarmState365.1group-d1alarm-365yellowfalse39701datacenter-3nameNTK-corptriggeredAlarmStategroup-n7triggeredAlarmStategroup-h5triggeredAlarmStatedatastore-4041datastore-4050datastore-4046datastore-2007datastore-2006datastore-2005group-v4triggeredAlarmStategroup-n4029triggeredAlarmStategroup-v11triggeredAlarmStategroup-v4027triggeredAlarmStatedvs-21nameNTK-DSwitchuuid50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbgroup-v4056triggeredAlarmStatehost-4047host-4043host-4038
12855:20241101:185744.072 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_datacenters_list'
12855:20241101:185744.072 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_datacenters_list' found:0 total:0
12855:20241101:185744.072 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_hv_ds_dc_dvs_list'
12855:20241101:185744.072 In vmware_service_alarm_details_update() alarm:alarm-365
12855:20241101:185744.075 vmware_service_alarm_details_update() SOAP response:
alarm-365info.descriptionThis alarm is fired when vSphere Health detects new issues in your environment. This alarm will be retriggered even if acknowledged when new issues are detected. Go to Monitor -> Health for a detailed description of the issues.info.enabledtrueinfo.nameSkyline Health has detected issues in your vSphere environmentinfo.systemNameSkyline Health has detected issues in your vSphere environment
12855:20241101:185744.075 End of vmware_service_alarm_details_update() index:0
12855:20241101:185744.075 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_hv_ds_dc_dvs_list' found:1 total:1
12855:20241101:185744.075 End of vmware_service_get_hv_ds_dc_dvs_list():SUCCEED found hv:3 ds:6 dc:1
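The summary above, found hv:3 ds:6 dc:1, matches the managed object references visible in the tag-stripped SOAP response (host-4047/4043/4038, six datastore-* IDs and datacenter-3). Below is a small cross-check sketch, assuming the response dump is available as a Python string; the prefixes are the standard vCenter managed-object ID prefixes and the function name is illustrative.

```python
import re

# Standard vCenter managed-object ID prefixes seen in the dump above.
PREFIXES = ("host", "datastore", "datacenter", "dvs")

def count_moids(soap_text: str) -> dict[str, int]:
    """Count distinct managed object IDs per type in a tag-stripped response dump."""
    return {p: len(set(re.findall(rf"{p}-\d+", soap_text))) for p in PREFIXES}

# With the response logged above this should yield
# {'host': 3, 'datastore': 6, 'datacenter': 1, 'dvs': 1},
# matching the "found hv:3 ds:6 dc:1" summary line.
```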
12855:20241101:185744.075 In vmware_service_create_datastore() datastore:'datastore-4041'
12855:20241101:185744.077 vmware_service_create_datastore() SOAP response:
datastore-4041infoLocal_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/34100425523270368744177664703687441776642024-10-24T08:57:27.792Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0334252364185616396313666.8267155e10-d4545cb2-5b01-3cecef012e78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682111______8falsetruesummarydatastore-4041Local_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12855:20241101:185744.078 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185744.078 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185744.078 End of vmware_service_create_datastore()
12855:20241101:185744.078 In vmware_service_create_datastore() datastore:'datastore-4050'
12855:20241101:185744.081 vmware_service_create_datastore() SOAP response:
datastore-4050infoLocal_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/3410042552327036874417766468169720922112703687441776642024-11-01T13:06:44.907432Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0134252364185616396313666.8267155cc9-bea5e318-19fd-ac1f6bb14c78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______8falsetruetruesummarydatastore-4050Local_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12855:20241101:185744.081 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185744.081 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185744.081 End of vmware_service_create_datastore()
12855:20241101:185744.081 In vmware_service_create_datastore() datastore:'datastore-4046'
12855:20241101:185744.083 vmware_service_create_datastore() SOAP response:
datastore-4046infoLocal_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/34100425523270368744177664703687441776642024-11-01T11:53:36.643999Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0234252364185616396313666.8267155ba7-5e9d16d6-0733-3cecef02b6e0t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682100______8falsetruesummarydatastore-4046Local_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12855:20241101:185744.084 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185744.084 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185744.084 End of vmware_service_create_datastore()
12855:20241101:185744.084 In vmware_service_create_datastore() datastore:'datastore-2007'
12855:20241101:185744.086 vmware_service_create_datastore() SOAP response:
datastore-2007info3PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/5031560478727036874417766468169720922112703687441776642024-11-01T13:06:44.904493Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID53153660247654416396313666.826704dec9-75e6c68a-c19e-9440c9831520naa.60002ac00000000000000054000228a31falsefalsefalsesummarydatastore-20073PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/53660247654450315604787242237661184truetrueVMFSnormaltriggeredAlarmState
12855:20241101:185744.086 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185744.086 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185744.086 End of vmware_service_create_datastore()
12855:20241101:185744.086 In vmware_service_create_datastore() datastore:'datastore-2006'
12855:20241101:185744.088 vmware_service_create_datastore() SOAP response:
datastore-2006info3PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/1592765972487036874417766468169720922112703687441776642024-11-01T13:06:44.898963Z7036874417766468169720922112VMFS3PAR_KARTOHA_SAS_NTK_ID53516079283814416396313666.826703d63f-3516ce66-4bee-9440c9831520naa.60002ac0000000000000042f000219831falsefalsefalsesummarydatastore-20063PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/160792838144159276597248truetrueVMFSnormaltriggeredAlarmState
12855:20241101:185744.088 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185744.088 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185744.088 End of vmware_service_create_datastore()
12855:20241101:185744.088 In vmware_service_create_datastore() datastore:'datastore-2005'
12855:20241101:185744.090 vmware_service_create_datastore() SOAP response:
datastore-2005info3PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/8543356846087036874417766468169720922112703687441776642024-11-01T18:34:30.288888Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID530_mgmt107347338854416396313666.826703d517-82086a06-cec0-9440c9831520naa.60002ac0000000000000004a000228a31falsefalsefalsesummarydatastore-20053PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/10734733885448543356846080truetrueVMFSnormaltriggeredAlarmState
12855:20241101:185744.090 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185744.090 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185744.090 End of vmware_service_create_datastore()
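
The five vmware_service_create_datastore() calls above each issue one property-collector request per datastore and pull its identity, VMFS uuid, capacity and free space out of the response (the XML tags are lost in this trace, so the values run together). Together with datastore-4041, presumably refreshed earlier in the same pass, they account for the six datastores (dss:6) seen later in vmware_hv_ds_access_update(). A minimal sketch of the kind of record each response contributes, with illustrative names rather than Zabbix's actual internal structure:

    #include <stdint.h>

    /* Illustrative only: roughly what each SOAP response above is parsed into. */
    typedef struct
    {
        char        *id;          /* managed object id, e.g. "datastore-4050" */
        char        *name;        /* e.g. "Local_ntk-m1-esxi-01" */
        char        *url;         /* "ds:///vmfs/volumes/<vmfs uuid>/" */
        char        *uuid;        /* backing VMFS uuid */
        uint64_t    capacity;     /* summary.capacity, bytes */
        uint64_t    free_space;   /* summary.freeSpace, bytes */
        int         alarms;       /* triggered alarms; found:0 for every datastore here */
    }
    vmware_ds_sketch_t;
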
12855:20241101:185744.091 In vmware_service_get_clusters_and_resourcepools()
12855:20241101:185744.093 vmware_service_get_clusters_and_resourcepools() SOAP response:
domain-c1002nameNTK-corptriggeredAlarmStateresgroup-1003nameResourcesparentdomain-c1002resourcePoolresgroup-4001resgroup-4026resgroup-4026nameNTKparentresgroup-1003resourcePoolresgroup-4001namemgmtparentresgroup-1003resourcePool
12855:20241101:185744.093 In vmware_service_process_cluster_data()
12855:20241101:185744.093 In vmware_service_get_alarms_data(), func_parent:'vmware_service_process_cluster_data'
12855:20241101:185744.093 End of vmware_service_get_alarms_data() func_parent:'vmware_service_process_cluster_data' found:0 total:1
12855:20241101:185744.093 End of vmware_service_process_cluster_data():SUCCEED cl:1 rp:3
12855:20241101:185744.093 In vmware_service_get_cluster_state() clusterid:'domain-c1002'
12855:20241101:185744.095 vmware_service_get_cluster_state() SOAP response:
domain-c1002datastoredatastore-2005datastore-2006datastore-2007datastore-4041datastore-4046datastore-4050summary.overallStatusgreen
12855:20241101:185744.095 End of vmware_service_get_cluster_state():SUCCEED
12855:20241101:185744.095 End of vmware_service_get_clusters_and_resourcepools():SUCCEED found cl:1 rp:2
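
The response at 185744.093 describes one cluster and its resource-pool tree: domain-c1002 ("NTK-corp") owns the root pool resgroup-1003 ("Resources"), which holds the two child pools resgroup-4026 ("NTK") and resgroup-4001 ("mgmt"). That layout explains why vmware_service_process_cluster_data() reports cl:1 rp:3 while the closing line counts rp:2; the drop is consistent with the root "Resources" pool being excluded from the final total. The hierarchy, written out as plain data (the identifiers are from the log, the struct is illustrative):

    struct pool { const char *id, *name, *parent; };

    static const struct pool pools[] = {
        { "resgroup-1003", "Resources", "domain-c1002"  },  /* root pool of cluster NTK-corp */
        { "resgroup-4026", "NTK",       "resgroup-1003" },
        { "resgroup-4001", "mgmt",      "resgroup-1003" },
    };
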
12855:20241101:185744.095 In vmware_service_init_hv() hvid:'host-4047'
12855:20241101:185744.095 In vmware_service_get_hv_data() guesthvid:'host-4047'
12855:20241101:185744.095 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4047false
12855:20241101:185744.106 vmware_service_get_hv_data() SOAP response:
host-4047config.network.dnsConfigfalsentk-esxi-01m1.ntk-corp.ru10.50.242.78m1.ntk-corp.ruconfig.network.ipRouteConfig.defaultGateway10.50.242.1config.network.pnickey-vim.host.PhysicalNic-vmnic0vmnic00000:1c:00.0i40en1000truefalsetrueac:1f:6b:b1:4c:783ac:1f:6b:b1:4c:7800falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic1vmnic10000:1c:00.1i40en1000truefalsetrueac:1f:6b:b1:4c:793ac:1f:6b:b1:4c:7900falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic2vmnic20000:af:00.0icen25000true25000truefalsefalse50:7c:6f:20:55:a8350:7c:6f:20:55:a800falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic3vmnic30000:af:00.1icen25000true25000truefalsefalse50:7c:6f:20:55:a9350:7c:6f:20:55:a900falsefalsefalsefalsefalsetruetrueconfig.storageDevice.scsiTopologykey-vim.host.ScsiTopology.Interface-vmhba0key-vim.host.BlockHba-vmhba0key-vim.host.ScsiTopology.Interface-vmhba1key-vim.host.BlockHba-vmhba1key-vim.host.ScsiTopology.Target-vmhba1:0:00key-vim.host.ScsiTopology.Lun-0100000000533435504e43305236383139353420202020202053414d53554e0key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.ScsiTopology.Interface-vmhba2key-vim.host.FibreChannelHba-vmhba2key-vim.host.ScsiTopology.Target-vmhba2:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:22key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.ScsiTopology.Target-vmhba2:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371key-vim.host.ScsiTopology.Interface-vmhba3key-vim.host.FibreChannelHba-vmhba3key-vim.host.ScsiTopology.Target-vmhba3:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:22key-vim.hos
t.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.ScsiTopology.Target-vmhba3:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243key-vim.host.ScsiTopology.Interface-vmhba64key-vim.host.FibreChannelHba-vmhba64key-vim.host.ScsiTopology.Interface-vmhba65key-vim.host.FibreChannelHba-vmhba65config.virtualNicManagerInfo.netConfigfaultToleranceLoggingtruevmk0faultToleranceLogging.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackmanagementtruevmk0management.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackmanagement.key-vim.host.VirtualNic-vmk0nvmeRdmatruevmk0nvmeRdma.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStacknvmeTcptruevmk0nvmeTcp.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackptpfalsevmk0ptp.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereBackupNFCtruevmk0vSphereBackupNFC.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereProvisioningtruevmk0vSphereProvisioning.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereReplicationtruevmk0vSphereReplication.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereReplicationNFCtruevmk0vSphereReplicationNFC.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 
dbdvportgroup-23017870991381500truedefaultTcpipStackvmotiontruevmk0vmotion.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvmotion.key-vim.host.VirtualNic-vmk0vsantruevmk0vsan.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvsanWitnesstruevmk0vsanWitness.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackdatastoredatastore-2005datastore-2006datastore-2007datastore-4050overallStatusgreenparentdomain-c1002runtime.connectionStateconnectedruntime.healthSystemRuntime.hardwareStatusInfoMemory 0.32.2.178Physical element is functioning as expectedGreenMemory 0.32.6.182Physical element is functioning as expectedGreenMemory 0.32.26.218Physical element is functioning as expectedGreenMemory 0.8.39.55Physical element is functioning as expectedGreenMemory 0.8.41.57Physical element is functioning as expectedGreenMemory 0.8.40.56Physical element is functioning as expectedGreenMemory 0.32.24.216Physical element is functioning as expectedGreenMemory 0.32.0.176Physical element is functioning as expectedGreenMemory 0.32.20.212Physical element is functioning as expectedGreenMemory 0.32.22.214Physical element is functioning as expectedGreenMemory 0.32.18.210Physical element is functioning as expectedGreenMemory 0.8.38.54Physical element is functioning as expectedGreenMemory 0.32.8.184Physical element is functioning as expectedGreenMemory 0.32.16.208Physical element is functioning as expectedGreenProc 0.3.1.1Physical element is functioning as expectedGreenProc 0.3.2.2Physical element is functioning as expectedGreenProc 0.3.21.53Physical element is functioning as expectedGreenProc 0.3.20.52Physical element is functioning as expectedGreenruntime.inMaintenanceModefalsesummary.config.namentk-esxi-01.m1.ntk-corp.rusummary.config.product.fullNameVMware ESXi 8.0.3 build-24280767summary.config.product.version8.0.3summary.hardware.cpuMhz2800summary.hardware.cpuModelIntel(R) Xeon(R) Gold 6242 CPU @ 2.80GHzsummary.hardware.memorySize686832898048summary.hardware.modelSuper Serversummary.hardware.numCpuCores32summary.hardware.numCpuThreads64summary.hardware.uuid00000000-0000-0000-0000-ac1f6bb14c78summary.hardware.vendorSupermicrosummary.managementServerIp10.50.242.10summary.quickStats.overallCpuUsage153summary.quickStats.overallMemoryUsage16596summary.quickStats.uptime691190summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo[Device] Add-in Card 16 AOC_NIC TempThe sensor is operating under normal conditionsGreen6300-2degrees CnonetemperatureSystem Chassis 0 Chassis IntruThe sensor is operating under normal conditionsGreen00unspecifiednoneotherSystem Board 46 1.05V PCHThe sensor is operating under normal conditionsGreen107-2VoltsnonevoltageSystem Board 45 PVNN PCHThe sensor is operating under normal conditionsGreen103-2VoltsnonevoltageSystem Board 44 1.8V PCHThe sensor is operating under normal conditionsGreen184-2VoltsnonevoltageSystem Board 43 3.3VSBThe sensor is operating under normal conditionsGreen341-2VoltsnonevoltageSystem Board 42 5VSBThe sensor is operating under normal conditionsGreen516-2VoltsnonevoltageMemory Module 41 VDimmP2DEFThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 40 
VDimmP2ABCThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 39 VDimmP1DEFThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 38 VDimmP1ABCThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageProcessor 21 Vcpu2The sensor is operating under normal conditionsGreen183-2VoltsnonevoltageProcessor 20 Vcpu1The sensor is operating under normal conditionsGreen186-2VoltsnonevoltageBattery 0 VBATThe sensor is operating under normal conditionsGreen325160unspecifiednonebatterySystem Board 34 3.3VCCThe sensor is operating under normal conditionsGreen340-2VoltsnonevoltageSystem Board 33 5VCCThe sensor is operating under normal conditionsGreen510-2VoltsnonevoltageSystem Board 32 12VThe sensor is operating under normal conditionsGreen1170-2VoltsnonevoltageFan Device 6 FAN6The sensor is operating under normal conditionsGreen690000-2RPMnonefanFan Device 5 FAN5The sensor is operating under normal conditionsGreen680000-2RPMnonefanFan Device 4 FAN4The sensor is operating under normal conditionsGreen680000-2RPMnonefanFan Device 3 FAN3The sensor is operating under normal conditionsGreen650000-2RPMnonefanFan Device 1 FAN1The sensor is operating under normal conditionsGreen660000-2RPMnonefanMemory Device 26 P2-DIMMF1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 24 P2-DIMME1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 22 P2-DIMMD1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 20 P2-DIMMC1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 18 P2-DIMMB1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 16 P2-DIMMA1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 8 P1-DIMME1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 6 P1-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 2 P1-DIMMB1 TempThe sensor is operating under normal conditionsGreen3100-2degrees CnonetemperatureMemory Device 0 P1-DIMMA1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureSystem Board 21 VRMP2DEF TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 20 VRMP2ABC TempThe sensor is operating under normal conditionsGreen4800-2degrees CnonetemperatureSystem Board 19 VRMP1DEF TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 18 VRMP1ABC TempThe sensor is operating under normal conditionsGreen4300-2degrees CnonetemperatureSystem Board 17 VRMCpu2 TempThe sensor is operating under normal conditionsGreen4400-2degrees CnonetemperatureSystem Board 16 VRMCpu1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 3 Peripheral TempThe sensor is operating under normal conditionsGreen4200-2degrees CnonetemperatureSystem Board 2 System TempThe sensor is operating under normal conditionsGreen2900-2degrees CnonetemperatureSystem Board 1 PCH TempThe sensor is operating under normal conditionsGreen5100-2degrees CnonetemperatureProcessor 2 CPU2 TempThe sensor is operating under normal conditionsGreen5800-2degrees CnonetemperatureProcessor 1 CPU1 TempThe sensor is operating under normal 
conditionsGreen5300-2degrees CnonetemperaturePower Supply 87 PS2 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowerPower Supply 88 PS1 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowertriggeredAlarmStatevmvm-4060
12855:20241101:185744.107 End of vmware_service_get_hv_data():SUCCEED
12855:20241101:185744.108 In vmware_service_get_hv_pnics_data()
12855:20241101:185744.108 End of vmware_service_get_hv_pnics_data() found:4
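
vmware_service_get_hv_pnics_data() finds four physical NICs in the host-4047 response: vmnic0 and vmnic1 on the i40en driver at 1000 Mb/s (MACs ac:1f:6b:b1:4c:78 and :79), and vmnic2 and vmnic3 on the icen driver at 25000 Mb/s (MACs 50:7c:6f:20:55:a8 and :a9). A sketch of the per-NIC fields being extracted, with illustrative names, not the collector's real structure:

    struct hv_pnic_sketch
    {
        const char  *name;        /* "vmnic0" .. "vmnic3" */
        const char  *driver;      /* "i40en" (1 GbE) or "icen" (25 GbE) */
        int         speed_mbps;   /* link speed in Mb/s: 1000 or 25000 */
        const char  *mac;         /* permanent MAC address */
    };
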
12855:20241101:185744.108 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12855:20241101:185744.108 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12855:20241101:185744.108 In vmware_hv_ip_search()
12855:20241101:185744.109 End of vmware_hv_ip_search() ip:10.50.242.11
12855:20241101:185744.109 In vmware_hv_get_parent_data() id:'host-4047'
12855:20241101:185744.111 vmware_hv_get_parent_data() SOAP response:
domain-c1002nameNTK-corpdatacenter-3nameNTK-corptriggeredAlarmState
12855:20241101:185744.111 End of vmware_hv_get_parent_data():SUCCEED
12855:20241101:185744.111 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4047"
12855:20241101:185744.111 In vmware_service_hv_disks_get_info() hvid:'host-4047'
12855:20241101:185744.111 vmware_service_hv_disks_get_info() count of scsiLun:21
12855:20241101:185744.121 vmware_service_hv_disks_get_info() SOAP response:
host-4047config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].canonicalNamet10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].modelSAMSUNG MZ7LH480config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].queueDepth31config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].revision904Qconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].serialNumberS45PNC0R681954 config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"].vendorATA config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].canonicalNamenaa.2ff70002ac021983config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].canonicalNamenaa.2ff70002ac0228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].canonicalNamenaa.60002ac0000000000000004a000228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].modelVV 
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].canonicalNamenaa.60002ac00000000000000054000228a3config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"].vendor3PARdataconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].canonicalNamenaa.60002ac0000000000000042f00021983config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].lunTypediskconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].modelVV config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].operationalStateokconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].queueDepth64config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].revision3315config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].serialNumberunavailableconfig.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"].vendor3PARdata
12855:20241101:185744.122 In vmware_service_hv_disks_parse_info()
12855:20241101:185744.123 End of vmware_service_hv_disks_parse_info() created:6
12855:20241101:185744.123 End of vmware_service_hv_disks_get_info():SUCCEED for 6(vsan:0) / 21
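
The "count of scsiLun:21" figure matches the scsiTopology above: one LUN behind vmhba1 plus ten LUN entries behind each of vmhba2 and vmhba3 (the same devices seen through several targets). Those 21 entries resolve to six distinct ScsiDisk objects, which is why vmware_service_hv_disks_parse_info() creates 6 disks: the local SAMSUNG MZ7LH480 SSD and five 3PARdata "VV" LUNs. Per disk, the response carries roughly these fields (illustrative struct):

    struct hv_disk_sketch
    {
        const char  *key;                /* "key-vim.host.ScsiDisk-..." */
        const char  *canonical_name;     /* "t10.ATA_..." or "naa.60002ac..." */
        const char  *vendor, *model;     /* "ATA"/"SAMSUNG MZ7LH480", "3PARdata"/"VV" */
        const char  *operational_state;  /* "ok" for all six */
        int         queue_depth;         /* 31 for the local SSD, 64 for the 3PAR LUNs */
    };
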
12855:20241101:185744.123 In vmware_service_hv_get_multipath_data() hvid:'host-4047'
12855:20241101:185744.131 vmware_service_hv_get_multipath_data() SOAP response:
host-4047config.storageDevice.multipathInfokey-vim.host.MultipathInfo.LogicalUnit-0100000000533435504e43305236383139353420202020202053414d53554e0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.MultipathInfo.Path-vmhba1:C0:T0:L0vmhba1:C0:T0:L0activeactivetruekey-vim.host.BlockHba-vmhba1key-vim.host.MultipathInfo.LogicalUnit-0100000000533435504e43305236383139353420202020202053414d53554eFIXEDkey-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a3565620202020020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L530vmhba2:C0:T0:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L530vmhba2:C0:T3:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L530vmhba3:C0:T1:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L530vmhba3:C0:T0:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a3565620202020020013020060002ac00000000000000054000228a3565620202020key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L531vmhba2:C0:T0:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L531vmhba2:C0:T3:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L531vmhba3:C0:T1:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L531vmhba3:C0:T0:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f00021983565620202020020017020060002ac0000000000000042f00021983565620202020key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020key-vim.host.MultipathInfo.Path-vmhba3:C0:T3:L535vmhba3:C0:T3:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.MultipathInfo.Path-vmhba3:C0:T2:L535vmhba3:C0:T2:L535activeactivetruekey-vim.host.FibreChanne
lHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T2:L535vmhba2:C0:T2:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T1:L535vmhba2:C0:T1:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202002000001002ff70002ac0228a3565620202020key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L256vmhba2:C0:T0:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L256vmhba2:C0:T3:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L256vmhba3:C0:T1:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L256vmhba3:C0:T0:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202002000001002ff70002ac021983565620202020key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.MultipathInfo.Path-vmhba3:C0:T3:L256vmhba3:C0:T3:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202023781820897040858913456231250505898371key-vim.host.MultipathInfo.Path-vmhba3:C0:T2:L256vmhba3:C0:T2:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202023061244956661579553456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T2:L256vmhba2:C0:T2:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202025222972777799417633456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T1:L256vmhba2:C0:T1:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202024502396837420138273456231250505898371VMW_PSP_RRVMW_SATP_ALUA
12855:20241101:185744.131 End of vmware_service_hv_get_multipath_data():SUCCEED
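
The multipath response lists, for every LUN, its runtime paths: the local SAMSUNG disk has a single path on vmhba1 under the FIXED policy, while each 3PAR LUN is reachable over four active paths (two targets on vmhba2, two on vmhba3) under VMW_SATP_ALUA with round-robin (VMW_PSP_RR). The "for 1 diskextents check multipath" lines a little further down then match each datastore's single VMFS extent against this table. A minimal sketch of counting active paths for one disk key, with hypothetical helper and type names:

    #include <string.h>

    /* Hypothetical path record: each MultipathInfo.Path in the response carries
     * a name ("vmhba2:C0:T0:L530"), a state ("active") and the owning disk key. */
    struct mp_path { const char *disk_key; const char *state; };

    /* Count the active paths of one ScsiDisk key. */
    static int  mp_active_paths(const struct mp_path *paths, int n, const char *disk_key)
    {
        int i, count = 0;

        for (i = 0; i < n; i++)
        {
            if (0 == strcmp(paths[i].disk_key, disk_key) && 0 == strcmp(paths[i].state, "active"))
                count++;
        }

        return count;
    }
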
12855:20241101:185744.131 In vmware_hv_ds_access_update() hv id:host-4047 hv dss:4 dss:6
12855:20241101:185744.134 vmware_hv_ds_access_update() SOAP response:
datastore-2005host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-2006host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-2007host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtruedatastore-4050host["host-4047"].mountInfo.accessModereadWritehost["host-4047"].mountInfo.accessibletruehost["host-4047"].mountInfo.mountedtrue
12855:20241101:185744.134 In vmware_hv_ds_access_parse()
12855:20241101:185744.134 In vmware_hv_get_ds_access() for DS:datastore-2005
12855:20241101:185744.134 End of vmware_hv_get_ds_access() mountinfo:15
12855:20241101:185744.134 In vmware_hv_get_ds_access() for DS:datastore-2006
12855:20241101:185744.134 End of vmware_hv_get_ds_access() mountinfo:15
12855:20241101:185744.134 In vmware_hv_get_ds_access() for DS:datastore-2007
12855:20241101:185744.134 End of vmware_hv_get_ds_access() mountinfo:15
12855:20241101:185744.134 In vmware_hv_get_ds_access() for DS:datastore-4050
12855:20241101:185744.134 End of vmware_hv_get_ds_access() mountinfo:15
12855:20241101:185744.134 End of vmware_hv_ds_access_parse() parsed:4
12855:20241101:185744.134 End of vmware_hv_ds_access_update():SUCCEED for 4 / 4
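
For each of the four datastores mounted on host-4047 the response reports accessMode readWrite, accessible true and mounted true, and every vmware_hv_get_ds_access() call ends with mountinfo:15. A value of 15 is consistent with four independent flag bits all being set; the defines below are only a guess at the layout (not necessarily Zabbix's actual names or values), shown to make the number readable:

    /* Hypothetical access flags, illustrative only. */
    #define DS_MOUNTED      0x01
    #define DS_ACCESSIBLE   0x02
    #define DS_READABLE     0x04
    #define DS_WRITABLE     0x08    /* accessMode "readWrite" implies READABLE | WRITABLE */

    /* mounted + accessible + readWrite = 0x01 | 0x02 | 0x04 | 0x08 = 15 */
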
12855:20241101:185744.134 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID530_mgmt"
12855:20241101:185744.134 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_KARTOHA_SAS_NTK_ID535"
12855:20241101:185744.134 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID531"
12855:20241101:185744.134 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"Local_ntk-m1-esxi-01"
12855:20241101:185744.134 In vmware_service_create_vm() vmid:'vm-4060'
12855:20241101:185744.134 In vmware_service_get_vm_data() vmid:'vm-4060'
12855:20241101:185744.138 vmware_service_get_vm_data() SOAP response:
vm-4060availableFieldconfig.hardware218192falsefalse200IDE 00201IDE 11300PS2 controller 00600700100PCI controller 00500120001000150004000400SIO controller 00600Keyboard3000700Pointing device; Devicefalseautodetect3001500Video card100040961falsefalseautomatic26214412000Device on the virtual machine PCI bus that provides support for the virtual machine communication interface10017-1079927627falsetrue1000LSI Logic16100302000truenoSharing715000AHCI321002401600016000ISO [3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.iso[3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.isodatastore-2005truetruefalseok1500002000104,857,600 KB[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdkdatastore-2005persistentfalsefalsefalsefalse6000C29d-45c9-aa9f-3d54-a04187209ee5fa74bccac7959c5d95abe5bffffffffefalsesharingNone100001048576001073741824001000normal-11000normal05-20004000DVSwitch: 50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 db50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-400628630340996truefalsetrueok1601007assigned00:50:56:b0:80:48true050normal-1trueconfig.instanceUuid50304101-157a-f442-58f4-550f05de33feconfig.uuid42306756-2f64-b85a-a4fe-276cbfa19cb5customValuedatastoredatastore-2005guest.disk/5146047283240655228928/boot20403732481785856000guest.guestFamilylinuxGuestguest.guestFullNameUbuntu Linux (64-bit)guest.guestStaterunningguest.hostNamezabb-ntk-proxyguest.ipAddress10.50.242.76guest.netntk_dmz_vlan_112910.50.242.76fe80::250:56ff:feb0:804800:50:56:b0:80:48true400010.50.242.7628preferredfe80::250:56ff:feb0:804864unknownguest.toolsRunningStatusguestToolsRunningguest.toolsVersion12389layoutEx0[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmxconfig23822382true1[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmsdsnapshotList00true2[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdkdiskDescriptor458458true3[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk-flat.vmdkdiskExtent107374182400107374182400true4[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.nvramnvram86848684true5[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk-13083273.vswpswap85899345928589934592true6[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/vmx-zabbix-proxy-ntk-4e9138c7e268bf86a750769daba1b562730af6a5e74aa2ad704e8731824ba105-1.vswpuwswap8598323285983232true2000232024-11-01T18:26:30.170712Zparentgroup-v11resourcePoolresgroup-4001summary.config.memorySizeMB8192summary.config.namezabbix-proxy-ntksummary.config.numCpu2summary.quickStats.balloonedMemory0summary.quickStats.compressedMemory0summary.quickStats.guestMemoryUsage163summary.quickStats.hostMemoryUsage8222summary.quickStats.overallCpuUsage84summary.quickStats.privateMemory8165summary.quickStats.sharedMemory3summary.quickStats.swappedMemory0summary.quickStats.uptimeSeconds75967summary.runtime.consolidationNeededfalsesummary.runtime.powerStatepoweredOnsummary.storage.committed116050111748summary.storage.uncommitted0summary.storage.unshared107374182858triggeredAlarmStategroup-v11nameDiscovered virtual machineparentgroup-v4group-v4namevmparentdatacenter-3
12855:20241101:185744.138 End of vmware_service_get_vm_data():SUCCEED
12855:20241101:185744.139 In vmware_service_get_vm_folder() folder id:'group-v11'
12855:20241101:185744.140 End of vmware_service_get_vm_folder(): vm folder:Discovered virtual machine
12855:20241101:185744.140 In vmware_vm_get_nic_devices()
12855:20241101:185744.140 End of vmware_vm_get_nic_devices() found:1
12855:20241101:185744.140 In vmware_vm_get_disk_devices()
12855:20241101:185744.140 End of vmware_vm_get_disk_devices() found:1
12855:20241101:185744.140 In vmware_vm_get_file_systems()
12855:20241101:185744.140 End of vmware_vm_get_file_systems() found:2
12855:20241101:185744.140 In vmware_vm_get_custom_attrs()
12855:20241101:185744.140 End of vmware_vm_get_custom_attrs() attributes:0
12855:20241101:185744.140 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_vm'
12855:20241101:185744.140 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_vm' found:0 total:1
12855:20241101:185744.140 End of vmware_service_create_vm():SUCCEED
12855:20241101:185744.140 End of vmware_service_init_hv():SUCCEED
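
For vm-4060 ("zabbix-proxy-ntk", folder "Discovered virtual machine") the large property response boils down to a few counters: one NIC device (MAC 00:50:56:b0:80:48), one virtual disk (zabbix-proxy-ntk.vmdk on datastore-2005), two guest file systems ("/" and "/boot" from guest.disk), no custom attributes and no triggered alarms, after which vmware_service_create_vm() and vmware_service_init_hv() for host-4047 both succeed. The same counts as an illustrative record (not the collector's real structure):

    struct vm_sketch
    {
        const char  *id;            /* "vm-4060" */
        const char  *name;          /* summary.config.name: "zabbix-proxy-ntk" */
        const char  *folder;        /* "Discovered virtual machine" (group-v11) */
        int         nics;           /* 1 */
        int         disks;          /* 1 */
        int         file_systems;   /* 2: "/" and "/boot" */
        int         custom_attrs;   /* 0 */
    };
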
12855:20241101:185744.140 In vmware_service_init_hv() hvid:'host-4043'
12855:20241101:185744.140 In vmware_service_get_hv_data() guesthvid:'host-4043'
12855:20241101:185744.140 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4043false
12855:20241101:185744.157 vmware_service_get_hv_data() SOAP response:
host-4043config.network.dnsConfigfalsentk-esxi-02m1.ntk-corp.ru10.50.242.78m1.ntk-corp.ruconfig.network.ipRouteConfig.defaultGateway10.50.242.1config.network.pnickey-vim.host.PhysicalNic-vmnic0vmnic00000:1c:00.0i40en1000truefalsetrue3c:ec:ef:02:b6:e033c:ec:ef:02:b6:e000falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic1vmnic10000:1c:00.1i40en1000truefalsetrue3c:ec:ef:02:b6:e133c:ec:ef:02:b6:e100falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic2vmnic20000:af:00.0icen25000true25000truefalsefalse50:7c:6f:3b:d8:c6350:7c:6f:3b:d8:c600falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic3vmnic30000:af:00.1icen25000true25000truefalsefalse50:7c:6f:3b:d8:c7350:7c:6f:3b:d8:c700falsefalsefalsefalsefalsetruetrueconfig.storageDevice.scsiTopologykey-vim.host.ScsiTopology.Interface-vmhba0key-vim.host.BlockHba-vmhba0key-vim.host.ScsiTopology.Interface-vmhba1key-vim.host.BlockHba-vmhba1key-vim.host.ScsiTopology.Target-vmhba1:0:00key-vim.host.ScsiTopology.Lun-0100000000533435504e43305236383231303020202020202053414d53554e0key-vim.host.ScsiDisk-0100000000533435504e43305236383231303020202020202053414d53554ekey-vim.host.ScsiTopology.Interface-vmhba2key-vim.host.FibreChannelHba-vmhba2key-vim.host.ScsiTopology.Target-vmhba2:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:22key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.ScsiTopology.Target-vmhba2:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371key-vim.host.ScsiTopology.Interface-vmhba3key-vim.host.FibreChannelHba-vmhba3key-vim.host.ScsiTopology.Target-vmhba3:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:22key-vim.hos
t.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243key-vim.host.ScsiTopology.Target-vmhba3:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.ScsiTopology.Interface-vmhba64key-vim.host.FibreChannelHba-vmhba64key-vim.host.ScsiTopology.Interface-vmhba65key-vim.host.FibreChannelHba-vmhba65config.virtualNicManagerInfo.netConfigfaultToleranceLoggingtruevmk0faultToleranceLogging.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackmanagementtruevmk0management.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackmanagement.key-vim.host.VirtualNic-vmk0nvmeRdmatruevmk0nvmeRdma.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStacknvmeTcptruevmk0nvmeTcp.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackptpfalsevmk0ptp.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereBackupNFCtruevmk0vSphereBackupNFC.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereProvisioningtruevmk0vSphereProvisioning.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereReplicationtruevmk0vSphereReplication.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereReplicationNFCtruevmk0vSphereReplicationNFC.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 
dbdvportgroup-23117871047851500truedefaultTcpipStackvmotiontruevmk0vmotion.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvmotion.key-vim.host.VirtualNic-vmk0vsantruevmk0vsan.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvsanWitnesstruevmk0vsanWitness.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackdatastoredatastore-2005datastore-2006datastore-2007datastore-4046overallStatusgreenparentdomain-c1002runtime.connectionStateconnectedruntime.healthSystemRuntime.hardwareStatusInfoMemory 0.32.2.178Physical element is functioning as expectedGreenMemory 0.32.6.182Physical element is functioning as expectedGreenMemory 0.32.26.218Physical element is functioning as expectedGreenMemory 0.8.39.55Physical element is functioning as expectedGreenMemory 0.8.41.57Physical element is functioning as expectedGreenMemory 0.8.40.56Physical element is functioning as expectedGreenMemory 0.32.24.216Physical element is functioning as expectedGreenMemory 0.32.0.176Physical element is functioning as expectedGreenMemory 0.32.20.212Physical element is functioning as expectedGreenMemory 0.32.22.214Physical element is functioning as expectedGreenMemory 0.32.18.210Physical element is functioning as expectedGreenMemory 0.8.38.54Physical element is functioning as expectedGreenMemory 0.32.8.184Physical element is functioning as expectedGreenMemory 0.32.16.208Physical element is functioning as expectedGreenProc 0.3.1.1Physical element is functioning as expectedGreenProc 0.3.2.2Physical element is functioning as expectedGreenProc 0.3.21.53Physical element is functioning as expectedGreenProc 0.3.20.52Physical element is functioning as expectedGreenruntime.inMaintenanceModefalsesummary.config.namentk-esxi-02.m1.ntk-corp.rusummary.config.product.fullNameVMware ESXi 8.0.3 build-24280767summary.config.product.version8.0.3summary.hardware.cpuMhz2800summary.hardware.cpuModelIntel(R) Xeon(R) Gold 6242 CPU @ 2.80GHzsummary.hardware.memorySize686831919104summary.hardware.modelSYS-6019P-WTRsummary.hardware.numCpuCores32summary.hardware.numCpuThreads64summary.hardware.uuid00000000-0000-0000-0000-3cecef02b6e0summary.hardware.vendorSupermicrosummary.managementServerIp10.50.242.10summary.quickStats.overallCpuUsage396summary.quickStats.overallMemoryUsage8864summary.quickStats.uptime691313summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo[Device] Add-in Card 16 AOC_NIC TempThe sensor is operating under normal conditionsGreen6700-2degrees CnonetemperatureSystem Chassis 0 Chassis IntruThe sensor is operating under normal conditionsGreen00unspecifiednoneotherSystem Board 46 1.05V PCHThe sensor is operating under normal conditionsGreen106-2VoltsnonevoltageSystem Board 45 PVNN PCHThe sensor is operating under normal conditionsGreen102-2VoltsnonevoltageSystem Board 44 1.8V PCHThe sensor is operating under normal conditionsGreen182-2VoltsnonevoltageSystem Board 43 3.3VSBThe sensor is operating under normal conditionsGreen335-2VoltsnonevoltageSystem Board 42 5VSBThe sensor is operating under normal conditionsGreen507-2VoltsnonevoltageMemory Module 41 VDimmP2DEFThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 40 
VDimmP2ABCThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 39 VDimmP1DEFThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 38 VDimmP1ABCThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageProcessor 21 Vcpu2The sensor is operating under normal conditionsGreen184-2VoltsnonevoltageProcessor 20 Vcpu1The sensor is operating under normal conditionsGreen184-2VoltsnonevoltageBattery 0 VBATThe sensor is operating under normal conditionsGreen325160unspecifiednonebatterySystem Board 34 3.3VCCThe sensor is operating under normal conditionsGreen343-2VoltsnonevoltageSystem Board 33 5VCCThe sensor is operating under normal conditionsGreen507-2VoltsnonevoltageSystem Board 32 12VThe sensor is operating under normal conditionsGreen1164-2VoltsnonevoltageFan Device 6 FAN6The sensor is operating under normal conditionsGreen560000-2RPMnonefanFan Device 5 FAN5The sensor is operating under normal conditionsGreen590000-2RPMnonefanFan Device 3 FAN3The sensor is operating under normal conditionsGreen610000-2RPMnonefanFan Device 2 FAN2The sensor is operating under normal conditionsGreen600000-2RPMnonefanMemory Device 26 P2-DIMMF1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 24 P2-DIMME1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 22 P2-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 20 P2-DIMMC1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureMemory Device 18 P2-DIMMB1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 16 P2-DIMMA1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 8 P1-DIMME1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureMemory Device 6 P1-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 2 P1-DIMMB1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 0 P1-DIMMA1 TempThe sensor is operating under normal conditionsGreen3600-2degrees CnonetemperatureSystem Board 21 VRMP2DEF TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureSystem Board 20 VRMP2ABC TempThe sensor is operating under normal conditionsGreen4200-2degrees CnonetemperatureSystem Board 19 VRMP1DEF TempThe sensor is operating under normal conditionsGreen4000-2degrees CnonetemperatureSystem Board 18 VRMP1ABC TempThe sensor is operating under normal conditionsGreen4300-2degrees CnonetemperatureSystem Board 17 VRMCpu2 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 16 VRMCpu1 TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 3 Peripheral TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 2 System TempThe sensor is operating under normal conditionsGreen2900-2degrees CnonetemperatureSystem Board 1 PCH TempThe sensor is operating under normal conditionsGreen4600-2degrees CnonetemperatureProcessor 2 CPU2 TempThe sensor is operating under normal conditionsGreen5100-2degrees CnonetemperatureProcessor 1 CPU1 TempThe sensor is operating under normal conditionsGreen5000-2degrees CnonetemperaturePower Supply 87 PS2 StatusThe sensor is 
operating under normal conditionsGreen10sensor-discretenonepowerPower Supply 88 PS1 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowertriggeredAlarmStatevmvm-4057
12855:20241101:185744.157 End of vmware_service_get_hv_data():SUCCEED
12855:20241101:185744.158 In vmware_service_get_hv_pnics_data()
12855:20241101:185744.159 End of vmware_service_get_hv_pnics_data() found:4
12855:20241101:185744.159 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12855:20241101:185744.159 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12855:20241101:185744.159 In vmware_hv_ip_search()
12855:20241101:185744.159 End of vmware_hv_ip_search() ip:10.50.242.12
12855:20241101:185744.159 In vmware_hv_get_parent_data() id:'host-4043'
12855:20241101:185744.161 vmware_hv_get_parent_data() SOAP response:
domain-c1002nameNTK-corpdatacenter-3nameNTK-corptriggeredAlarmState
12855:20241101:185744.162 End of vmware_hv_get_parent_data():SUCCEED
12855:20241101:185744.162 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4043"
12855:20241101:185744.162 In vmware_service_hv_disks_get_info() hvid:'host-4043'
12855:20241101:185744.162 vmware_service_hv_disks_get_info() count of scsiLun:21
12859:20241101:185744.861 In vmware_job_get() queue:2
12859:20241101:185744.862 End of vmware_job_get() queue:2 type:none
12861:20241101:185744.862 In vmware_job_get() queue:2
12861:20241101:185744.862 End of vmware_job_get() queue:2 type:none
12857:20241101:185744.862 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 6.000000 sec during 5.815664 sec]'
12857:20241101:185744.862 In vmware_job_get() queue:2
12857:20241101:185744.862 End of vmware_job_get() queue:2 type:none
12859:20241101:185745.309 cannot increase log level: maximum level has been already set
12859:20241101:185745.309 In vmware_job_get() queue:2
12859:20241101:185745.309 End of vmware_job_get() queue:2 type:none
12861:20241101:185745.309 cannot increase log level: maximum level has been already set
12861:20241101:185745.309 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 6.000000 sec during 5.305387 sec]'
12861:20241101:185745.309 In vmware_job_get() queue:2
12861:20241101:185745.309 End of vmware_job_get() queue:2 type:none
12857:20241101:185745.309 cannot increase log level: maximum level has been already set
12857:20241101:185745.309 In vmware_job_get() queue:2
12857:20241101:185745.309 End of vmware_job_get() queue:2 type:none
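
The "cannot increase log level: maximum level has been already set" lines are each collector's reply to a runtime log-level control request that arrives while the process is already at level 5 (trace). The trigger is not visible in this excerpt; a plausible cause, stated only as an assumption, is that the control command was issued again, for example:

    zabbix_proxy -R log_level_increase="vmware collector"
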
12861:20241101:185746.310 In vmware_job_get() queue:2
12861:20241101:185746.310 End of vmware_job_get() queue:2 type:none
12859:20241101:185746.310 In vmware_job_get() queue:2
12859:20241101:185746.310 End of vmware_job_get() queue:2 type:none
12857:20241101:185746.310 In vmware_job_get() queue:2
12857:20241101:185746.310 End of vmware_job_get() queue:2 type:none
12859:20241101:185747.310 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 6.000000 sec during 5.305323 sec]'
12859:20241101:185747.310 In vmware_job_get() queue:2
12859:20241101:185747.310 End of vmware_job_get() queue:2 type:none
12861:20241101:185747.310 In vmware_job_get() queue:2
12861:20241101:185747.310 End of vmware_job_get() queue:2 type:none
12857:20241101:185747.310 In vmware_job_get() queue:2
12857:20241101:185747.310 End of vmware_job_get() queue:2 type:none
12861:20241101:185748.310 In vmware_job_get() queue:2
12861:20241101:185748.310 End of vmware_job_get() queue:2 type:none
12859:20241101:185748.310 In vmware_job_get() queue:2
12859:20241101:185748.310 End of vmware_job_get() queue:2 type:none
12857:20241101:185748.310 In vmware_job_get() queue:2
12857:20241101:185748.310 End of vmware_job_get() queue:2 type:none
12837:20241101:185748.553 received configuration data from server at "10.50.242.78", datalen 437
12859:20241101:185749.310 In vmware_job_get() queue:2
12859:20241101:185749.310 End of vmware_job_get() queue:2 type:none
12857:20241101:185749.310 In vmware_job_get() queue:2
12857:20241101:185749.310 End of vmware_job_get() queue:2 type:none
12861:20241101:185749.310 In vmware_job_get() queue:2
12861:20241101:185749.310 End of vmware_job_get() queue:2 type:none
12857:20241101:185750.310 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 6.000000 sec during 5.448739 sec]'
12857:20241101:185750.310 In vmware_job_get() queue:2
12857:20241101:185750.310 End of vmware_job_get() queue:2 type:none
12859:20241101:185750.310 In vmware_job_get() queue:2
12859:20241101:185750.310 End of vmware_job_get() queue:2 type:none
12861:20241101:185750.311 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000964 sec]'
12861:20241101:185750.311 In vmware_job_get() queue:2
12861:20241101:185750.311 End of vmware_job_get() queue:2 type:none
12857:20241101:185751.311 In vmware_job_get() queue:2
12861:20241101:185751.311 In vmware_job_get() queue:2
12861:20241101:185751.311 End of vmware_job_get() queue:2 type:none
12857:20241101:185751.311 End of vmware_job_get() queue:2 type:none
12859:20241101:185751.311 In vmware_job_get() queue:2
12859:20241101:185751.311 End of vmware_job_get() queue:2 type:none
12861:20241101:185752.311 In vmware_job_get() queue:2
12861:20241101:185752.311 End of vmware_job_get() queue:2 type:none
12857:20241101:185752.311 In vmware_job_get() queue:2
12857:20241101:185752.311 End of vmware_job_get() queue:2 type:none
12859:20241101:185752.311 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001054 sec]'
12859:20241101:185752.311 In vmware_job_get() queue:2
12859:20241101:185752.311 End of vmware_job_get() queue:2 type:none
12861:20241101:185753.311 In vmware_job_get() queue:2
12861:20241101:185753.311 End of vmware_job_get() queue:2 type:none
12857:20241101:185753.311 In vmware_job_get() queue:2
12857:20241101:185753.311 End of vmware_job_get() queue:2 type:none
12859:20241101:185753.311 In vmware_job_get() queue:2
12859:20241101:185753.311 End of vmware_job_get() queue:2 type:none
12855:20241101:185754.164 cannot increase log level: maximum level has been already set
12855:20241101:185754.164 End of vmware_service_hv_disks_get_info():FAIL for 0(vsan:0) / 21
12855:20241101:185754.164 End of vmware_service_init_hv():FAIL
12855:20241101:185754.164 Unable initialize hv host-4043: Timeout was reached.
12855:20241101:185754.164 In vmware_service_init_hv() hvid:'host-4038'
12855:20241101:185754.164 In vmware_service_get_hv_data() guesthvid:'host-4038'
12855:20241101:185754.164 vmware_service_get_hv_data() SOAP request: propertyCollector HostSystem vm parent datastore config.virtualNicManagerInfo.netConfig config.network.pnic config.network.ipRouteConfig.defaultGateway summary.managementServerIp config.storageDevice.scsiTopology triggeredAlarmState summary.quickStats.overallCpuUsage summary.config.product.fullName summary.hardware.numCpuCores summary.hardware.cpuMhz summary.hardware.cpuModel summary.hardware.numCpuThreads summary.hardware.memorySize summary.hardware.model summary.hardware.uuid summary.hardware.vendor summary.quickStats.overallMemoryUsage summary.quickStats.uptime summary.config.product.version summary.config.name overallStatus runtime.inMaintenanceMode summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo config.network.dnsConfig parent runtime.connectionState hardware.systemInfo.serialNumber runtime.healthSystemRuntime.hardwareStatusInfo host-4038 false
12861:20241101:185754.311 In vmware_job_get() queue:2
12861:20241101:185754.312 End of vmware_job_get() queue:2 type:none
12857:20241101:185754.313 In vmware_job_get() queue:2
12857:20241101:185754.313 End of vmware_job_get() queue:2 type:none
12859:20241101:185754.313 In vmware_job_get() queue:2
12859:20241101:185754.313 End of vmware_job_get() queue:2 type:none
12861:20241101:185755.313 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002186 sec]'
12861:20241101:185755.313 In vmware_job_get() queue:2
12861:20241101:185755.313 End of vmware_job_get() queue:2 type:none
12859:20241101:185755.313 In vmware_job_get() queue:2
12859:20241101:185755.313 End of vmware_job_get() queue:2 type:none
12857:20241101:185755.313 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002386 sec]'
12857:20241101:185755.313 In vmware_job_get() queue:2
12857:20241101:185755.313 End of vmware_job_get() queue:2 type:none
12861:20241101:185756.313 In vmware_job_get() queue:2
12861:20241101:185756.313 End of vmware_job_get() queue:2 type:none
12859:20241101:185756.313 In vmware_job_get() queue:2
12859:20241101:185756.313 End of vmware_job_get() queue:2 type:none
12857:20241101:185756.313 In vmware_job_get() queue:2
12857:20241101:185756.313 End of vmware_job_get() queue:2 type:none
12861:20241101:185757.313 In vmware_job_get() queue:2
12861:20241101:185757.313 End of vmware_job_get() queue:2 type:none
12859:20241101:185757.313 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002249 sec]'
12859:20241101:185757.313 In vmware_job_get() queue:2
12859:20241101:185757.313 End of vmware_job_get() queue:2 type:none
12857:20241101:185757.313 In vmware_job_get() queue:2
12857:20241101:185757.313 End of vmware_job_get() queue:2 type:none
12861:20241101:185758.313 In vmware_job_get() queue:2
12861:20241101:185758.313 End of vmware_job_get() queue:2 type:none
12859:20241101:185758.313 In vmware_job_get() queue:2
12859:20241101:185758.313 End of vmware_job_get() queue:2 type:none
12857:20241101:185758.313 In vmware_job_get() queue:2
12857:20241101:185758.313 End of vmware_job_get() queue:2 type:none
12837:20241101:185758.569 received configuration data from server at "10.50.242.78", datalen 437
12861:20241101:185759.313 In vmware_job_get() queue:2
12861:20241101:185759.314 End of vmware_job_get() queue:2 type:none
12857:20241101:185759.314 In vmware_job_get() queue:2
12857:20241101:185759.314 End of vmware_job_get() queue:2 type:none
12859:20241101:185759.314 In vmware_job_get() queue:2
12859:20241101:185759.315 End of vmware_job_get() queue:2 type:none
12861:20241101:185800.315 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001973 sec]'
12861:20241101:185800.315 In vmware_job_get() queue:2
12861:20241101:185800.315 End of vmware_job_get() queue:2 type:none
12857:20241101:185800.315 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001948 sec]'
12857:20241101:185800.315 In vmware_job_get() queue:2
12857:20241101:185800.315 End of vmware_job_get() queue:2 type:none
12859:20241101:185800.315 In vmware_job_get() queue:2
12859:20241101:185800.315 End of vmware_job_get() queue:2 type:none
12861:20241101:185801.315 In vmware_job_get() queue:2
12861:20241101:185801.315 End of vmware_job_get() queue:2 type:none
12857:20241101:185801.315 In vmware_job_get() queue:2
12857:20241101:185801.315 End of vmware_job_get() queue:2 type:none
12859:20241101:185801.315 In vmware_job_get() queue:2
12859:20241101:185801.315 End of vmware_job_get() queue:2 type:none
12861:20241101:185802.315 In vmware_job_get() queue:2
12861:20241101:185802.315 End of vmware_job_get() queue:2 type:none
12857:20241101:185802.315 In vmware_job_get() queue:2
12857:20241101:185802.315 End of vmware_job_get() queue:2 type:none
12859:20241101:185802.315 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002080 sec]'
12859:20241101:185802.315 In vmware_job_get() queue:2
12859:20241101:185802.315 End of vmware_job_get() queue:2 type:none
12861:20241101:185803.315 In vmware_job_get() queue:2
12861:20241101:185803.315 End of vmware_job_get() queue:2 type:none
12857:20241101:185803.315 In vmware_job_get() queue:2
12857:20241101:185803.315 End of vmware_job_get() queue:2 type:none
12859:20241101:185803.315 In vmware_job_get() queue:2
12859:20241101:185803.315 End of vmware_job_get() queue:2 type:none
12855:20241101:185804.166 End of vmware_service_get_hv_data():FAIL
12855:20241101:185804.166 End of vmware_service_init_hv():FAIL
12855:20241101:185804.166 Unable initialize hv host-4038: Timeout was reached.
12855:20241101:185804.166 In vmware_service_dvswitch_load() dvs count:0
12855:20241101:185804.166 End of vmware_service_dvswitch_load() count: 0 / 0
12855:20241101:185804.166 In vmware_service_props_load() props total:0
12855:20241101:185804.166 End of vmware_service_props_load() count: 0 / 0
12855:20241101:185804.166 In vmware_service_get_maxquerymetrics()
12861:20241101:185804.315 In vmware_job_get() queue:2
12861:20241101:185804.316 End of vmware_job_get() queue:2 type:none
12859:20241101:185804.316 In vmware_job_get() queue:2
12859:20241101:185804.316 End of vmware_job_get() queue:2 type:none
12857:20241101:185804.317 In vmware_job_get() queue:2
12857:20241101:185804.317 End of vmware_job_get() queue:2 type:none
12861:20241101:185805.317 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001959 sec]'
12861:20241101:185805.317 In vmware_job_get() queue:2
12861:20241101:185805.317 End of vmware_job_get() queue:2 type:none
12859:20241101:185805.317 In vmware_job_get() queue:2
12859:20241101:185805.317 End of vmware_job_get() queue:2 type:none
12857:20241101:185805.317 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002009 sec]'
12857:20241101:185805.317 In vmware_job_get() queue:2
12857:20241101:185805.317 End of vmware_job_get() queue:2 type:none
12861:20241101:185806.317 In vmware_job_get() queue:2
12861:20241101:185806.317 End of vmware_job_get() queue:2 type:none
12857:20241101:185806.317 In vmware_job_get() queue:2
12857:20241101:185806.317 End of vmware_job_get() queue:2 type:none
12859:20241101:185806.317 In vmware_job_get() queue:2
12859:20241101:185806.317 End of vmware_job_get() queue:2 type:none
12861:20241101:185807.317 In vmware_job_get() queue:2
12861:20241101:185807.317 End of vmware_job_get() queue:2 type:none
12857:20241101:185807.317 In vmware_job_get() queue:2
12857:20241101:185807.317 End of vmware_job_get() queue:2 type:none
12859:20241101:185807.317 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002014 sec]'
12859:20241101:185807.317 In vmware_job_get() queue:2
12859:20241101:185807.317 End of vmware_job_get() queue:2 type:none
12861:20241101:185808.317 In vmware_job_get() queue:2
12861:20241101:185808.317 End of vmware_job_get() queue:2 type:none
12857:20241101:185808.317 In vmware_job_get() queue:2
12857:20241101:185808.317 End of vmware_job_get() queue:2 type:none
12859:20241101:185808.317 In vmware_job_get() queue:2
12859:20241101:185808.317 End of vmware_job_get() queue:2 type:none
12837:20241101:185808.587 received configuration data from server at "10.50.242.78", datalen 437
12861:20241101:185809.317 In vmware_job_get() queue:2
12861:20241101:185809.318 End of vmware_job_get() queue:2 type:none
12859:20241101:185809.319 In vmware_job_get() queue:2
12859:20241101:185809.319 End of vmware_job_get() queue:2 type:none
12857:20241101:185809.319 In vmware_job_get() queue:2
12857:20241101:185809.319 End of vmware_job_get() queue:2 type:none
12861:20241101:185810.319 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002055 sec]'
12861:20241101:185810.319 In vmware_job_get() queue:2
12861:20241101:185810.319 End of vmware_job_get() queue:2 type:none
12859:20241101:185810.319 In vmware_job_get() queue:2
12859:20241101:185810.319 End of vmware_job_get() queue:2 type:none
12857:20241101:185810.319 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002087 sec]'
12857:20241101:185810.319 In vmware_job_get() queue:2
12857:20241101:185810.319 End of vmware_job_get() queue:2 type:none
12861:20241101:185811.319 In vmware_job_get() queue:2
12861:20241101:185811.319 End of vmware_job_get() queue:2 type:none
12859:20241101:185811.319 In vmware_job_get() queue:2
12859:20241101:185811.319 End of vmware_job_get() queue:2 type:none
12857:20241101:185811.319 In vmware_job_get() queue:2
12857:20241101:185811.319 End of vmware_job_get() queue:2 type:none
12861:20241101:185812.319 In vmware_job_get() queue:2
12861:20241101:185812.319 End of vmware_job_get() queue:2 type:none
12859:20241101:185812.319 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001951 sec]'
12859:20241101:185812.319 In vmware_job_get() queue:2
12859:20241101:185812.319 End of vmware_job_get() queue:2 type:none
12857:20241101:185812.319 In vmware_job_get() queue:2
12857:20241101:185812.319 End of vmware_job_get() queue:2 type:none
12861:20241101:185813.319 In vmware_job_get() queue:2
12861:20241101:185813.319 End of vmware_job_get() queue:2 type:none
12859:20241101:185813.319 In vmware_job_get() queue:2
12859:20241101:185813.319 End of vmware_job_get() queue:2 type:none
12857:20241101:185813.319 In vmware_job_get() queue:2
12857:20241101:185813.319 End of vmware_job_get() queue:2 type:none
12855:20241101:185814.167 End of vmware_service_get_maxquerymetrics():FAIL
12855:20241101:185814.167 In vmware_service_update_perf_entities()
12855:20241101:185814.167 In vmware_service_add_perf_entity() type:HostSystem id:host-4047
12855:20241101:185814.168 In zbx_vmware_service_get_perf_entity() type:HostSystem id:host-4047
12855:20241101:185814.168 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/packetsRx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:153
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/packetsTx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:154
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/received[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:155
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/transmitted[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:156
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:datastore/totalReadLatency[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:189
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:datastore/totalWriteLatency[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:190
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:datastore/numberReadAveraged[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:185
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:datastore/numberWriteAveraged[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:186
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:cpu/usage[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:2
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:cpu/utilization[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:398
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:power/power[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:164
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:power/powerCap[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:165
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/droppedRx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:605
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/droppedTx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:606
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/errorsRx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:613
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/errorsTx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:614
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/broadcastRx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:609
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/broadcastTx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:610
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 End of vmware_service_add_perf_entity() perfcounters:18
12855:20241101:185814.168 In vmware_service_add_perf_entity() type:VirtualMachine id:vm-4060
12855:20241101:185814.168 In zbx_vmware_service_get_perf_entity() type:VirtualMachine id:vm-4060
12855:20241101:185814.168 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:virtualDisk/read[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:180
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:virtualDisk/write[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:181
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:virtualDisk/numberReadAveraged[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:178
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:virtualDisk/numberWriteAveraged[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:179
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/packetsRx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:153
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/packetsTx[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:154
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/received[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:155
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/transmitted[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:156
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:cpu/ready[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:12
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/usage[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:150
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:cpu/usage[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:2
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:cpu/latency[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:540
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:cpu/readiness[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:548
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:cpu/swapwait[summation]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:531
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:sys/osUptime[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:643
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:mem/consumed[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:98
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:mem/usage[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:24
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:mem/swapped[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:70
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:net/usage[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:150
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:virtualDisk/readOIO[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:349
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:virtualDisk/writeOIO[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:350
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:virtualDisk/totalWriteLatency[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:183
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:virtualDisk/totalReadLatency[average]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:182
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 End of vmware_service_add_perf_entity() perfcounters:23
12855:20241101:185814.168 vmware_service_update_perf_entities() for type: VirtualMachine hv id: host-4047 hv uuid: 00000000-0000-0000-0000-ac1f6bb14c78 linked vm id: vm-4060 vm uuid: 50304101-157a-f442-58f4-550f05de33fe
12855:20241101:185814.168 In vmware_service_add_perf_entity() type:Datastore id:datastore-2005
12855:20241101:185814.168 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2005
12855:20241101:185814.168 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:285
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:286
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:287
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 End of vmware_service_add_perf_entity() perfcounters:3
12855:20241101:185814.168 vmware_service_update_perf_entities() for type: Datastore id: datastore-2005 name: 3PAR_GOROH_SSD_NTK_ID530_mgmt uuid: 6703d517-82086a06-cec0-9440c9831520
12855:20241101:185814.168 In vmware_service_add_perf_entity() type:Datastore id:datastore-2006
12855:20241101:185814.168 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2006
12855:20241101:185814.168 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:285
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:286
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:287
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 End of vmware_service_add_perf_entity() perfcounters:3
12855:20241101:185814.168 vmware_service_update_perf_entities() for type: Datastore id: datastore-2006 name: 3PAR_KARTOHA_SAS_NTK_ID535 uuid: 6703d63f-3516ce66-4bee-9440c9831520
12855:20241101:185814.168 In vmware_service_add_perf_entity() type:Datastore id:datastore-2007
12855:20241101:185814.168 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-2007
12855:20241101:185814.168 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:285
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:286
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:287
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 End of vmware_service_add_perf_entity() perfcounters:3
12855:20241101:185814.168 vmware_service_update_perf_entities() for type: Datastore id: datastore-2007 name: 3PAR_GOROH_SSD_NTK_ID531 uuid: 6704dec9-75e6c68a-c19e-9440c9831520
12855:20241101:185814.168 In vmware_service_add_perf_entity() type:Datastore id:datastore-4046
12855:20241101:185814.168 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4046
12855:20241101:185814.168 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:285
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:286
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12855:20241101:185814.168 zbx_vmware_service_get_counterid() counterid:287
12855:20241101:185814.168 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.168 End of vmware_service_add_perf_entity() perfcounters:3
12855:20241101:185814.168 vmware_service_update_perf_entities() for type: Datastore id: datastore-4046 name: Local_ntk-m1-esxi-02 uuid: 67155ba7-5e9d16d6-0733-3cecef02b6e0
12855:20241101:185814.168 In vmware_service_add_perf_entity() type:Datastore id:datastore-4050
12855:20241101:185814.169 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4050
12855:20241101:185814.169 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12855:20241101:185814.169 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12855:20241101:185814.169 zbx_vmware_service_get_counterid() counterid:285
12855:20241101:185814.169 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.169 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12855:20241101:185814.169 zbx_vmware_service_get_counterid() counterid:286
12855:20241101:185814.169 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.169 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12855:20241101:185814.169 zbx_vmware_service_get_counterid() counterid:287
12855:20241101:185814.169 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.169 End of vmware_service_add_perf_entity() perfcounters:3
12855:20241101:185814.169 vmware_service_update_perf_entities() for type: Datastore id: datastore-4050 name: Local_ntk-m1-esxi-01 uuid: 67155cc9-bea5e318-19fd-ac1f6bb14c78
12855:20241101:185814.169 In vmware_service_add_perf_entity() type:Datastore id:datastore-4041
12855:20241101:185814.169 In zbx_vmware_service_get_perf_entity() type:Datastore id:datastore-4041
12855:20241101:185814.169 End of zbx_vmware_service_get_perf_entity() entity:(nil)
12855:20241101:185814.169 In zbx_vmware_service_get_counterid() path:disk/used[latest]
12855:20241101:185814.169 zbx_vmware_service_get_counterid() counterid:285
12855:20241101:185814.169 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.169 In zbx_vmware_service_get_counterid() path:disk/provisioned[latest]
12855:20241101:185814.169 zbx_vmware_service_get_counterid() counterid:286
12855:20241101:185814.169 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.169 In zbx_vmware_service_get_counterid() path:disk/capacity[latest]
12855:20241101:185814.169 zbx_vmware_service_get_counterid() counterid:287
12855:20241101:185814.169 End of zbx_vmware_service_get_counterid():SUCCEED
12855:20241101:185814.169 End of vmware_service_add_perf_entity() perfcounters:3
12855:20241101:185814.169 vmware_service_update_perf_entities() for type: Datastore id: datastore-4041 name: Local_ntk-m1-esxi-03 uuid: 67155e10-d4545cb2-5b01-3cecef012e78
12855:20241101:185814.169 End of vmware_service_update_perf_entities() entities:8
12855:20241101:185814.169 === memory statistics for vmware cache size ===
12855:20241101:185814.169 free chunks of size >= 256 bytes: 4
12855:20241101:185814.169 min chunk size: 760 bytes
12855:20241101:185814.169 max chunk size: 1073164272 bytes
12855:20241101:185814.169 memory of total size 1073625760 bytes fragmented into 7204 chunks
12855:20241101:185814.169 of those, 1073166872 bytes are in 4 free chunks
12855:20241101:185814.169 of those, 458888 bytes are in 7200 used chunks
12855:20241101:185814.169 of those, 115248 bytes are used by allocation overhead
12855:20241101:185814.169 ================================
12855:20241101:185814.169 End of zbx_vmware_service_update():FAIL processed:1638400 bytes of data. Events:0 DC:1 DS:6 CL:1 HV:1 VM:1 DVS:1 Alarms:1 VMwareCache memory usage (free/strpool/total): 1073166872 / 3211208 / 1073741008
12855:20241101:185814.169 End of vmware_job_exec() type:update_conf ret:FAIL
12855:20241101:185814.169 In vmware_job_schedule() queue:2 type:update_conf
12855:20241101:185814.169 End of vmware_job_schedule() type:update_conf nextcheck:18:58:43
12855:20241101:185814.169 In vmware_job_get() queue:3
12855:20241101:185814.169 End of vmware_job_get() queue:3 type:none
12855:20241101:185814.169 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 35.164942 sec]'
12855:20241101:185814.169 In vmware_job_get() queue:3
12855:20241101:185814.169 End of vmware_job_get() queue:3 type:none
12861:20241101:185814.319 In vmware_job_get() queue:3
12861:20241101:185814.320 End of vmware_job_get() queue:3 type:none
12857:20241101:185814.320 In vmware_job_get() queue:3
12857:20241101:185814.320 End of vmware_job_get() queue:3 type:none
12859:20241101:185814.320 In vmware_job_get() queue:3
12859:20241101:185814.320 End of vmware_job_get() queue:3 type:none
12855:20241101:185815.169 In vmware_job_get() queue:3
12855:20241101:185815.169 End of vmware_job_get() queue:3 type:none
12861:20241101:185815.320 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001343 sec]'
12861:20241101:185815.320 In vmware_job_get() queue:3
12861:20241101:185815.320 End of vmware_job_get() queue:3 type:none
12859:20241101:185815.320 In vmware_job_get() queue:3
12859:20241101:185815.320 End of vmware_job_get() queue:3 type:none
12857:20241101:185815.320 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001286 sec]'
12857:20241101:185815.320 In vmware_job_get() queue:3
12857:20241101:185815.320 End of vmware_job_get() queue:3 type:none
12855:20241101:185816.169 In vmware_job_get() queue:3
12855:20241101:185816.169 End of vmware_job_get() queue:3 type:none
12861:20241101:185816.320 In vmware_job_get() queue:3
12861:20241101:185816.320 End of vmware_job_get() queue:3 type:none
12859:20241101:185816.320 In vmware_job_get() queue:3
12859:20241101:185816.320 End of vmware_job_get() queue:3 type:none
12857:20241101:185816.320 In vmware_job_get() queue:3
12857:20241101:185816.320 End of vmware_job_get() queue:3 type:none
12855:20241101:185817.169 In vmware_job_get() queue:3
12855:20241101:185817.169 End of vmware_job_get() queue:3 type:none
12861:20241101:185817.320 In vmware_job_get() queue:3
12861:20241101:185817.320 End of vmware_job_get() queue:3 type:none
12859:20241101:185817.320 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001329 sec]'
12859:20241101:185817.320 In vmware_job_get() queue:3
12859:20241101:185817.320 End of vmware_job_get() queue:3 type:none
12857:20241101:185817.320 In vmware_job_get() queue:3
12857:20241101:185817.320 End of vmware_job_get() queue:3 type:none
12855:20241101:185818.169 In vmware_job_get() queue:3
12855:20241101:185818.169 End of vmware_job_get() queue:3 type:none
12861:20241101:185818.320 In vmware_job_get() queue:3
12861:20241101:185818.320 End of vmware_job_get() queue:3 type:none
12859:20241101:185818.321 In vmware_job_get() queue:3
12859:20241101:185818.321 End of vmware_job_get() queue:3 type:none
12857:20241101:185818.321 In vmware_job_get() queue:3
12857:20241101:185818.321 End of vmware_job_get() queue:3 type:none
12837:20241101:185818.604 received configuration data from server at "10.50.242.78", datalen 437
12855:20241101:185819.170 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000751 sec]'
12855:20241101:185819.170 In vmware_job_get() queue:3
12855:20241101:185819.170 End of vmware_job_get() queue:3 type:none
12861:20241101:185819.321 In vmware_job_get() queue:3
12861:20241101:185819.321 End of vmware_job_get() queue:3 type:none
12857:20241101:185819.321 In vmware_job_get() queue:3
12857:20241101:185819.321 End of vmware_job_get() queue:3 type:none
12859:20241101:185819.321 In vmware_job_get() queue:3
12859:20241101:185819.321 End of vmware_job_get() queue:3 type:none
12855:20241101:185820.170 In vmware_job_get() queue:3
12855:20241101:185820.170 End of vmware_job_get() queue:3 type:none
12857:20241101:185820.321 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001430 sec]'
12859:20241101:185820.321 In vmware_job_get() queue:3
12859:20241101:185820.322 End of vmware_job_get() queue:3 type:none
12857:20241101:185820.322 In vmware_job_get() queue:3
12857:20241101:185820.322 End of vmware_job_get() queue:3 type:none
12861:20241101:185820.322 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001585 sec]'
12861:20241101:185820.322 In vmware_job_get() queue:3
12861:20241101:185820.322 End of vmware_job_get() queue:3 type:none
12855:20241101:185821.170 In vmware_job_get() queue:3
12855:20241101:185821.170 End of vmware_job_get() queue:3 type:none
12859:20241101:185821.322 In vmware_job_get() queue:3
12859:20241101:185821.322 End of vmware_job_get() queue:3 type:none
12861:20241101:185821.322 In vmware_job_get() queue:3
12861:20241101:185821.322 End of vmware_job_get() queue:3 type:none
12857:20241101:185821.322 In vmware_job_get() queue:3
12857:20241101:185821.322 End of vmware_job_get() queue:3 type:none
12855:20241101:185822.170 In vmware_job_get() queue:3
12855:20241101:185822.170 End of vmware_job_get() queue:3 type:none
12859:20241101:185822.322 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001411 sec]'
12861:20241101:185822.322 In vmware_job_get() queue:3
12861:20241101:185822.322 End of vmware_job_get() queue:3 type:none
12859:20241101:185822.322 In vmware_job_get() queue:3
12859:20241101:185822.322 End of vmware_job_get() queue:3 type:none
12857:20241101:185822.322 In vmware_job_get() queue:3
12857:20241101:185822.322 End of vmware_job_get() queue:3 type:none
12855:20241101:185823.170 In vmware_job_get() queue:3
12855:20241101:185823.170 End of vmware_job_get() queue:3 type:none
12857:20241101:185823.322 In vmware_job_get() queue:3
12861:20241101:185823.322 In vmware_job_get() queue:3
12861:20241101:185823.322 End of vmware_job_get() queue:3 type:none
12857:20241101:185823.322 End of vmware_job_get() queue:3 type:none
12859:20241101:185823.322 In vmware_job_get() queue:3
12859:20241101:185823.322 End of vmware_job_get() queue:3 type:none
12855:20241101:185824.170 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000819 sec]'
12855:20241101:185824.170 In vmware_job_get() queue:3
12855:20241101:185824.170 End of vmware_job_get() queue:3 type:none
12861:20241101:185824.322 In vmware_job_get() queue:3
12861:20241101:185824.322 End of vmware_job_get() queue:3 type:none
12857:20241101:185824.324 In vmware_job_get() queue:3
12857:20241101:185824.324 End of vmware_job_get() queue:3 type:none
12859:20241101:185824.324 In vmware_job_get() queue:3
12859:20241101:185824.324 End of vmware_job_get() queue:3 type:none
12855:20241101:185825.171 In vmware_job_get() queue:3
12855:20241101:185825.171 End of vmware_job_get() queue:3 type:none
12861:20241101:185825.324 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002506 sec]'
12861:20241101:185825.324 In vmware_job_get() queue:3
12861:20241101:185825.324 End of vmware_job_get() queue:3 type:none
12857:20241101:185825.324 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002697 sec]'
12857:20241101:185825.324 In vmware_job_get() queue:3
12857:20241101:185825.324 End of vmware_job_get() queue:3 type:none
12859:20241101:185825.324 In vmware_job_get() queue:3
12859:20241101:185825.324 End of vmware_job_get() queue:3 type:none
12846:20241101:185825.721 executing housekeeper
12846:20241101:185825.736 housekeeper [deleted 0 records in 0.001211 sec, idle for 1 hour(s)]
12855:20241101:185826.171 In vmware_job_get() queue:3
12855:20241101:185826.171 End of vmware_job_get() queue:3 type:none
12861:20241101:185826.324 In vmware_job_get() queue:3
12861:20241101:185826.324 End of vmware_job_get() queue:3 type:none
12859:20241101:185826.324 In vmware_job_get() queue:3
12859:20241101:185826.324 End of vmware_job_get() queue:3 type:none
12857:20241101:185826.324 In vmware_job_get() queue:3
12857:20241101:185826.324 End of vmware_job_get() queue:3 type:none
12855:20241101:185827.171 In vmware_job_get() queue:3
12855:20241101:185827.171 End of vmware_job_get() queue:3 type:none
12861:20241101:185827.324 In vmware_job_get() queue:3
12861:20241101:185827.324 End of vmware_job_get() queue:3 type:none
12859:20241101:185827.324 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002670 sec]'
12859:20241101:185827.324 In vmware_job_get() queue:3
12859:20241101:185827.324 End of vmware_job_get() queue:3 type:none
12857:20241101:185827.325 In vmware_job_get() queue:3
12857:20241101:185827.325 End of vmware_job_get() queue:3 type:none
12855:20241101:185828.171 In vmware_job_get() queue:3
12855:20241101:185828.171 End of vmware_job_get() queue:3 type:none
12861:20241101:185828.325 In vmware_job_get() queue:3
12861:20241101:185828.325 End of vmware_job_get() queue:3 type:none
12859:20241101:185828.325 In vmware_job_get() queue:3
12859:20241101:185828.325 End of vmware_job_get() queue:3 type:none
12857:20241101:185828.325 In vmware_job_get() queue:3
12857:20241101:185828.325 End of vmware_job_get() queue:3 type:none
12837:20241101:185828.620 received configuration data from server at "10.50.242.78", datalen 437
12855:20241101:185829.171 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000903 sec]'
12855:20241101:185829.171 In vmware_job_get() queue:3
12855:20241101:185829.171 End of vmware_job_get() queue:3 type:none
12859:20241101:185829.325 In vmware_job_get() queue:3
12859:20241101:185829.325 End of vmware_job_get() queue:3 type:none
12861:20241101:185829.325 In vmware_job_get() queue:3
12861:20241101:185829.326 End of vmware_job_get() queue:3 type:none
12857:20241101:185829.326 In vmware_job_get() queue:3
12857:20241101:185829.326 End of vmware_job_get() queue:3 type:none
12855:20241101:185830.171 In vmware_job_get() queue:3
12855:20241101:185830.172 End of vmware_job_get() queue:3 type:none
12859:20241101:185830.325 In vmware_job_get() queue:3
12859:20241101:185830.325 End of vmware_job_get() queue:3 type:none
12861:20241101:185830.326 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002052 sec]'
12861:20241101:185830.326 In vmware_job_get() queue:3
12861:20241101:185830.326 End of vmware_job_get() queue:3 type:none
12857:20241101:185830.326 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001952 sec]'
12857:20241101:185830.326 In vmware_job_get() queue:3
12857:20241101:185830.326 End of vmware_job_get() queue:3 type:none
12855:20241101:185831.172 In vmware_job_get() queue:3
12855:20241101:185831.172 End of vmware_job_get() queue:3 type:none
12859:20241101:185831.325 In vmware_job_get() queue:3
12859:20241101:185831.325 End of vmware_job_get() queue:3 type:none
12861:20241101:185831.326 In vmware_job_get() queue:3
12861:20241101:185831.326 End of vmware_job_get() queue:3 type:none
12857:20241101:185831.326 In vmware_job_get() queue:3
12857:20241101:185831.326 End of vmware_job_get() queue:3 type:none
12855:20241101:185832.172 In vmware_job_get() queue:3
12855:20241101:185832.172 End of vmware_job_get() queue:3 type:none
12859:20241101:185832.325 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000736 sec]'
12859:20241101:185832.325 In vmware_job_get() queue:3
12859:20241101:185832.325 End of vmware_job_get() queue:3 type:none
12861:20241101:185832.326 In vmware_job_get() queue:3
12861:20241101:185832.326 End of vmware_job_get() queue:3 type:none
12857:20241101:185832.326 In vmware_job_get() queue:3
12857:20241101:185832.326 End of vmware_job_get() queue:3 type:none
12855:20241101:185833.172 In vmware_job_get() queue:3
12855:20241101:185833.172 End of vmware_job_get() queue:3 type:none
12859:20241101:185833.325 In vmware_job_get() queue:3
12859:20241101:185833.325 End of vmware_job_get() queue:3 type:none
12861:20241101:185833.326 In vmware_job_get() queue:3
12861:20241101:185833.327 End of vmware_job_get() queue:3 type:none
12857:20241101:185833.327 In vmware_job_get() queue:3
12857:20241101:185833.327 End of vmware_job_get() queue:3 type:none
12855:20241101:185834.172 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000864 sec]'
12855:20241101:185834.172 In vmware_job_get() queue:3
12855:20241101:185834.172 End of vmware_job_get() queue:3 type:none
12859:20241101:185834.326 In vmware_job_get() queue:3
12859:20241101:185834.326 End of vmware_job_get() queue:3 type:none
12861:20241101:185834.327 In vmware_job_get() queue:3
12861:20241101:185834.327 End of vmware_job_get() queue:3 type:none
12857:20241101:185834.327 In vmware_job_get() queue:3
12857:20241101:185834.327 End of vmware_job_get() queue:3 type:none
12855:20241101:185835.172 In vmware_job_get() queue:3
12855:20241101:185835.172 End of vmware_job_get() queue:3 type:none
12859:20241101:185835.326 In vmware_job_get() queue:3
12859:20241101:185835.326 End of vmware_job_get() queue:3 type:none
12861:20241101:185835.327 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000657 sec]'
12861:20241101:185835.327 In vmware_job_get() queue:3
12861:20241101:185835.327 End of vmware_job_get() queue:3 type:none
12857:20241101:185835.327 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000704 sec]'
12857:20241101:185835.327 In vmware_job_get() queue:3
12857:20241101:185835.327 End of vmware_job_get() queue:3 type:none
12855:20241101:185836.172 In vmware_job_get() queue:3
12855:20241101:185836.172 End of vmware_job_get() queue:3 type:none
12859:20241101:185836.326 In vmware_job_get() queue:3
12859:20241101:185836.326 End of vmware_job_get() queue:3 type:none
12861:20241101:185836.327 In vmware_job_get() queue:3
12861:20241101:185836.327 End of vmware_job_get() queue:3 type:none
12857:20241101:185836.327 In vmware_job_get() queue:3
12857:20241101:185836.327 End of vmware_job_get() queue:3 type:none
12855:20241101:185837.173 In vmware_job_get() queue:3
12855:20241101:185837.173 End of vmware_job_get() queue:3 type:none
12859:20241101:185837.326 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000836 sec]'
12859:20241101:185837.326 In vmware_job_get() queue:3
12859:20241101:185837.326 End of vmware_job_get() queue:3 type:none
12861:20241101:185837.327 In vmware_job_get() queue:3
12861:20241101:185837.327 End of vmware_job_get() queue:3 type:none
12857:20241101:185837.327 In vmware_job_get() queue:3
12857:20241101:185837.327 End of vmware_job_get() queue:3 type:none
12855:20241101:185838.173 In vmware_job_get() queue:3
12855:20241101:185838.173 End of vmware_job_get() queue:3 type:none
12859:20241101:185838.326 In vmware_job_get() queue:3
12859:20241101:185838.326 End of vmware_job_get() queue:3 type:none
12861:20241101:185838.327 In vmware_job_get() queue:3
12861:20241101:185838.327 End of vmware_job_get() queue:3 type:none
12857:20241101:185838.327 In vmware_job_get() queue:3
12857:20241101:185838.327 End of vmware_job_get() queue:3 type:none
12837:20241101:185838.637 received configuration data from server at "10.50.242.78", datalen 437
12855:20241101:185839.173 zbx_setproctitle() title:'vmware collector #1 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000830 sec]'
12855:20241101:185839.173 In vmware_job_get() queue:3
12855:20241101:185839.173 End of vmware_job_get() queue:3 type:none
12859:20241101:185839.326 In vmware_job_get() queue:3
12859:20241101:185839.326 End of vmware_job_get() queue:3 type:none
12861:20241101:185839.328 In vmware_job_get() queue:3
12861:20241101:185839.328 End of vmware_job_get() queue:3 type:none
12857:20241101:185839.328 In vmware_job_get() queue:3
12857:20241101:185839.328 End of vmware_job_get() queue:3 type:none
12855:20241101:185840.173 In vmware_job_get() queue:3
12855:20241101:185840.173 End of vmware_job_get() queue:3 type:none
12861:20241101:185840.328 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000907 sec]'
12861:20241101:185840.328 In vmware_job_get() queue:3
12861:20241101:185840.328 End of vmware_job_get() queue:3 type:none
12857:20241101:185840.328 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.000868 sec]'
12857:20241101:185840.328 In vmware_job_get() queue:3
12857:20241101:185840.328 End of vmware_job_get() queue:3 type:none
12859:20241101:185840.328 In vmware_job_get() queue:3
12859:20241101:185840.328 End of vmware_job_get() queue:3 type:none
12855:20241101:185841.173 In vmware_job_get() queue:3
12855:20241101:185841.173 End of vmware_job_get() queue:3 type:none
12861:20241101:185841.328 In vmware_job_get() queue:3
12861:20241101:185841.328 End of vmware_job_get() queue:3 type:none
12857:20241101:185841.328 In vmware_job_get() queue:3
12857:20241101:185841.328 End of vmware_job_get() queue:3 type:none
12859:20241101:185841.328 In vmware_job_get() queue:3
12859:20241101:185841.328 End of vmware_job_get() queue:3 type:none
12855:20241101:185842.173 In vmware_job_get() queue:3
12855:20241101:185842.173 End of vmware_job_get() queue:3 type:none
12861:20241101:185842.328 In vmware_job_get() queue:3
12861:20241101:185842.328 End of vmware_job_get() queue:3 type:none
12857:20241101:185842.328 In vmware_job_get() queue:3
12857:20241101:185842.328 End of vmware_job_get() queue:3 type:none
12859:20241101:185842.328 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002088 sec]'
12859:20241101:185842.328 In vmware_job_get() queue:3
12859:20241101:185842.328 End of vmware_job_get() queue:3 type:none
12855:20241101:185843.174 In vmware_job_get() queue:3
12855:20241101:185843.174 End of vmware_job_get() queue:2 type:update_tags
12855:20241101:185843.174 In vmware_job_exec() type:update_tags
12855:20241101:185843.174 End of vmware_job_exec() type:update_tags ret:FAIL
12855:20241101:185843.174 In vmware_job_schedule() queue:2 type:update_tags
12855:20241101:185843.174 End of vmware_job_schedule() type:update_tags nextcheck:18:59:43
12855:20241101:185843.174 In vmware_job_get() queue:3
12855:20241101:185843.174 End of vmware_job_get() queue:2 type:update_conf
12855:20241101:185843.174 In vmware_job_exec() type:update_conf
12855:20241101:185843.174 In zbx_vmware_service_update() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12855:20241101:185843.174 In vmware_service_cust_query_prep() cust_queries:0
12855:20241101:185843.174 End of vmware_service_cust_query_prep() cq_values:0
12855:20241101:185843.174 In vmware_service_cust_query_prep() cust_queries:0
12855:20241101:185843.174 End of vmware_service_cust_query_prep() cq_values:0
12855:20241101:185843.174 In vmware_service_authenticate() 'zabbix@vsphere.local'@'https://10.50.242.10/sdk'
12855:20241101:185843.230 vmware_service_authenticate() SOAP response:
52fb481e-ef43-bf5d-46d0-a4bf3f602a3eVSPHERE.LOCAL\zabbix2024-11-01T18:58:43.237159Z2024-11-01T18:58:43.237159Zenenfalse10.50.242.760
12855:20241101:185843.231 End of vmware_service_authenticate():SUCCEED
12855:20241101:185843.235 vmware_service_get_contents() SOAP response:
group-d1propertyCollectorViewManagerVMware vCenter ServerVMware vCenter Server 8.0.3 build-24322831VMware, Inc.8.0.324322831INTL000linux-x64vpxVirtualCenter8.0.3.09a31b4b0-64a6-48e1-919a-e9f7ca1668b6VMware VirtualCenter Server8.0VpxSettingsUserDirectorySessionManagerAuthorizationManagerServiceMgrPerfMgrScheduledTaskManagerAlarmManagerEventManagerTaskManagerExtensionManagerCustomizationSpecManagerCustomFieldsManagerDiagMgrLicenseManagerSearchIndexFileManagerDatastoreNamespaceManagervirtualDiskManagerSnmpSystemProvCheckerCompatCheckerOvfManagerIpPoolManagerDVSManagerHostProfileManagerClusterProfileManagerMoComplianceManagerLocalizationManagerStorageResourceManagerguestOperationsManagerOverheadMemoryManagercertificateManagerIoFilterManager
12855:20241101:185843.235 In vmware_service_get_perf_counters()
12855:20241101:185843.260 vmware_service_get_perf_counters() SOAP response:
PerfMgr perfCounter list (the XML markup was lost in this capture; each entry carries, in order:
counter key, summary, counter name, group, unit, rollup type, stats type, level, per-device level).
The first entries, reconstructed:
  key  name              group  unit         rollup     stats     lvl  dev  summary
  1    usage             cpu    percent      none       rate      4    4    CPU usage as a percentage during the interval
  2    usage             cpu    percent      average    rate      1    3    CPU usage as a percentage during the interval
  3    usage             cpu    percent      minimum    rate      4    4    CPU usage as a percentage during the interval
  4    usage             cpu    percent      maximum    rate      4    4    CPU usage as a percentage during the interval
  5    usagemhz          cpu    megaHertz    none       rate      4    4    CPU usage in megahertz during the interval
  6    usagemhz          cpu    megaHertz    average    rate      1    3    CPU usage in megahertz during the interval
  7    usagemhz          cpu    megaHertz    minimum    rate      4    4    CPU usage in megahertz during the interval
  8    usagemhz          cpu    megaHertz    maximum    rate      4    4    CPU usage in megahertz during the interval
  9    reservedCapacity  cpu    megaHertz    average    absolute  2    3    Total CPU capacity reserved by virtual machines
  10   system            cpu    millisecond  summation  delta     3    3    Amount of time spent on system processes on each virtual CPU in the virtual machine
The remaining entries follow the same pattern, continuing up to counter key 540, where the capture
ends part-way through the entry's summary. They cover the cpu, mem, managementAgent, disk, net, sys,
power, storageAdapter, virtualDisk, datastore, storagePath, vmop, clusterServices, gpu, vcDebugInfo,
vcResources, rescpu, vmotion, hbr, lwd, nfs and vvol counter groups.
access to the physical CPU(s)latencyCPUcpuPercentagepercentaveragerate23541CPU resources devoted by the ESX schedulerentitlementCPUcpuMegahertzmegaHertzlatestabsolute23542The amount of CPU resources a virtual machine would use if there were no CPU contention or CPU limitdemandCPUcpuMegahertzmegaHertzaverageabsolute23543Time the virtual machine is ready to run, but is unable to run due to co-scheduling constraintscostopCPUcpuMillisecondmillisecondsummationdelta23544Time the virtual machine is ready to run, but is not run due to maxing out its CPU limit settingmaxlimitedCPUcpuMillisecondmillisecondsummationdelta23545Time the virtual machine was interrupted to perform system services on behalf of itself or other virtual machinesoverlapCPUcpuMillisecondmillisecondsummationdelta33546Time the virtual machine is scheduled to runrunCPUcpuMillisecondmillisecondsummationdelta23547CPU resource entitlement to CPU demand ratio (in percents)demandEntitlementRatioCPUcpuPercentagepercentlatestabsolute44548Percentage of time that the virtual machine was ready, but could not get scheduled to run on the physical CPUreadinessCPUcpuPercentagepercentaveragerate44549Virtual CPU usage as a percentage during the intervalusage.vcpusCPUcpuPercentagepercentaveragerate44550Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesnoneabsolute44551Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesaverageabsolute23552Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesmaximumabsolute44553Amount of guest physical memory that is swapped in from the swap space since the virtual machine has been powered on. This value is less than or equal to the 'Swap out' counterswapinMemorymemKilobytekiloBytesminimumabsolute44554Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesnoneabsolute44555Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesaverageabsolute23556Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesmaximumabsolute44557Amount of guest physical memory that is swapped out from the virtual machine to its swap space since it has been powered on.swapoutMemorymemKilobytekiloBytesminimumabsolute44558Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesnoneabsolute44559Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesaverageabsolute23560Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesmaximumabsolute44561Amount of host physical memory consumed by VMkernelsysUsageMemorymemKilobytekiloBytesminimumabsolute44562Amount of guest physical memory that is being actively written by guest. 
Activeness is estimated by ESXiactivewriteMemorymemKilobytekiloBytesaverageabsolute23563Host physical memory reserved by ESXi, for its data structures, for running the virtual machineoverheadMaxMemorymemKilobytekiloBytesaverageabsolute23564Total reservation, available and consumed, for powered-on virtual machinestotalCapacityMemorymemMegabytemegaBytesaverageabsolute23565Amount of guest physical memory pages compressed by ESXizippedMemorymemKilobytekiloByteslatestabsolute23566Host physical memory, reclaimed from a virtual machine, by memory compression. This value is less than the value of 'Compressed' memoryzipSavedMemorymemKilobytekiloByteslatestabsolute23567Percentage of time the virtual machine spent waiting to swap in or decompress guest physical memorylatencyMemorymemPercentagepercentaverageabsolute23568Amount of host physical memory the virtual machine deserves, as determined by ESXientitlementMemorymemKilobytekiloBytesaverageabsolute23569Threshold of free host physical memory below which ESXi will begin actively reclaiming memory from virtual machines by swapping, compression and ballooninglowfreethresholdMemorymemKilobytekiloBytesaverageabsolute23570Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesnoneabsolute44571Rate at which guest physical memory is swapped in from the host swap cachellSwapInRateMemorymemKilobytes per secondkiloBytesPerSecondaveragerate23572Rate at which guest physical memory is swapped out to the host swap cachellSwapOutRateMemorymemKilobytes per secondkiloBytesPerSecondaveragerate23573Estimate of the host physical memory, from Overhead consumed, that is actively read or written to by ESXioverheadTouchedMemorymemKilobytekiloBytesaverageabsolute44574Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesaverageabsolute44575Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesmaximumabsolute44576Storage space consumed on the host swap cache for storing swapped guest physical memory pagesllSwapUsedMemorymemKilobytekiloBytesminimumabsolute44577Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesnoneabsolute44578Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesaverageabsolute44579Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesmaximumabsolute44580Amount of guest physical memory swapped in from host cachellSwapInMemorymemKilobytekiloBytesminimumabsolute44581Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesnoneabsolute44582Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesaverageabsolute44583Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesmaximumabsolute44584Amount of guest physical memory swapped out to the host swap cachellSwapOutMemorymemKilobytekiloBytesminimumabsolute44585Space used for holding VMFS Pointer Blocks in memoryvmfs.pbc.sizeMemorymemMegabytemegaByteslatestabsolute44586Maximum size the VMFS Pointer Block Cache can grow tovmfs.pbc.sizeMaxMemorymemMegabytemegaByteslatestabsolute44587Amount of file blocks whose addresses are cached in the VMFS PB Cachevmfs.pbc.workingSetMemorymemTerabyteteraByteslatestabsolute44588Maximum amount of file blocks whose 
addresses are cached in the VMFS PB Cachevmfs.pbc.workingSetMaxMemorymemTerabyteteraByteslatestabsolute44589Amount of VMFS heap used by the VMFS PB Cachevmfs.pbc.overheadMemorymemKilobytekiloByteslatestabsolute44590Trailing average of the ratio of capacity misses to compulsory misses for the VMFS PB Cachevmfs.pbc.capMissRatioMemorymemPercentagepercentlatestabsolute44591Number of Storage commands issued during the collection intervalcommandsDiskdiskNumbernumbersummationdelta23592Average amount of time, in milliseconds, to read from the physical devicedeviceReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23593Average amount of time, in milliseconds, spent by VMkernel to process each Storage read commandkernelReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23594Average amount of time taken during the collection interval to process a Storage read command issued from the guest OS to the virtual machinetotalReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23595Average amount of time spent in the VMkernel queue, per Storage read command, during the collection intervalqueueReadLatencyDiskdiskMillisecondmillisecondaverageabsolute23596Average amount of time, in milliseconds, to write to the physical devicedeviceWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23597Average amount of time, in milliseconds, spent by VMkernel to process each Storage write commandkernelWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23598Average amount of time taken during the collection interval to process a Storage write command issued by the guest OS to the virtual machinetotalWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23599Average amount of time spent in the VMkernel queue, per Storage write command, during the collection intervalqueueWriteLatencyDiskdiskMillisecondmillisecondaverageabsolute23600Average amount of time, in milliseconds, to complete a Storage command from the physical devicedeviceLatencyDiskdiskMillisecondmillisecondaverageabsolute13601Average amount of time, in milliseconds, spent by VMkernel to process each Storage commandkernelLatencyDiskdiskMillisecondmillisecondaverageabsolute23602Average amount of time spent in the VMkernel queue, per Storage command, during the collection intervalqueueLatencyDiskdiskMillisecondmillisecondaverageabsolute23603Maximum queue depthmaxQueueDepthDiskdiskNumbernumberaverageabsolute13604Average number of Storage commands issued per second during the collection intervalcommandsAveragedDiskdiskNumbernumberaveragerate23605Number of receives droppeddroppedRxNetworknetNumbernumbersummationdelta23606Number of transmits droppeddroppedTxNetworknetNumbernumbersummationdelta23607Average amount of data received per secondbytesRxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate23608Average amount of data transmitted per secondbytesTxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate23609Number of broadcast packets received during the sampling intervalbroadcastRxNetworknetNumbernumbersummationdelta23610Number of broadcast packets transmitted during the sampling intervalbroadcastTxNetworknetNumbernumbersummationdelta23611Number of multicast packets received during the sampling intervalmulticastRxNetworknetNumbernumbersummationdelta23612Number of multicast packets transmitted during the sampling intervalmulticastTxNetworknetNumbernumbersummationdelta23613Number of packets with errors received during the sampling intervalerrorsRxNetworknetNumbernumbersummationdelta23614Number of packets with errors transmitted 
during the sampling intervalerrorsTxNetworknetNumbernumbersummationdelta23615Number of frames with unknown protocol received during the sampling intervalunknownProtosNetworknetNumbernumbersummationdelta23616Average amount of data received per second by a pNicpnicBytesRxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44617Average amount of data transmitted per second through a pNicpnicBytesTxNetworknetKilobytes per secondkiloBytesPerSecondaveragerate44618Number of heartbeats issued per virtual machine during the intervalheartbeatSystemsysNumbernumberlatestabsolute44619Amount of disk space usage for each mount pointdiskUsageSystemsysPercentagepercentlatestabsolute33620Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertznonerate44621Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzaveragerate33622Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzmaximumrate44623Amount of CPU used by the Service Console and other applications during the intervalresourceCpuUsageSystemsysMegahertzmegaHertzminimumrate44624Memory touched by the system resource groupresourceMemTouchedSystemsysKilobytekiloByteslatestabsolute33625Memory mapped by the system resource groupresourceMemMappedSystemsysKilobytekiloByteslatestabsolute33626Memory saved due to sharing by the system resource groupresourceMemSharedSystemsysKilobytekiloByteslatestabsolute33627Memory swapped out by the system resource groupresourceMemSwappedSystemsysKilobytekiloByteslatestabsolute33628Overhead memory consumed by the system resource groupresourceMemOverheadSystemsysKilobytekiloByteslatestabsolute33629Memory shared by the system resource groupresourceMemCowSystemsysKilobytekiloByteslatestabsolute33630Zero filled memory used by the system resource groupresourceMemZeroSystemsysKilobytekiloByteslatestabsolute33631CPU running average over 1 minute of the system resource groupresourceCpuRun1SystemsysPercentagepercentlatestabsolute33632CPU active average over 1 minute of the system resource groupresourceCpuAct1SystemsysPercentagepercentlatestabsolute33633CPU maximum limited over 1 minute of the system resource groupresourceCpuMaxLimited1SystemsysPercentagepercentlatestabsolute33634CPU running average over 5 minutes of the system resource groupresourceCpuRun5SystemsysPercentagepercentlatestabsolute33635CPU active average over 5 minutes of the system resource groupresourceCpuAct5SystemsysPercentagepercentlatestabsolute33636CPU maximum limited over 5 minutes of the system resource groupresourceCpuMaxLimited5SystemsysPercentagepercentlatestabsolute33637CPU allocation reservation (in MHz) of the system resource groupresourceCpuAllocMinSystemsysMegahertzmegaHertzlatestabsolute33638CPU allocation limit (in MHz) of the system resource groupresourceCpuAllocMaxSystemsysMegahertzmegaHertzlatestabsolute33639CPU allocation shares of the system resource groupresourceCpuAllocSharesSystemsysNumbernumberlatestabsolute33640Memory allocation reservation (in KB) of the system resource groupresourceMemAllocMinSystemsysKilobytekiloByteslatestabsolute33641Memory allocation limit (in KB) of the system resource groupresourceMemAllocMaxSystemsysKilobytekiloByteslatestabsolute33642Memory allocation shares of the system resource groupresourceMemAllocSharesSystemsysNumbernumberlatestabsolute33643Total time elapsed, in seconds, since last 
operating system boot-uposUptimeSystemsysSecondsecondlatestabsolute44644Memory consumed by the system resource groupresourceMemConsumedSystemsysKilobytekiloByteslatestabsolute44645Number of file descriptors used by the system resource groupresourceFdUsageSystemsysNumbernumberlatestabsolute44646CPU active peak over 1 minuteactpk1Resource group CPUrescpuPercentagepercentlatestabsolute33647CPU running average over 1 minuterunav1Resource group CPUrescpuPercentagepercentlatestabsolute33648CPU active average over 5 minutesactav5Resource group CPUrescpuPercentagepercentlatestabsolute33649CPU active peak over 5 minutesactpk5Resource group CPUrescpuPercentagepercentlatestabsolute33650CPU running average over 5 minutesrunav5Resource group CPUrescpuPercentagepercentlatestabsolute33651CPU active average over 15 minutesactav15Resource group CPUrescpuPercentagepercentlatestabsolute33652CPU active peak over 15 minutesactpk15Resource group CPUrescpuPercentagepercentlatestabsolute33653CPU running average over 15 minutesrunav15Resource group CPUrescpuPercentagepercentlatestabsolute33654CPU running peak over 1 minuterunpk1Resource group CPUrescpuPercentagepercentlatestabsolute33655Amount of CPU resources over the limit that were refused, average over 1 minutemaxLimited1Resource group CPUrescpuPercentagepercentlatestabsolute33656CPU running peak over 5 minutesrunpk5Resource group CPUrescpuPercentagepercentlatestabsolute33657Amount of CPU resources over the limit that were refused, average over 5 minutesmaxLimited5Resource group CPUrescpuPercentagepercentlatestabsolute33658CPU running peak over 15 minutesrunpk15Resource group CPUrescpuPercentagepercentlatestabsolute33659Amount of CPU resources over the limit that were refused, average over 15 minutesmaxLimited15Resource group CPUrescpuPercentagepercentlatestabsolute33660Group CPU sample countsampleCountResource group CPUrescpuNumbernumberlatestabsolute33661Group CPU sample periodsamplePeriodResource group CPUrescpuMillisecondmillisecondlatestabsolute33662Amount of total configured memory that is available for usememUsedManagement agentmanagementAgentKilobytekiloBytesaverageabsolute33663Sum of the memory swapped by all powered-on virtual machines on the hostswapUsedManagement agentmanagementAgentKilobytekiloBytesaverageabsolute33664Amount of Service Console CPU usagecpuUsageManagement agentmanagementAgentMegahertzmegaHertzaveragerate33665Average number of commands issued per second on the storage path during the collection intervalcommandsAveragedStorage pathstoragePathNumbernumberaveragerate33666Average number of read commands issued per second on the storage path during the collection intervalnumberReadAveragedStorage pathstoragePathNumbernumberaveragerate33667Average number of write commands issued per second on the storage path during the collection intervalnumberWriteAveragedStorage pathstoragePathNumbernumberaveragerate33668Rate of reading data on the storage pathreadStorage pathstoragePathKilobytes per secondkiloBytesPerSecondaveragerate33669Rate of writing data on the storage pathwriteStorage pathstoragePathKilobytes per secondkiloBytesPerSecondaveragerate33670The average time a read issued on the storage path takestotalReadLatencyStorage pathstoragePathMillisecondmillisecondaverageabsolute33671The average time a write issued on the storage path takestotalWriteLatencyStorage pathstoragePathMillisecondmillisecondaverageabsolute33672Average read request size in bytesreadIOSizeVirtual diskvirtualDiskNumbernumberlatestabsolute44673Average write request size 
in byteswriteIOSizeVirtual diskvirtualDiskNumbernumberlatestabsolute44674Number of seeks during the interval that were less than 64 LBNs apartsmallSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44675Number of seeks during the interval that were between 64 and 8192 LBNs apartmediumSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44676Number of seeks during the interval that were greater than 8192 LBNs apartlargeSeeksVirtual diskvirtualDiskNumbernumberlatestabsolute44677Read latency in microsecondsreadLatencyUSVirtual diskvirtualDiskMicrosecondmicrosecondlatestabsolute44678Write latency in microsecondswriteLatencyUSVirtual diskvirtualDiskMicrosecondmicrosecondlatestabsolute44679Storage I/O Control datastore maximum queue depthdatastoreMaxQueueDepthDatastoredatastoreNumbernumberlatestabsolute13680Unmapped size in MBunmapSizeDatastoredatastoreMegabytemegaBytessummationdelta44681Number of unmap IOs issuedunmapIOsDatastoredatastoreNumbernumbersummationdelta44682Current number of replicated virtual machineshbrNumVmsvSphere ReplicationhbrNumbernumberaverageabsolute44683Average amount of data received per secondhbrNetRxvSphere ReplicationhbrKilobytes per secondkiloBytesPerSecondaveragerate44684Average amount of data transmitted per secondhbrNetTxvSphere ReplicationhbrKilobytes per secondkiloBytesPerSecondaveragerate44685Average network latency seen by vSphere ReplicationhbrNetLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44686Average disk read latency seen by vSphere ReplicationhbrDiskReadLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44687Average guest I/O stall introduced by vSphere ReplicationhbrDiskStallLatencyvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44688Average amount of successful transfer time per diskhbrDiskTransferSuccessvSphere ReplicationhbrMillisecondmillisecondaverageabsolute44689Average amount of idle time per diskhbrDiskTransferIdlevSphere ReplicationhbrMillisecondmillisecondaverageabsolute44690Average amount of data in KB successfully transferred per diskhbrDiskTransferBytesvSphere ReplicationhbrKilobytekiloBytesaverageabsolute44691Number of caches controlled by the virtual flash modulenumActiveVMDKsVirtual flash module related statistical valuesvflashModuleNumbernumberlatestabsolute44692Read IOPSreadIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44693Read throughput in kBpsreadThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44694Average read latency in msreadAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44695Max read latency in msreadMaxLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondlatestabsolute44696Cache hit rate percentagereadCacheHitRatevSAN DOM object related statistical valuesvsanDomObjPercentagepercentlatestabsolute44697Read congestionreadCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44698Write IOPSwriteIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44699Write throughput in kBpswriteThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44700Average write latency in mswriteAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44701Max write latency in mswriteMaxLatencyvSAN DOM object related statistical 
valuesvsanDomObjMillisecondmillisecondlatestabsolute44702Write congestionwriteCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44703Recovery write IOPSrecoveryWriteIopsvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44704Recovery write through-put in kBpsrecoveryWriteThroughputvSAN DOM object related statistical valuesvsanDomObjKilobytes per secondkiloBytesPerSecondaveragerate44705Average recovery write latency in msrecoveryWriteAvgLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondaverageabsolute44706Max recovery write latency in msrecoveryWriteMaxLatencyvSAN DOM object related statistical valuesvsanDomObjMillisecondmillisecondlatestabsolute44707Recovery write congestionrecoveryWriteCongestionvSAN DOM object related statistical valuesvsanDomObjNumbernumberaveragerate44708The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentnoneabsolute44709The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentmaximumabsolute44710The compute utilization of a GPU in percentagesutilizationGPUgpuPercentagepercentminimumabsolute44711The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesnoneabsolute44712The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesmaximumabsolute44713The amount of GPU memory used in kilobytesmem.usedGPUgpuKilobytekiloBytesminimumabsolute44714The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentnoneabsolute44715The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentaverageabsolute44716The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentmaximumabsolute44717The amount of GPU memory used in percentages of the total availablemem.usageGPUgpuPercentagepercentminimumabsolute44718The amount of GPU memory used in gigabytesmem.used.gbGPUgpuGigabytegigaByteslatestabsolute33719The amount of GPU memory reserved in gigabytesmem.reserved.gbGPUgpuGigabytegigaByteslatestabsolute33720The total amount of GPU memory in gigabytesmem.total.gbGPUgpuGigabytegigaByteslatestabsolute33721Persistent memory available reservation on a host.available.reservationPMEMpmemMegabytemegaByteslatestabsolute44722Persistent memory reservation managed by DRS on a host.drsmanaged.reservationPMEMpmemMegabytemegaByteslatestabsolute44723Total count of virtual CPUs in VMnumVCPUsVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44724Minimum clock speed of the vCPUs during last stats intervalvcpusMhzMinVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44725Maximum clock speed of the vCPUs during last stats intervalvcpusMhzMaxVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44726Average clock speed of the vCPUs during last stats intervalvcpusMhzMeanVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44727Actual clock speed of host CPUcpuSpeedVMX Stats for VMX componentsvmxMegahertzmegaHertzlatestabsolute44728Minimum overhead heap memory usage since the VM started runningoverheadMemSizeMinVMX Stats for VMX componentsvmxMegabytemegaByteslatestabsolute44729Maximum overhead heap memory usage since the VM started runningoverheadMemSizeMaxVMX Stats for VMX componentsvmxMegabytemegaByteslatestabsolute44730vigor.opsTotalVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44731poll.itersPerSVMX Stats for VMX componentsvmxNumbernumberlatestabsolute44732userRpc.opsPerSVMX Stats 
for VMX componentsvmxNumbernumberlatestabsolute44
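The "adding performance counter" lines that follow are the collector registering each catalog entry under its group/key path: once keyed by rollup type alone, and once by rollup type plus stats type, both pointing at the same numeric counter ID. A minimal illustrative sketch of that mapping is below; the PerfCounter record and field names are assumptions for illustration, not Zabbix's actual C structures.

    # Illustrative sketch only: renders a counter definition into the
    # "adding performance counter group/key[rollup,statsType]:id" form
    # seen in the log lines that follow. Field names are assumptions.
    from dataclasses import dataclass

    @dataclass
    class PerfCounter:
        group: str       # e.g. "cpu"
        key: str         # e.g. "usage"
        rollup: str      # none | average | minimum | maximum | summation | latest
        stats: str       # absolute | rate | delta
        counter_id: int  # numeric ID assigned by vCenter

    def log_lines(c: PerfCounter):
        # Each counter appears twice in the log: by rollup alone,
        # then by rollup plus stats type, with the same counter ID.
        path = f"{c.group}/{c.key}"
        yield f"adding performance counter {path}[{c.rollup}]:{c.counter_id}"
        yield f"adding performance counter {path}[{c.rollup},{c.stats}]:{c.counter_id}"

    for line in log_lines(PerfCounter("cpu", "usage", "none", "rate", 1)):
        print(line)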
12855:20241101:185843.264 adding performance counter cpu/usage[none]:1
12855:20241101:185843.264 adding performance counter cpu/usage[none,rate]:1
12855:20241101:185843.264 adding performance counter cpu/usage[average]:2
12855:20241101:185843.264 adding performance counter cpu/usage[average,rate]:2
12855:20241101:185843.264 adding performance counter cpu/usage[minimum]:3
12855:20241101:185843.264 adding performance counter cpu/usage[minimum,rate]:3
12855:20241101:185843.264 adding performance counter cpu/usage[maximum]:4
12855:20241101:185843.264 adding performance counter cpu/usage[maximum,rate]:4
12855:20241101:185843.264 adding performance counter cpu/usagemhz[none]:5
12855:20241101:185843.264 adding performance counter cpu/usagemhz[none,rate]:5
12855:20241101:185843.264 adding performance counter cpu/usagemhz[average]:6
12855:20241101:185843.264 adding performance counter cpu/usagemhz[average,rate]:6
12855:20241101:185843.264 adding performance counter cpu/usagemhz[minimum]:7
12855:20241101:185843.264 adding performance counter cpu/usagemhz[minimum,rate]:7
12855:20241101:185843.264 adding performance counter cpu/usagemhz[maximum]:8
12855:20241101:185843.264 adding performance counter cpu/usagemhz[maximum,rate]:8
12855:20241101:185843.264 adding performance counter cpu/reservedCapacity[average]:9
12855:20241101:185843.264 adding performance counter cpu/reservedCapacity[average,absolute]:9
12855:20241101:185843.265 adding performance counter cpu/system[summation]:10
12855:20241101:185843.265 adding performance counter cpu/system[summation,delta]:10
12855:20241101:185843.265 adding performance counter cpu/wait[summation]:11
12855:20241101:185843.265 adding performance counter cpu/wait[summation,delta]:11
12855:20241101:185843.265 adding performance counter cpu/ready[summation]:12
12855:20241101:185843.265 adding performance counter cpu/ready[summation,delta]:12
12855:20241101:185843.265 adding performance counter cpu/idle[summation]:13
12855:20241101:185843.265 adding performance counter cpu/idle[summation,delta]:13
12855:20241101:185843.265 adding performance counter cpu/used[summation]:14
12855:20241101:185843.265 adding performance counter cpu/used[summation,delta]:14
12855:20241101:185843.265 adding performance counter cpu/capacity.provisioned[average]:15
12855:20241101:185843.265 adding performance counter cpu/capacity.provisioned[average,absolute]:15
12855:20241101:185843.265 adding performance counter cpu/capacity.entitlement[average]:16
12855:20241101:185843.265 adding performance counter cpu/capacity.entitlement[average,absolute]:16
12855:20241101:185843.265 adding performance counter cpu/capacity.usage[average]:17
12855:20241101:185843.265 adding performance counter cpu/capacity.usage[average,rate]:17
12855:20241101:185843.265 adding performance counter cpu/capacity.demand[average]:18
12855:20241101:185843.265 adding performance counter cpu/capacity.demand[average,absolute]:18
12855:20241101:185843.265 adding performance counter cpu/capacity.contention[average]:19
12855:20241101:185843.265 adding performance counter cpu/capacity.contention[average,rate]:19
12855:20241101:185843.265 adding performance counter cpu/corecount.provisioned[average]:20
12855:20241101:185843.265 adding performance counter cpu/corecount.provisioned[average,absolute]:20
12855:20241101:185843.265 adding performance counter cpu/corecount.usage[average]:21
12855:20241101:185843.265 adding performance counter cpu/corecount.usage[average,absolute]:21
12855:20241101:185843.265 adding performance counter cpu/corecount.contention[average]:22
12855:20241101:185843.265 adding performance counter cpu/corecount.contention[average,rate]:22
12855:20241101:185843.265 adding performance counter mem/usage[none]:23
12855:20241101:185843.265 adding performance counter mem/usage[none,absolute]:23
12855:20241101:185843.265 adding performance counter mem/usage[average]:24
12855:20241101:185843.265 adding performance counter mem/usage[average,absolute]:24
12855:20241101:185843.265 adding performance counter mem/usage[minimum]:25
12855:20241101:185843.265 adding performance counter mem/usage[minimum,absolute]:25
12855:20241101:185843.265 adding performance counter mem/usage[maximum]:26
12855:20241101:185843.265 adding performance counter mem/usage[maximum,absolute]:26
12855:20241101:185843.265 adding performance counter mem/reservedCapacity[average]:27
12855:20241101:185843.265 adding performance counter mem/reservedCapacity[average,absolute]:27
12855:20241101:185843.265 adding performance counter mem/granted[none]:28
12855:20241101:185843.265 adding performance counter mem/granted[none,absolute]:28
12855:20241101:185843.266 adding performance counter mem/granted[average]:29
12855:20241101:185843.266 adding performance counter mem/granted[average,absolute]:29
12855:20241101:185843.266 adding performance counter mem/granted[minimum]:30
12855:20241101:185843.266 adding performance counter mem/granted[minimum,absolute]:30
12855:20241101:185843.266 adding performance counter mem/granted[maximum]:31
12855:20241101:185843.266 adding performance counter mem/granted[maximum,absolute]:31
12855:20241101:185843.266 adding performance counter mem/active[none]:32
12855:20241101:185843.266 adding performance counter mem/active[none,absolute]:32
12855:20241101:185843.266 adding performance counter mem/active[average]:33
12855:20241101:185843.266 adding performance counter mem/active[average,absolute]:33
12855:20241101:185843.266 adding performance counter mem/active[minimum]:34
12855:20241101:185843.266 adding performance counter mem/active[minimum,absolute]:34
12855:20241101:185843.266 adding performance counter mem/active[maximum]:35
12855:20241101:185843.266 adding performance counter mem/active[maximum,absolute]:35
12855:20241101:185843.266 adding performance counter mem/shared[none]:36
12855:20241101:185843.266 adding performance counter mem/shared[none,absolute]:36
12855:20241101:185843.266 adding performance counter mem/shared[average]:37
12855:20241101:185843.266 adding performance counter mem/shared[average,absolute]:37
12855:20241101:185843.266 adding performance counter mem/shared[minimum]:38
12855:20241101:185843.266 adding performance counter mem/shared[minimum,absolute]:38
12855:20241101:185843.266 adding performance counter mem/shared[maximum]:39
12855:20241101:185843.266 adding performance counter mem/shared[maximum,absolute]:39
12855:20241101:185843.266 adding performance counter mem/zero[none]:40
12855:20241101:185843.266 adding performance counter mem/zero[none,absolute]:40
12855:20241101:185843.266 adding performance counter mem/zero[average]:41
12855:20241101:185843.266 adding performance counter mem/zero[average,absolute]:41
12855:20241101:185843.266 adding performance counter mem/zero[minimum]:42
12855:20241101:185843.266 adding performance counter mem/zero[minimum,absolute]:42
12855:20241101:185843.266 adding performance counter mem/zero[maximum]:43
12855:20241101:185843.266 adding performance counter mem/zero[maximum,absolute]:43
12855:20241101:185843.266 adding performance counter mem/unreserved[none]:44
12855:20241101:185843.266 adding performance counter mem/unreserved[none,absolute]:44
12855:20241101:185843.266 adding performance counter mem/unreserved[average]:45
12855:20241101:185843.266 adding performance counter mem/unreserved[average,absolute]:45
12855:20241101:185843.266 adding performance counter mem/unreserved[minimum]:46
12855:20241101:185843.266 adding performance counter mem/unreserved[minimum,absolute]:46
12855:20241101:185843.266 adding performance counter mem/unreserved[maximum]:47
12855:20241101:185843.266 adding performance counter mem/unreserved[maximum,absolute]:47
12855:20241101:185843.266 adding performance counter mem/swapused[none]:48
12855:20241101:185843.266 adding performance counter mem/swapused[none,absolute]:48
12855:20241101:185843.267 adding performance counter mem/swapused[average]:49
12855:20241101:185843.267 adding performance counter mem/swapused[average,absolute]:49
12855:20241101:185843.267 adding performance counter mem/swapused[minimum]:50
12855:20241101:185843.267 adding performance counter mem/swapused[minimum,absolute]:50
12855:20241101:185843.267 adding performance counter mem/swapused[maximum]:51
12855:20241101:185843.267 adding performance counter mem/swapused[maximum,absolute]:51
12855:20241101:185843.267 adding performance counter mem/swapunreserved[none]:52
12855:20241101:185843.267 adding performance counter mem/swapunreserved[none,absolute]:52
12855:20241101:185843.267 adding performance counter mem/swapunreserved[average]:53
12855:20241101:185843.267 adding performance counter mem/swapunreserved[average,absolute]:53
12855:20241101:185843.267 adding performance counter mem/swapunreserved[minimum]:54
12855:20241101:185843.267 adding performance counter mem/swapunreserved[minimum,absolute]:54
12855:20241101:185843.267 adding performance counter mem/swapunreserved[maximum]:55
12855:20241101:185843.267 adding performance counter mem/swapunreserved[maximum,absolute]:55
12855:20241101:185843.267 adding performance counter mem/sharedcommon[none]:56
12855:20241101:185843.267 adding performance counter mem/sharedcommon[none,absolute]:56
12855:20241101:185843.267 adding performance counter mem/sharedcommon[average]:57
12855:20241101:185843.267 adding performance counter mem/sharedcommon[average,absolute]:57
12855:20241101:185843.267 adding performance counter mem/sharedcommon[minimum]:58
12855:20241101:185843.267 adding performance counter mem/sharedcommon[minimum,absolute]:58
12855:20241101:185843.267 adding performance counter mem/sharedcommon[maximum]:59
12855:20241101:185843.267 adding performance counter mem/sharedcommon[maximum,absolute]:59
12855:20241101:185843.267 adding performance counter mem/heap[none]:60
12855:20241101:185843.267 adding performance counter mem/heap[none,absolute]:60
12855:20241101:185843.267 adding performance counter mem/heap[average]:61
12855:20241101:185843.267 adding performance counter mem/heap[average,absolute]:61
12855:20241101:185843.267 adding performance counter mem/heap[minimum]:62
12855:20241101:185843.267 adding performance counter mem/heap[minimum,absolute]:62
12855:20241101:185843.267 adding performance counter mem/heap[maximum]:63
12855:20241101:185843.267 adding performance counter mem/heap[maximum,absolute]:63
12855:20241101:185843.267 adding performance counter mem/heapfree[none]:64
12855:20241101:185843.267 adding performance counter mem/heapfree[none,absolute]:64
12855:20241101:185843.267 adding performance counter mem/heapfree[average]:65
12855:20241101:185843.267 adding performance counter mem/heapfree[average,absolute]:65
12855:20241101:185843.267 adding performance counter mem/heapfree[minimum]:66
12855:20241101:185843.267 adding performance counter mem/heapfree[minimum,absolute]:66
12855:20241101:185843.267 adding performance counter mem/heapfree[maximum]:67
12855:20241101:185843.267 adding performance counter mem/heapfree[maximum,absolute]:67
12855:20241101:185843.267 adding performance counter mem/state[latest]:68
12855:20241101:185843.267 adding performance counter mem/state[latest,absolute]:68
12855:20241101:185843.268 adding performance counter mem/swapped[none]:69
12855:20241101:185843.268 adding performance counter mem/swapped[none,absolute]:69
12855:20241101:185843.268 adding performance counter mem/swapped[average]:70
12855:20241101:185843.268 adding performance counter mem/swapped[average,absolute]:70
12855:20241101:185843.268 adding performance counter mem/swapped[minimum]:71
12855:20241101:185843.268 adding performance counter mem/swapped[minimum,absolute]:71
12855:20241101:185843.268 adding performance counter mem/swapped[maximum]:72
12855:20241101:185843.268 adding performance counter mem/swapped[maximum,absolute]:72
12855:20241101:185843.268 adding performance counter mem/swaptarget[none]:73
12855:20241101:185843.268 adding performance counter mem/swaptarget[none,absolute]:73
12855:20241101:185843.268 adding performance counter mem/swaptarget[average]:74
12855:20241101:185843.268 adding performance counter mem/swaptarget[average,absolute]:74
12855:20241101:185843.268 adding performance counter mem/swaptarget[minimum]:75
12855:20241101:185843.268 adding performance counter mem/swaptarget[minimum,absolute]:75
12855:20241101:185843.268 adding performance counter mem/swaptarget[maximum]:76
12855:20241101:185843.268 adding performance counter mem/swaptarget[maximum,absolute]:76
12855:20241101:185843.268 adding performance counter mem/swapIn[none]:77
12855:20241101:185843.268 adding performance counter mem/swapIn[none,absolute]:77
12855:20241101:185843.268 adding performance counter mem/swapIn[average]:78
12855:20241101:185843.268 adding performance counter mem/swapIn[average,absolute]:78
12855:20241101:185843.268 adding performance counter mem/swapIn[minimum]:79
12855:20241101:185843.268 adding performance counter mem/swapIn[minimum,absolute]:79
12855:20241101:185843.268 adding performance counter mem/swapIn[maximum]:80
12855:20241101:185843.268 adding performance counter mem/swapIn[maximum,absolute]:80
12855:20241101:185843.268 adding performance counter mem/swapOut[none]:81
12855:20241101:185843.268 adding performance counter mem/swapOut[none,absolute]:81
12855:20241101:185843.268 adding performance counter mem/swapOut[average]:82
12855:20241101:185843.268 adding performance counter mem/swapOut[average,absolute]:82
12855:20241101:185843.268 adding performance counter mem/swapOut[minimum]:83
12855:20241101:185843.268 adding performance counter mem/swapOut[minimum,absolute]:83
12855:20241101:185843.268 adding performance counter mem/swapOut[maximum]:84
12855:20241101:185843.268 adding performance counter mem/swapOut[maximum,absolute]:84
12855:20241101:185843.268 adding performance counter mem/swapinRate[average]:85
12855:20241101:185843.268 adding performance counter mem/swapinRate[average,rate]:85
12855:20241101:185843.268 adding performance counter mem/swapoutRate[average]:86
12855:20241101:185843.268 adding performance counter mem/swapoutRate[average,rate]:86
12855:20241101:185843.268 adding performance counter managementAgent/swapOut[average]:87
12855:20241101:185843.268 adding performance counter managementAgent/swapOut[average,rate]:87
12855:20241101:185843.268 adding performance counter managementAgent/swapIn[average]:88
12855:20241101:185843.268 adding performance counter managementAgent/swapIn[average,rate]:88
12855:20241101:185843.269 adding performance counter mem/vmmemctl[none]:89
12855:20241101:185843.269 adding performance counter mem/vmmemctl[none,absolute]:89
12855:20241101:185843.269 adding performance counter mem/vmmemctl[average]:90
12855:20241101:185843.269 adding performance counter mem/vmmemctl[average,absolute]:90
12855:20241101:185843.269 adding performance counter mem/vmmemctl[minimum]:91
12855:20241101:185843.269 adding performance counter mem/vmmemctl[minimum,absolute]:91
12855:20241101:185843.269 adding performance counter mem/vmmemctl[maximum]:92
12855:20241101:185843.269 adding performance counter mem/vmmemctl[maximum,absolute]:92
12855:20241101:185843.269 adding performance counter mem/vmmemctltarget[none]:93
12855:20241101:185843.269 adding performance counter mem/vmmemctltarget[none,absolute]:93
12855:20241101:185843.269 adding performance counter mem/vmmemctltarget[average]:94
12855:20241101:185843.269 adding performance counter mem/vmmemctltarget[average,absolute]:94
12855:20241101:185843.269 adding performance counter mem/vmmemctltarget[minimum]:95
12855:20241101:185843.269 adding performance counter mem/vmmemctltarget[minimum,absolute]:95
12855:20241101:185843.269 adding performance counter mem/vmmemctltarget[maximum]:96
12855:20241101:185843.269 adding performance counter mem/vmmemctltarget[maximum,absolute]:96
12855:20241101:185843.269 adding performance counter mem/consumed[none]:97
12855:20241101:185843.269 adding performance counter mem/consumed[none,absolute]:97
12855:20241101:185843.269 adding performance counter mem/consumed[average]:98
12855:20241101:185843.269 adding performance counter mem/consumed[average,absolute]:98
12855:20241101:185843.269 adding performance counter mem/consumed[minimum]:99
12855:20241101:185843.269 adding performance counter mem/consumed[minimum,absolute]:99
12855:20241101:185843.269 adding performance counter mem/consumed[maximum]:100
12855:20241101:185843.269 adding performance counter mem/consumed[maximum,absolute]:100
12855:20241101:185843.269 adding performance counter mem/overhead[none]:101
12855:20241101:185843.269 adding performance counter mem/overhead[none,absolute]:101
12855:20241101:185843.269 adding performance counter mem/overhead[average]:102
12855:20241101:185843.269 adding performance counter mem/overhead[average,absolute]:102
12855:20241101:185843.269 adding performance counter mem/overhead[minimum]:103
12855:20241101:185843.269 adding performance counter mem/overhead[minimum,absolute]:103
12855:20241101:185843.269 adding performance counter mem/overhead[maximum]:104
12855:20241101:185843.269 adding performance counter mem/overhead[maximum,absolute]:104
12855:20241101:185843.269 adding performance counter mem/compressed[average]:105
12855:20241101:185843.269 adding performance counter mem/compressed[average,absolute]:105
12855:20241101:185843.269 adding performance counter mem/compressionRate[average]:106
12855:20241101:185843.269 adding performance counter mem/compressionRate[average,rate]:106
12855:20241101:185843.269 adding performance counter mem/decompressionRate[average]:107
12855:20241101:185843.269 adding performance counter mem/decompressionRate[average,rate]:107
12855:20241101:185843.269 adding performance counter mem/capacity.provisioned[average]:108
12855:20241101:185843.269 adding performance counter mem/capacity.provisioned[average,absolute]:108
12855:20241101:185843.270 adding performance counter mem/capacity.entitlement[average]:109
12855:20241101:185843.270 adding performance counter mem/capacity.entitlement[average,absolute]:109
12855:20241101:185843.270 adding performance counter mem/capacity.usable[average]:110
12855:20241101:185843.270 adding performance counter mem/capacity.usable[average,absolute]:110
12855:20241101:185843.270 adding performance counter mem/capacity.usage[average]:111
12855:20241101:185843.270 adding performance counter mem/capacity.usage[average,absolute]:111
12855:20241101:185843.270 adding performance counter mem/capacity.contention[average]:112
12855:20241101:185843.270 adding performance counter mem/capacity.contention[average,rate]:112
12855:20241101:185843.270 adding performance counter mem/capacity.usage.vm[average]:113
12855:20241101:185843.270 adding performance counter mem/capacity.usage.vm[average,absolute]:113
12855:20241101:185843.270 adding performance counter mem/capacity.usage.vmOvrhd[average]:114
12855:20241101:185843.270 adding performance counter mem/capacity.usage.vmOvrhd[average,absolute]:114
12855:20241101:185843.270 adding performance counter mem/capacity.usage.vmkOvrhd[average]:115
12855:20241101:185843.270 adding performance counter mem/capacity.usage.vmkOvrhd[average,absolute]:115
12855:20241101:185843.270 adding performance counter mem/capacity.usage.userworld[average]:116
12855:20241101:185843.270 adding performance counter mem/capacity.usage.userworld[average,absolute]:116
12855:20241101:185843.270 adding performance counter mem/reservedCapacity.vm[average]:117
12855:20241101:185843.270 adding performance counter mem/reservedCapacity.vm[average,absolute]:117
12855:20241101:185843.270 adding performance counter mem/reservedCapacity.vmOvhd[average]:118
12855:20241101:185843.270 adding performance counter mem/reservedCapacity.vmOvhd[average,absolute]:118
12855:20241101:185843.270 adding performance counter mem/reservedCapacity.vmkOvrhd[average]:119
12855:20241101:185843.270 adding performance counter mem/reservedCapacity.vmkOvrhd[average,absolute]:119
12855:20241101:185843.270 adding performance counter mem/reservedCapacity.userworld[average]:120
12855:20241101:185843.270 adding performance counter mem/reservedCapacity.userworld[average,absolute]:120
12855:20241101:185843.270 adding performance counter mem/reservedCapacityPct[average]:121
12855:20241101:185843.270 adding performance counter mem/reservedCapacityPct[average,absolute]:121
12855:20241101:185843.270 adding performance counter mem/consumed.vms[average]:122
12855:20241101:185843.270 adding performance counter mem/consumed.vms[average,absolute]:122
12855:20241101:185843.270 adding performance counter mem/consumed.userworlds[average]:123
12855:20241101:185843.270 adding performance counter mem/consumed.userworlds[average,absolute]:123
12855:20241101:185843.270 adding performance counter mem/bandwidth.read[latest]:124
12855:20241101:185843.270 adding performance counter mem/bandwidth.read[latest,absolute]:124
12855:20241101:185843.270 adding performance counter mem/bandwidth.write[latest]:125
12855:20241101:185843.270 adding performance counter mem/bandwidth.write[latest,absolute]:125
12855:20241101:185843.270 adding performance counter mem/bandwidth.total[latest]:126
12855:20241101:185843.270 adding performance counter mem/bandwidth.total[latest,absolute]:126
12855:20241101:185843.270 adding performance counter mem/vm.bandwidth.read[latest]:127
12855:20241101:185843.270 adding performance counter mem/vm.bandwidth.read[latest,absolute]:127
12855:20241101:185843.270 adding performance counter mem/missrate[latest]:128
12855:20241101:185843.270 adding performance counter mem/missrate[latest,absolute]:128
12855:20241101:185843.271 adding performance counter mem/latency.read[latest]:129
12855:20241101:185843.271 adding performance counter mem/latency.read[latest,absolute]:129
12855:20241101:185843.271 adding performance counter mem/latency.write[latest]:130
12855:20241101:185843.271 adding performance counter mem/latency.write[latest,absolute]:130
12855:20241101:185843.271 adding performance counter disk/usage[none]:131
12855:20241101:185843.271 adding performance counter disk/usage[none,rate]:131
12855:20241101:185843.271 adding performance counter disk/usage[average]:132
12855:20241101:185843.271 adding performance counter disk/usage[average,rate]:132
12855:20241101:185843.271 adding performance counter disk/usage[minimum]:133
12855:20241101:185843.271 adding performance counter disk/usage[minimum,rate]:133
12855:20241101:185843.271 adding performance counter disk/usage[maximum]:134
12855:20241101:185843.271 adding performance counter disk/usage[maximum,rate]:134
12855:20241101:185843.271 adding performance counter disk/numberRead[summation]:135
12855:20241101:185843.271 adding performance counter disk/numberRead[summation,delta]:135
12855:20241101:185843.271 adding performance counter disk/numberWrite[summation]:136
12855:20241101:185843.271 adding performance counter disk/numberWrite[summation,delta]:136
12855:20241101:185843.271 adding performance counter disk/read[average]:137
12855:20241101:185843.271 adding performance counter disk/read[average,rate]:137
12855:20241101:185843.271 adding performance counter disk/write[average]:138
12855:20241101:185843.271 adding performance counter disk/write[average,rate]:138
12855:20241101:185843.271 adding performance counter disk/totalLatency[average]:139
12855:20241101:185843.271 adding performance counter disk/totalLatency[average,absolute]:139
12855:20241101:185843.271 adding performance counter disk/maxTotalLatency[latest]:140
12855:20241101:185843.271 adding performance counter disk/maxTotalLatency[latest,absolute]:140
12855:20241101:185843.271 adding performance counter disk/commandsAborted[summation]:141
12855:20241101:185843.271 adding performance counter disk/commandsAborted[summation,delta]:141
12855:20241101:185843.271 adding performance counter disk/busResets[summation]:142
12855:20241101:185843.271 adding performance counter disk/busResets[summation,delta]:142
12855:20241101:185843.271 adding performance counter disk/numberReadAveraged[average]:143
12855:20241101:185843.271 adding performance counter disk/numberReadAveraged[average,rate]:143
12855:20241101:185843.271 adding performance counter disk/numberWriteAveraged[average]:144
12855:20241101:185843.271 adding performance counter disk/numberWriteAveraged[average,rate]:144
12855:20241101:185843.271 adding performance counter disk/throughput.usage[average]:145
12855:20241101:185843.271 adding performance counter disk/throughput.usage[average,rate]:145
12855:20241101:185843.271 adding performance counter disk/throughput.contention[average]:146
12855:20241101:185843.271 adding performance counter disk/throughput.contention[average,absolute]:146
12855:20241101:185843.271 adding performance counter disk/scsiReservationConflicts[summation]:147
12855:20241101:185843.271 adding performance counter disk/scsiReservationConflicts[summation,delta]:147
12855:20241101:185843.271 adding performance counter disk/scsiReservationCnflctsPct[average]:148
12855:20241101:185843.271 adding performance counter disk/scsiReservationCnflctsPct[average,absolute]:148
12855:20241101:185843.272 adding performance counter net/usage[none]:149
12855:20241101:185843.272 adding performance counter net/usage[none,rate]:149
12855:20241101:185843.272 adding performance counter net/usage[average]:150
12855:20241101:185843.272 adding performance counter net/usage[average,rate]:150
12855:20241101:185843.272 adding performance counter net/usage[minimum]:151
12855:20241101:185843.272 adding performance counter net/usage[minimum,rate]:151
12855:20241101:185843.272 adding performance counter net/usage[maximum]:152
12855:20241101:185843.272 adding performance counter net/usage[maximum,rate]:152
12855:20241101:185843.272 adding performance counter net/packetsRx[summation]:153
12855:20241101:185843.272 adding performance counter net/packetsRx[summation,delta]:153
12855:20241101:185843.272 adding performance counter net/packetsTx[summation]:154
12855:20241101:185843.272 adding performance counter net/packetsTx[summation,delta]:154
12855:20241101:185843.272 adding performance counter net/received[average]:155
12855:20241101:185843.272 adding performance counter net/received[average,rate]:155
12855:20241101:185843.272 adding performance counter net/transmitted[average]:156
12855:20241101:185843.272 adding performance counter net/transmitted[average,rate]:156
12855:20241101:185843.272 adding performance counter net/throughput.provisioned[average]:157
12855:20241101:185843.272 adding performance counter net/throughput.provisioned[average,absolute]:157
12855:20241101:185843.272 adding performance counter net/throughput.usable[average]:158
12855:20241101:185843.272 adding performance counter net/throughput.usable[average,absolute]:158
12855:20241101:185843.272 adding performance counter net/throughput.usage[average]:159
12855:20241101:185843.272 adding performance counter net/throughput.usage[average,rate]:159
12855:20241101:185843.272 adding performance counter net/throughput.contention[summation]:160
12855:20241101:185843.272 adding performance counter net/throughput.contention[summation,delta]:160
12855:20241101:185843.272 adding performance counter net/throughput.packetsPerSec[average]:161
12855:20241101:185843.272 adding performance counter net/throughput.packetsPerSec[average,rate]:161
12855:20241101:185843.272 adding performance counter sys/uptime[latest]:162
12855:20241101:185843.272 adding performance counter sys/uptime[latest,absolute]:162
12855:20241101:185843.272 adding performance counter sys/heartbeat[summation]:163
12855:20241101:185843.272 adding performance counter sys/heartbeat[summation,delta]:163
12855:20241101:185843.272 adding performance counter power/power[average]:164
12855:20241101:185843.272 adding performance counter power/power[average,rate]:164
12855:20241101:185843.272 adding performance counter power/powerCap[average]:165
12855:20241101:185843.272 adding performance counter power/powerCap[average,absolute]:165
12855:20241101:185843.272 adding performance counter power/energy[summation]:166
12855:20241101:185843.272 adding performance counter power/energy[summation,delta]:166
12855:20241101:185843.272 adding performance counter power/capacity.usagePct[average]:167
12855:20241101:185843.272 adding performance counter power/capacity.usagePct[average,absolute]:167
12855:20241101:185843.272 adding performance counter storageAdapter/commandsAveraged[average]:168
12855:20241101:185843.272 adding performance counter storageAdapter/commandsAveraged[average,rate]:168
12855:20241101:185843.272 adding performance counter storageAdapter/numberReadAveraged[average]:169
12855:20241101:185843.272 adding performance counter storageAdapter/numberReadAveraged[average,rate]:169
12855:20241101:185843.273 adding performance counter storageAdapter/numberWriteAveraged[average]:170
12855:20241101:185843.273 adding performance counter storageAdapter/numberWriteAveraged[average,rate]:170
12855:20241101:185843.273 adding performance counter storageAdapter/read[average]:171
12855:20241101:185843.273 adding performance counter storageAdapter/read[average,rate]:171
12855:20241101:185843.273 adding performance counter storageAdapter/write[average]:172
12855:20241101:185843.273 adding performance counter storageAdapter/write[average,rate]:172
12855:20241101:185843.273 adding performance counter storageAdapter/totalReadLatency[average]:173
12855:20241101:185843.273 adding performance counter storageAdapter/totalReadLatency[average,absolute]:173
12855:20241101:185843.273 adding performance counter storageAdapter/totalWriteLatency[average]:174
12855:20241101:185843.273 adding performance counter storageAdapter/totalWriteLatency[average,absolute]:174
12855:20241101:185843.273 adding performance counter storageAdapter/maxTotalLatency[latest]:175
12855:20241101:185843.273 adding performance counter storageAdapter/maxTotalLatency[latest,absolute]:175
12855:20241101:185843.273 adding performance counter storageAdapter/throughput.cont[average]:176
12855:20241101:185843.273 adding performance counter storageAdapter/throughput.cont[average,absolute]:176
12855:20241101:185843.273 adding performance counter storageAdapter/OIOsPct[average]:177
12855:20241101:185843.273 adding performance counter storageAdapter/OIOsPct[average,absolute]:177
12855:20241101:185843.273 adding performance counter virtualDisk/numberReadAveraged[average]:178
12855:20241101:185843.273 adding performance counter virtualDisk/numberReadAveraged[average,rate]:178
12855:20241101:185843.273 adding performance counter virtualDisk/numberWriteAveraged[average]:179
12855:20241101:185843.273 adding performance counter virtualDisk/numberWriteAveraged[average,rate]:179
12855:20241101:185843.273 adding performance counter virtualDisk/read[average]:180
12855:20241101:185843.273 adding performance counter virtualDisk/read[average,rate]:180
12855:20241101:185843.273 adding performance counter virtualDisk/write[average]:181
12855:20241101:185843.273 adding performance counter virtualDisk/write[average,rate]:181
12855:20241101:185843.273 adding performance counter virtualDisk/totalReadLatency[average]:182
12855:20241101:185843.273 adding performance counter virtualDisk/totalReadLatency[average,absolute]:182
12855:20241101:185843.273 adding performance counter virtualDisk/totalWriteLatency[average]:183
12855:20241101:185843.273 adding performance counter virtualDisk/totalWriteLatency[average,absolute]:183
12855:20241101:185843.273 adding performance counter virtualDisk/throughput.cont[average]:184
12855:20241101:185843.273 adding performance counter virtualDisk/throughput.cont[average,absolute]:184
12855:20241101:185843.273 adding performance counter datastore/numberReadAveraged[average]:185
12855:20241101:185843.273 adding performance counter datastore/numberReadAveraged[average,rate]:185
12855:20241101:185843.273 adding performance counter datastore/numberWriteAveraged[average]:186
12855:20241101:185843.273 adding performance counter datastore/numberWriteAveraged[average,rate]:186
12855:20241101:185843.273 adding performance counter datastore/read[average]:187
12855:20241101:185843.273 adding performance counter datastore/read[average,rate]:187
12855:20241101:185843.273 adding performance counter datastore/write[average]:188
12855:20241101:185843.273 adding performance counter datastore/write[average,rate]:188
12855:20241101:185843.274 adding performance counter datastore/totalReadLatency[average]:189
12855:20241101:185843.274 adding performance counter datastore/totalReadLatency[average,absolute]:189
12855:20241101:185843.274 adding performance counter datastore/totalWriteLatency[average]:190
12855:20241101:185843.274 adding performance counter datastore/totalWriteLatency[average,absolute]:190
12855:20241101:185843.274 adding performance counter datastore/maxTotalLatency[latest]:191
12855:20241101:185843.274 adding performance counter datastore/maxTotalLatency[latest,absolute]:191
12855:20241101:185843.274 adding performance counter datastore/datastoreIops[average]:192
12855:20241101:185843.274 adding performance counter datastore/datastoreIops[average,absolute]:192
12855:20241101:185843.274 adding performance counter datastore/sizeNormalizedDatastoreLatency[average]:193
12855:20241101:185843.274 adding performance counter datastore/sizeNormalizedDatastoreLatency[average,absolute]:193
12855:20241101:185843.274 adding performance counter datastore/throughput.usage[average]:194
12855:20241101:185843.274 adding performance counter datastore/throughput.usage[average,absolute]:194
12855:20241101:185843.274 adding performance counter datastore/throughput.contention[average]:195
12855:20241101:185843.274 adding performance counter datastore/throughput.contention[average,absolute]:195
12855:20241101:185843.274 adding performance counter datastore/busResets[summation]:196
12855:20241101:185843.274 adding performance counter datastore/busResets[summation,delta]:196
12855:20241101:185843.274 adding performance counter datastore/commandsAborted[summation]:197
12855:20241101:185843.274 adding performance counter datastore/commandsAborted[summation,delta]:197
12855:20241101:185843.274 adding performance counter datastore/siocActiveTimePercentage[average]:198
12855:20241101:185843.274 adding performance counter datastore/siocActiveTimePercentage[average,absolute]:198
12855:20241101:185843.274 adding performance counter storagePath/throughput.cont[average]:199
12855:20241101:185843.274 adding performance counter storagePath/throughput.cont[average,absolute]:199
12855:20241101:185843.274 adding performance counter storagePath/maxTotalLatency[latest]:200
12855:20241101:185843.274 adding performance counter storagePath/maxTotalLatency[latest,absolute]:200
12855:20241101:185843.274 adding performance counter virtualDisk/throughput.usage[average]:201
12855:20241101:185843.274 adding performance counter virtualDisk/throughput.usage[average,rate]:201
12855:20241101:185843.274 adding performance counter virtualDisk/commandsAborted[summation]:202
12855:20241101:185843.274 adding performance counter virtualDisk/commandsAborted[summation,delta]:202
12855:20241101:185843.274 adding performance counter virtualDisk/busResets[summation]:203
12855:20241101:185843.274 adding performance counter virtualDisk/busResets[summation,delta]:203
12855:20241101:185843.274 adding performance counter storageAdapter/outstandingIOs[average]:204
12855:20241101:185843.274 adding performance counter storageAdapter/outstandingIOs[average,absolute]:204
12855:20241101:185843.274 adding performance counter storageAdapter/queued[average]:205
12855:20241101:185843.274 adding performance counter storageAdapter/queued[average,absolute]:205
12855:20241101:185843.274 adding performance counter storageAdapter/queueDepth[average]:206
12855:20241101:185843.274 adding performance counter storageAdapter/queueDepth[average,absolute]:206
12855:20241101:185843.274 adding performance counter storageAdapter/queueLatency[average]:207
12855:20241101:185843.274 adding performance counter storageAdapter/queueLatency[average,absolute]:207
12855:20241101:185843.274 adding performance counter storageAdapter/throughput.usag[average]:208
12855:20241101:185843.274 adding performance counter storageAdapter/throughput.usag[average,rate]:208
12855:20241101:185843.274 adding performance counter storagePath/busResets[summation]:209
12855:20241101:185843.274 adding performance counter storagePath/busResets[summation,delta]:209
12855:20241101:185843.275 adding performance counter storagePath/commandsAborted[summation]:210
12855:20241101:185843.275 adding performance counter storagePath/commandsAborted[summation,delta]:210
12855:20241101:185843.275 adding performance counter storagePath/throughput.usage[average]:211
12855:20241101:185843.275 adding performance counter storagePath/throughput.usage[average,rate]:211
12855:20241101:185843.275 adding performance counter net/throughput.usage.vm[average]:212
12855:20241101:185843.275 adding performance counter net/throughput.usage.vm[average,rate]:212
12855:20241101:185843.275 adding performance counter net/throughput.usage.nfs[average]:213
12855:20241101:185843.275 adding performance counter net/throughput.usage.nfs[average,rate]:213
12855:20241101:185843.275 adding performance counter net/throughput.usage.vmotion[average]:214
12855:20241101:185843.275 adding performance counter net/throughput.usage.vmotion[average,rate]:214
12855:20241101:185843.275 adding performance counter net/throughput.usage.ft[average]:215
12855:20241101:185843.275 adding performance counter net/throughput.usage.ft[average,rate]:215
12855:20241101:185843.275 adding performance counter net/throughput.usage.iscsi[average]:216
12855:20241101:185843.275 adding performance counter net/throughput.usage.iscsi[average,rate]:216
12855:20241101:185843.275 adding performance counter net/throughput.usage.hbr[average]:217
12855:20241101:185843.275 adding performance counter net/throughput.usage.hbr[average,rate]:217
12855:20241101:185843.275 adding performance counter power/capacity.usable[average]:218
12855:20241101:185843.275 adding performance counter power/capacity.usable[average,absolute]:218
12855:20241101:185843.275 adding performance counter power/capacity.usage[average]:219
12855:20241101:185843.275 adding performance counter power/capacity.usage[average,absolute]:219
12855:20241101:185843.275 adding performance counter power/capacity.usageIdle[average]:220
12855:20241101:185843.275 adding performance counter power/capacity.usageIdle[average,absolute]:220
12855:20241101:185843.275 adding performance counter power/capacity.usageSystem[average]:221
12855:20241101:185843.275 adding performance counter power/capacity.usageSystem[average,absolute]:221
12855:20241101:185843.275 adding performance counter power/capacity.usageVm[average]:222
12855:20241101:185843.275 adding performance counter power/capacity.usageVm[average,absolute]:222
12855:20241101:185843.275 adding performance counter power/capacity.usageStatic[average]:223
12855:20241101:185843.275 adding performance counter power/capacity.usageStatic[average,absolute]:223
12855:20241101:185843.275 adding performance counter cpu/cpuentitlement[latest]:224
12855:20241101:185843.275 adding performance counter cpu/cpuentitlement[latest,absolute]:224
12855:20241101:185843.275 adding performance counter mem/mementitlement[latest]:225
12855:20241101:185843.275 adding performance counter mem/mementitlement[latest,absolute]:225
12855:20241101:185843.275 adding performance counter clusterServices/vmDrsScore[latest]:226
12855:20241101:185843.275 adding performance counter clusterServices/vmDrsScore[latest,absolute]:226
12855:20241101:185843.275 adding performance counter clusterServices/cpufairness[latest]:227
12855:20241101:185843.275 adding performance counter clusterServices/cpufairness[latest,absolute]:227
12855:20241101:185843.275 adding performance counter clusterServices/memfairness[latest]:228
12855:20241101:185843.275 adding performance counter clusterServices/memfairness[latest,absolute]:228
12855:20241101:185843.275 adding performance counter net/throughput.pktsTx[average]:229
12855:20241101:185843.275 adding performance counter net/throughput.pktsTx[average,absolute]:229
12855:20241101:185843.276 adding performance counter net/throughput.pktsTxMulticast[average]:230
12855:20241101:185843.276 adding performance counter net/throughput.pktsTxMulticast[average,absolute]:230
12855:20241101:185843.276 adding performance counter net/throughput.pktsTxBroadcast[average]:231
12855:20241101:185843.276 adding performance counter net/throughput.pktsTxBroadcast[average,absolute]:231
12855:20241101:185843.276 adding performance counter net/throughput.pktsRx[average]:232
12855:20241101:185843.276 adding performance counter net/throughput.pktsRx[average,absolute]:232
12855:20241101:185843.276 adding performance counter net/throughput.pktsRxMulticast[average]:233
12855:20241101:185843.276 adding performance counter net/throughput.pktsRxMulticast[average,absolute]:233
12855:20241101:185843.276 adding performance counter net/throughput.pktsRxBroadcast[average]:234
12855:20241101:185843.276 adding performance counter net/throughput.pktsRxBroadcast[average,absolute]:234
12855:20241101:185843.276 adding performance counter net/throughput.droppedTx[average]:235
12855:20241101:185843.276 adding performance counter net/throughput.droppedTx[average,absolute]:235
12855:20241101:185843.276 adding performance counter net/throughput.droppedRx[average]:236
12855:20241101:185843.276 adding performance counter net/throughput.droppedRx[average,absolute]:236
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsTx[average]:237
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsTx[average,absolute]:237
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsTxMcast[average]:238
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsTxMcast[average,absolute]:238
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsTxBcast[average]:239
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsTxBcast[average,absolute]:239
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsRx[average]:240
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsRx[average,absolute]:240
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsRxMcast[average]:241
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsRxMcast[average,absolute]:241
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsRxBcast[average]:242
12855:20241101:185843.276 adding performance counter net/throughput.vds.pktsRxBcast[average,absolute]:242
12855:20241101:185843.276 adding performance counter net/throughput.vds.droppedTx[average]:243
12855:20241101:185843.276 adding performance counter net/throughput.vds.droppedTx[average,absolute]:243
12855:20241101:185843.276 adding performance counter net/throughput.vds.droppedRx[average]:244
12855:20241101:185843.276 adding performance counter net/throughput.vds.droppedRx[average,absolute]:244
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagTx[average]:245
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagTx[average,absolute]:245
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagTxMcast[average]:246
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagTxMcast[average,absolute]:246
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagTxBcast[average]:247
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagTxBcast[average,absolute]:247
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagRx[average]:248
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagRx[average,absolute]:248
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagRxMcast[average]:249
12855:20241101:185843.276 adding performance counter net/throughput.vds.lagRxMcast[average,absolute]:249
12855:20241101:185843.277 adding performance counter net/throughput.vds.lagRxBcast[average]:250
12855:20241101:185843.277 adding performance counter net/throughput.vds.lagRxBcast[average,absolute]:250
12855:20241101:185843.277 adding performance counter net/throughput.vds.lagDropTx[average]:251
12855:20241101:185843.277 adding performance counter net/throughput.vds.lagDropTx[average,absolute]:251
12855:20241101:185843.277 adding performance counter net/throughput.vds.lagDropRx[average]:252
12855:20241101:185843.277 adding performance counter net/throughput.vds.lagDropRx[average,absolute]:252
12855:20241101:185843.277 adding performance counter vmop/numPoweron[latest]:253
12855:20241101:185843.277 adding performance counter vmop/numPoweron[latest,absolute]:253
12855:20241101:185843.277 adding performance counter vmop/numPoweroff[latest]:254
12855:20241101:185843.277 adding performance counter vmop/numPoweroff[latest,absolute]:254
12855:20241101:185843.277 adding performance counter vmop/numSuspend[latest]:255
12855:20241101:185843.277 adding performance counter vmop/numSuspend[latest,absolute]:255
12855:20241101:185843.277 adding performance counter vmop/numReset[latest]:256
12855:20241101:185843.277 adding performance counter vmop/numReset[latest,absolute]:256
12855:20241101:185843.277 adding performance counter vmop/numRebootGuest[latest]:257
12855:20241101:185843.277 adding performance counter vmop/numRebootGuest[latest,absolute]:257
12855:20241101:185843.277 adding performance counter vmop/numStandbyGuest[latest]:258
12855:20241101:185843.277 adding performance counter vmop/numStandbyGuest[latest,absolute]:258
12855:20241101:185843.277 adding performance counter vmop/numShutdownGuest[latest]:259
12855:20241101:185843.277 adding performance counter vmop/numShutdownGuest[latest,absolute]:259
12855:20241101:185843.277 adding performance counter vmop/numCreate[latest]:260
12855:20241101:185843.277 adding performance counter vmop/numCreate[latest,absolute]:260
12855:20241101:185843.277 adding performance counter vmop/numDestroy[latest]:261
12855:20241101:185843.277 adding performance counter vmop/numDestroy[latest,absolute]:261
12855:20241101:185843.277 adding performance counter vmop/numRegister[latest]:262
12855:20241101:185843.277 adding performance counter vmop/numRegister[latest,absolute]:262
12855:20241101:185843.277 adding performance counter vmop/numUnregister[latest]:263
12855:20241101:185843.277 adding performance counter vmop/numUnregister[latest,absolute]:263
12855:20241101:185843.277 adding performance counter vmop/numReconfigure[latest]:264
12855:20241101:185843.277 adding performance counter vmop/numReconfigure[latest,absolute]:264
12855:20241101:185843.277 adding performance counter vmop/numClone[latest]:265
12855:20241101:185843.277 adding performance counter vmop/numClone[latest,absolute]:265
12855:20241101:185843.277 adding performance counter vmop/numDeploy[latest]:266
12855:20241101:185843.277 adding performance counter vmop/numDeploy[latest,absolute]:266
12855:20241101:185843.277 adding performance counter vmop/numChangeHost[latest]:267
12855:20241101:185843.277 adding performance counter vmop/numChangeHost[latest,absolute]:267
12855:20241101:185843.277 adding performance counter vmop/numChangeDS[latest]:268
12855:20241101:185843.277 adding performance counter vmop/numChangeDS[latest,absolute]:268
12855:20241101:185843.277 adding performance counter vmop/numChangeHostDS[latest]:269
12855:20241101:185843.277 adding performance counter vmop/numChangeHostDS[latest,absolute]:269
12855:20241101:185843.278 adding performance counter vmop/numVMotion[latest]:270
12855:20241101:185843.278 adding performance counter vmop/numVMotion[latest,absolute]:270
12855:20241101:185843.278 adding performance counter vmop/numSVMotion[latest]:271
12855:20241101:185843.278 adding performance counter vmop/numSVMotion[latest,absolute]:271
12855:20241101:185843.278 adding performance counter vmop/numXVMotion[latest]:272
12855:20241101:185843.278 adding performance counter vmop/numXVMotion[latest,absolute]:272
12855:20241101:185843.278 adding performance counter clusterServices/effectivecpu[average]:273
12855:20241101:185843.278 adding performance counter clusterServices/effectivecpu[average,rate]:273
12855:20241101:185843.278 adding performance counter clusterServices/effectivemem[average]:274
12855:20241101:185843.278 adding performance counter clusterServices/effectivemem[average,absolute]:274
12855:20241101:185843.278 adding performance counter cpu/totalmhz[average]:275
12855:20241101:185843.278 adding performance counter cpu/totalmhz[average,rate]:275
12855:20241101:185843.278 adding performance counter mem/totalmb[average]:276
12855:20241101:185843.278 adding performance counter mem/totalmb[average,absolute]:276
12855:20241101:185843.278 adding performance counter clusterServices/clusterDrsScore[latest]:277
12855:20241101:185843.278 adding performance counter clusterServices/clusterDrsScore[latest,absolute]:277
12855:20241101:185843.278 adding performance counter clusterServices/failover[latest]:278
12855:20241101:185843.278 adding performance counter clusterServices/failover[latest,absolute]:278
12855:20241101:185843.278 adding performance counter gpu/utilization[average]:279
12855:20241101:185843.278 adding performance counter gpu/utilization[average,absolute]:279
12855:20241101:185843.278 adding performance counter gpu/mem.used[average]:280
12855:20241101:185843.278 adding performance counter gpu/mem.used[average,absolute]:280
12855:20241101:185843.278 adding performance counter gpu/mem.reserved[latest]:281
12855:20241101:185843.278 adding performance counter gpu/mem.reserved[latest,absolute]:281
12855:20241101:185843.278 adding performance counter gpu/power.used[latest]:282
12855:20241101:185843.278 adding performance counter gpu/power.used[latest,absolute]:282
12855:20241101:185843.278 adding performance counter gpu/temperature[average]:283
12855:20241101:185843.278 adding performance counter gpu/temperature[average,absolute]:283
12855:20241101:185843.278 adding performance counter gpu/mem.total[latest]:284
12855:20241101:185843.278 adding performance counter gpu/mem.total[latest,absolute]:284
12855:20241101:185843.278 adding performance counter disk/used[latest]:285
12855:20241101:185843.278 adding performance counter disk/used[latest,absolute]:285
12855:20241101:185843.278 adding performance counter disk/provisioned[latest]:286
12855:20241101:185843.278 adding performance counter disk/provisioned[latest,absolute]:286
12855:20241101:185843.278 adding performance counter disk/capacity[latest]:287
12855:20241101:185843.278 adding performance counter disk/capacity[latest,absolute]:287
12855:20241101:185843.278 adding performance counter disk/unshared[latest]:288
12855:20241101:185843.278 adding performance counter disk/unshared[latest,absolute]:288
12855:20241101:185843.278 adding performance counter disk/actualused[latest]:289
12855:20241101:185843.278 adding performance counter disk/actualused[latest,absolute]:289
12855:20241101:185843.279 adding performance counter disk/deltaused[latest]:290
12855:20241101:185843.279 adding performance counter disk/deltaused[latest,absolute]:290
12855:20241101:185843.279 adding performance counter disk/capacity.provisioned[average]:291
12855:20241101:185843.279 adding performance counter disk/capacity.provisioned[average,absolute]:291
12855:20241101:185843.279 adding performance counter disk/capacity.usage[average]:292
12855:20241101:185843.279 adding performance counter disk/capacity.usage[average,absolute]:292
12855:20241101:185843.279 adding performance counter disk/capacity.contention[average]:293
12855:20241101:185843.279 adding performance counter disk/capacity.contention[average,absolute]:293
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationlatencystats[maximum]:294
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationlatencystats[maximum,absolute]:294
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationlatencystats[minimum]:295
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationlatencystats[minimum,absolute]:295
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationlatencystats[summation]:296
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationlatencystats[summation,absolute]:296
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationstats[maximum]:297
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationstats[maximum,absolute]:297
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationstats[minimum]:298
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationstats[minimum,absolute]:298
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationstats[summation]:299
12855:20241101:185843.279 adding performance counter vcDebugInfo/activationstats[summation,absolute]:299
12855:20241101:185843.279 adding performance counter vcResources/buffersz[average]:300
12855:20241101:185843.279 adding performance counter vcResources/buffersz[average,absolute]:300
12855:20241101:185843.279 adding performance counter vcResources/cachesz[average]:301
12855:20241101:185843.279 adding performance counter vcResources/cachesz[average,absolute]:301
12855:20241101:185843.279 adding performance counter vcResources/ctxswitchesrate[average]:302
12855:20241101:185843.279 adding performance counter vcResources/ctxswitchesrate[average,rate]:302
12855:20241101:185843.279 adding performance counter vcResources/diskreadsectorrate[average]:303
12855:20241101:185843.279 adding performance counter vcResources/diskreadsectorrate[average,rate]:303
12855:20241101:185843.279 adding performance counter vcResources/diskreadsrate[average]:304
12855:20241101:185843.279 adding performance counter vcResources/diskreadsrate[average,rate]:304
12855:20241101:185843.279 adding performance counter vcResources/diskwritesectorrate[average]:305
12855:20241101:185843.279 adding performance counter vcResources/diskwritesectorrate[average,rate]:305
12855:20241101:185843.279 adding performance counter vcResources/diskwritesrate[average]:306
12855:20241101:185843.279 adding performance counter vcResources/diskwritesrate[average,rate]:306
12855:20241101:185843.279 adding performance counter vcDebugInfo/hostsynclatencystats[maximum]:307
12855:20241101:185843.279 adding performance counter vcDebugInfo/hostsynclatencystats[maximum,absolute]:307
12855:20241101:185843.279 adding performance counter vcDebugInfo/hostsynclatencystats[minimum]:308
12855:20241101:185843.279 adding performance counter vcDebugInfo/hostsynclatencystats[minimum,absolute]:308
12855:20241101:185843.279 adding performance counter vcDebugInfo/hostsynclatencystats[summation]:309
12855:20241101:185843.279 adding performance counter vcDebugInfo/hostsynclatencystats[summation,absolute]:309
12855:20241101:185843.280 adding performance counter vcDebugInfo/hostsyncstats[maximum]:310
12855:20241101:185843.280 adding performance counter vcDebugInfo/hostsyncstats[maximum,absolute]:310
12855:20241101:185843.280 adding performance counter vcDebugInfo/hostsyncstats[minimum]:311
12855:20241101:185843.280 adding performance counter vcDebugInfo/hostsyncstats[minimum,absolute]:311
12855:20241101:185843.280 adding performance counter vcDebugInfo/hostsyncstats[summation]:312
12855:20241101:185843.280 adding performance counter vcDebugInfo/hostsyncstats[summation,absolute]:312
12855:20241101:185843.280 adding performance counter vcDebugInfo/inventorystats[maximum]:313
12855:20241101:185843.280 adding performance counter vcDebugInfo/inventorystats[maximum,absolute]:313
12855:20241101:185843.280 adding performance counter vcDebugInfo/inventorystats[minimum]:314
12855:20241101:185843.280 adding performance counter vcDebugInfo/inventorystats[minimum,absolute]:314
12855:20241101:185843.280 adding performance counter vcDebugInfo/inventorystats[summation]:315
12855:20241101:185843.280 adding performance counter vcDebugInfo/inventorystats[summation,absolute]:315
12855:20241101:185843.280 adding performance counter vcDebugInfo/lockstats[maximum]:316
12855:20241101:185843.280 adding performance counter vcDebugInfo/lockstats[maximum,absolute]:316
12855:20241101:185843.280 adding performance counter vcDebugInfo/lockstats[minimum]:317
12855:20241101:185843.280 adding performance counter vcDebugInfo/lockstats[minimum,absolute]:317
12855:20241101:185843.280 adding performance counter vcDebugInfo/lockstats[summation]:318
12855:20241101:185843.280 adding performance counter vcDebugInfo/lockstats[summation,absolute]:318
12855:20241101:185843.280 adding performance counter vcDebugInfo/lrostats[maximum]:319
12855:20241101:185843.280 adding performance counter vcDebugInfo/lrostats[maximum,absolute]:319
12855:20241101:185843.280 adding performance counter vcDebugInfo/lrostats[minimum]:320
12855:20241101:185843.280 adding performance counter vcDebugInfo/lrostats[minimum,absolute]:320
12855:20241101:185843.280 adding performance counter vcDebugInfo/lrostats[summation]:321
12855:20241101:185843.280 adding performance counter vcDebugInfo/lrostats[summation,absolute]:321
12855:20241101:185843.280 adding performance counter vcDebugInfo/miscstats[maximum]:322
12855:20241101:185843.280 adding performance counter vcDebugInfo/miscstats[maximum,absolute]:322
12855:20241101:185843.280 adding performance counter vcDebugInfo/miscstats[minimum]:323
12855:20241101:185843.280 adding performance counter vcDebugInfo/miscstats[minimum,absolute]:323
12855:20241101:185843.280 adding performance counter vcDebugInfo/miscstats[summation]:324
12855:20241101:185843.280 adding performance counter vcDebugInfo/miscstats[summation,absolute]:324
12855:20241101:185843.280 adding performance counter vcDebugInfo/morefregstats[maximum]:325
12855:20241101:185843.280 adding performance counter vcDebugInfo/morefregstats[maximum,absolute]:325
12855:20241101:185843.280 adding performance counter vcDebugInfo/morefregstats[minimum]:326
12855:20241101:185843.280 adding performance counter vcDebugInfo/morefregstats[minimum,absolute]:326
12855:20241101:185843.280 adding performance counter vcDebugInfo/morefregstats[summation]:327
12855:20241101:185843.280 adding performance counter vcDebugInfo/morefregstats[summation,absolute]:327
12855:20241101:185843.280 adding performance counter vcResources/packetrecvrate[average]:328
12855:20241101:185843.280 adding performance counter vcResources/packetrecvrate[average,rate]:328
12855:20241101:185843.280 adding performance counter vcResources/packetsentrate[average]:329
12855:20241101:185843.280 adding performance counter vcResources/packetsentrate[average,rate]:329
12855:20241101:185843.281 adding performance counter vcResources/systemcpuusage[average]:330
12855:20241101:185843.281 adding performance counter vcResources/systemcpuusage[average,rate]:330
12855:20241101:185843.281 adding performance counter vcResources/pagefaultrate[average]:331
12855:20241101:185843.281 adding performance counter vcResources/pagefaultrate[average,rate]:331
12855:20241101:185843.281 adding performance counter vcResources/physicalmemusage[average]:332
12855:20241101:185843.281 adding performance counter vcResources/physicalmemusage[average,absolute]:332
12855:20241101:185843.281 adding performance counter vcResources/priviledgedcpuusage[average]:333
12855:20241101:185843.281 adding performance counter vcResources/priviledgedcpuusage[average,rate]:333
12855:20241101:185843.281 adding performance counter vcDebugInfo/scoreboard[maximum]:334
12855:20241101:185843.281 adding performance counter vcDebugInfo/scoreboard[maximum,absolute]:334
12855:20241101:185843.281 adding performance counter vcDebugInfo/scoreboard[minimum]:335
12855:20241101:185843.281 adding performance counter vcDebugInfo/scoreboard[minimum,absolute]:335
12855:20241101:185843.281 adding performance counter vcDebugInfo/scoreboard[summation]:336
12855:20241101:185843.281 adding performance counter vcDebugInfo/scoreboard[summation,absolute]:336
12855:20241101:185843.281 adding performance counter vcDebugInfo/sessionstats[maximum]:337
12855:20241101:185843.281 adding performance counter vcDebugInfo/sessionstats[maximum,absolute]:337
12855:20241101:185843.281 adding performance counter vcDebugInfo/sessionstats[minimum]:338
12855:20241101:185843.281 adding performance counter vcDebugInfo/sessionstats[minimum,absolute]:338
12855:20241101:185843.281 adding performance counter vcDebugInfo/sessionstats[summation]:339
12855:20241101:185843.281 adding performance counter vcDebugInfo/sessionstats[summation,absolute]:339
12855:20241101:185843.281 adding performance counter vcResources/syscallsrate[average]:340
12855:20241101:185843.281 adding performance counter vcResources/syscallsrate[average,rate]:340
12855:20241101:185843.281 adding performance counter vcDebugInfo/systemstats[maximum]:341
12855:20241101:185843.281 adding performance counter vcDebugInfo/systemstats[maximum,absolute]:341
12855:20241101:185843.281 adding performance counter vcDebugInfo/systemstats[minimum]:342
12855:20241101:185843.281 adding performance counter vcDebugInfo/systemstats[minimum,absolute]:342
12855:20241101:185843.281 adding performance counter vcDebugInfo/systemstats[summation]:343
12855:20241101:185843.281 adding performance counter vcDebugInfo/systemstats[summation,absolute]:343
12855:20241101:185843.281 adding performance counter vcResources/usercpuusage[average]:344
12855:20241101:185843.281 adding performance counter vcResources/usercpuusage[average,rate]:344
12855:20241101:185843.281 adding performance counter vcDebugInfo/vcservicestats[maximum]:345
12855:20241101:185843.281 adding performance counter vcDebugInfo/vcservicestats[maximum,absolute]:345
12855:20241101:185843.281 adding performance counter vcDebugInfo/vcservicestats[minimum]:346
12855:20241101:185843.281 adding performance counter vcDebugInfo/vcservicestats[minimum,absolute]:346
12855:20241101:185843.281 adding performance counter vcDebugInfo/vcservicestats[summation]:347
12855:20241101:185843.281 adding performance counter vcDebugInfo/vcservicestats[summation,absolute]:347
12855:20241101:185843.281 adding performance counter vcResources/virtualmemusage[average]:348
12855:20241101:185843.281 adding performance counter vcResources/virtualmemusage[average,absolute]:348
12855:20241101:185843.281 adding performance counter virtualDisk/readOIO[latest]:349
12855:20241101:185843.281 adding performance counter virtualDisk/readOIO[latest,absolute]:349
12855:20241101:185843.282 adding performance counter virtualDisk/writeOIO[latest]:350
12855:20241101:185843.282 adding performance counter virtualDisk/writeOIO[latest,absolute]:350
12855:20241101:185843.282 adding performance counter virtualDisk/readLoadMetric[latest]:351
12855:20241101:185843.282 adding performance counter virtualDisk/readLoadMetric[latest,absolute]:351
12855:20241101:185843.282 adding performance counter virtualDisk/writeLoadMetric[latest]:352
12855:20241101:185843.282 adding performance counter virtualDisk/writeLoadMetric[latest,absolute]:352
12855:20241101:185843.282 adding performance counter rescpu/actav1[latest]:353
12855:20241101:185843.282 adding performance counter rescpu/actav1[latest,absolute]:353
12855:20241101:185843.282 adding performance counter datastore/datastoreReadBytes[latest]:354
12855:20241101:185843.282 adding performance counter datastore/datastoreReadBytes[latest,absolute]:354
12855:20241101:185843.282 adding performance counter datastore/datastoreWriteBytes[latest]:355
12855:20241101:185843.282 adding performance counter datastore/datastoreWriteBytes[latest,absolute]:355
12855:20241101:185843.282 adding performance counter datastore/datastoreReadIops[latest]:356
12855:20241101:185843.282 adding performance counter datastore/datastoreReadIops[latest,absolute]:356
12855:20241101:185843.282 adding performance counter datastore/datastoreWriteIops[latest]:357
12855:20241101:185843.282 adding performance counter datastore/datastoreWriteIops[latest,absolute]:357
12855:20241101:185843.282 adding performance counter datastore/datastoreReadOIO[latest]:358
12855:20241101:185843.282 adding performance counter datastore/datastoreReadOIO[latest,absolute]:358
12855:20241101:185843.282 adding performance counter datastore/datastoreWriteOIO[latest]:359
12855:20241101:185843.282 adding performance counter datastore/datastoreWriteOIO[latest,absolute]:359
12855:20241101:185843.282 adding performance counter datastore/datastoreNormalReadLatency[latest]:360
12855:20241101:185843.282 adding performance counter datastore/datastoreNormalReadLatency[latest,absolute]:360
12855:20241101:185843.282 adding performance counter datastore/datastoreNormalWriteLatency[latest]:361
12855:20241101:185843.282 adding performance counter datastore/datastoreNormalWriteLatency[latest,absolute]:361
12855:20241101:185843.282 adding performance counter datastore/datastoreReadLoadMetric[latest]:362
12855:20241101:185843.282 adding performance counter datastore/datastoreReadLoadMetric[latest,absolute]:362
12855:20241101:185843.282 adding performance counter datastore/datastoreWriteLoadMetric[latest]:363
12855:20241101:185843.282 adding performance counter datastore/datastoreWriteLoadMetric[latest,absolute]:363
12855:20241101:185843.282 adding performance counter datastore/datastoreVMObservedLatency[latest]:364
12855:20241101:185843.282 adding performance counter datastore/datastoreVMObservedLatency[latest,absolute]:364
12855:20241101:185843.282 adding performance counter disk/scsiReservationCnflctsPct[average]:365
12855:20241101:185843.282 adding performance counter disk/scsiReservationCnflctsPct[average,rate]:365
12855:20241101:185843.282 adding performance counter disk/read[latest]:366
12855:20241101:185843.282 adding performance counter disk/read[latest,absolute]:366
12855:20241101:185843.282 adding performance counter disk/readFailed[latest]:367
12855:20241101:185843.282 adding performance counter disk/readFailed[latest,absolute]:367
12855:20241101:185843.282 adding performance counter disk/write[latest]:368
12855:20241101:185843.282 adding performance counter disk/write[latest,absolute]:368
12855:20241101:185843.282 adding performance counter disk/writeFailed[latest]:369
12855:20241101:185843.283 adding performance counter disk/writeFailed[latest,absolute]:369
12855:20241101:185843.283 adding performance counter disk/commands.success[latest]:370
12855:20241101:185843.283 adding performance counter disk/commands.success[latest,absolute]:370
12855:20241101:185843.283 adding performance counter disk/commands.failed[latest]:371
12855:20241101:185843.283 adding performance counter disk/commands.failed[latest,absolute]:371
12855:20241101:185843.283 adding performance counter disk/commands.queued[latest]:372
12855:20241101:185843.283 adding performance counter disk/commands.queued[latest,absolute]:372
12855:20241101:185843.283 adding performance counter disk/commands.active[latest]:373
12855:20241101:185843.283 adding performance counter disk/commands.active[latest,absolute]:373
12855:20241101:185843.283 adding performance counter disk/state[latest]:374
12855:20241101:185843.283 adding performance counter disk/state[latest,absolute]:374
12855:20241101:185843.283 adding performance counter disk/TM.abort[latest]:375
12855:20241101:185843.283 adding performance counter disk/TM.abort[latest,absolute]:375
12855:20241101:185843.283 adding performance counter disk/TM.abortRetry[latest]:376
12855:20241101:185843.283 adding performance counter disk/TM.abortRetry[latest,absolute]:376
12855:20241101:185843.283 adding performance counter disk/TM.abortFailed[latest]:377
12855:20241101:185843.283 adding performance counter disk/TM.abortFailed[latest,absolute]:377
12855:20241101:185843.283 adding performance counter disk/TM.virtReset[latest]:378
12855:20241101:185843.283 adding performance counter disk/TM.virtReset[latest,absolute]:378
12855:20241101:185843.283 adding performance counter disk/TM.virtResetRetry[latest]:379
12855:20241101:185843.283 adding performance counter disk/TM.virtResetRetry[latest,absolute]:379
12855:20241101:185843.283 adding performance counter disk/TM.virtResetFailed[latest]:380
12855:20241101:185843.283 adding performance counter disk/TM.virtResetFailed[latest,absolute]:380
12855:20241101:185843.283 adding performance counter disk/TM.lunReset[latest]:381
12855:20241101:185843.283 adding performance counter disk/TM.lunReset[latest,absolute]:381
12855:20241101:185843.283 adding performance counter disk/TM.lunResetRetry[latest]:382
12855:20241101:185843.283 adding performance counter disk/TM.lunResetRetry[latest,absolute]:382
12855:20241101:185843.283 adding performance counter disk/TM.lunResetFailed[latest]:383
12855:20241101:185843.283 adding performance counter disk/TM.lunResetFailed[latest,absolute]:383
12855:20241101:185843.283 adding performance counter disk/TM.deviceReset[latest]:384
12855:20241101:185843.283 adding performance counter disk/TM.deviceReset[latest,absolute]:384
12855:20241101:185843.283 adding performance counter disk/TM.deviceResetRetry[latest]:385
12855:20241101:185843.283 adding performance counter disk/TM.deviceResetRetry[latest,absolute]:385
12855:20241101:185843.283 adding performance counter disk/TM.deviceResetFailed[latest]:386
12855:20241101:185843.283 adding performance counter disk/TM.deviceResetFailed[latest,absolute]:386
12855:20241101:185843.283 adding performance counter disk/TM.busReset[latest]:387
12855:20241101:185843.283 adding performance counter disk/TM.busReset[latest,absolute]:387
12855:20241101:185843.283 adding performance counter disk/TM.busResetRetry[latest]:388
12855:20241101:185843.283 adding performance counter disk/TM.busResetRetry[latest,absolute]:388
12855:20241101:185843.284 adding performance counter disk/TM.busResetFailed[latest]:389
12855:20241101:185843.284 adding performance counter disk/TM.busResetFailed[latest,absolute]:389
12855:20241101:185843.284 adding performance counter disk/latency.qavg[latest]:390
12855:20241101:185843.284 adding performance counter disk/latency.qavg[latest,absolute]:390
12855:20241101:185843.284 adding performance counter disk/latency.davg[latest]:391
12855:20241101:185843.284 adding performance counter disk/latency.davg[latest,absolute]:391
12855:20241101:185843.284 adding performance counter disk/latency.kavg[latest]:392
12855:20241101:185843.284 adding performance counter disk/latency.kavg[latest,absolute]:392
12855:20241101:185843.284 adding performance counter disk/latency.gavg[latest]:393
12855:20241101:185843.284 adding performance counter disk/latency.gavg[latest,absolute]:393
12855:20241101:185843.284 adding performance counter storageAdapter/outstandingIOs[latest]:394
12855:20241101:185843.284 adding performance counter storageAdapter/outstandingIOs[latest,absolute]:394
12855:20241101:185843.284 adding performance counter storageAdapter/queued[latest]:395
12855:20241101:185843.284 adding performance counter storageAdapter/queued[latest,absolute]:395
12855:20241101:185843.284 adding performance counter storageAdapter/queueDepth[latest]:396
12855:20241101:185843.284 adding performance counter storageAdapter/queueDepth[latest,absolute]:396
12855:20241101:185843.284 adding performance counter cpu/partnerBusyTime[average]:397
12855:20241101:185843.284 adding performance counter cpu/partnerBusyTime[average,rate]:397
12855:20241101:185843.284 adding performance counter cpu/utilization[average]:398
12855:20241101:185843.284 adding performance counter cpu/utilization[average,rate]:398
12855:20241101:185843.284 adding performance counter cpu/corecount.provisioned[latest]:399
12855:20241101:185843.284 adding performance counter cpu/corecount.provisioned[latest,absolute]:399
12855:20241101:185843.284 adding performance counter cpu/cache.l3.occupancy[average]:400
12855:20241101:185843.284 adding performance counter cpu/cache.l3.occupancy[average,absolute]:400
12855:20241101:185843.284 adding performance counter cpu/corecount.usage[latest]:401
12855:20241101:185843.284 adding performance counter cpu/corecount.usage[latest,absolute]:401
12855:20241101:185843.284 adding performance counter cpu/load.avg1min[latest]:402
12855:20241101:185843.284 adding performance counter cpu/load.avg1min[latest,absolute]:402
12855:20241101:185843.284 adding performance counter cpu/load.avg5min[latest]:403
12855:20241101:185843.284 adding performance counter cpu/load.avg5min[latest,absolute]:403
12855:20241101:185843.284 adding performance counter cpu/load.avg15min[latest]:404
12855:20241101:185843.284 adding performance counter cpu/load.avg15min[latest,absolute]:404
12855:20241101:185843.284 adding performance counter mem/capacity.provisioned[latest]:405
12855:20241101:185843.284 adding performance counter mem/capacity.provisioned[latest,absolute]:405
12855:20241101:185843.284 adding performance counter mem/reservedCapacityPct[latest]:406
12855:20241101:185843.284 adding performance counter mem/reservedCapacityPct[latest,absolute]:406
12855:20241101:185843.284 adding performance counter mem/overcommit.avg1min[latest]:407
12855:20241101:185843.284 adding performance counter mem/overcommit.avg1min[latest,absolute]:407
12855:20241101:185843.284 adding performance counter mem/overcommit.avg5min[latest]:408
12855:20241101:185843.284 adding performance counter mem/overcommit.avg5min[latest,absolute]:408
12855:20241101:185843.285 adding performance counter mem/overcommit.avg15min[latest]:409
12855:20241101:185843.285 adding performance counter mem/overcommit.avg15min[latest,absolute]:409
12855:20241101:185843.285 adding performance counter mem/physical.total[latest]:410
12855:20241101:185843.285 adding performance counter mem/physical.total[latest,absolute]:410
12855:20241101:185843.285 adding performance counter mem/physical.user[latest]:411
12855:20241101:185843.285 adding performance counter mem/physical.user[latest,absolute]:411
12855:20241101:185843.285 adding performance counter mem/physical.free[latest]:412
12855:20241101:185843.285 adding performance counter mem/physical.free[latest,absolute]:412
12855:20241101:185843.285 adding performance counter mem/kernel.managed[latest]:413
12855:20241101:185843.285 adding performance counter mem/kernel.managed[latest,absolute]:413
12855:20241101:185843.285 adding performance counter mem/kernel.minfree[latest]:414
12855:20241101:185843.285 adding performance counter mem/kernel.minfree[latest,absolute]:414
12855:20241101:185843.285 adding performance counter mem/kernel.unreserved[latest]:415
12855:20241101:185843.285 adding performance counter mem/kernel.unreserved[latest,absolute]:415
12855:20241101:185843.285 adding performance counter mem/pshare.shared[latest]:416
12855:20241101:185843.285 adding performance counter mem/pshare.shared[latest,absolute]:416
12855:20241101:185843.285 adding performance counter mem/pshare.common[latest]:417
12855:20241101:185843.285 adding performance counter mem/pshare.common[latest,absolute]:417
12855:20241101:185843.285 adding performance counter mem/pshare.sharedSave[latest]:418
12855:20241101:185843.285 adding performance counter mem/pshare.sharedSave[latest,absolute]:418
12855:20241101:185843.285 adding performance counter mem/swap.current[latest]:419
12855:20241101:185843.285 adding performance counter mem/swap.current[latest,absolute]:419
12855:20241101:185843.285 adding performance counter mem/swap.target[latest]:420
12855:20241101:185843.285 adding performance counter mem/swap.target[latest,absolute]:420
12855:20241101:185843.285 adding performance counter mem/swap.readrate[average]:421
12855:20241101:185843.285 adding performance counter mem/swap.readrate[average,rate]:421
12855:20241101:185843.285 adding performance counter mem/swap.writerate[average]:422
12855:20241101:185843.285 adding performance counter mem/swap.writerate[average,rate]:422
12855:20241101:185843.285 adding performance counter mem/zip.zipped[latest]:423
12855:20241101:185843.285 adding performance counter mem/zip.zipped[latest,absolute]:423
12855:20241101:185843.285 adding performance counter mem/zip.saved[latest]:424
12855:20241101:185843.285 adding performance counter mem/zip.saved[latest,absolute]:424
12855:20241101:185843.285 adding performance counter mem/memctl.current[latest]:425
12855:20241101:185843.285 adding performance counter mem/memctl.current[latest,absolute]:425
12855:20241101:185843.285 adding performance counter mem/memctl.target[latest]:426
12855:20241101:185843.285 adding performance counter mem/memctl.target[latest,absolute]:426
12855:20241101:185843.285 adding performance counter mem/memctl.max[latest]:427
12855:20241101:185843.285 adding performance counter mem/memctl.max[latest,absolute]:427
12855:20241101:185843.285 adding performance counter mem/health.reservationState[latest]:428
12855:20241101:185843.285 adding performance counter mem/health.reservationState[latest,absolute]:428
12855:20241101:185843.286 adding performance counter mem/capacity.overhead[average]:429
12855:20241101:185843.286 adding performance counter mem/capacity.overhead[average,absolute]:429
12855:20241101:185843.286 adding performance counter mem/capacity.overheadResv[average]:430
12855:20241101:185843.286 adding performance counter mem/capacity.overheadResv[average,absolute]:430
12855:20241101:185843.286 adding performance counter mem/capacity.consumed[latest]:431
12855:20241101:185843.286 adding performance counter mem/capacity.consumed[latest,absolute]:431
12855:20241101:185843.286 adding performance counter mem/capacity.active[latest]:432
12855:20241101:185843.286 adding performance counter mem/capacity.active[latest,absolute]:432
12855:20241101:185843.286 adding performance counter power/capacity.usageCpu[average]:433
12855:20241101:185843.286 adding performance counter power/capacity.usageCpu[average,absolute]:433
12855:20241101:185843.286 adding performance counter power/capacity.usageMem[average]:434
12855:20241101:185843.286 adding performance counter power/capacity.usageMem[average,absolute]:434
12855:20241101:185843.286 adding performance counter power/capacity.usageOther[average]:435
12855:20241101:185843.286 adding performance counter power/capacity.usageOther[average,absolute]:435
12855:20241101:185843.286 adding performance counter vmotion/vmkernel.downtime[latest]:436
12855:20241101:185843.286 adding performance counter vmotion/vmkernel.downtime[latest,absolute]:436
12855:20241101:185843.286 adding performance counter vmotion/downtime[latest]:437
12855:20241101:185843.286 adding performance counter vmotion/downtime[latest,absolute]:437
12855:20241101:185843.286 adding performance counter vmotion/precopy.time[latest]:438
12855:20241101:185843.286 adding performance counter vmotion/precopy.time[latest,absolute]:438
12855:20241101:185843.286 adding performance counter vmotion/rtt[latest]:439
12855:20241101:185843.286 adding performance counter vmotion/rtt[latest,absolute]:439
12855:20241101:185843.286 adding performance counter vmotion/dst.migration.time[latest]:440
12855:20241101:185843.286 adding performance counter vmotion/dst.migration.time[latest,absolute]:440
12855:20241101:185843.286 adding performance counter vmotion/mem.sizemb[latest]:441
12855:20241101:185843.286 adding performance counter vmotion/mem.sizemb[latest,absolute]:441
12855:20241101:185843.286 adding performance counter hbr/vms[latest]:442
12855:20241101:185843.286 adding performance counter hbr/vms[latest,absolute]:442
12855:20241101:185843.286 adding performance counter net/throughput.hbr.inbound[average]:443
12855:20241101:185843.286 adding performance counter net/throughput.hbr.inbound[average,rate]:443
12855:20241101:185843.286 adding performance counter net/throughput.hbr.outbound[average]:444
12855:20241101:185843.286 adding performance counter net/throughput.hbr.outbound[average,rate]:444
12855:20241101:185843.286 adding performance counter virtualDisk/hbr.readLatencyMS[latest]:445
12855:20241101:185843.286 adding performance counter virtualDisk/hbr.readLatencyMS[latest,absolute]:445
12855:20241101:185843.286 adding performance counter virtualDisk/hbr.stallLatencyMS[latest]:446
12855:20241101:185843.286 adding performance counter virtualDisk/hbr.stallLatencyMS[latest,absolute]:446
12855:20241101:185843.286 adding performance counter net/latency.hbr.outbound[latest]:447
12855:20241101:185843.286 adding performance counter net/latency.hbr.outbound[latest,absolute]:447
12855:20241101:185843.286 adding performance counter lwd/numSnapshots[latest]:448
12855:20241101:185843.287 adding performance counter lwd/numSnapshots[latest,absolute]:448
12855:20241101:185843.287 adding performance counter nfs/apdState[latest]:449
12855:20241101:185843.287 adding performance counter nfs/apdState[latest,absolute]:449
12855:20241101:185843.287 adding performance counter nfs/readIssueTime[latest]:450
12855:20241101:185843.287 adding performance counter nfs/readIssueTime[latest,absolute]:450
12855:20241101:185843.287 adding performance counter nfs/writeIssueTime[latest]:451
12855:20241101:185843.287 adding performance counter nfs/writeIssueTime[latest,absolute]:451
12855:20241101:185843.287 adding performance counter nfs/totalReads[latest]:452
12855:20241101:185843.287 adding performance counter nfs/totalReads[latest,absolute]:452
12855:20241101:185843.287 adding performance counter nfs/readsFailed[latest]:453
12855:20241101:185843.287 adding performance counter nfs/readsFailed[latest,absolute]:453
12855:20241101:185843.287 adding performance counter nfs/totalWrites[latest]:454
12855:20241101:185843.287 adding performance counter nfs/totalWrites[latest,absolute]:454
12855:20241101:185843.287 adding performance counter nfs/writesFailed[latest]:455
12855:20241101:185843.287 adding performance counter nfs/writesFailed[latest,absolute]:455
12855:20241101:185843.287 adding performance counter nfs/readTime[latest]:456
12855:20241101:185843.287 adding performance counter nfs/readTime[latest,absolute]:456
12855:20241101:185843.287 adding performance counter nfs/writeTime[latest]:457
12855:20241101:185843.287 adding performance counter nfs/writeTime[latest,absolute]:457
12855:20241101:185843.287 adding performance counter nfs/ioRequestsQueued[latest]:458
12855:20241101:185843.287 adding performance counter nfs/ioRequestsQueued[latest,absolute]:458
12855:20241101:185843.287 adding performance counter nfs/totalCreate[latest]:459
12855:20241101:185843.287 adding performance counter nfs/totalCreate[latest,absolute]:459
12855:20241101:185843.287 adding performance counter nfs/createFailed[latest]:460
12855:20241101:185843.287 adding performance counter nfs/createFailed[latest,absolute]:460
12855:20241101:185843.287 adding performance counter nfs/socketBufferFull[latest]:461
12855:20241101:185843.287 adding performance counter nfs/socketBufferFull[latest,absolute]:461
12855:20241101:185843.287 adding performance counter datastore/vmfs.totalTxn[latest]:462
12855:20241101:185843.287 adding performance counter datastore/vmfs.totalTxn[latest,absolute]:462
12855:20241101:185843.287 adding performance counter datastore/vmfs.cancelledTxn[latest]:463
12855:20241101:185843.287 adding performance counter datastore/vmfs.cancelledTxn[latest,absolute]:463
12855:20241101:185843.287 adding performance counter datastore/vmfs.apdState[latest]:464
12855:20241101:185843.287 adding performance counter datastore/vmfs.apdState[latest,absolute]:464
12855:20241101:185843.287 adding performance counter datastore/vmfs.apdCount[latest]:465
12855:20241101:185843.287 adding performance counter datastore/vmfs.apdCount[latest,absolute]:465
12855:20241101:185843.287 adding performance counter vvol/pe.isaccessible[latest]:466
12855:20241101:185843.287 adding performance counter vvol/pe.isaccessible[latest,absolute]:466
12855:20241101:185843.287 adding performance counter vvol/pe.reads.done[latest]:467
12855:20241101:185843.287 adding performance counter vvol/pe.reads.done[latest,absolute]:467
12855:20241101:185843.288 adding performance counter vvol/pe.writes.done[latest]:468
12855:20241101:185843.288 adding performance counter vvol/pe.writes.done[latest,absolute]:468
12855:20241101:185843.288 adding performance counter vvol/pe.total.done[latest]:469
12855:20241101:185843.288 adding performance counter vvol/pe.total.done[latest,absolute]:469
12855:20241101:185843.288 adding performance counter vvol/pe.reads.sent[latest]:470
12855:20241101:185843.288 adding performance counter vvol/pe.reads.sent[latest,absolute]:470
12855:20241101:185843.288 adding performance counter vvol/pe.writes.sent[latest]:471
12855:20241101:185843.288 adding performance counter vvol/pe.writes.sent[latest,absolute]:471
12855:20241101:185843.288 adding performance counter vvol/pe.total.sent[latest]:472
12855:20241101:185843.288 adding performance counter vvol/pe.total.sent[latest,absolute]:472
12855:20241101:185843.288 adding performance counter vvol/pe.readsissued.failed[latest]:473
12855:20241101:185843.288 adding performance counter vvol/pe.readsissued.failed[latest,absolute]:473
12855:20241101:185843.288 adding performance counter vvol/pe.writesissued.failed[latest]:474
12855:20241101:185843.288 adding performance counter vvol/pe.writesissued.failed[latest,absolute]:474
12855:20241101:185843.288 adding performance counter vvol/pe.totalissued.failed[latest]:475
12855:20241101:185843.288 adding performance counter vvol/pe.totalissued.failed[latest,absolute]:475
12855:20241101:185843.288 adding performance counter vvol/pe.reads.failed[latest]:476
12855:20241101:185843.288 adding performance counter vvol/pe.reads.failed[latest,absolute]:476
12855:20241101:185843.288 adding performance counter vvol/pe.writes.failed[latest]:477
12855:20241101:185843.288 adding performance counter vvol/pe.writes.failed[latest,absolute]:477
12855:20241101:185843.288 adding performance counter vvol/pe.total.failed[latest]:478
12855:20241101:185843.288 adding performance counter vvol/pe.total.failed[latest,absolute]:478
12855:20241101:185843.288 adding performance counter vvol/pe.read.latency[latest]:479
12855:20241101:185843.288 adding performance counter vvol/pe.read.latency[latest,absolute]:479
12855:20241101:185843.288 adding performance counter vvol/pe.write.latency[latest]:480
12855:20241101:185843.288 adding performance counter vvol/pe.write.latency[latest,absolute]:480
12855:20241101:185843.288 adding performance counter vvol/pe.issue.latency[latest]:481
12855:20241101:185843.288 adding performance counter vvol/pe.issue.latency[latest,absolute]:481
12855:20241101:185843.288 adding performance counter vvol/pe.total.latency[latest]:482
12855:20241101:185843.288 adding performance counter vvol/pe.total.latency[latest,absolute]:482
12855:20241101:185843.288 adding performance counter vvol/pe.cancel.sent[latest]:483
12855:20241101:185843.288 adding performance counter vvol/pe.cancel.sent[latest,absolute]:483
12855:20241101:185843.288 adding performance counter vvol/pe.cancel.failed[latest]:484
12855:20241101:185843.288 adding performance counter vvol/pe.cancel.failed[latest,absolute]:484
12855:20241101:185843.288 adding performance counter vvol/pe.deviceresets.sent[latest]:485
12855:20241101:185843.288 adding performance counter vvol/pe.deviceresets.sent[latest,absolute]:485
12855:20241101:185843.288 adding performance counter vvol/pe.deviceresets.failed[latest]:486
12855:20241101:185843.288 adding performance counter vvol/pe.deviceresets.failed[latest,absolute]:486
12855:20241101:185843.288 adding performance counter vvol/pe.resets.sent[latest]:487
12855:20241101:185843.288 adding performance counter vvol/pe.resets.sent[latest,absolute]:487
12855:20241101:185843.289 adding performance counter vvol/pe.resets.failed[latest]:488
12855:20241101:185843.289 adding performance counter vvol/pe.resets.failed[latest,absolute]:488
12855:20241101:185843.289 adding performance counter vvol/pe.unmaps.sent[latest]:489
12855:20241101:185843.289 adding performance counter vvol/pe.unmaps.sent[latest,absolute]:489
12855:20241101:185843.289 adding performance counter vvol/pe.unmaps.failed[latest]:490
12855:20241101:185843.289 adding performance counter vvol/pe.unmaps.failed[latest,absolute]:490
12855:20241101:185843.289 adding performance counter vvol/container.reads.done[latest]:491
12855:20241101:185843.289 adding performance counter vvol/container.reads.done[latest,absolute]:491
12855:20241101:185843.289 adding performance counter vvol/container.writes.done[latest]:492
12855:20241101:185843.289 adding performance counter vvol/container.writes.done[latest,absolute]:492
12855:20241101:185843.289 adding performance counter vvol/container.total.done[latest]:493
12855:20241101:185843.289 adding performance counter vvol/container.total.done[latest,absolute]:493
12855:20241101:185843.289 adding performance counter vvol/container.reads.sent[latest]:494
12855:20241101:185843.289 adding performance counter vvol/container.reads.sent[latest,absolute]:494
12855:20241101:185843.289 adding performance counter vvol/container.writes.sent[latest]:495
12855:20241101:185843.289 adding performance counter vvol/container.writes.sent[latest,absolute]:495
12855:20241101:185843.289 adding performance counter vvol/container.total.sent[latest]:496
12855:20241101:185843.289 adding performance counter vvol/container.total.sent[latest,absolute]:496
12855:20241101:185843.289 adding performance counter vvol/container.readsissued.failed[latest]:497
12855:20241101:185843.289 adding performance counter vvol/container.readsissued.failed[latest,absolute]:497
12855:20241101:185843.289 adding performance counter vvol/container.writesissued.failed[latest]:498
12855:20241101:185843.289 adding performance counter vvol/container.writesissued.failed[latest,absolute]:498
12855:20241101:185843.289 adding performance counter vvol/container.totalissued.failed[latest]:499
12855:20241101:185843.289 adding performance counter vvol/container.totalissued.failed[latest,absolute]:499
12855:20241101:185843.289 adding performance counter vvol/container.reads.failed[latest]:500
12855:20241101:185843.289 adding performance counter vvol/container.reads.failed[latest,absolute]:500
12855:20241101:185843.289 adding performance counter vvol/container.writes.failed[latest]:501
12855:20241101:185843.289 adding performance counter vvol/container.writes.failed[latest,absolute]:501
12855:20241101:185843.289 adding performance counter vvol/container.total.failed[latest]:502
12855:20241101:185843.289 adding performance counter vvol/container.total.failed[latest,absolute]:502
12855:20241101:185843.289 adding performance counter vvol/container.read.latency[latest]:503
12855:20241101:185843.289 adding performance counter vvol/container.read.latency[latest,absolute]:503
12855:20241101:185843.289 adding performance counter vvol/container.write.latency[latest]:504
12855:20241101:185843.289 adding performance counter vvol/container.write.latency[latest,absolute]:504
12855:20241101:185843.289 adding performance counter vvol/container.issue.latency[latest]:505
12855:20241101:185843.289 adding performance counter vvol/container.issue.latency[latest,absolute]:505
12855:20241101:185843.290 adding performance counter vvol/container.total.latency[latest]:506
12855:20241101:185843.290 adding performance counter vvol/container.total.latency[latest,absolute]:506
12855:20241101:185843.290 adding performance counter vvol/device.reads.done[latest]:507
12855:20241101:185843.290 adding performance counter vvol/device.reads.done[latest,absolute]:507
12855:20241101:185843.290 adding performance counter vvol/device.writes.done[latest]:508
12855:20241101:185843.290 adding performance counter vvol/device.writes.done[latest,absolute]:508
12855:20241101:185843.290 adding performance counter vvol/device.total.done[latest]:509
12855:20241101:185843.290 adding performance counter vvol/device.total.done[latest,absolute]:509
12855:20241101:185843.290 adding performance counter vvol/device.reads.sent[latest]:510
12855:20241101:185843.290 adding performance counter vvol/device.reads.sent[latest,absolute]:510
12855:20241101:185843.290 adding performance counter vvol/device.writes.sent[latest]:511
12855:20241101:185843.290 adding performance counter vvol/device.writes.sent[latest,absolute]:511
12855:20241101:185843.290 adding performance counter vvol/device.total.sent[latest]:512
12855:20241101:185843.290 adding performance counter vvol/device.total.sent[latest,absolute]:512
12855:20241101:185843.290 adding performance counter vvol/device.readsissued.failed[latest]:513
12855:20241101:185843.290 adding performance counter vvol/device.readsissued.failed[latest,absolute]:513
12855:20241101:185843.290 adding performance counter vvol/device.writesissued.failed[latest]:514
12855:20241101:185843.290 adding performance counter vvol/device.writesissued.failed[latest,absolute]:514
12855:20241101:185843.290 adding performance counter vvol/device.totalissued.failed[latest]:515
12855:20241101:185843.290 adding performance counter vvol/device.totalissued.failed[latest,absolute]:515
12855:20241101:185843.290 adding performance counter vvol/device.reads.failed[latest]:516
12855:20241101:185843.290 adding performance counter vvol/device.reads.failed[latest,absolute]:516
12855:20241101:185843.290 adding performance counter vvol/device.writes.failed[latest]:517
12855:20241101:185843.290 adding performance counter vvol/device.writes.failed[latest,absolute]:517
12855:20241101:185843.290 adding performance counter vvol/device.total.failed[latest]:518
12855:20241101:185843.290 adding performance counter vvol/device.total.failed[latest,absolute]:518
12855:20241101:185843.290 adding performance counter vvol/device.read.latency[latest]:519
12855:20241101:185843.290 adding performance counter vvol/device.read.latency[latest,absolute]:519
12855:20241101:185843.290 adding performance counter vvol/device.write.latency[latest]:520
12855:20241101:185843.290 adding performance counter vvol/device.write.latency[latest,absolute]:520
12855:20241101:185843.290 adding performance counter vvol/device.issue.latency[latest]:521
12855:20241101:185843.290 adding performance counter vvol/device.issue.latency[latest,absolute]:521
12855:20241101:185843.290 adding performance counter vvol/device.total.latency[latest]:522
12855:20241101:185843.290 adding performance counter vvol/device.total.latency[latest,absolute]:522
12855:20241101:185843.290 adding performance counter vvol/device.cancel.sent[latest]:523
12855:20241101:185843.290 adding performance counter vvol/device.cancel.sent[latest,absolute]:523
12855:20241101:185843.290 adding performance counter vvol/device.cancel.failed[latest]:524
12855:20241101:185843.290 adding performance counter vvol/device.cancel.failed[latest,absolute]:524
12855:20241101:185843.290 adding performance counter vvol/device.deviceresets.sent[latest]:525
12855:20241101:185843.290 adding performance counter vvol/device.deviceresets.sent[latest,absolute]:525
12855:20241101:185843.291 adding performance counter vvol/device.deviceresets.failed[latest]:526
12855:20241101:185843.291 adding performance counter vvol/device.deviceresets.failed[latest,absolute]:526
12855:20241101:185843.291 adding performance counter vvol/device.resets.sent[latest]:527
12855:20241101:185843.291 adding performance counter vvol/device.resets.sent[latest,absolute]:527
12855:20241101:185843.291 adding performance counter vvol/device.resets.failed[latest]:528
12855:20241101:185843.291 adding performance counter vvol/device.resets.failed[latest,absolute]:528
12855:20241101:185843.291 adding performance counter vvol/device.unmaps.sent[latest]:529
12855:20241101:185843.291 adding performance counter vvol/device.unmaps.sent[latest,absolute]:529
12855:20241101:185843.291 adding performance counter vvol/device.unmaps.failed[latest]:530
12855:20241101:185843.291 adding performance counter vvol/device.unmaps.failed[latest,absolute]:530
12855:20241101:185843.291 adding performance counter cpu/swapwait[summation]:531
12855:20241101:185843.291 adding performance counter cpu/swapwait[summation,delta]:531
12855:20241101:185843.291 adding performance counter cpu/utilization[none]:532
12855:20241101:185843.291 adding performance counter cpu/utilization[none,rate]:532
12855:20241101:185843.291 adding performance counter cpu/utilization[maximum]:533
12855:20241101:185843.291 adding performance counter cpu/utilization[maximum,rate]:533
12855:20241101:185843.291 adding performance counter cpu/utilization[minimum]:534
12855:20241101:185843.291 adding performance counter cpu/utilization[minimum,rate]:534
12855:20241101:185843.291 adding performance counter cpu/coreUtilization[none]:535
12855:20241101:185843.291 adding performance counter cpu/coreUtilization[none,rate]:535
12855:20241101:185843.291 adding performance counter cpu/coreUtilization[average]:536
12855:20241101:185843.291 adding performance counter cpu/coreUtilization[average,rate]:536
12855:20241101:185843.291 adding performance counter cpu/coreUtilization[maximum]:537
12855:20241101:185843.291 adding performance counter cpu/coreUtilization[maximum,rate]:537
12855:20241101:185843.291 adding performance counter cpu/coreUtilization[minimum]:538
12855:20241101:185843.291 adding performance counter cpu/coreUtilization[minimum,rate]:538
12855:20241101:185843.291 adding performance counter cpu/totalCapacity[average]:539
12855:20241101:185843.291 adding performance counter cpu/totalCapacity[average,absolute]:539
12855:20241101:185843.291 adding performance counter cpu/latency[average]:540
12855:20241101:185843.291 adding performance counter cpu/latency[average,rate]:540
12855:20241101:185843.291 adding performance counter cpu/entitlement[latest]:541
12855:20241101:185843.291 adding performance counter cpu/entitlement[latest,absolute]:541
12855:20241101:185843.291 adding performance counter cpu/demand[average]:542
12855:20241101:185843.291 adding performance counter cpu/demand[average,absolute]:542
12855:20241101:185843.291 adding performance counter cpu/costop[summation]:543
12855:20241101:185843.291 adding performance counter cpu/costop[summation,delta]:543
12855:20241101:185843.291 adding performance counter cpu/maxlimited[summation]:544
12855:20241101:185843.291 adding performance counter cpu/maxlimited[summation,delta]:544
12855:20241101:185843.291 adding performance counter cpu/overlap[summation]:545
12855:20241101:185843.291 adding performance counter cpu/overlap[summation,delta]:545
12855:20241101:185843.291 adding performance counter cpu/run[summation]:546
12855:20241101:185843.292 adding performance counter cpu/run[summation,delta]:546
12855:20241101:185843.292 adding performance counter cpu/demandEntitlementRatio[latest]:547
12855:20241101:185843.292 adding performance counter cpu/demandEntitlementRatio[latest,absolute]:547
12855:20241101:185843.292 adding performance counter cpu/readiness[average]:548
12855:20241101:185843.292 adding performance counter cpu/readiness[average,rate]:548
12855:20241101:185843.292 adding performance counter cpu/usage.vcpus[average]:549
12855:20241101:185843.292 adding performance counter cpu/usage.vcpus[average,rate]:549
12855:20241101:185843.292 adding performance counter mem/swapin[none]:550
12855:20241101:185843.292 adding performance counter mem/swapin[none,absolute]:550
12855:20241101:185843.292 adding performance counter mem/swapin[average]:551
12855:20241101:185843.292 adding performance counter mem/swapin[average,absolute]:551
12855:20241101:185843.292 adding performance counter mem/swapin[maximum]:552
12855:20241101:185843.292 adding performance counter mem/swapin[maximum,absolute]:552
12855:20241101:185843.292 adding performance counter mem/swapin[minimum]:553
12855:20241101:185843.292 adding performance counter mem/swapin[minimum,absolute]:553
12855:20241101:185843.292 adding performance counter mem/swapout[none]:554
12855:20241101:185843.292 adding performance counter mem/swapout[none,absolute]:554
12855:20241101:185843.292 adding performance counter mem/swapout[average]:555
12855:20241101:185843.292 adding performance counter mem/swapout[average,absolute]:555
12855:20241101:185843.292 adding performance counter mem/swapout[maximum]:556
12855:20241101:185843.292 adding performance counter mem/swapout[maximum,absolute]:556
12855:20241101:185843.292 adding performance counter mem/swapout[minimum]:557
12855:20241101:185843.292 adding performance counter mem/swapout[minimum,absolute]:557
12855:20241101:185843.292 adding performance counter mem/sysUsage[none]:558
12855:20241101:185843.292 adding performance counter mem/sysUsage[none,absolute]:558
12855:20241101:185843.292 adding performance counter mem/sysUsage[average]:559
12855:20241101:185843.292 adding performance counter mem/sysUsage[average,absolute]:559
12855:20241101:185843.292 adding performance counter mem/sysUsage[maximum]:560
12855:20241101:185843.292 adding performance counter mem/sysUsage[maximum,absolute]:560
12855:20241101:185843.292 adding performance counter mem/sysUsage[minimum]:561
12855:20241101:185843.292 adding performance counter mem/sysUsage[minimum,absolute]:561
12855:20241101:185843.292 adding performance counter mem/activewrite[average]:562
12855:20241101:185843.292 adding performance counter mem/activewrite[average,absolute]:562
12855:20241101:185843.292 adding performance counter mem/overheadMax[average]:563
12855:20241101:185843.292 adding performance counter mem/overheadMax[average,absolute]:563
12855:20241101:185843.292 adding performance counter mem/totalCapacity[average]:564
12855:20241101:185843.292 adding performance counter mem/totalCapacity[average,absolute]:564
12855:20241101:185843.292 adding performance counter mem/zipped[latest]:565
12855:20241101:185843.292 adding performance counter mem/zipped[latest,absolute]:565
12855:20241101:185843.293 adding performance counter mem/zipSaved[latest]:566
12855:20241101:185843.293 adding performance counter mem/zipSaved[latest,absolute]:566
12855:20241101:185843.293 adding performance counter mem/latency[average]:567
12855:20241101:185843.293 adding performance counter mem/latency[average,absolute]:567
12855:20241101:185843.293 adding performance counter mem/entitlement[average]:568
12855:20241101:185843.293 adding performance counter mem/entitlement[average,absolute]:568
12855:20241101:185843.293 adding performance counter mem/lowfreethreshold[average]:569
12855:20241101:185843.293 adding performance counter mem/lowfreethreshold[average,absolute]:569
12855:20241101:185843.293 adding performance counter mem/llSwapUsed[none]:570
12855:20241101:185843.293 adding performance counter mem/llSwapUsed[none,absolute]:570
12855:20241101:185843.293 adding performance counter mem/llSwapInRate[average]:571
12855:20241101:185843.293 adding performance counter mem/llSwapInRate[average,rate]:571
12855:20241101:185843.293 adding performance counter mem/llSwapOutRate[average]:572
12855:20241101:185843.293 adding performance counter mem/llSwapOutRate[average,rate]:572
12855:20241101:185843.293 adding performance counter mem/overheadTouched[average]:573
12855:20241101:185843.293 adding performance counter mem/overheadTouched[average,absolute]:573
12855:20241101:185843.293 adding performance counter mem/llSwapUsed[average]:574
12855:20241101:185843.293 adding performance counter mem/llSwapUsed[average,absolute]:574
12855:20241101:185843.293 adding performance counter mem/llSwapUsed[maximum]:575
12855:20241101:185843.293 adding performance counter mem/llSwapUsed[maximum,absolute]:575
12855:20241101:185843.293 adding performance counter mem/llSwapUsed[minimum]:576
12855:20241101:185843.293 adding performance counter mem/llSwapUsed[minimum,absolute]:576
12855:20241101:185843.293 adding performance counter mem/llSwapIn[none]:577
12855:20241101:185843.293 adding performance counter mem/llSwapIn[none,absolute]:577
12855:20241101:185843.293 adding performance counter mem/llSwapIn[average]:578
12855:20241101:185843.293 adding performance counter mem/llSwapIn[average,absolute]:578
12855:20241101:185843.293 adding performance counter mem/llSwapIn[maximum]:579
12855:20241101:185843.293 adding performance counter mem/llSwapIn[maximum,absolute]:579
12855:20241101:185843.293 adding performance counter mem/llSwapIn[minimum]:580
12855:20241101:185843.293 adding performance counter mem/llSwapIn[minimum,absolute]:580
12855:20241101:185843.293 adding performance counter mem/llSwapOut[none]:581
12855:20241101:185843.293 adding performance counter mem/llSwapOut[none,absolute]:581
12855:20241101:185843.293 adding performance counter mem/llSwapOut[average]:582
12855:20241101:185843.293 adding performance counter mem/llSwapOut[average,absolute]:582
12855:20241101:185843.293 adding performance counter mem/llSwapOut[maximum]:583
12855:20241101:185843.293 adding performance counter mem/llSwapOut[maximum,absolute]:583
12855:20241101:185843.293 adding performance counter mem/llSwapOut[minimum]:584
12855:20241101:185843.293 adding performance counter mem/llSwapOut[minimum,absolute]:584
12855:20241101:185843.293 adding performance counter mem/vmfs.pbc.size[latest]:585
12855:20241101:185843.293 adding performance counter mem/vmfs.pbc.size[latest,absolute]:585
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.sizeMax[latest]:586
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.sizeMax[latest,absolute]:586
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.workingSet[latest]:587
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.workingSet[latest,absolute]:587
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.workingSetMax[latest]:588
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.workingSetMax[latest,absolute]:588
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.overhead[latest]:589
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.overhead[latest,absolute]:589
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.capMissRatio[latest]:590
12855:20241101:185843.294 adding performance counter mem/vmfs.pbc.capMissRatio[latest,absolute]:590
12855:20241101:185843.294 adding performance counter disk/commands[summation]:591
12855:20241101:185843.294 adding performance counter disk/commands[summation,delta]:591
12855:20241101:185843.294 adding performance counter disk/deviceReadLatency[average]:592
12855:20241101:185843.294 adding performance counter disk/deviceReadLatency[average,absolute]:592
12855:20241101:185843.294 adding performance counter disk/kernelReadLatency[average]:593
12855:20241101:185843.294 adding performance counter disk/kernelReadLatency[average,absolute]:593
12855:20241101:185843.294 adding performance counter disk/totalReadLatency[average]:594
12855:20241101:185843.294 adding performance counter disk/totalReadLatency[average,absolute]:594
12855:20241101:185843.294 adding performance counter disk/queueReadLatency[average]:595
12855:20241101:185843.294 adding performance counter disk/queueReadLatency[average,absolute]:595
12855:20241101:185843.294 adding performance counter disk/deviceWriteLatency[average]:596
12855:20241101:185843.294 adding performance counter disk/deviceWriteLatency[average,absolute]:596
12855:20241101:185843.294 adding performance counter disk/kernelWriteLatency[average]:597
12855:20241101:185843.294 adding performance counter disk/kernelWriteLatency[average,absolute]:597
12855:20241101:185843.294 adding performance counter disk/totalWriteLatency[average]:598
12855:20241101:185843.294 adding performance counter disk/totalWriteLatency[average,absolute]:598
12855:20241101:185843.294 adding performance counter disk/queueWriteLatency[average]:599
12855:20241101:185843.294 adding performance counter disk/queueWriteLatency[average,absolute]:599
12855:20241101:185843.294 adding performance counter disk/deviceLatency[average]:600
12855:20241101:185843.294 adding performance counter disk/deviceLatency[average,absolute]:600
12855:20241101:185843.294 adding performance counter disk/kernelLatency[average]:601
12855:20241101:185843.294 adding performance counter disk/kernelLatency[average,absolute]:601
12855:20241101:185843.294 adding performance counter disk/queueLatency[average]:602
12855:20241101:185843.294 adding performance counter disk/queueLatency[average,absolute]:602
12855:20241101:185843.294 adding performance counter disk/maxQueueDepth[average]:603
12855:20241101:185843.294 adding performance counter disk/maxQueueDepth[average,absolute]:603
12855:20241101:185843.294 adding performance counter disk/commandsAveraged[average]:604
12855:20241101:185843.294 adding performance counter disk/commandsAveraged[average,rate]:604
12855:20241101:185843.294 adding performance counter net/droppedRx[summation]:605
12855:20241101:185843.294 adding performance counter net/droppedRx[summation,delta]:605
12855:20241101:185843.295 adding performance counter net/droppedTx[summation]:606
12855:20241101:185843.295 adding performance counter net/droppedTx[summation,delta]:606
12855:20241101:185843.295 adding performance counter net/bytesRx[average]:607
12855:20241101:185843.295 adding performance counter net/bytesRx[average,rate]:607
12855:20241101:185843.295 adding performance counter net/bytesTx[average]:608
12855:20241101:185843.295 adding performance counter net/bytesTx[average,rate]:608
12855:20241101:185843.295 adding performance counter net/broadcastRx[summation]:609
12855:20241101:185843.295 adding performance counter net/broadcastRx[summation,delta]:609
12855:20241101:185843.295 adding performance counter net/broadcastTx[summation]:610
12855:20241101:185843.295 adding performance counter net/broadcastTx[summation,delta]:610
12855:20241101:185843.295 adding performance counter net/multicastRx[summation]:611
12855:20241101:185843.295 adding performance counter net/multicastRx[summation,delta]:611
12855:20241101:185843.295 adding performance counter net/multicastTx[summation]:612
12855:20241101:185843.295 adding performance counter net/multicastTx[summation,delta]:612
12855:20241101:185843.295 adding performance counter net/errorsRx[summation]:613
12855:20241101:185843.295 adding performance counter net/errorsRx[summation,delta]:613
12855:20241101:185843.295 adding performance counter net/errorsTx[summation]:614
12855:20241101:185843.295 adding performance counter net/errorsTx[summation,delta]:614
12855:20241101:185843.295 adding performance counter net/unknownProtos[summation]:615
12855:20241101:185843.295 adding performance counter net/unknownProtos[summation,delta]:615
12855:20241101:185843.295 adding performance counter net/pnicBytesRx[average]:616
12855:20241101:185843.295 adding performance counter net/pnicBytesRx[average,rate]:616
12855:20241101:185843.295 adding performance counter net/pnicBytesTx[average]:617
12855:20241101:185843.295 adding performance counter net/pnicBytesTx[average,rate]:617
12855:20241101:185843.295 adding performance counter sys/heartbeat[latest]:618
12855:20241101:185843.295 adding performance counter sys/heartbeat[latest,absolute]:618
12855:20241101:185843.295 adding performance counter sys/diskUsage[latest]:619
12855:20241101:185843.295 adding performance counter sys/diskUsage[latest,absolute]:619
12855:20241101:185843.295 adding performance counter sys/resourceCpuUsage[none]:620
12855:20241101:185843.295 adding performance counter sys/resourceCpuUsage[none,rate]:620
12855:20241101:185843.295 adding performance counter sys/resourceCpuUsage[average]:621
12855:20241101:185843.295 adding performance counter sys/resourceCpuUsage[average,rate]:621
12855:20241101:185843.295 adding performance counter sys/resourceCpuUsage[maximum]:622
12855:20241101:185843.295 adding performance counter sys/resourceCpuUsage[maximum,rate]:622
12855:20241101:185843.295 adding performance counter sys/resourceCpuUsage[minimum]:623
12855:20241101:185843.295 adding performance counter sys/resourceCpuUsage[minimum,rate]:623
12855:20241101:185843.295 adding performance counter sys/resourceMemTouched[latest]:624
12855:20241101:185843.295 adding performance counter sys/resourceMemTouched[latest,absolute]:624
12855:20241101:185843.295 adding performance counter sys/resourceMemMapped[latest]:625
12855:20241101:185843.295 adding performance counter sys/resourceMemMapped[latest,absolute]:625
12855:20241101:185843.296 adding performance counter sys/resourceMemShared[latest]:626
12855:20241101:185843.296 adding performance counter sys/resourceMemShared[latest,absolute]:626
12855:20241101:185843.296 adding performance counter sys/resourceMemSwapped[latest]:627
12855:20241101:185843.296 adding performance counter sys/resourceMemSwapped[latest,absolute]:627
12855:20241101:185843.296 adding performance counter sys/resourceMemOverhead[latest]:628
12855:20241101:185843.296 adding performance counter sys/resourceMemOverhead[latest,absolute]:628
12855:20241101:185843.296 adding performance counter sys/resourceMemCow[latest]:629
12855:20241101:185843.296 adding performance counter sys/resourceMemCow[latest,absolute]:629
12855:20241101:185843.296 adding performance counter sys/resourceMemZero[latest]:630
12855:20241101:185843.296 adding performance counter sys/resourceMemZero[latest,absolute]:630
12855:20241101:185843.296 adding performance counter sys/resourceCpuRun1[latest]:631
12855:20241101:185843.296 adding performance counter sys/resourceCpuRun1[latest,absolute]:631
12855:20241101:185843.296 adding performance counter sys/resourceCpuAct1[latest]:632
12855:20241101:185843.296 adding performance counter sys/resourceCpuAct1[latest,absolute]:632
12855:20241101:185843.296 adding performance counter sys/resourceCpuMaxLimited1[latest]:633
12855:20241101:185843.296 adding performance counter sys/resourceCpuMaxLimited1[latest,absolute]:633
12855:20241101:185843.296 adding performance counter sys/resourceCpuRun5[latest]:634
12855:20241101:185843.296 adding performance counter sys/resourceCpuRun5[latest,absolute]:634
12855:20241101:185843.296 adding performance counter sys/resourceCpuAct5[latest]:635
12855:20241101:185843.296 adding performance counter sys/resourceCpuAct5[latest,absolute]:635
12855:20241101:185843.296 adding performance counter sys/resourceCpuMaxLimited5[latest]:636
12855:20241101:185843.296 adding performance counter sys/resourceCpuMaxLimited5[latest,absolute]:636
12855:20241101:185843.296 adding performance counter sys/resourceCpuAllocMin[latest]:637
12855:20241101:185843.296 adding performance counter sys/resourceCpuAllocMin[latest,absolute]:637
12855:20241101:185843.296 adding performance counter sys/resourceCpuAllocMax[latest]:638
12855:20241101:185843.296 adding performance counter sys/resourceCpuAllocMax[latest,absolute]:638
12855:20241101:185843.296 adding performance counter sys/resourceCpuAllocShares[latest]:639
12855:20241101:185843.296 adding performance counter sys/resourceCpuAllocShares[latest,absolute]:639
12855:20241101:185843.296 adding performance counter sys/resourceMemAllocMin[latest]:640
12855:20241101:185843.296 adding performance counter sys/resourceMemAllocMin[latest,absolute]:640
12855:20241101:185843.296 adding performance counter sys/resourceMemAllocMax[latest]:641
12855:20241101:185843.296 adding performance counter sys/resourceMemAllocMax[latest,absolute]:641
12855:20241101:185843.296 adding performance counter sys/resourceMemAllocShares[latest]:642
12855:20241101:185843.296 adding performance counter sys/resourceMemAllocShares[latest,absolute]:642
12855:20241101:185843.296 adding performance counter sys/osUptime[latest]:643
12855:20241101:185843.296 adding performance counter sys/osUptime[latest,absolute]:643
12855:20241101:185843.296 adding performance counter sys/resourceMemConsumed[latest]:644
12855:20241101:185843.296 adding performance counter sys/resourceMemConsumed[latest,absolute]:644
12855:20241101:185843.296 adding performance counter sys/resourceFdUsage[latest]:645
12855:20241101:185843.296 adding performance counter sys/resourceFdUsage[latest,absolute]:645
12855:20241101:185843.296 adding performance counter rescpu/actpk1[latest]:646
12855:20241101:185843.297 adding performance counter rescpu/actpk1[latest,absolute]:646
12855:20241101:185843.297 adding performance counter rescpu/runav1[latest]:647
12855:20241101:185843.297 adding performance counter rescpu/runav1[latest,absolute]:647
12855:20241101:185843.297 adding performance counter rescpu/actav5[latest]:648
12855:20241101:185843.297 adding performance counter rescpu/actav5[latest,absolute]:648
12855:20241101:185843.297 adding performance counter rescpu/actpk5[latest]:649
12855:20241101:185843.297 adding performance counter rescpu/actpk5[latest,absolute]:649
12855:20241101:185843.297 adding performance counter rescpu/runav5[latest]:650
12855:20241101:185843.297 adding performance counter rescpu/runav5[latest,absolute]:650
12855:20241101:185843.297 adding performance counter rescpu/actav15[latest]:651
12855:20241101:185843.297 adding performance counter rescpu/actav15[latest,absolute]:651
12855:20241101:185843.297 adding performance counter rescpu/actpk15[latest]:652
12855:20241101:185843.297 adding performance counter rescpu/actpk15[latest,absolute]:652
12855:20241101:185843.297 adding performance counter rescpu/runav15[latest]:653
12855:20241101:185843.297 adding performance counter rescpu/runav15[latest,absolute]:653
12855:20241101:185843.297 adding performance counter rescpu/runpk1[latest]:654
12855:20241101:185843.297 adding performance counter rescpu/runpk1[latest,absolute]:654
12855:20241101:185843.297 adding performance counter rescpu/maxLimited1[latest]:655
12855:20241101:185843.297 adding performance counter rescpu/maxLimited1[latest,absolute]:655
12855:20241101:185843.297 adding performance counter rescpu/runpk5[latest]:656
12855:20241101:185843.297 adding performance counter rescpu/runpk5[latest,absolute]:656
12855:20241101:185843.297 adding performance counter rescpu/maxLimited5[latest]:657
12855:20241101:185843.297 adding performance counter rescpu/maxLimited5[latest,absolute]:657
12855:20241101:185843.297 adding performance counter rescpu/runpk15[latest]:658
12855:20241101:185843.297 adding performance counter rescpu/runpk15[latest,absolute]:658
12855:20241101:185843.297 adding performance counter rescpu/maxLimited15[latest]:659
12855:20241101:185843.297 adding performance counter rescpu/maxLimited15[latest,absolute]:659
12855:20241101:185843.297 adding performance counter rescpu/sampleCount[latest]:660
12855:20241101:185843.297 adding performance counter rescpu/sampleCount[latest,absolute]:660
12855:20241101:185843.297 adding performance counter rescpu/samplePeriod[latest]:661
12855:20241101:185843.297 adding performance counter rescpu/samplePeriod[latest,absolute]:661
12855:20241101:185843.297 adding performance counter managementAgent/memUsed[average]:662
12855:20241101:185843.297 adding performance counter managementAgent/memUsed[average,absolute]:662
12855:20241101:185843.297 adding performance counter managementAgent/swapUsed[average]:663
12855:20241101:185843.297 adding performance counter managementAgent/swapUsed[average,absolute]:663
12855:20241101:185843.297 adding performance counter managementAgent/cpuUsage[average]:664
12855:20241101:185843.297 adding performance counter managementAgent/cpuUsage[average,rate]:664
12855:20241101:185843.298 adding performance counter storagePath/commandsAveraged[average]:665
12855:20241101:185843.298 adding performance counter storagePath/commandsAveraged[average,rate]:665
12855:20241101:185843.298 adding performance counter storagePath/numberReadAveraged[average]:666
12855:20241101:185843.298 adding performance counter storagePath/numberReadAveraged[average,rate]:666
12855:20241101:185843.298 adding performance counter storagePath/numberWriteAveraged[average]:667
12855:20241101:185843.298 adding performance counter storagePath/numberWriteAveraged[average,rate]:667
12855:20241101:185843.298 adding performance counter storagePath/read[average]:668
12855:20241101:185843.298 adding performance counter storagePath/read[average,rate]:668
12855:20241101:185843.298 adding performance counter storagePath/write[average]:669
12855:20241101:185843.298 adding performance counter storagePath/write[average,rate]:669
12855:20241101:185843.298 adding performance counter storagePath/totalReadLatency[average]:670
12855:20241101:185843.298 adding performance counter storagePath/totalReadLatency[average,absolute]:670
12855:20241101:185843.298 adding performance counter storagePath/totalWriteLatency[average]:671
12855:20241101:185843.298 adding performance counter storagePath/totalWriteLatency[average,absolute]:671
12855:20241101:185843.298 adding performance counter virtualDisk/readIOSize[latest]:672
12855:20241101:185843.298 adding performance counter virtualDisk/readIOSize[latest,absolute]:672
12855:20241101:185843.298 adding performance counter virtualDisk/writeIOSize[latest]:673
12855:20241101:185843.298 adding performance counter virtualDisk/writeIOSize[latest,absolute]:673
12855:20241101:185843.298 adding performance counter virtualDisk/smallSeeks[latest]:674
12855:20241101:185843.298 adding performance counter virtualDisk/smallSeeks[latest,absolute]:674
12855:20241101:185843.298 adding performance counter virtualDisk/mediumSeeks[latest]:675
12855:20241101:185843.298 adding performance counter virtualDisk/mediumSeeks[latest,absolute]:675
12855:20241101:185843.298 adding performance counter virtualDisk/largeSeeks[latest]:676
12855:20241101:185843.298 adding performance counter virtualDisk/largeSeeks[latest,absolute]:676
12855:20241101:185843.298 adding performance counter virtualDisk/readLatencyUS[latest]:677
12855:20241101:185843.298 adding performance counter virtualDisk/readLatencyUS[latest,absolute]:677
12855:20241101:185843.298 adding performance counter virtualDisk/writeLatencyUS[latest]:678
12855:20241101:185843.298 adding performance counter virtualDisk/writeLatencyUS[latest,absolute]:678
12855:20241101:185843.298 adding performance counter datastore/datastoreMaxQueueDepth[latest]:679
12855:20241101:185843.298 adding performance counter datastore/datastoreMaxQueueDepth[latest,absolute]:679
12855:20241101:185843.298 adding performance counter datastore/unmapSize[summation]:680
12855:20241101:185843.298 adding performance counter datastore/unmapSize[summation,delta]:680
12855:20241101:185843.298 adding performance counter datastore/unmapIOs[summation]:681
12855:20241101:185843.298 adding performance counter datastore/unmapIOs[summation,delta]:681
12855:20241101:185843.298 adding performance counter hbr/hbrNumVms[average]:682
12855:20241101:185843.298 adding performance counter hbr/hbrNumVms[average,absolute]:682
12855:20241101:185843.298 adding performance counter hbr/hbrNetRx[average]:683
12855:20241101:185843.298 adding performance counter hbr/hbrNetRx[average,rate]:683
12855:20241101:185843.298 adding performance counter hbr/hbrNetTx[average]:684
12855:20241101:185843.298 adding performance counter hbr/hbrNetTx[average,rate]:684
12855:20241101:185843.298 adding performance counter hbr/hbrNetLatency[average]:685
12855:20241101:185843.299 adding performance counter hbr/hbrNetLatency[average,absolute]:685
12855:20241101:185843.299 adding performance counter hbr/hbrDiskReadLatency[average]:686
12855:20241101:185843.299 adding performance counter hbr/hbrDiskReadLatency[average,absolute]:686
12855:20241101:185843.299 adding performance counter hbr/hbrDiskStallLatency[average]:687
12855:20241101:185843.299 adding performance counter hbr/hbrDiskStallLatency[average,absolute]:687
12855:20241101:185843.299 adding performance counter hbr/hbrDiskTransferSuccess[average]:688
12855:20241101:185843.299 adding performance counter hbr/hbrDiskTransferSuccess[average,absolute]:688
12855:20241101:185843.299 adding performance counter hbr/hbrDiskTransferIdle[average]:689
12855:20241101:185843.299 adding performance counter hbr/hbrDiskTransferIdle[average,absolute]:689
12855:20241101:185843.299 adding performance counter hbr/hbrDiskTransferBytes[average]:690
12855:20241101:185843.299 adding performance counter hbr/hbrDiskTransferBytes[average,absolute]:690
12855:20241101:185843.299 adding performance counter vflashModule/numActiveVMDKs[latest]:691
12855:20241101:185843.299 adding performance counter vflashModule/numActiveVMDKs[latest,absolute]:691
12855:20241101:185843.299 adding performance counter vsanDomObj/readIops[average]:692
12855:20241101:185843.299 adding performance counter vsanDomObj/readIops[average,rate]:692
12855:20241101:185843.299 adding performance counter vsanDomObj/readThroughput[average]:693
12855:20241101:185843.299 adding performance counter vsanDomObj/readThroughput[average,rate]:693
12855:20241101:185843.299 adding performance counter vsanDomObj/readAvgLatency[average]:694
12855:20241101:185843.299 adding performance counter vsanDomObj/readAvgLatency[average,absolute]:694
12855:20241101:185843.299 adding performance counter vsanDomObj/readMaxLatency[latest]:695
12855:20241101:185843.299 adding performance counter vsanDomObj/readMaxLatency[latest,absolute]:695
12855:20241101:185843.299 adding performance counter vsanDomObj/readCacheHitRate[latest]:696
12855:20241101:185843.299 adding performance counter vsanDomObj/readCacheHitRate[latest,absolute]:696
12855:20241101:185843.299 adding performance counter vsanDomObj/readCongestion[average]:697
12855:20241101:185843.299 adding performance counter vsanDomObj/readCongestion[average,rate]:697
12855:20241101:185843.299 adding performance counter vsanDomObj/writeIops[average]:698
12855:20241101:185843.299 adding performance counter vsanDomObj/writeIops[average,rate]:698
12855:20241101:185843.299 adding performance counter vsanDomObj/writeThroughput[average]:699
12855:20241101:185843.299 adding performance counter vsanDomObj/writeThroughput[average,rate]:699
12855:20241101:185843.299 adding performance counter vsanDomObj/writeAvgLatency[average]:700
12855:20241101:185843.299 adding performance counter vsanDomObj/writeAvgLatency[average,absolute]:700
12855:20241101:185843.299 adding performance counter vsanDomObj/writeMaxLatency[latest]:701
12855:20241101:185843.299 adding performance counter vsanDomObj/writeMaxLatency[latest,absolute]:701
12855:20241101:185843.299 adding performance counter vsanDomObj/writeCongestion[average]:702
12855:20241101:185843.299 adding performance counter vsanDomObj/writeCongestion[average,rate]:702
12855:20241101:185843.299 adding performance counter vsanDomObj/recoveryWriteIops[average]:703
12855:20241101:185843.299 adding performance counter vsanDomObj/recoveryWriteIops[average,rate]:703
12855:20241101:185843.299 adding performance counter vsanDomObj/recoveryWriteThroughput[average]:704
12855:20241101:185843.299 adding performance counter vsanDomObj/recoveryWriteThroughput[average,rate]:704
12855:20241101:185843.299 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average]:705
12855:20241101:185843.299 adding performance counter vsanDomObj/recoveryWriteAvgLatency[average,absolute]:705
12855:20241101:185843.300 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest]:706
12855:20241101:185843.300 adding performance counter vsanDomObj/recoveryWriteMaxLatency[latest,absolute]:706
12855:20241101:185843.300 adding performance counter vsanDomObj/recoveryWriteCongestion[average]:707
12855:20241101:185843.300 adding performance counter vsanDomObj/recoveryWriteCongestion[average,rate]:707
12855:20241101:185843.300 adding performance counter gpu/utilization[none]:708
12855:20241101:185843.300 adding performance counter gpu/utilization[none,absolute]:708
12855:20241101:185843.300 adding performance counter gpu/utilization[maximum]:709
12855:20241101:185843.300 adding performance counter gpu/utilization[maximum,absolute]:709
12855:20241101:185843.300 adding performance counter gpu/utilization[minimum]:710
12855:20241101:185843.300 adding performance counter gpu/utilization[minimum,absolute]:710
12855:20241101:185843.300 adding performance counter gpu/mem.used[none]:711
12855:20241101:185843.300 adding performance counter gpu/mem.used[none,absolute]:711
12855:20241101:185843.300 adding performance counter gpu/mem.used[maximum]:712
12855:20241101:185843.300 adding performance counter gpu/mem.used[maximum,absolute]:712
12855:20241101:185843.300 adding performance counter gpu/mem.used[minimum]:713
12855:20241101:185843.300 adding performance counter gpu/mem.used[minimum,absolute]:713
12855:20241101:185843.300 adding performance counter gpu/mem.usage[none]:714
12855:20241101:185843.300 adding performance counter gpu/mem.usage[none,absolute]:714
12855:20241101:185843.300 adding performance counter gpu/mem.usage[average]:715
12855:20241101:185843.300 adding performance counter gpu/mem.usage[average,absolute]:715
12855:20241101:185843.300 adding performance counter gpu/mem.usage[maximum]:716
12855:20241101:185843.300 adding performance counter gpu/mem.usage[maximum,absolute]:716
12855:20241101:185843.300 adding performance counter gpu/mem.usage[minimum]:717
12855:20241101:185843.300 adding performance counter gpu/mem.usage[minimum,absolute]:717
12855:20241101:185843.300 Unknown performance counter 718 type of unitInfo:gigaBytes
12855:20241101:185843.300 adding performance counter gpu/mem.used.gb[latest]:718
12855:20241101:185843.300 Unknown performance counter 718 type of unitInfo:gigaBytes
12855:20241101:185843.300 adding performance counter gpu/mem.used.gb[latest,absolute]:718
12855:20241101:185843.300 Unknown performance counter 719 type of unitInfo:gigaBytes
12855:20241101:185843.300 adding performance counter gpu/mem.reserved.gb[latest]:719
12855:20241101:185843.300 Unknown performance counter 719 type of unitInfo:gigaBytes
12855:20241101:185843.300 adding performance counter gpu/mem.reserved.gb[latest,absolute]:719
12855:20241101:185843.300 Unknown performance counter 720 type of unitInfo:gigaBytes
12855:20241101:185843.300 adding performance counter gpu/mem.total.gb[latest]:720
12855:20241101:185843.300 Unknown performance counter 720 type of unitInfo:gigaBytes
12855:20241101:185843.300 adding performance counter gpu/mem.total.gb[latest,absolute]:720
12855:20241101:185843.300 adding performance counter pmem/available.reservation[latest]:721
12855:20241101:185843.300 adding performance counter pmem/available.reservation[latest,absolute]:721
12855:20241101:185843.300 adding performance counter pmem/drsmanaged.reservation[latest]:722
12855:20241101:185843.300 adding performance counter pmem/drsmanaged.reservation[latest,absolute]:722
12855:20241101:185843.300 adding performance counter vmx/numVCPUs[latest]:723
12855:20241101:185843.300 adding performance counter vmx/numVCPUs[latest,absolute]:723
12855:20241101:185843.300 adding performance counter vmx/vcpusMhzMin[latest]:724
12855:20241101:185843.300 adding performance counter vmx/vcpusMhzMin[latest,absolute]:724
12855:20241101:185843.301 adding performance counter vmx/vcpusMhzMax[latest]:725
12855:20241101:185843.301 adding performance counter vmx/vcpusMhzMax[latest,absolute]:725
12855:20241101:185843.301 adding performance counter vmx/vcpusMhzMean[latest]:726
12855:20241101:185843.301 adding performance counter vmx/vcpusMhzMean[latest,absolute]:726
12855:20241101:185843.301 adding performance counter vmx/cpuSpeed[latest]:727
12855:20241101:185843.301 adding performance counter vmx/cpuSpeed[latest,absolute]:727
12855:20241101:185843.301 adding performance counter vmx/overheadMemSizeMin[latest]:728
12855:20241101:185843.301 adding performance counter vmx/overheadMemSizeMin[latest,absolute]:728
12855:20241101:185843.301 adding performance counter vmx/overheadMemSizeMax[latest]:729
12855:20241101:185843.301 adding performance counter vmx/overheadMemSizeMax[latest,absolute]:729
12855:20241101:185843.301 adding performance counter vmx/vigor.opsTotal[latest]:730
12855:20241101:185843.301 adding performance counter vmx/vigor.opsTotal[latest,absolute]:730
12855:20241101:185843.301 adding performance counter vmx/poll.itersPerS[latest]:731
12855:20241101:185843.301 adding performance counter vmx/poll.itersPerS[latest,absolute]:731
12855:20241101:185843.301 adding performance counter vmx/userRpc.opsPerS[latest]:732
12855:20241101:185843.301 adding performance counter vmx/userRpc.opsPerS[latest,absolute]:732
12855:20241101:185843.301 End of vmware_service_get_perf_counters():SUCCEED
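Each counter registered above is logged as "adding performance counter <group>/<counter>[<rollup>(,<statsType>)]:<id>", and every vCenter counter appears twice, once with the rollup alone and once with the rollup plus stats type, both carrying the same numeric counter ID. Below is a minimal, hypothetical Python sketch (not part of Zabbix) for indexing a trace like this one into a counter-path map; the regular expression and function name are illustrative assumptions.

    # Hypothetical helper: index "adding performance counter" trace lines
    # into a map of counter path -> {variant: numeric counter ID}.
    # Assumes the line format visible in this log:
    #   <pid>:<yyyymmdd>:<hhmmss.mmm> adding performance counter <group>/<name>[<variant>]:<id>
    import re
    from collections import defaultdict

    LINE_RE = re.compile(
        r"adding performance counter (?P<path>[\w./]+)\[(?P<variant>[^\]]+)\]:(?P<id>\d+)"
    )

    def index_counters(log_lines):
        counters = defaultdict(dict)
        for line in log_lines:
            m = LINE_RE.search(line)
            if m:
                counters[m.group("path")][m.group("variant")] = int(m.group("id"))
        return counters

    # Example: counters["gpu/mem.used.gb"] maps both "latest" and
    # "latest,absolute" to counter ID 718, matching the lines above.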
12855:20241101:185843.302 In vmware_service_get_evt_severity()
12861:20241101:185843.328 In vmware_job_get() queue:2
12861:20241101:185843.328 End of vmware_job_get() queue:1 type:update_perf_counters
12861:20241101:185843.328 In vmware_job_exec() type:update_perf_counters
12861:20241101:185843.328 End of vmware_job_exec() type:update_perf_counters ret:FAIL
12861:20241101:185843.328 In vmware_job_schedule() queue:1 type:update_perf_counters
12861:20241101:185843.328 End of vmware_job_schedule() type:update_perf_counters nextcheck:18:59:43
12861:20241101:185843.328 In vmware_job_get() queue:2
12861:20241101:185843.328 End of vmware_job_get() queue:2 type:none
12859:20241101:185843.328 In vmware_job_get() queue:2
12857:20241101:185843.328 In vmware_job_get() queue:2
12857:20241101:185843.328 End of vmware_job_get() queue:2 type:none
12859:20241101:185843.328 End of vmware_job_get() queue:2 type:none
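The collector lines just above trace one pass of the job loop: vmware_job_get() pops an update_perf_counters job from the queue, vmware_job_exec() returns FAIL (the service data is not ready yet), and vmware_job_schedule() requeues the job with nextcheck 18:59:43. A hypothetical sketch for pulling these job outcomes out of such a trace follows; the regular expression and names are assumptions for illustration, not Zabbix code.

    # Hypothetical helper: list job executions and their results from
    # vmware collector trace lines such as those above.
    import re

    EXEC_RE = re.compile(
        r"^(?P<pid>\d+):(?P<date>\d{8}):(?P<time>\d{6}\.\d+) "
        r"End of vmware_job_exec\(\) type:(?P<type>\S+) ret:(?P<ret>\w+)"
    )

    def job_results(log_lines):
        for line in log_lines:
            m = EXEC_RE.match(line)
            if m:
                yield m.group("time"), m.group("pid"), m.group("type"), m.group("ret")

    # For the trace above this yields
    # ("185843.328", "12861", "update_perf_counters", "FAIL").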
12855:20241101:185843.358 vmware_service_get_evt_severity() SOAP response:
EventManagerdescriptionInformationinfoWarningwarningErrorerrorUseruserExtendedEventImport certificate successinfoImport certificate succeeded.Import certificate succeeded.Import certificate succeeded.Import certificate succeeded.ad.event.ImportCertEvent|Import certificate succeeded. <EventLongDescription id="ad.event.ImportCertEvent"> <description> Import certificate succeeded </description> </EventLongDescription> ExtendedEventImport certificate failureerrorImport certificate failed.Import certificate failed.Import certificate failed.Import certificate failed.ad.event.ImportCertFailedEvent|Import certificate failed. <EventLongDescription id="ad.event.ImportCertFailedEvent"> <description> Import certificate failed </description> </EventLongDescription> ExtendedEventJoin domain successinfoJoin domain succeeded.Join domain succeeded.Join domain succeeded.Join domain succeeded.ad.event.JoinDomainEvent|Join domain succeeded. <EventLongDescription id="ad.event.JoinDomainEvent"> <description> Join domain succeeded </description> </EventLongDescription> ExtendedEventJoin domain failureerrorJoin domain failed.Join domain failed.Join domain failed.Join domain failed.ad.event.JoinDomainFailedEvent|Join domain failed. <EventLongDescription id="ad.event.JoinDomainFailedEvent"> <description> Join domain failed </description> </EventLongDescription> ExtendedEventLeave domain successinfoLeave domain succeeded.Leave domain succeeded.Leave domain succeeded.Leave domain succeeded.ad.event.LeaveDomainEvent|Leave domain succeeded. <EventLongDescription id="ad.event.LeaveDomainEvent"> <description> Leave domain succeeded </description> </EventLongDescription> ExtendedEventLeave domain failureerrorLeave domain failed.Leave domain failed.Leave domain failed.Leave domain failed.ad.event.LeaveDomainFailedEvent|Leave domain failed. 
<EventLongDescription id="ad.event.LeaveDomainFailedEvent"> <description> Leave domain failed </description> </EventLongDescription> ExtendedEventBackup job failederrorcom.vmware.applmgmt.backup.job.failed.event|Backup job failed <EventLongDescription id="com.vmware.applmgmt.backup.job.failed.event"> <description> Backup job failed </description> <cause> <description> Backup job failed </description> <action> Check backup server connectivity and available space </action> </cause> </EventLongDescription> ExtendedEventBackup job finished successfullyinfocom.vmware.applmgmt.backup.job.finished.event|Backup job finished successfully <EventLongDescription id="com.vmware.applmgmt.backup.job.finished.event"> <description> Backup job finished successfully </description> <cause> <description> Backup job finished successfully </description> </cause> </EventLongDescription> ExtendedEventGlobal Permission created for user with role and propagation.infocom.vmware.cis.CreateGlobalPermission|Global Permission created for user {User} with role {Role} and propagation {Propagation}.EventExPermission created for user on item with role.infocom.vmware.cis.CreatePermission|Permission created for user {User} on item {DocType} with role {Role}.EventExGlobal Permission removed for user.infocom.vmware.cis.RemoveGlobalPermission|Global Permission removed for user {User}.EventExPermission removed for user on iteminfocom.vmware.cis.RemovePermission|Permission removed for user {User} on item {DocType}EventExUser attached tag(s) to object(s)com.vmware.cis.tagging.attach|User {User} attached tag(s) {Tag} to object(s) {Object}EventExUser detached tag(s) from object(s)com.vmware.cis.tagging.detach|User {User} detached tag(s) {Tag} from object(s) {Object}ExtendedEventHttpNfc service disabled - missing configurationerrorHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationHttpNfc service disabled - missing configurationcom.vmware.configuration.httpnfc.missing|HttpNfc service is disabled because of missing configuration. Please check vpxa configuration file and correct the error and reconnect host. <EventLongDescription id="com.vmware.configuration.httpnfc.missing"> <description> The HttpNfc service is disabled because of missing configuration section in vpxa.cfg. Please check vpxa configuration file and correct the error and reconnect host. </description> <cause> <description>The vpxa configuration file requires a configuration section for HttpNfc</description> <action>Please check vpxa configuration file and correct the error and reconnect host.</action> </cause> </EventLongDescription> EventExAdded Licenseinfocom.vmware.license.AddLicenseEvent|License {licenseKey} added to VirtualCenterEventExAssigned Licenseinfocom.vmware.license.AssignLicenseEvent|License {licenseKey} assigned to asset {entityName} with id {entityId}EventExDownload License Informationwarningcom.vmware.license.DLFDownloadFailedEvent|Failed to download license information from the host {hostname} due to {errorReason.@enum.com.vmware.license.DLFDownloadFailedEvent.DLFDownloadFailedReason}EventExDefault License Keys Updatedinfocom.vmware.license.DefaultLicenseKeysUpdatedEvent|Default License Keys for asset {entityName} have been updatedEventExHost License Edition Not Allowedwarningcom.vmware.license.HostLicenseEditionNotAllowedEvent|The host is licensed with {edition}. 
The license edition of vCenter Server does not support {edition}.ExtendedEventHost license or evaluation period has expiredwarningcom.vmware.license.HostLicenseExpiredEvent|Expired host license or evaluation period. <EventLongDescription id="com.vmware.license.HostLicenseExpiredEvent"> <description> Host license or evaluation period has expired. </description> <cause> <description>Expired host license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventHost time-limited license has expiredwarningcom.vmware.license.HostSubscriptionLicenseExpiredEvent|Expired host time-limited license. <EventLongDescription id="com.vmware.license.HostSubscriptionLicenseExpiredEvent"> <description> Host time-limited license has expired. </description> <cause> <description>Expired host time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> EventExLicense assignment faultsinfocom.vmware.license.LicenseAssignFailedEvent|License assignment on the host fails. Reasons: {errorMessage.@enum.com.vmware.license.LicenseAssignError}. <EventLongDescription id="com.vmware.license.LicenseAssignFailedEvent"> <description> The host license assignment succeeds on vCenter Server but can not be successfully pushed down to the host. Any license assignment to a host proceeds in two stages. In the first stage vCenter Server does preliminary checks on the license key, the license state of the host and determines if the requested assignment is valid. If so, it stores this assignment locally in its database. In the second stage, vCenter Server pushes the newly assigned license to the host. During the second stage the host might reject the assignment under certain circumstances. These circumstances usually result from a mismatch of the information available to vCenter Server and the host concerned. Any such discrepancies are notified to the user via this event. This event lists the reason because of which it was logged and also shows up as a configuration issue on the vSphere Client. </description> <cause> <description>License expiry information mismatch between vCenter Server and host</description> <action>If the system time on the machine running vCenter Server and host are not in sync then put them in sync</action> </cause> <cause> <description>The license key is a per Virtual Machine key and the number of powered on Virtual Machines is larger than the maximum limit of the key</description> <action>Use a different key with a larger capacity</action> </cause> </EventLongDescription> EventExLicense Capacity Exceededwarningcom.vmware.license.LicenseCapacityExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the license capacity ({capacity} {costUnitText})EventExLicense ExpirywarningYour host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires.com.vmware.license.LicenseExpiryEvent|Your host license expires in {remainingDays} days. The host will disconnect from vCenter Server when its license expires. <EventLongDescription id="com.vmware.license.LicenseExpiryEvent"> <description> If a host is assigned a temporary license (a license key with an expiry), this event is logged in order to provide users an advanced warning on the imminent expiry of the license key. The event logging starts 15 days prior to the expiry of the license key. 
This event also shows up on the host summary page as a configuration issue on the vSphere Client. </description> <cause> <description>License key is about to expire or has expired</description> <action>Assign a different license key</action> </cause> </EventLongDescription> EventExLicense User Threshold Exceededwarningcom.vmware.license.LicenseUserThresholdExceededEvent|The current license usage ({currentUsage} {costUnitText}) for {edition} exceeds the user-defined threshold ({threshold} {costUnitText}) <EventLongDescription id="com.vmware.license.LicenseUserThresholdExceededEvent"> <description> Users can define thresholds to monitor overuse of the product license. This event is logged when the license usage threshold defined by the user for a product edition is exceeded. </description> <cause> <description> License usage of a product edition has exceeded the user-defined threshold </description> <action> Review license assignments and usage </action> </cause> </EventLongDescription> EventExRemoved Licenseinfocom.vmware.license.RemoveLicenseEvent|License {licenseKey} removed from VirtualCenterEventExUnassigned Licenseinfocom.vmware.license.UnassignLicenseEvent|License unassigned from asset {entityName} with id {entityId}ExtendedEventvCenter Server license or evaluation period has expiredwarningcom.vmware.license.VcLicenseExpiredEvent|Expired vCenter Server license or evaluation period. <EventLongDescription id="com.vmware.license.VcLicenseExpiredEvent"> <description> vCenter Server license or evaluation period has expired. </description> <cause> <description>Expired vCenter Server license or evaluation period</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventvCenter Server time-limited license has expiredwarningcom.vmware.license.VcSubscriptionLicenseExpiredEvent|Expired vCenter Server time-limited license. <EventLongDescription id="com.vmware.license.VcSubscriptionLicenseExpiredEvent"> <description> vCenter Server time-limited license has expired. </description> <cause> <description>Expired vCenter Server time-limited license</description> <action>Assign a different license</action> </cause> </EventLongDescription> ExtendedEventSome in-use features are not supported by current licensewarningcom.vmware.license.vsan.FeatureBeyondCapability|In-use vSAN features {feature} are not supported by current license.ExtendedEventHost flash capacity exceeds the licensed limit for vSANwarningcom.vmware.license.vsan.HostSsdOverUsageEvent|The capacity of the flash disks on the host exceeds the limit of the vSAN license. <EventLongDescription id="com.vmware.license.vsan.HostSsdOverUsageEvent"> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <cause> <description> The capacity of the SSD disks on the host exceeds the limit of the vSAN license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> ExtendedEventvSAN license or evaluation period has expiredwarningcom.vmware.license.vsan.LicenseExpiryEvent|Expired vSAN license or evaluation period. <EventLongDescription id="com.vmware.license.vsan.LicenseExpiryEvent"> <description> Expired vSAN license or evaluation period. </description> <cause> <description> Expired vSAN license or evaluation period. </description> <action> Review cluster license assignments. 
</action> </cause> </EventLongDescription> ExtendedEventvSAN time-limited license has expiredwarningcom.vmware.license.vsan.SubscriptionLicenseExpiredEvent|Expired vSAN time-limited license. <EventLongDescription id="com.vmware.license.vsan.SubscriptionLicenseExpiredEvent"> <description> Expired vSAN time-limited license. </description> <cause> <description> Expired vSAN time-limited license. </description> <action> Review cluster license assignments. </action> </cause> </EventLongDescription> EventExStorage policy associatedinfoAssociated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}Associated storage policy: {ProfileId} with entity: {EntityId}com.vmware.pbm.profile.associate|Associated storage policy: {ProfileId} with entity: {EntityId}EventExStorage policy createdinfoStorage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.create|Storage policy created. Policy Id: {ProfileId}. Policy name: {ProfileName}EventExStorage policy deletedinfoDeleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}Deleted storage policy: {ProfileId}com.vmware.pbm.profile.delete|Deleted storage policy: {ProfileId}EventExStorage policy dissociatedinfoDissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}Dissociated storage policy: {ProfileId} from entity: {EntityId}com.vmware.pbm.profile.dissociate|Dissociated storage policy: {ProfileId} from entity: {EntityId}EventExStorage policy updatedinfoStorage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}Storage policy updated for {ProfileId}. Policy name: {ProfileName}com.vmware.pbm.profile.update|Storage policy updated for {ProfileId}. Policy name: {ProfileName}EventExStorage policy name updatedinfoStorage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}Storage policy name updated for {ProfileId}. New name: {NewProfileName}com.vmware.pbm.profile.updateName|Storage policy name updated for {ProfileId}. 
New name: {NewProfileName}EventExCertificate Manager event in SSOinfocom.vmware.sso.CertificateManager|Certificate Manager event by {userName} at {timestamp} : {description}EventExConfiguration Management event in SSOinfocom.vmware.sso.ConfigurationManagement|Configuration Management event by {userName} at {timestamp} : {description}EventExDomain Management event in SSOinfocom.vmware.sso.DomainManagement|Domain Management event by {userName} at {timestamp} : {description}EventExIdentity Source Management event in SSOinfocom.vmware.sso.IdentitySourceManagement|Identity Source Management event by {userName} at {timestamp} : {description}EventExIdentity Source LDAP Certificate is about to expireinfocom.vmware.sso.LDAPCertExpiry|Renew Identity Source LDAP Certificate: {description}EventExLockout Policy event in SSOinfocom.vmware.sso.LockoutPolicy|Lockout Policy event by {userName} at {timestamp} : {description}EventExFailed login attempt event in SSOerrorcom.vmware.sso.LoginFailure|Failed login {userName} from {userIp} at {timestamp} in SSOEventExSuccessful login attempt event in SSOinfocom.vmware.sso.LoginSuccess|Successful login {userName} from {userIp} at {timestamp} in SSOEventExLogout attempt event in SSOinfocom.vmware.sso.Logout|Logout event by {userName} from {userIp} at {timestamp} in SSOEventExPassword Policy event in SSOinfocom.vmware.sso.PasswordPolicy|Password Policy event by {userName} at {timestamp} : {description}EventExPrincipal Management event in SSOinfocom.vmware.sso.PrincipalManagement|Principal Management event by {userName} at {timestamp} : {description}EventExRole Management event in SSOinfocom.vmware.sso.RoleManagement|Role Management event by {userName} at {timestamp} : {description}EventExSTS Signing Certificates are about to expireinfocom.vmware.sso.STSCertExpiry|Renew STS Signing Certificates: {description}EventExSMTP Configuration event in SSOinfocom.vmware.sso.SmtpConfiguration|SMTP Configuration event by {userName} at {timestamp} : {description}EventExSystem Management event in SSOinfocom.vmware.sso.SystemManagement|System Management event by {userName} at {timestamp} : {description}EventExvCenter Identity event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentity|vCenter Identity event by {userName} at {timestamp} : {description}EventExvCenter Identity Providers event in Trustmanagementinfocom.vmware.trustmanagement.VcIdentityProviders|vCenter Identity Providers event by {userName} at {timestamp} : {description}EventExvCenter Trusts event in Trustmanagementinfocom.vmware.trustmanagement.VcTrusts|vCenter Trusts event by {userName} at {timestamp} : {description}EventExIdentity Provider SSL Trust Certificate is about to expireinfocom.vmware.trustmanagement.WS1SSLCertExpiry|Renew Identity Provider SSL Trust Certificate: {description}EventExIdentity Provider Users and Groups token is about to expireinfocom.vmware.trustmanagement.WS1SyncTokenExpiry|Renew Identity Provider Users and Groups token: {description}EventExReports that a stage from autonomous cluster creation has failedwarningcom.vmware.vc.A8sCluster.CreateStageFailedEvent|Autonomous cluster creation stage: {stage} failed: {reason}EventExReports that a stage from autonomous cluster creation has completed successfullyinfocom.vmware.vc.A8sCluster.CreateStageSuccessEvent|Autonomous cluster creation stage: {stage} succeededEventExAutonomous cluster health is degraded.warningcom.vmware.vc.A8sCluster.HealthDegradedEvent|Autonomous cluster health is degraded. 
Reason: {reason}ExtendedEventAutonomous cluster is healthy.infocom.vmware.vc.A8sCluster.HealthHealthyEvent|Autonomous cluster is healthy.EventExAutonomous cluster is unhealthy.warningcom.vmware.vc.A8sCluster.HealthUnhealthyEvent|Autonomous cluster is unhealthy. Reason: {reason}ExtendedEventAuthz service is not running. Authorization data might not be synchronized.errorcom.vmware.vc.AuthzDataNotSynced|Authz service is not running. Authorization data might not be synchronized.ExtendedEventAuthz service is running. Authorization data is being synchronized.infocom.vmware.vc.AuthzDataSynced|Authz service is running. Authorization data is being synchronized.ExtendedEventEvent sequence ID reached its max value and was reset.infocom.vmware.vc.EventIdOverflow|Event sequence ID reached its max value and was reset.ExtendedEventcom.vmware.vc.FailedToApplyPermissionsEvent|ExtendedEventvSphere HA agent can reach all cluster management addressesinfoThe vSphere HA agent on the host {host.name} in cluster {computeResource.name} can reach all the cluster management addressesThe vSphere HA agent on the host {host.name} can reach all the cluster management addressesThe vSphere HA agent on this host can reach all the cluster management addressescom.vmware.vc.HA.AllHostAddrsPingable|The vSphere HA agent on the host {host.name} in cluster {computeResource.name} in {datacenter.name} can reach all the cluster management addresses <EventLongDescription id="com.vmware.vc.HA.AllHostAddrsPingable"> <description> The host is able to ping all of the vSphere HA management addresses of every other cluster host. </description> </EventLongDescription> ExtendedEventvSphere HA agent can reach all isolation addressesinfoAll vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name}All vSphere HA isolation addresses are reachable by this hostAll vSphere HA isolation addresses are reachable by hostcom.vmware.vc.HA.AllIsoAddrsPingable|All vSphere HA isolation addresses are reachable by host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.AllIsoAddrsPingable"> <description> The host is able to ping all of the vSphere HA isolation addresses. </description> </EventLongDescription> ExtendedEventvSphere HA answered a lock-lost question on a virtual machinewarningvSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name}vSphere HA answered the lock-lost question on virtual machine {vm.name}vSphere HA answered the lock-lost question on this virtual machinecom.vmware.vc.HA.AnsweredVmLockLostQuestionEvent|vSphere HA answered the lock-lost question on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmLockLostQuestionEvent"> <description> The virtual machine running on this host lost the exclusive lock of its files on disk. This will occur if another instance of this virtual machine is running on a different host. This situation can happen if a host loses access to both its storage and management networks but is not configured to shutdown its virtual machines on isolation. The virtual machines on this host will continue to run without access to their disks, while vSphere HA will start a new instance of the virtual machines on another host in the cluster. 
When the isolated host regains access to the storage network, it will try to reacquire the disk locks. This will fail since the disk locks are held by another host. The host will then issue a question on the virtual machine indicating that disk locks have been lost. vSphere HA will automatically answer this question to allow the virtual machine instance without the disk locks to power off. </description> </EventLongDescription> ExtendedEventvSphere HA answered a question from the host about terminating a virtual machinewarningvSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name}vSphere HA answered a question from host {host.name} about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating virtual machine {vm.name}vSphere HA answered a question from the host about terminating this virtual machinecom.vmware.vc.HA.AnsweredVmTerminatePDLEvent|vSphere HA answered a question from host {host.name} in cluster {computeResource.name} about terminating virtual machine {vm.name} <EventLongDescription id="com.vmware.vc.HA.AnsweredVmTerminatePDLEvent"> <description> The virtual machine running on this host had a virtual disk which experienced permanent device loss. The host will issue a question if it is configured to terminate the VM automatically under such a condition. This event indicates that vSphere HA answered the question. After the VM is terminated, vSphere HA will make a best effort to restart it. </description> </EventLongDescription> ExtendedEventvSphere HA disabled the automatic VM Startup/Shutdown featureinfovSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on the host {host.name}. Automatic VM restarts will interfere with HA when reacting to a host failure.vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature. Automatic VM restarts will interfere with HA when reacting to a host failure.com.vmware.vc.HA.AutoStartDisabled|vSphere HA disabled the automatic Virtual Machine Startup/Shutdown feature on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Automatic VM restarts will interfere with HA when reacting to a host failure. <EventLongDescription id="com.vmware.vc.HA.AutoStartDisabled"> <description> Virtual Machine Startup/Shutdown has been disabled by HA. A host which is contained in a vSphere HA cluster is not permitted to have automatic virtual machine startup and shutdown since it may conflict with HA's attempts to relocate the virtual machines if a host fails.
</description> </EventLongDescription> ExtendedEventvSphere HA did not reset a VM which had files on inaccessible datastore(s)warningvSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on host {host.name} because the VM had files on inaccessible datastore(s)vSphere HA did not reset VM {vm.name} on this host because the VM had files on inaccessible datastore(s)vSphere HA did not reset this VM because the VM had file(s) on inaccessible datastore(s)com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore|vSphere HA did not reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the VM had files on inaccessible datastore(s) <EventLongDescription id=" com.vmware.vc.HA.CannotResetVmWithInaccessibleDatastore"> <description> This event is logged when vSphere HA did not reset a VM affected by an inaccessible datastore. It will attempt to reset the VM after storage failure is cleared. </description> <cause> <description> The VM is affected by an inaccessible datastore due to storage connectivity loss. Resetting such a VM might cause the VM to be powered off and not restarted by vSphere HA. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA cluster contains incompatible hosts.warningvSphere HA Cluster {computeResource.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.vSphere HA Cluster contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported.com.vmware.vc.HA.ClusterContainsIncompatibleHosts|vSphere HA Cluster {computeResource.name} in {datacenter.name} contains ESX/ESXi 3.5 hosts and more recent host versions, which isn't fully supported. <EventLongDescription id="com.vmware.vc.HA.ClusterContainsIncompatibleHosts"> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions. </description> <cause> <description> This vSphere HA cluster contains an ESX/ESXi 3.5 host and more recent host versions, which isn't fully supported. Failover of VMs from ESX/ESXi 3.5 hosts to newer hosts is not guaranteed. </description> <action> Place ESX/ESXi 3.5 hosts into a separate vSphere HA cluster from hosts with more recent ESX versions. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA completed a failover actioninfovSphere HA completed a virtual machine failover action in cluster {computeResource.name}vSphere HA completed a virtual machine failover actioncom.vmware.vc.HA.ClusterFailoverActionCompletedEvent|vSphere HA completed a virtual machine failover action in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA initiated a failover actionwarningvSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name}vSphere HA initiated a failover action on {pendingVms} virtual machinescom.vmware.vc.HA.ClusterFailoverActionInitiatedEvent|vSphere HA initiated a failover action on {pendingVms} virtual machines in cluster {computeResource.name} in datacenter {datacenter.name}EventExvSphere HA failover operation in progressWarningvSphere HA failover operation in progress in cluster {computeResource.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMsvSphere HA failover operation in progress: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMscom.vmware.vc.HA.ClusterFailoverInProgressEvent|vSphere HA failover operation in progress in cluster {computeResource.name} in datacenter {datacenter.name}: {numBeingPlaced} VMs being restarted, {numToBePlaced} VMs waiting for a retry, {numAwaitingResource} VMs waiting for resources, {numAwaitingVsanVmChange} inaccessible vSAN VMs <EventLongDescription id="com.vmware.vc.HA.ClusterFailoverInProgressEvent"> <description> This event is logged when a vSphere HA failover operation is in progress for virtual machines in the cluster. It also reports the number of virtual machines that are being restarted. There are four different categories of such VMs. (1) VMs being placed: vSphere HA is in the process of trying to restart these VMs; (2) VMs awaiting retry: a previous restart attempt failed, and vSphere HA is waiting for a timeout to expire before trying again; (3) VMs requiring additional resources: insufficient resources are available to restart these VMs. vSphere HA will retry when more resources become available (such as a host coming back online); (4) Inaccessible vSAN VMs: vSphere HA cannot restart these vSAN VMs because they are not accessible. It will retry when there is a change in accessibility. </description> <cause> <description> vSphere HA is attempting to restart failed virtual machines in the cluster. It might be that the virtual machine restart is pending and has not yet completed. </description> <action> vSphere HA will retry the failover on another host unless the maximum number of failover attempts has been reached. A subsequent retry may succeed in powering on the virtual machine so allow the vSphere HA failover operation to be declared a success or failure. </action> </cause> <cause> <description> This event might also be generated when a required resource in the cluster becomes temporarily unavailable due to network reconfiguration, hardware upgrade, software update, host overload, etc., which can cause vSphere HA to lose its network or storage heartbeats to certain hosts or virtual machines and mark them inaccessible. </description> <action> In many cases, this may be a temporary condition.
If the cluster soon stabilizes to its normal condition, vSphere HA will detect the host and virtual machines to be live and discard any failover attempts. In such cases, this event may be treated as a soft alarm caused by such changes. </action> </cause> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It can also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to fail over the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or try powering on the virtual machine and investigate any returned errors. </action> <action> If the error reports that a file is locked, the VM might be powered on on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it might have been powered on by a user on a host. If any hosts have been declared dead, investigate whether a networking or storage issue is the cause. </action> <action> If the error reports that the virtual machine is in an invalid state, there might be an operation in progress that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation, that are taking a long time to complete. </action> </cause> </EventLongDescription> ExtendedEventHost connected to a vSphere HA masterinfovSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName}com.vmware.vc.HA.ConnectedToMaster|vSphere HA agent on host {host.name} connected to the vSphere HA master on host {masterHostName} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.ConnectedToMaster"> <description> This event is logged whenever a host in a vSphere HA cluster transitions to a slave host state and establishes a connection with a master host. </description> </EventLongDescription> ExtendedEventvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}errorvSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved. Error: {fault}com.vmware.vc.HA.CreateConfigVvolFailedEvent|vSphere HA failed to create a configuration vVol for this datastore and so will not be able to protect virtual machines on the datastore until the problem is resolved.
Error: {fault} <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolFailedEvent"> <description> vSphere HA failed to create a config vvol on the datastore </description> <cause> <description>A possible VP, host, network, or lack of resources prevented vSphere HA from creating a config vvol</description> <action>Look for errors in the environment, then re-enable vSphere HA</action> </cause> </EventLongDescription> ExtendedEventvSphere HA successfully created a configuration vVol after the previous failureinfovSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurevSphere HA successfully created a configuration vVol after the previous failurecom.vmware.vc.HA.CreateConfigVvolSucceededEvent|vSphere HA successfully created a configuration vVol after the previous failure <EventLongDescription id="com.vmware.vc.HA.CreateConfigVvolSucceededEvent"> <description> vSphere HA successfully created a config vvol on the datastore. If there was a failed config vvol datastore configuration issue, it is being cleared </description> <cause> <description> There were no errors during creation of the config vvol on the datastore</description> </cause> </EventLongDescription> ExtendedEventvSphere HA agent is runninginfovSphere HA agent on host {host.name} in cluster {computeResource.name} is runningvSphere HA agent on host {host.name} is runningvSphere HA agent is runningcom.vmware.vc.HA.DasAgentRunningEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is running <EventLongDescription id=" com.vmware.vc.HA.DasAgentRunningEvent"> <description> This event is logged when the vSphere HA agent is running on a host. </description> <cause> <description> This event is reported after vSphere HA is configured on a host or after the vSphere HA agent on a host starts, such as after a host reboot. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected an HA cluster state version inconsistencywarningvSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name}vSphere HA detected an HA cluster state version inconsistencycom.vmware.vc.HA.DasClusterVersionInconsistentEvent|vSphere HA detected an HA cluster state version inconsistency in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasClusterVersionInconsistentEvent"> <description> This event is logged when vSphere HA cluster has a version inconsistency for cluster state(HostList, ClusterConfiguration, VM protection state). </description> <cause> <description> This situation could primarily occur if vCenter has been restored to an older backed up state causing vCenter to rollback to older version for the vSphere HA cluster state (HostList, ClusterConfiguration, VM protection state) while the hosts on the cluster have the latest version for the cluster state. As a result, protection state for VMs will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster, any new cluster configuration state will not get updated on the vSphere HA agents on the hosts which are part of this vSphere HA cluster and if hosts were added or removed to/from this vSphere HA cluster after vCenter backup and before vCenter Restore, VMs could potentially failover to hosts not being managed by vCenter but which are still part of the HA cluster. </description> <action> Step 1. 
If hosts were added or removed to/from the vSphere HA cluster after vCenter backup and before vCenter Restore, please add or remove those respective hosts back to the vSphere HA cluster so that the list of hosts in the vSphere HA cluster is identical to the list of hosts in the cluster before vCenter was last restored. If you do not want to add hosts to the cluster, stop the vSphere HA process on the hosts that were added to vCenter after the backup. If this is not done, in case of a failure, VMs could potentially fail over to hosts not being managed by vCenter but which are still part of the HA cluster. </action> <action> Step 2. Disable vSphere HA on the cluster and then re-enable vSphere HA on the cluster. This will make sure that vCenter's version for the vSphere HA cluster state (HostList, ClusterConfiguration, VM protection state) is reset with a new fault domain id for the HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a failed failover hosterrorvSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name}vSphere HA detected a possible failure of failover host {host.name}vSphere HA detected a possible failure of this failover hostcom.vmware.vc.HA.DasFailoverHostFailedEvent|vSphere HA detected a possible failure of failover host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostFailedEvent"> <description> This event is logged when vSphere HA has detected the failure of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the failure of a failover host. A host is considered to have failed by a vSphere HA master agent if it loses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the failover host failure, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is not running and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-isolated failover hosterrorvSphere HA detected that failover host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that failover host {host.name} is network isolated from the clustervSphere HA detected that this failover host is network isolated from the clustercom.vmware.vc.HA.DasFailoverHostIsolatedEvent|Host {host.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a designated failover host. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if vSphere HA detects the network isolation of a failover host. vSphere HA reports a host as isolated if there are no heartbeats received from the HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the host's datastore heartbeats.
</description> <action> Determine the cause of the failover host isolation, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if the failover host is isolated and a host failure occurs. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network-partitioned failover hostwarningvSphere HA detected that failover host {host.name} in {computeResource.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that failover host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this failover host is in a different network partition than the mastercom.vmware.vc.HA.DasFailoverHostPartitionedEvent|Failover Host {host.name} in {computeResource.name} in {datacenter.name} is in a different network partition than the master <EventLongDescription id=" com.vmware.vc.HA.DasFailoverHostPartitionedEvent"> <description> This event is logged when vSphere HA has detected a designated failover host is network partitioned. </description> <cause> <description> If the admission control policy specifies one or more failover hosts, this event will be generated if a vSphere HA master agent detects a failover host is network partitioned. vSphere HA reports a host as partitioned if it cannot communicate with a subset of hosts in the cluster, yet can determine that the host is alive via its datastore heartbeats. </description> <action> Determine the cause of the partitioned failover host, and correct. vSphere HA will make a best effort to place VMs on remaining hosts in the cluster if a failover host is partitioned and a host failure occurs. See the product documentation for troubleshooting tips.
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA agent on a failover host is unreachableerrorThe vSphere HA agent on the failover host {host.name} in {computeResource.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on the failover host {host.name} is not reachable but host responds to ICMP pingsThe vSphere HA agent on this failover host is not reachable but host responds to ICMP pingscom.vmware.vc.HA.DasFailoverHostUnreachableEvent|The vSphere HA agent on the failover host {host.name} in cluster {computeResource.name} in {datacenter.name} is not reachable but host responds to ICMP pingsEventExHost complete datastore failureerrorAll shared datastores failed on the host {hostName} in cluster {computeResource.name}All shared datastores failed on the host {hostName}All shared datastores failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent|All shared datastores failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteDatastoreFailureEvent"> <description> A host in a Component Protection-enabled cluster has lost connectivity to all shared datastores </description> <cause> <description>Connectivity to all shared datastores has been lost</description> <action>Reconnect at least one shared datastore</action> </cause> </EventLongDescription> EventExHost complete network failureerrorAll VM networks failed on the host {hostName} in cluster {computeResource.name}All VM networks failed on the host {hostName}All VM networks failed on the host {hostName}com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent|All VM networks failed on the host {hostName} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.DasHostCompleteNetworkFailureEvent"> <description> A host in a Component Protection enabled cluster has lost connectivity to all virtual machine networks </description> <cause> <description>Connectivity to all virtual machine networks has been lost</description> <action>Reconnect at least one virtual machine network</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a host failureerrorvSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name}vSphere HA detected a possible host failure of host {host.name}vSphere HA detected a possible host failure of this hostcom.vmware.vc.HA.DasHostFailedEvent|vSphere HA detected a possible host failure of host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostFailedEvent"> <description> This event is logged when vSphere HA detects a possible host failure. </description> <cause> <description> A host is considered to have failed by a vSphere HA master agent if it loses contact with the vSphere HA agent on the host, the host does not respond to pings on any of the management interfaces, and the master does not observe any datastore heartbeats. </description> <action> Determine the cause of the host failure, and correct. See the product documentation for troubleshooting tips.
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected a network isolated hosterrorvSphere HA detected that host {host.name} is network isolated from cluster {computeResource.name}vSphere HA detected that host {host.name} is network isolated from the clustervSphere HA detected that this host is network isolated from the clustercom.vmware.vc.HA.DasHostIsolatedEvent|vSphere HA detected that host {host.name} is isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostIsolatedEvent"> <description> This event is logged when vSphere HA has detected the network isolation of a host. </description> <cause> <description> This event will be generated if there are no heartbeats received from the vSphere HA agent on that host, the host is not pingable on any of the management interfaces, yet the host is still alive as determined by the host's datastore heartbeats. </description> <action> Determine the cause of the host isolation, and correct. See the product documentation for troubleshooting tips. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA host monitoring is disabledwarningvSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name}vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabledcom.vmware.vc.HA.DasHostMonitoringDisabledEvent|vSphere HA host monitoring is disabled. No virtual machine failover will occur until Host Monitoring is re-enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.DasHostMonitoringDisabledEvent"> <description> This event is logged when host monitoring has been disabled in a vSphere HA cluster. </description> <cause> <description> Host monitoring is disabled, so vSphere HA will not perform any failover actions. This event is generated to inform the user that their cluster is temporarily not being protected against host or VM failures. If host or VM failures occur while host monitoring is disabled, HA will not attempt to restart the VMs that were running on the failed hosts. Other vSphere HA features are not impacted by whether host monitoring is disabled. </description> <action> Enable host monitoring to resume host monitoring. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA failed to restart a network isolated virtual machineerrorvSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart virtual machine {vm.name} after it was powered off in response to a network isolation eventvSphere HA was unable to restart this virtual machine after it was powered off in response to a network isolation eventcom.vmware.vc.HA.FailedRestartAfterIsolationEvent|vSphere HA was unable to restart virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} after it was powered off in response to a network isolation event.
The virtual machine should be manually powered back on.EventExRunning VMs utilization cannot satisfy the configured failover resources on the cluster.warningRunning VMs utilization cannot satisfy the configured failover resources on cluster {computeResource.name}Running VMs utilization cannot satisfy the configured failover resources on the cluster.com.vmware.vc.HA.FailoverResourcesViolationEvent|Running VMs utilization cannot satisfy the configured failover resources on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.FailoverResourcesViolationEvent"> <description> This event is logged when the total utilization of the running VMs cannot satisfy the configured failover resources on a vSphere HA admission controlled cluster. </description> <cause> <description> The total utilization of the running VMs on this cluster is unable to satisfy the configured failover resources in the cluster. This event is generated to inform the user that their cluster will be running in a compromised state during failover and would not have sufficient failover resources to ensure the optimal functioning of the VMs and their workloads. The side-effect of this situation is that VMs won't be working optimally even though we ensure required failover capacity in case of failures. Other vSphere HA features are not impacted by this and this warning doesn't affect any VM related operations like power-on, vmotion etc. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to ensure that there is sufficient failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA changed a host's heartbeat datastoresinfoDatastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name}Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on this hostcom.vmware.vc.HA.HeartbeatDatastoreChanged|Datastore {dsName} is {changeType.@enum.com.vmware.vc.HA.HeartbeatDatastoreChange} for storage heartbeating monitored by the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreSelected"> <description> A datastore is selected or deselected for storage heartbeating monitored by the vSphere agent on this host. vSphere HA employs storage heartbeating to detect host failures when there is a network partition.
</description> </EventLongDescription> EventExvSphere HA heartbeat datastore number for a host is insufficientwarningThe number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for host {host.name} is {selectedNum}, which is less than required: {requiredNum}The number of vSphere HA heartbeat datastores for this host is {selectedNum}, which is less than required: {requiredNum}com.vmware.vc.HA.HeartbeatDatastoreNotSufficient|The number of vSphere HA heartbeat datastores for host {host.name} in cluster {computeResource.name} in {datacenter.name} is {selectedNum}, which is less than required: {requiredNum} <EventLongDescription id="com.vmware.vc.HA.HeartbeatDatastoreNotSufficient"> <description> The number of heartbeat datastores used for this host is less than required. Multiple heartbeat datastores are needed to tolerate storage failures. The host summary page will report a configuration issue in this case. To ignore the configuration issue, use the vSphere HA cluster advanced option, das.ignoreInsufficientHbDatastore. </description> <cause> <description> The host does not have sufficient number of accessible datastores that are shared among other hosts in the cluster. </description> <action> Add more shared datastores to the host or check if any of its datastore is currently inaccessible. </action> </cause> </EventLongDescription> EventExvSphere HA agent on a host has an errorwarningvSphere HA agent for host {host.name} has an error in {computeResource.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}vSphere HA agent for this host has an error: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason}com.vmware.vc.HA.HostAgentErrorEvent|vSphere HA agent for host {host.name} has an error in {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.HostAgentErrorReason} <EventLongDescription id="com.vmware.vc.HA.AgentErrorEvent"> <description> This event is logged when the vSphere HA agent for the host has an error. </description> <action> See product documentation for troubleshooting tips. </action> </EventLongDescription> ExtendedEventvSphere HA agent is healthyinfovSphere HA agent on host {host.name} in cluster {computeResource.name} is healthyvSphere HA agent on host {host.name} is healthyvSphere HA agent is healthycom.vmware.vc.HA.HostDasAgentHealthyEvent|vSphere HA agent on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is healthy <EventLongDescription id=" com.vmware.vc.HA.HostDasAgentHealthyEvent"> <description> This event is logged when the vSphere HA agent on a host transitions to a healthy state. </description> <cause> <description> vSphere HA reports this event when the vSphere HA agent on the host is either a master or a slave that is connected to the master over the management network. </description> </cause> </EventLongDescription> EventExvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error: {reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error. 
{reason.@enum.com.vmware.vc.HA.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}com.vmware.vc.HA.HostDasErrorEvent|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error: {reason.@enum.HostDasErrorEvent.HostDasErrorReason} <EventLongDescription id="com.vmware.vc.HA.HostDasErrorEvent"> <description> The vSphere HA agent on this host has an error. The event may provide details with extra information indicating the cause of the error. </description> <cause> <description>There was an error configuring the vSphere HA agent on the host</description> <action> Look at the task details for the configure vSphere HA task that failed. That will provide more details about why the failure occurred. Address the problem and reconfigure vSphere HA on the host. </action> </cause> <cause> <description> There was a timeout while communicating with the vSphere HA agent. This can occur if there is a high rate of operations being performed on virtual machines in the cluster resulting in the vSphere HA agents not being able to process the changes fast enough. </description> <action> Verify that this is a transient problem by stopping operations on virtual machines in the cluster for a few minutes to give time to the vSphere HA agents to process all their pending messages. If this resolves the problem, consider reducing the rate of operations performed on the cluster. </action> </cause> <cause> <description>The vSphere HA agent is in a shutdown or failed state</description> <action>Reconfigure vSphere HA on the host. If this fails, reconfigure vSphere HA on the cluster</action> </cause> </EventLongDescription> EventExvSphere HA detected a datastore failurewarningvSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of datastore {arg1} on host {host.name}vSphere HA detected a failure of datastore {arg1}com.vmware.vc.HA.HostDatastoreFailedEvent|vSphere HA detected a failure of datastore {arg1} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventUnsupported vSphere HA and vCloud Distributed Storage configurationerrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured on host {host.name} because vCloud Distributed Storage is enabled but the host does not support that featurevSphere HA cannot be configured because vCloud Distributed Storage is enabled but the host does not support that featurecom.vmware.vc.HA.HostDoesNotSupportVsan|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in {datacenter.name} because vCloud Distributed Storage is enabled but the host does not support that featureExtendedEventHost has no vSphere HA isolation addresseserrorHost {host.name} in cluster {computeResource.name} has no isolation addresses defined as required by vSphere HAHost {host.name} has no isolation addresses defined as required by vSphere HAThis host has no isolation addresses defined as required by vSphere HAcom.vmware.vc.HA.HostHasNoIsolationAddrsDefined|Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no isolation addresses defined as required by vSphere HA.
<EventLongDescription id="com.vmware.vc.HA.HostHasNoIsolationAddrsDefined"> <description> The host has an vSphere HA configuration issue because there were no IP addresses that vSphere HA could use for detecting network isolation. Without at least one, the host will not take any isolation response. HA, by default, will use the host's default gateway (defined in the host's networking configuration), or use the addresses that were specified in the cluster's advanced settings. </description> <action> Define a default gateway in the host's networking configuration. </action> <action> If the cluster advanced setting das.usedefaultisolationaddress is false, you must define at least one isolation address using the advanced options. </action> <action> Define one or more cluster advanced options, each containing an IP address to be pinged by vSphere HA to detect if it is network-isolated when it no longer receives communication with other hosts in the cluster. The advanced option is das.isolationAddress[n], where 'n' is a number from 1 to 9. You may specify multiple addresses. </action> </EventLongDescription> ExtendedEventvSphere HA cannot be configured on this host because there are no mounted datastores.errorvSphere HA cannot be configured on {host.name} in cluster {computeResource.name} because there are no mounted datastores.vSphere HA cannot be configured on {host.name} because there are no mounted datastores.vSphere HA cannot be configured on this host because there are no mounted datastores.com.vmware.vc.HA.HostHasNoMountedDatastores|vSphere HA cannot be configured on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because there are no mounted datastores.ExtendedEventvSphere HA requires a SSL Thumbprint for hosterrorvSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on {host.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified.vSphere HA cannot be configured on this host because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for this host has been verified.com.vmware.vc.HA.HostHasNoSslThumbprint|vSphere HA cannot be configured on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} because its SSL thumbprint has not been verified. Check that vCenter Server is configured to verify SSL thumbprints and that the thumbprint for {host.name} has been verified. <EventLongDescription id="com.vmware.vc.HA.HostHasNoSslThumbprint"> <description> The host has an vSphere HA configuration issue because it does not have a verified ssl thumbprint. Hosts need verified SSL thumbprints for secure vSphere HA communications. </description> <action> If the host is using self-signed certificates, check that vCenter Server is configured to verify SSL certificates, and verify the thumbprints for the hosts in the vSphere HA cluster. 
</action> </EventLongDescription> ExtendedEventHost is incompatible with vSphere HAerrorThe product version of host {host.name} in cluster {computeResource.name} is incompatible with vSphere HA.The product version of host {host.name} is incompatible with vSphere HA.The product version of this host is incompatible with vSphere HA.com.vmware.vc.HA.HostIncompatibleWithHA|The product version of host {host.name} in cluster {computeResource.name} in {datacenter.name} is incompatible with vSphere HA. <EventLongDescription id="com.vmware.vc.HA.HostIncompatibleWithHA"> <description> The host is in a vSphere HA cluster but its product version is incompatible with HA. </description> <action> To fix the situation the host should either be moved out of the vSphere HA cluster or upgraded to a version supporting HA. </action> </EventLongDescription> EventExvSphere HA detected a network failurewarningvSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name}vSphere HA detected a failure of network {network} on host {host.name}vSphere HA detected a failure of network {network}com.vmware.vc.HA.HostNetworkFailedEvent|vSphere HA detected a failure of network {network} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventvSphere HA detected a network-partitioned hostwarningvSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connected in {computeResource.name}vSphere HA detected that host {host.name} is in a different network partition than the master to which vCenter Server is connectedvSphere HA detected that this host is in a different network partition than the master to which vCenter Server is connectedcom.vmware.vc.HA.HostPartitionedFromMasterEvent|vSphere HA detected that host {host.name} is in a different network partition than the master {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.HostPartitionedFromMasterEvent"> <description> This event is logged when the host is in a different partition than the master. </description> </EventLongDescription> EventExThe vSphere HA host availability state changedinfoThe vSphere HA availability state of the host {host.name} in cluster {computeResource.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of the host {host.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}The vSphere HA availability state of this host has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState}com.vmware.vc.HA.HostStateChangedEvent|The vSphere HA availability state of the host {host.name} in cluster in {computeResource.name} in {datacenter.name} has changed to {newState.@enum.com.vmware.vc.HA.DasFdmAvailabilityState} <EventLongDescription id="com.vmware.vc.HA.HostStateChangedEvent"> <description> This event is logged when the availability state of a host has changed. </description> </EventLongDescription> ExtendedEventvSphere HA agent unconfigure failed on hostwarningThere was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on host {host.name}. To solve this problem, reconnect the host to vCenter Server.There was an error unconfiguring the vSphere HA agent on this host. 
To solve this problem, reconnect the host to vCenter Server.com.vmware.vc.HA.HostUnconfigureError|There was an error unconfiguring the vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name}. To solve this problem, reconnect the host to vCenter Server. <EventLongDescription id="com.vmware.vc.HA.HostUnconfigureError"> <description> There was an error unconfiguring the vSphere HA agent on this host. </description> <cause> <description> The vSphere HA unconfiguration task failed to send the updated hostList to vSphere HA agent on the host. This condition may interfere with the vSphere HA cluster to which the host used to belong and should be corrected. </description> <action> Add the host back to a vCenter Server of version 5.0 or later. </action> </cause> </EventLongDescription> EventExA disconnected host has vSphere HA protected VMserrorHost {host.name} in cluster {computeResource.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)Host {host.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s)This host is disconnected from vCenter Server, but contains {protectedVmCount} vSphere HA protected virtual machine(s)com.vmware.vc.HA.HostUnconfiguredWithProtectedVms|Host {host.name} in cluster {computeResource.name} in {datacenter.name} is disconnected from vCenter Server, but contains {protectedVmCount} protected virtual machine(s) <EventLongDescription id="com.vmware.vc.HA.HostUnconfiguredWithProtectedVms"> <description> This host is disconnected and contains one or more virtual machine(s) that are still protected by vSphere HA. Consequently, these virtual machines could be failed over to another host if this host should fail. </description> <cause> <description> If a vSphere HA-enabled host is disconnected and is unable to unprotect the virtual machines currently running on it (perhaps due to datastores being unavailable, or not being able to communicate with the vSphere HA master host) then these virtual machines would still be protected, but reside on the disconnected host. Also, if a virtual machine is migrated using vMotion to a vSphere HA-enabled host that is currently in the process of disconnecting, this can lead to the same result. </description> <action> To correct this situation, ensure that the host has access to the datastores used by these virtual machines, and then reconnect the host to a vSphere HA-enabled cluster. The virtual machines should become unprotected shortly after vSphere HA is configured on the host. </action> </cause> </EventLongDescription> EventExvSphere HA configured failover resources are insufficient to satisfy desired failover levelwarningInsufficient configured resources to satisfy the desired vSphere HA failover level on cluster {computeResource.name}Insufficient configured resources to satisfy the desired vSphere HA failover levelcom.vmware.vc.HA.InsufficientFailoverLevelEvent|Insufficient configured resources to satisfy the desired vSphere HA failover level on the cluster {computeResource.name} in {datacenter.name} <EventLongDescription id=" com.vmware.vc.HA.InsufficientFailoverLevelEvent"> <description> The cluster does not have enough failover capacity to satisfy the desired host failures to tolerate for vSphere HA. Failovers may still be performed by vSphere HA but will be on a best effort basis and configured resources may not be sufficient to respect the desired host failures to tolerate.
</description> <cause> <description> The desired host failures to tolerate setting might not be completely respected since the cluster does not have the required failover capacity to satisfy the failover of the largest desired number of hosts. </description> <action> Add more capacity in the cluster to clear this warning or change the admission control settings to reserve more failover capacity. </action> </cause> </EventLongDescription> EventExvSphere HA detected an invalid master agentwarningvSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised.com.vmware.vc.HA.InvalidMaster|vSphere HA agent on host {remoteHostname} is an invalid master. The host should be examined to determine if it has been compromised. <EventLongDescription id="com.vmware.vc.HA.InvalidMaster"> <description> A host in a vSphere HA cluster that is claiming to be a master has been determined to be invalid by another master host. This occurs when an existing master gets a message from another master in the same cluster. The existing master verifies that the other master is actually a valid master before it considers abdicating to the other master. An invalid master is an indication that there may be a compromised host on the network that is attempting to disrupt the HA cluster. The offending host should be examined to determine if it has been compromised. It's also possible a compromised host is impersonating a valid host so the reported host may not be the actual host that is compromised. </description> </EventLongDescription> ExtendedEventvSphere HA could not identify lock owner host on VM with duplicatesinfovSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name}vSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on VM {vm.name} with duplicatesvSphere HA could not identify lock owner host on this VM with duplicatescom.vmware.vc.HA.LockOwnerUnKnownForDupVms|vSphere HA could not identify lock owner host on VM {vm.name} with duplicates in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.LockOwnerUnKnownForDupVms"> <description> The vSphere HA agent could not identify lock owner host on duplicate VMs. </description> <cause> <description> This occurs when vSphere HA fails over the VM to another host but is unable to bring down the VM on the failed host. This results in multiple instances of a VM running in the cluster if the failed host rejoins the cluster. </description> <action> Could not determine the lock owner host on duplicate VM.
</action> </cause> </EventLongDescription> EventExvSphere HA agent cannot reach some cluster management addressesinfovSphere HA agent on {host.name} in cluster {computeResource.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on {host.name} cannot reach some management network addresses of other hosts: {unpingableAddrs}vSphere HA agent on host cannot reach some management network addresses of other hosts: {unpingableAddrs}com.vmware.vc.HA.NotAllHostAddrsPingable|vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} cannot reach some management network addresses of other hosts: {unpingableAddrs} <EventLongDescription id="com.vmware.vc.HA.NotAllIsoAddrsPingable"> <description> The vSphere HA agent on host cannot reach some of the management network addresses of other hosts, and vSphere HA may not be able to restart VMs if a host failure occurs. </description> <cause> <description> There is a network issue preventing this host from communicating with some or all of the hosts in the cluster over their vSphere HA management networks. vSphere HA reliability is currently compromised in the cluster and failover may not reliably occur if a host or hosts should fail during this condition. </description> <action> Determine and correct the source of the communication problem. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not terminate the VM that was selected for preemptionerrorvSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name}vSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate the VM {vm.name} that was selected for preemptionvSphere HA could not terminate this VM that was selected for preemptioncom.vmware.vc.HA.PreemptionFailedWithMaxRetry|vSphere HA could not terminate the VM {vm.name} that was selected for preemption in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.PreemptionFailedWithMaxRetry"> <description> vSphere HA could not terminate the VM that was selected for preemption. </description> <cause> <description> This occurs when vSphere HA receives an InsufficientResourcesFault for a VM whose fault reason indicates the presence of a preemptible VM. vSphere HA terminates the appropriate preemptible VM to free up resources. </description> <action> Terminate the preemptible VM manually to free up resources. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA remediated duplicates of VMinfovSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of VM {vm.name}vSphere HA remediated duplicates of this VMcom.vmware.vc.HA.RemediatedDupVMs|vSphere HA remediated duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediatedDupVMs"> <description> The vSphere HA agent on the host remediated the duplicate VM. </description> <cause> <description> This occurs when vSphere HA fails over the VM to another host but is unable to bring down the VM on the failed host. This results in multiple instances of a VM running in the cluster if the failed host rejoins the cluster. </description> <action> Kept the VM running on the host which holds the lock on the datastore, and terminated the VM on the rest of the hosts where it was running. 
</action> </cause> </EventLongDescription> ExtendedEventvSphere HA could not remediate duplicates of VMwarningvSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of VM {vm.name}vSphere HA could not remediate duplicates of this VMcom.vmware.vc.HA.RemediationFailedForDupVMs|vSphere HA could not remediate duplicates of VM {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.RemediationFailedForDupVMs"> <description> The vSphere HA agent on the host could not remediate the duplicate VM. </description> <cause> <description> This occurs when vSphere HA fails over the VM to another host but is unable to bring down the VM on the failed host. This results in multiple instances of a VM running in the cluster if the failed host rejoins the cluster. </description> <action> Duplicates of the VM running on multiple hosts could not be terminated. </action> </cause> </EventLongDescription> EventExvSphere HA failed to start a Fault Tolerance secondary VM.errorvSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}. Reason : {fault.msg}. vSphere HA agent will retry until it times out.com.vmware.vc.HA.StartFTSecondaryFailedEvent|vSphere HA agent failed to start Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name} in {datacenter.name}. Reason : {fault.msg}. vSphere HA agent will retry until it times out. <EventLongDescription id="com.vmware.vc.HA.StartFTSecondaryFailedEvent"> <description> vSphere HA agent failed to start a Fault Tolerance secondary VM. vSphere HA will retry until either the operation succeeds or until the maximum number of restart attempts is reached. </description> </EventLongDescription> EventExvSphere HA successfully started a Fault Tolerance secondary VM.infovSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name}.vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost}.com.vmware.vc.HA.StartFTSecondarySucceededEvent|vSphere HA agent successfully started Fault Tolerance secondary VM {secondaryCfgPath} on host {secondaryHost} for primary VM {vm.name} in cluster {computeResource.name}. 
<EventLongDescription id="com.vmware.vc.HA.StartFTSecondarySucceededEvent"> <description> vSphere HA agent successfully started a Fault Tolerance secondary virtual machine. </description> </EventLongDescription> EventExvSphere HA removed a datastore from preferred heartbeat datastoreswarningvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} because the datastore is removed from inventoryvSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster because the datastore is removed from inventorycom.vmware.vc.HA.UserHeartbeatDatastoreRemoved|vSphere HA removed datastore {dsName} from the set of preferred heartbeat datastores selected for cluster {computeResource.name} in {datacenter.name} because the datastore is removed from inventory <EventLongDescription id="com.vmware.vc.HA.UserHeartbeatDatastoreRemoved"> <description> The datastore is removed from the set of preferred heartbeat datastores selected for this cluster. </description> <cause> <description> The datastore does not exist in the inventory. This happens when the datastore is removed from a host in the cluster manually or via a rescan. </description> <action> Choose a different datastore by reconfiguring the vSphere HA cluster. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA did not perform an isolation response for vm because its VM restart priority is DisabledinfovSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} because its VM restart priority is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is DisabledvSphere HA did not perform an isolation response for {vm.name} because its VM restart priority is Disabled"vSphere HA did not perform an isolation response because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledIsolationEvent|vSphere HA did not perform an isolation response for {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledIsolationEvent"> <description> This event is logged when a host in a vSphere HA cluster was isolated and no isolation response was taken. </description> <cause> <description> The VM restart priority setting is set to disabled, so vSphere HA did not perform any action on this VM when the host became isolated. If the restart priority is disabled, HA will not attempt to restart the VM on another host, so HA will take no action for this VM on the isolated host. This event is informational only. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA did not attempt to restart vm because its VM restart priority is DisabledinfovSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is DisabledvSphere HA did not attempt to restart {vm.name} because its VM restart priority is Disabled"vSphere HA did not attempt to restart vm because its VM restart priority is Disabled"com.vmware.vc.HA.VMIsHADisabledRestartEvent|vSphere HA did not attempt to restart {vm.name} in cluster {computeResource.name} in {datacenter.name} because its VM restart priority is Disabled <EventLongDescription id=" com.vmware.vc.HA.VMIsHADisabledRestartEvent"> <description> This event is logged when a failed VM in a vSphere HA cluster will not be restarted because its VM restart priority setting is set to disabled. </description> <cause> <description> The restart priority for the cluster or VM is disabled, so vSphere HA did not perform any action when this VM failed. This event is informational only. </description> </cause> </EventLongDescription> EventExvCenter Server cannot communicate with the master vSphere HA agentwarningvCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name}vCenter Server cannot communicate with the master vSphere HA agent on {hostname}com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent|vCenter Server cannot communicate with the master vSphere HA agent on {hostname} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotCommunicateWithMasterEvent"> <description> This event is logged when vCenter Server cannot communicate with a vSphere HA master agent. </description> <cause> <description> This event is reported when vCenter Server is not able to communicate with a vSphere HA master agent on the host, but it can communicate with other vSphere HA agents in the cluster and these are reporting the host is a master. </description> <action> Correct the networking issue that is preventing vCenter Server from communicating with the host listed in the event. This problem can occur, for example, if the physical NIC in use by this network connection has failed. </action> </cause> </EventLongDescription> ExtendedEventvCenter Server is unable to find a master vSphere HA agentwarningvCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name}vCenter Server is unable to find a master vSphere HA agentcom.vmware.vc.HA.VcCannotFindMasterEvent|vCenter Server is unable to find a master vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcCannotFindMasterEvent"> <description> This event is logged when vCenter Server is unable to find a master vSphere HA agent. 
</description> <cause> <description> </description> <action> </action> </cause> </EventLongDescription> EventExvCenter Server connected to a vSphere HA master agentinfovCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is connected to a master HA agent running on host {hostname}com.vmware.vc.HA.VcConnectedToMasterEvent|vCenter Server is connected to a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcConnectedToMasterEvent"> <description> This event is logged when vCenter Server is connected with a master vSphere HA agent. </description> </EventLongDescription> EventExvCenter Server disconnected from a master vSphere HA agentwarningvCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name}vCenter Server is disconnected from a master HA agent running on host {hostname}com.vmware.vc.HA.VcDisconnectedFromMasterEvent|vCenter Server is disconnected from a master HA agent running on host {hostname} in {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.HA.VcDisconnectedFromMasterEvent"> <description> This event is logged when vCenter Server is disconnected from a master vSphere HA agent. </description> </EventLongDescription> ExtendedEventvSphere HA was unable to reset a VM after it exhausted the retrieserrorvSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on host {host.name} after {retryTimes} retriesvSphere HA was unable to reset VM {vm.name} on this host after {retryTimes} retriesvSphere HA was unable to reset this VM after {retryTimes} retriescom.vmware.vc.HA.VmDasResetAbortedEvent|vSphere HA was unable to reset VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmDasResetAbortedEvent"> <description> This event is logged when vSphere HA was unable to reset a VM. </description> <cause> <description> The operation to reset the VM continued to fail. vSphere HA stopped resetting the VM after it exhausted the retries. </description> <action>Ensure that the host system is manageable, for example host agent is not hung. Check if there are no other concurrent tasks running for the VM.</action> </cause> </EventLongDescription> ExtendedEventVirtual machine failed to become vSphere HA ProtectederrorVirtual machine {vm.name} in cluster {computeResource.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.Virtual machine {vm.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.This virtual machine failed to become vSphere HA Protected and HA may not attempt to restart it after a failure.com.vmware.vc.HA.VmNotProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} failed to become vSphere HA Protected and HA may not attempt to restart it after a failure. 
<EventLongDescription id="com.vmware.vc.HA.VmNotProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation but the VM has not transitioned to vSphere HA Protected in the time period expected. This condition exists because the master vSphere HA agent has not yet persisted that the VM successfully powered on or vCenter is unaware that it did. Consequently, vSphere HA may not restart the VM after a failure. </description> <action> There are a number of reasons why a VM may remain not protected for a period of time. First, the system may be heavily loaded, in which case the transition will just take longer. Second, vCenter may be unable to communicate with the vSphere HA master agent. Examine the inventory to see if any hosts in the cluster are not responding. Third, the management network may be partitioned, which is preventing the master that owns the VM from protecting it or reporting this information to vCenter. The cluster summary page may report a config issue in this case or hosts in the VM inventory will be reported as not responding. Finally, the vSphere HA master election is taking too long to complete. The cluster summary page will report if this situation exists. See the product documentation for additional troubleshooting tips. </action> </EventLongDescription> ExtendedEventVirtual machine is vSphere HA protectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.Virtual machine {vm.name} is vSphere HA Protected and HA will attempt to restart it after a failure.This virtual machine is vSphere HA Protected and HA will attempt to restart it after a failure.com.vmware.vc.HA.VmProtectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is vSphere HA Protected and HA will attempt to restart it after a failure. <EventLongDescription id="com.vmware.vc.HA.VmProtectedEvent"> <description> The virtual machine successfully powered on in a vSphere HA cluster after a user-initiated power operation and vSphere HA has persisted this fact. Consequently, vSphere HA will attempt to restart the VM after a failure. </description> </EventLongDescription> ExtendedEventVirtual machine is not vSphere HA ProtectedinfoVirtual machine {vm.name} in cluster {computeResource.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.Virtual machine {vm.name} is not vSphere HA Protected.This virtual machine is not vSphere HA Protected.com.vmware.vc.HA.VmUnprotectedEvent|Virtual machine {vm.name} in cluster {computeResource.name} in {datacenter.name} is not vSphere HA Protected. <EventLongDescription id="com.vmware.vc.HA.VmUnprotectedEvent"> <description> The virtual machine transitioned from the vSphere HA protected to unprotected state. This transition is a result of a user powering off the virtual machine, disabling vSphere HA, disconnecting the host on which the virtual machine is running, or destroying the cluster in which the virtual machine resides. 
</description> </EventLongDescription> ExtendedEventvSphere HA has unprotected out-of-disk-space VMinfovSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected virtual machine {vm.name} because it ran out of disk spacevSphere HA has unprotected this virtual machine because it ran out of disk spacecom.vmware.vc.HA.VmUnprotectedOnDiskSpaceFull|vSphere HA has unprotected virtual machine {vm.name} in cluster {computeResource.name} in datacenter {datacenter.name} because it ran out of disk spaceExtendedEventvSphere HA did not terminate a VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}warningvSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}vSphere HA did not terminate this VM affected by an inaccessible datastore: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore}com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore|vSphere HA did not terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore} <EventLongDescription id=" com.vmware.vc.HA.VmcpNotTerminateVmWithInaccessibleDatastore"> <description> This event is logged when a VM affected by an inaccessible datastore in a vSphere HA cluster was not terminated. </description> <cause> <description> VM Component Protection is configured to not terminate the VM, or vSphere HA host monitoring is disabled, or VM restart priority is disabled, or the VM is an agent VM, or there are insufficient resources to fail over the VM. For the case of insufficient resources, vSphere HA will attempt to terminate the VM when resources become available. </description> <action>Select VM Component Protection option to terminate VM</action> <action>Enable host monitoring</action> <action>Enable VM Restart priority</action> <action>Reduce resource reservations of other VMs in the cluster</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition or isolation if one exists</action> <action>If vSphere DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> </EventLongDescription> ExtendedEventDatastore {ds.name} mounted on this host was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleinfoDatastore {ds.name} mounted on host {host.name} in cluster {computeResource.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on host {host.name} was inaccessible. vSphere HA detected that the condition was cleared and the datastore is now accessibleDatastore {ds.name} mounted on this host was inaccessible. 
vSphere HA detected that the condition was cleared and the datastore is now accessiblecom.vmware.vc.HA.VmcpStorageFailureCleared|Datastore {ds.name} mounted on host {host.name} was inaccessible. The condition was cleared and the datastore is now accessible <EventLongDescription id=" com.vmware.vc.HA.VmcpStorageFailureCleared"> <description> This event is logged when datastore connectivity was restored. The host can have the following storage access failures: All Paths Down (APD) and Permanent Device Loss (PDL). Datastore was shown as unavailable/inaccessible in storage view. </description> <cause> <description> A datastore on this host was inaccessible. The condition was cleared and the datastore is now accessible. </description> </cause> </EventLongDescription> ExtendedEventvSphere HA detected that a datastore was inaccessible. This affected the VM with files on the datastorewarningvSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on host {host.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore mounted on this host was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastorevSphere HA detected that a datastore was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected the VM with files on the datastorecom.vmware.vc.HA.VmcpStorageFailureDetectedForVm|vSphere HA detected that a datastore mounted on host {host.name} in cluster {computeResource.name} in {datacenter.name} was inaccessible due to {failureType.@enum.com.vmware.vc.HA.VmcpStorageFailureDetectedForVm}. This affected VM {vm.name} with files on the datastore <EventLongDescription id="com.vmware.vc.HA.VmcpStorageFailureDetectedForVm"> <description> This event is logged when a VM's files were not accessible due to a storage connectivity failure. vSphere HA will take action if VM Component Protection is enabled for the VM. </description> <cause> <description> A datastore was inaccessible due to a storage connectivity loss of All Paths Down or Permanent Device Loss. A VM was affected because it had files on the inaccessible datastore. 
</description> </cause> </EventLongDescription> ExtendedEventvSphere HA was unable to terminate VM affected by an inaccessible datastore after it exhausted the retrieserrorvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} after {retryTimes} retriesvSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on this host after {retryTimes} retriesvSphere HA was unable to terminate this VM affected by an inaccessible datastore after {retryTimes} retriescom.vmware.vc.HA.VmcpTerminateVmAborted|vSphere HA was unable to terminate VM {vm.name} affected by an inaccessible datastore on host {host.name} in cluster {computeResource.name} in {datacenter.name} after {retryTimes} retries <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminateVmAborted"> <description> This event is logged when vSphere HA was unable to terminate a VM affected by an inaccessible datastore. </description> <cause> <description> The operation to terminate the VM continued to fail. vSphere HA stopped terminating the VM after it exhausted the retries. </description> <action> Ensure that the host system is manageable, for example host agent is not hung. Check if there are other concurrent tasks running for the VM.</action> <action> Reset the VM if guest application is not operational after the datastore becomes accessible.</action> </cause> </EventLongDescription> ExtendedEventvSphere HA attempted to terminate a VM affected by an inaccessible datastorewarningvSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on host{host.name} because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate VM {vm.name} on this host because the VM was affected by an inaccessible datastorevSphere HA attempted to terminate this VM because the VM was affected by an inaccessible datastorecom.vmware.vc.HA.VmcpTerminatingVm|vSphere HA attempted to terminate VM {vm.name} on host{host.name} in cluster {computeResource.name} in {datacenter.name} because the VM was affected by an inaccessible datastore <EventLongDescription id=" com.vmware.vc.HA.VmcpTerminatingVm"> <description> This event is logged when vSphere HA attempted to terminate a VM affected by an inaccessible datastore. A VM is terminated by issuing a SIGKILL to the vmx process. </description> <cause> <description> The VM was affected by an inaccessible datastore. vSphere HA VM Component Protection attempted to terminate the VM. </description> </cause> </EventLongDescription> EventExHardware Health Status Changedinfocom.vmware.vc.HardwareSensorEvent|Sensor {sensorNumber} type {sensorType}, Description {sensorName} state {status} for {message}. 
Part Name/Number {partName} {partNumber} Manufacturer {manufacturer}EventExStatus of each Hardware Health Sensor Groupinfocom.vmware.vc.HardwareSensorGroupStatus|Hardware Sensor Status: Processor {processor}, Memory {memory}, Fan {fan}, Voltage {voltage}, Temperature {temperature}, Power {power}, System Board {systemBoard}, Battery {battery}, Storage {storage}, Other {other}ExtendedEventHost configuration is TPM encrypted.warningcom.vmware.vc.HostTpmConfigEncryptionEvent|Host configuration is TPM encrypted.EventExOperation cleanup encountered errorsinfoOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup for {vm.name} with task {taskId} encountered errorsOperation cleanup with task {taskId} encountered errorscom.vmware.vc.OperationCleanupErrorsEvent|Operation cleanup for {vm.name} with task {taskId} encountered errorsExtendedEventThe user does not have permission to view the entity associated with this event.infocom.vmware.vc.RestrictedAccess|The user does not have permission to view the entity associated with this event.EventExFailed to register host with Intel® SGX Registration Service.errorFailed to register host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.FailedRegistration|Failed to register host {host.name} with Intel® SGX Registration Service {registrationUrl}. The service responded with {statusCode}, {errorCode}: {errorMessage}.EventExSending registration request to Intel® SGX Registration Service.infoSending registration request to Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.InitiatingRegistration|Sending registration request for host {host.name} to Intel® SGX Registration Service {registrationUrl}.EventExSuccessfully registered host with Intel® SGX Registration Service.infoSuccessfully registered host with Intel® SGX Registration Service.com.vmware.vc.SgxRegistration.SuccessfulRegistration|Successfully registered host {host.name} with Intel® SGX Registration Service {registrationUrl}.EventExStateless Alarm TriggeredinfoAlarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'com.vmware.vc.StatelessAlarmTriggeredEvent|Alarm '{alarm}' on {triggeredEntity} triggered by event {triggerEventKey} '{triggerEventDescription}'ExtendedEventTrusted Host attestation failed.errorcom.vmware.vc.TaHostAttestFailEvent|Trusted Host attestation failed.ExtendedEventTrusted Host attestation passed.infocom.vmware.vc.TaHostAttestPassEvent|Trusted Host attestation passed.ExtendedEventTrusted Host attestation status unset.infocom.vmware.vc.TaHostAttestUnsetEvent|Trusted Host attestation status unset.EventExHost Time Synchronization establishedinfocom.vmware.vc.TimeSyncEvent|Time service {serviceName} has synchronized with remote time source, details: {message}.EventExHost Time Synchronization losterrorcom.vmware.vc.TimeSyncFailedEvent|Time service {serviceName} is not synchronized with the remote time source, details: {message}.ExtendedEventHost must be decommissioned when moved out of a Trusted Infrastructure cluster.errorHost {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host 
{host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.com.vmware.vc.TrustAuthority.DecommissionHost|Host {host.name} must be decommissioned when moved out of a Trusted Infrastructure cluster.ExtendedEventHost is not configured for vSphere Trust Authority.errorHost {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.Host {host.name} is not configured for vSphere Trust Authority.com.vmware.vc.TrustAuthority.HostNotConfigured|Host {host.name} is not configured for vSphere Trust Authority.EventExThe client certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSClientCertExpirationEvent|The client certificate for the Key Provider {keyProviderId} in the Trust Authority Host {hostName} will expire in {dayNum} day(s).EventExThe server certificate of Trusted Key Provider will expire soon.warningcom.vmware.vc.TrustAuthority.KMSServerCertExpirationEvent|The server certificate of key server {serverName} in the Trusted Key Provider {keyProviderId} will expire in {dayNum} day(s).ExtendedEventCertificates have changed. Trust authority cluster needs to be reconfigured.errorcom.vmware.vc.TrustAuthority.StsCertificatesChange|Certificates have changed. Trust authority cluster needs to be reconfigured.EventExvCenter Service Overall Health Changedinfocom.vmware.vc.VCHealthStateChangedEvent|vCenter Service overall health changed from '{oldState}' to '{newState}' <EventLongDescription id="com.vmware.vc.VCHealthStateChangedEvent"> <description> This event is logged when the overall health of vCenter Service has changed or become unavailable. </description> <cause> <description> The vCenter Service overall health state has changed or become unavailable </description> <action> Examine the vCenter Service health state and make sure the VimWebServices service is up and running on the vCenter Server </action> </cause> </EventLongDescription> EventExDatastore is in healthy state within the clusterinfoDatastore {dsName} is in healthy state within the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreHealthy|Datastore {dsName} is in healthy state within the cluster {computeResource.name}EventExDatastore is not accessible on the host(s)warningDatastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreInaccessible|Datastore {dsName} is not accessible from the host(s) {hosts} in the cluster {computeResource.name}EventExDatastore unmount failederrorUnmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}com.vmware.vc.VMCStorage.DatastoreUnmountFailed|Unmount of datastore {dsName} failed on host(s) {hosts} in the cluster {computeResource.name}EventExDatastore in desired configuration is missing on the host(s)warningDatastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}com.vmware.vc.VMCStorage.DesiredDatastoreMissing|Datastore {dsName} is missing on the host(s) {hosts} on {computeResource.name}EventExHost(s) mounted with the datastore which is not present in desired configurationerrorHost(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired configuration on {computeResource.name}com.vmware.vc.VMCStorage.NotDesiredDatastorePresent|Host(s) {hosts} is/are mounted with datastore {dsName} which is not present in desired 
configuration on {computeResource.name}EventExExecuting VM Instant CloneinfoExecuting Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} on {host.name} to {destVmName}Executing Instant Clone of {vm.name} to {destVmName}Executing Instant Clone to {destVmName}com.vmware.vc.VmBeginInstantCloneEvent|Executing Instant Clone of {vm.name} on {host.name} to {destVmName}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneFailedInvalidDestinationEvent|Cannot clone {vm.name} as {destVmName} to invalid or non-existent destination with ID {invalidMoRef}: {fault}EventExRestarting VM CloneinfoRestarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}Restarting VM Clone of {vm.name} to {destVmName} with task {taskId}Restarting VM Clone to {destVmName} with task {taskId}com.vmware.vc.VmCloneRestartEvent|Restarting VM Clone of {vm.name} on {host.name} to {destVmName} with task {taskId}EventExCannot complete virtual machine clone.errorcom.vmware.vc.VmCloneToResourcePoolFailedEvent|Cannot clone {vm.name} as {destVmName} to resource pool {destResourcePool}: {fault}EventExFailed to create virtual machineerrorFailed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName} on {host.name}Failed to create virtual machine {vmName}Failed to create virtual machine on {host.name}com.vmware.vc.VmCreateFailedEvent|Failed to create virtual machine {vmName} on {host.name}ExtendedEventVirtual machine disks consolidation succeeded.infoVirtual machine {vm.name} disks consolidation succeeded on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation succeeded on {host.name}.Virtual machine {vm.name} disks consolidation succeeded.Virtual machine disks consolidation succeeded.com.vmware.vc.VmDiskConsolidatedEvent|Virtual machine {vm.name} disks consolidated successfully on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation needed.warningVirtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is needed on {host.name}.Virtual machine {vm.name} disks consolidation is needed.Virtual machine disks consolidation is needed.com.vmware.vc.VmDiskConsolidationNeeded|Virtual machine {vm.name} disks consolidation is needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation no longer needed.infoVirtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation is no longer needed on {host.name}.Virtual machine {vm.name} disks consolidation is no longer needed.Virtual machine disks consolidation is no longer needed.com.vmware.vc.VmDiskConsolidationNoLongerNeeded|Virtual machine {vm.name} disks consolidation is no longer needed on {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVirtual machine disks consolidation failed.warningVirtual machine {vm.name} disks consolidation failed on {host.name} in cluster {computeResource.name}.Virtual machine {vm.name} disks consolidation failed on {host.name}.Virtual machine {vm.name} disks consolidation failed.Virtual machine disks consolidation failed.com.vmware.vc.VmDiskFailedToConsolidateEvent|Virtual machine {vm.name} 
disks consolidation failed on {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExcom.vmware.vc.VmForkFailedInvalidDestinationEvent|EventExCannot complete Instant Clone of VMerrorCannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone of {vm.name} to {destVmName}. Reason : {fault.msg}Cannot complete Instant Clone to {destVmName}. Reason : {fault.msg}com.vmware.vc.VmInstantCloneFailedEvent|Cannot complete Instant Clone of {vm.name} on {host.name} to {destVmName}. Reason : {fault.msg}EventExInstant Clone WarningwarningInstant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning for {vmName} - {warning}Instant Clone Warning - {warning}com.vmware.vc.VmInstantCloneWarningEvent|Instant Clone Warning for {vmName} - {warning}EventExInstant Clone of VM has completedinfoInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} on {host.name} has completedInstant Clone of {srcVmName} has completedInstant Clone of {srcVmName} has completedcom.vmware.vc.VmInstantClonedEvent|Instant Clone of {srcVmName} on {host.name} has completedEventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.infocom.vmware.vc.VpxdMemoryUsageClearEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status}.EventExvCenter Server memory usage changed to {newState.@enum.ManagedEntity.Status}.errorcom.vmware.vc.VpxdMemoryUsageErrorEvent|vCenter Server memory usage changed from {oldState.@enum.ManagedEntity.Status} to {newState.@enum.ManagedEntity.Status} (used: {usedMemory}%, soft limit: {limit}%).EventExOperation enabledinfocom.vmware.vc.authorization.MethodEnabled|The operation {MethodName} on the {EntityName} of type {EntityType} is enabled.EventExPrivilege check failedwarningPrivilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}Privilege check failed for user {User} for missing permission {Permission}. Session user performing the check: {SessionUser}com.vmware.vc.authorization.NoPermission|Privilege check failed for user {User} for missing permission {Permission}. 
Session user performing the check: {SessionUser}ExtendedEventErrors occurred during automatic CPVM certificate rotation.errorcom.vmware.vc.certificatemanagement.CPVMCertificateUpdateFailedEvent|Errors occurred during automatic CPVM certificate rotation.ExtendedEventCPVM successfully performed automatic certificate rotation.infocom.vmware.vc.certificatemanagement.CPVMCertificateUpdateHealthyEvent|CPVM successfully performed automatic certificate rotation.ExtendedEventErrors occurred during automatic Spherelet certificate rotation.errorcom.vmware.vc.certificatemanagement.SphereletCertificateUpdateFailedEvent|Errors occurred during automatic Spherelet certificate rotation.ExtendedEventNo errors found during automatic Spherelet certificate rotation.infocom.vmware.vc.certificatemanagement.SphereletCertificateUpdateHealthyEvent|No errors found during automatic Spherelet certificate rotation.ExtendedEventTRUSTED ROOT certificates imported successfully.infocom.vmware.vc.certificatemanagement.TrustedRootsImportedEvent|TRUSTED ROOT certificates imported successfully.ExtendedEventTRUSTED ROOT certificates imported successfully, but with warnings.warningcom.vmware.vc.certificatemanagement.TrustedRootsImportedWithWarningsEvent|TRUSTED ROOT certificates imported successfully, but with warnings.ExtendedEventvCenter Server TLS certificate replaced successfully.infocom.vmware.vc.certificatemanagement.VcCertificateReplacedEvent|vCenter Server TLS certificate replaced successfully.ExtendedEventvCenter Server TLS certificate replaced successfully, but there are warnings detected.warningcom.vmware.vc.certificatemanagement.VcCertificateReplacedWithWarningsEvent|vCenter Server TLS certificate replaced successfully, but there are warnings detected.EventExFailed to update the vCenter server certificate.warningcom.vmware.vc.certificatemanagement.VcServerCertificateUpdateFailureEvent|{cause} for the {serviceName}. Remediation suggested: {remediation}. For more details, please refer to {kbLink}.EventExCA Certificates were updated on hostinfoCA Certificates were updated on {hostname}com.vmware.vc.certmgr.HostCaCertsAndCrlsUpdatedEvent|CA Certificates were updated on {hostname}EventExHost Certificate expiration is imminentwarningHost Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpirationImminentEvent|Host Certificate expiration is imminent on {hostname}. Expiration Date: {expiryDate}EventExHost Certificate is nearing expirationwarningHost Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringEvent|Host Certificate on {hostname} is nearing expiration. Expiration Date: {expiryDate}EventExHost Certificate will expire soonwarningHost Certificate on {hostname} will expire soon. Expiration Date: {expiryDate}com.vmware.vc.certmgr.HostCertExpiringShortlyEvent|Host Certificate on {hostname} will expire soon. 
Expiration Date: {expiryDate}ExtendedEventHost Certificate Management Mode changedinfocom.vmware.vc.certmgr.HostCertManagementModeChangedEvent|Host Certificate Management Mode changed from {previousMode} to {presentMode}ExtendedEventHost Certificate Management Metadata changedinfocom.vmware.vc.certmgr.HostCertMetadataChangedEvent|Host Certificate Management Metadata changedEventExHost Certificate revokedwarningHost Certificate on {hostname} is revoked.com.vmware.vc.certmgr.HostCertRevokedEvent|Host Certificate on {hostname} is revoked.EventExHost Certificate was updatedinfoHost Certificate was updated on {hostname}, new thumbprint: {thumbprint}com.vmware.vc.certmgr.HostCertUpdatedEvent|Host Certificate was updated on {hostname}, new thumbprint: {thumbprint}EventExAdding host to cluster store failederrorAdding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.AddHostFailed|Adding host {hostName} to cluster store failed. Fault Reason : {errorMessage}EventExInitializing cluster store member cache failederrorInitializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}Initializing cluster store member cache failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.InitializeMemberCacheFailed|Initializing cluster store member cache failed. Fault Reason : {errorMessage}EventExRemoving host from cluster store failederrorRemoving host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}com.vmware.vc.clusterstore.RemoveHostFailed|Removing host {hostName} from cluster store failed. Fault Reason : {errorMessage}EventExUpdating host encryption keyinfocom.vmware.vc.crypto.HostKeyUpdatedEvent|Host encryption key set to {newKey}. 
Old key: {oldKey}EventExcom.vmware.vc.crypto.IntegrityCheckFailed|EventExcom.vmware.vc.crypto.IntegrityCheckPassed|EventExCrypto operation audit eventinfocom.vmware.vc.crypto.Operation|Cryptographic operations during {description}{operation}{diskOperations}EventExFailed to update VM fileserrorFailed to update VM files on datastore {ds.name}com.vmware.vc.datastore.UpdateVmFilesFailedEvent|Failed to update VM files on datastore {ds.name} using host {hostName}EventExUpdated VM filesinfoUpdated VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatedVmFilesEvent|Updated VM files on datastore {ds.name} using host {hostName}EventExUpdating VM FilesinfoUpdating VM files on datastore {ds.name}com.vmware.vc.datastore.UpdatingVmFilesEvent|Updating VM files on datastore {ds.name} using host {hostName}ExtendedEventLink Aggregation Control Protocol configuration is inconsistentinfoSingle Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.com.vmware.vc.dvs.LacpConfigInconsistentEvent|Single Link Aggregation Control Group is enabled on Uplink Port Groups while enhanced LACP support is enabled.ExtendedEventFault Tolerance VM restart disabledwarningvSphere HA has been disabled in cluster {computeResource.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure.vSphere HA has been disabled. vSphere HA will not restart this VM or its Secondary VM after a failure.com.vmware.vc.ft.VmAffectedByDasDisabledEvent|vSphere HA has been disabled in cluster {computeResource.name} of datacenter {datacenter.name}. vSphere HA will not restart VM {vm.name} or its Secondary VM after a failure. <EventLongDescription id="com.vmware.vc.ft.VmAffectedByDasDisabledEvent"> <description> When vSphere HA is disabled in a cluster, you cannot restart a Primary VM or its Secondary VM after a failure. This event is issued when vSphere HA is disabled and a Fault Tolerant virtual machine is powered on. The event alerts you of the risk to the Fault Tolerant virtual machine that results from disabling vSphere HA. 
</description> <cause> <description>vSphere HA was disabled when a Fault Tolerant virtual machine was powered on</description> <action>Re-enable vSphere HA</action> </cause> </EventLongDescription> EventExGuest operationinfoGuest operation {operationName.@enum.com.vmware.vc.guestOp} performed.com.vmware.vc.guestOperations.GuestOperation|Guest operation {operationName.@enum.com.vmware.vc.guestOp} performed on Virtual machine {vm.name}.EventExGuest operation authentication failurewarningGuest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp}.com.vmware.vc.guestOperations.GuestOperationAuthFailure|Guest operation authentication failed for operation {operationName.@enum.com.vmware.vc.guestOp} on Virtual machine {vm.name}.ExtendedEventvSphere HA restarted a virtual machinewarningvSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}vSphere HA restarted virtual machine {vm.name} on host {host.name}vSphere HA restarted virtual machine {vm.name}vSphere HA restarted this virtual machinecom.vmware.vc.ha.VmRestartedByHAEvent|vSphere HA restarted virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} <EventLongDescription id="com.vmware.vc.ha.VmRestartedByHAEvent"> <description> The virtual machine was restarted automatically by vSphere HA on this host. This response may be triggered by a failure of the host the virtual machine was originally running on or by an unclean power-off of the virtual machine (eg. if the vmx process was killed). </description> </EventLongDescription> ExtendedEventAutostart power on failederrorPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on host {host.name} failedPowering on virtual machines according to autostart rules on this host failedcom.vmware.vc.host.AutoStartPowerOnFailedEvent|Powering on virtual machines according to autostart rules on host {host.name} in datacenter {datacenter.name} failedExtendedEventAutostart rules reconfigure failederrorReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on host {host.name} failedReconfiguring autostart rules for virtual machines on this host failedcom.vmware.vc.host.AutoStartReconfigureFailedEvent|Reconfiguring autostart rules for virtual machines on {host.name} in datacenter {datacenter.name} failedEventExEncryption mode is enabled on host.infoEncryption mode is enabled on host.com.vmware.vc.host.Crypto.Enabled|Encryption mode is enabled on host {hostName}.EventExThe operation is not supported on hosts which have encryption disabled.errorcom.vmware.vc.host.Crypto.HostCryptoDisabled|The operation is not supported on host {hostName} because encryption is disabled.EventExHost key is being renewed because an error occurred on the key provider.warningHost key is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KMSClusterError|Host key of {hostName} is being renewed because an error occurred on the key provider {kmsCluster} and key {missingKey} was not available. The new key is {newKey}.EventExHost key is being renewed because key was missing on the key provider.warningHost key is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. 
The new key is {newKey}.com.vmware.vc.host.Crypto.HostKey.NewKey.KeyMissingOnKMS|Host key of {hostName} is being renewed because key {missingKey} was missing on the key provider {kmsCluster}. The new key is {newKey}.EventExHost requires encryption mode enabled and the key provider is not available.errorHost requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KMSClusterError|Host {hostName} requires encryption mode enabled. Check the status of the key provider {kmsCluster} and manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExHost requires encryption mode enabled and the key is not available on the key provider.errorHost requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}.com.vmware.vc.host.Crypto.ReqEnable.KeyMissingOnKMS|Host {hostName} requires encryption mode enabled. Manually recover the missing key {missingKey} to the key provider {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExFailed to send keys to host because of host error.errorcom.vmware.vc.host.Crypto.SendKeyError.HostError|Failed to send keys {keys} to host {hostName}. Please check host connection.EventExHost profile {operation} failed with error: {error}.errorHost profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.Host profile {operation} failed with error: {error}.com.vmware.vc.host.HPOperationFailed|Host profile {operation} failed with error: {error}.ExtendedEventHost booted from stateless cache.warningHost booted from stateless cache.Host booted from stateless cache.Host booted from stateless cache.com.vmware.vc.host.HostBootedFromStatelessCacheEvent|Host booted from stateless cache.EventExHost IP address conflict detectederrorHost IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}Host IP address conflict detected. {changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}com.vmware.vc.host.HostIpConflictEvent|Host IP address conflict detected. 
{changingHostName} has changed own IP to {existingIp} which is already used by {existingHostName}ExtendedEventHost in Memory Mode and active DRAM usage is normalinfo{host.name} is in Memory Mode and its active DRAM usage is normal{host.name} is in Memory Mode and its active DRAM usage is normalThe host is in Memory Mode and its active DRAM usage is normalcom.vmware.vc.host.MemoryModeActiveDRAMGreen|Host {host.name} is in Memory Mode and its active DRAM usage is normalExtendedEventHost in Memory Mode and active DRAM usage is highwarningHost {host.name} is in Memory Mode and its active DRAM usage is highHost {host.name} is in Memory Mode and its active DRAM usage is highThe host is in Memory Mode and its active DRAM usage is highcom.vmware.vc.host.MemoryModeActiveDRAMYellow|Host {host.name} is in Memory Mode and its active DRAM usage is highExtendedEventNSX installation failed on host.errorNSX installation failed on host.NSX installation failed on host.NSX installation failed on host.com.vmware.vc.host.NsxInstallFailed|NSX installation failed on host.ExtendedEventNSX installation successful on host.infoNSX installation successful on host.NSX installation successful on host.NSX installation successful on host.com.vmware.vc.host.NsxInstallSuccess|NSX installation successful on host.ExtendedEventPartial maintenance mode status has changed.infoHost status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.com.vmware.vc.host.PartialMaintenanceModeStatusChanged|Host status for '{id.@enum.host.PartialMaintenanceModeId}' is now '{status.@enum.host.PartialMaintenanceModeStatus} partial maintenance mode'.EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}com.vmware.vc.host.StatelessHPApplyEarlyBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}com.vmware.vc.host.StatelessHPApplyFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. Failed Profiles: {failedProfiles}. {error}EventExHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}errorHost profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}Host profile apply failed during stateless boot. Host is in Maintenance Mode. {error}com.vmware.vc.host.StatelessHPApplyPostBootFailed|Host profile apply failed during stateless boot. Host is in Maintenance Mode. 
{error}EventExHost TPM attestation failederrorHost TPM attestation failed for host {host.name}: {1}Host TPM attestation failed for host {host.name}: {1}Host TPM attestation failed: {1}com.vmware.vc.host.TPMAttestationFailedEvent|Host TPM attestation failed for host {host.name} in datacenter {datacenter.name}: {1}ExtendedEventActive DRAM usage of the memory tiered host is normalinfoHost {host.name} is a memory tiered host and its active DRAM usage is normalHost {host.name} is a memory tiered host and its active DRAM usage is normalActive DRAM usage of the memory tiered host is normalcom.vmware.vc.host.TieringMemoryActiveDRAMGreen|Host {host.name} is a memory tiered host and its active DRAM usage is normalExtendedEventActive DRAM usage of the memory tiered host is highwarningHost {host.name} is a memory tiered host and its active DRAM usage is highHost {host.name} is a memory tiered host and its active DRAM usage is highActive DRAM usage of the memory tiered host is highcom.vmware.vc.host.TieringMemoryActiveDRAMYellow|Host {host.name} is a memory tiered host and its active DRAM usage is highExtendedEventNew TPM host endorsement key doesn't match the one in the DBerrorThe new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name}The new host TPM endorsement key doesn't match the one stored in the DBcom.vmware.vc.host.TpmEndorsementKeyMismatch|The new host TPM endorsement key doesn't match the one stored in the DB for host {host.name} in datacenter {datacenter.name}ExtendedEventHost's virtual flash resource is accessible.infoHost's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.Host's virtual flash resource is restored to be accessible.com.vmware.vc.host.clear.vFlashResource.inaccessible|Host's virtual flash resource is restored to be accessible.EventExHost's virtual flash resource usage dropped below the threshold.infoHost's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.Host's virtual flash resource usage dropped below {1}%.com.vmware.vc.host.clear.vFlashResource.reachthreshold|Host's virtual flash resource usage dropped below {1}%.ExtendedEventDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.warningDeprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.com.vmware.vc.host.problem.DeprecatedVMFSVolumeFound|Deprecated VMFS volume(s) found on the host. Please consider upgrading volume(s) to the latest version.ExtendedEventDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostswarningDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsDeprecated VMFS (ver 3) volumes found. 
Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostscom.vmware.vc.host.problem.DeprecatedVMFSVolumeFoundAfterVMFS3EOL|Deprecated VMFS (ver 3) volumes found. Upgrading such volumes to VMFS (ver 5) is mandatory for continued availability on vSphere 6.7 hostsExtendedEventImproved virtual disk infrastructure's catalog management turned unhealthywarningcom.vmware.vc.host.problem.VStorageObjectInfraCatalogUnhealthy|Improved virtual disk infrastructure's catalog management turned unhealthyExtendedEventImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.warningImproved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss.com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent|Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. <EventLongDescription id="com.vmware.vc.host.problem.VStorageObjectInfraNamespacePolicyEmptyEvent"> <description> Improved virtual disk infrastructure namespaces are created with empty storage policy. Please consider updating infrastructure namespace storage policy to avoid potential data loss. </description> <cause> <description> This is caused by creating improved virtual disk infrastructure namespaces with empty storage policy. </description> <action> Update infrastructure namespaces storage policy. </action> </cause> </EventLongDescription> ExtendedEventHost's virtual flash resource is inaccessible.warningHost's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.Host's virtual flash resource is inaccessible.com.vmware.vc.host.problem.vFlashResource.inaccessible|Host's virtual flash resource is inaccessible. <EventLongDescription id="com.vmware.vc.host.problem.vFlashResource.inaccessible"> <description> Inaccessible host virtual flash resource indicates that its backing VFFS volume is inaccessible. Due to inaccessible host virtual flash resource, virtual machines with vSphere Flash Read Cache configured cannot be powered on or might experience unpredicted behavior if powered on. </description> <cause> <description> This might be caused by an unmounted VFFS volume or an APD/PDL on the VFFS volume. </description> <action> Check the backing VFFS volume connection status. For example, mount the unmounted volume or resolve the APD/PDL issues. The host virtual flash resource is accessible as long as the backing VFFS volume is accessible. 
</action> </cause> </EventLongDescription> EventExHost's virtual flash resource usage exceeds the threshold.warningHost's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.Host's virtual flash resource usage is more than {1}%.com.vmware.vc.host.problem.vFlashResource.reachthreshold|Host's virtual flash resource usage is more than {1}%.ExtendedEventVirtual flash resource is configured on the hostinfoVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostVirtual flash resource is configured on the hostcom.vmware.vc.host.vFlash.VFlashResourceConfiguredEvent|Virtual flash resource is configured on the hostExtendedEventVirtual flash resource is removed from the hostinfoVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostVirtual flash resource is removed from the hostcom.vmware.vc.host.vFlash.VFlashResourceRemovedEvent|Virtual flash resource is removed from the hostEventExDefault virtual flash module is changed to {vFlashModule} on the hostinfoDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostDefault virtual flash module is changed to {vFlashModule} on the hostcom.vmware.vc.host.vFlash.defaultModuleChangedEvent|Any new virtual Flash Read Cache configuration request will use {vFlashModule} as default virtual flash module. All existing virtual Flash Read Cache configurations remain unchanged. <EventLongDescription id="com.vmware.vc.host.vFlash.defaultModuleChangedEvent"> <description> The default virtual flash module has been changed. Any new virtual Flash Read Cache configuration uses the new default virtual flash module if undefined in configuration. All existing configurations will remain unchanged. </description> </EventLongDescription> ExtendedEventVirtual flash modules are loaded or reloaded on the hostinfoVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostVirtual flash modules are loaded or reloaded on the hostcom.vmware.vc.host.vFlash.modulesLoadedEvent|Virtual flash modules are loaded or reloaded on the hostEventExEntity became healthyinfo{entityName} became healthycom.vmware.vc.infraUpdateHa.GreenHealthEvent|{entityName} became healthyEventExProvider has posted invalid health updateswarningProvider {providerName} has posted invalid health updatesProvider {providerName} has posted invalid health updatescom.vmware.vc.infraUpdateHa.InvalidUpdatesEvent|Provider {providerName} has posted invalid health updatesEventExProvider reported a healthy statusinfo{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}com.vmware.vc.infraUpdateHa.PostGreenHealthUpdateEvent|{providerName} reported a healthy status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}EventExProvider reported a severely degraded statuswarning{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostRedHealthUpdateEvent|{providerName} reported a severely degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. 
Remediation suggested by {providerName}: {remediation}EventExProvider reported a moderately degraded statuswarning{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}com.vmware.vc.infraUpdateHa.PostYellowHealthUpdateEvent|{providerName} reported a moderately degraded status on host {entityName} for ID {healthUpdateInfoId} ({description}) in update {healthUpdateId}. Please contact your hardware vendor support. Remediation suggested by {providerName}: {remediation}EventExEntity has entered quarantine modewarning{entityName} has entered quarantine modecom.vmware.vc.infraUpdateHa.QuarantineEvent|{entityName} has entered quarantine modeEventExEntity has exited quarantine modeinfo{entityName} has exited quarantine modecom.vmware.vc.infraUpdateHa.QuarantineRemovedEvent|{entityName} has exited quarantine modeEventExEntity became severely degradedwarning{entityName} became severely degradedcom.vmware.vc.infraUpdateHa.RedHealthEvent|{entityName} became severely degradedEventExProvider has stale updateswarningProvider {providerName} has not posted an update in {timeout} secondsProvider {providerName} has not posted an update in {timeout} secondscom.vmware.vc.infraUpdateHa.StaleUpdatesEvent|Provider {providerName} has not posted an update in {timeout} secondsEventExEntity has unknown health statewarning{entityName} has unknown health statecom.vmware.vc.infraUpdateHa.UnknownHealthEvent|{entityName} has unknown health stateEventExEntity became moderately degradedwarning{entityName} became moderately degradedcom.vmware.vc.infraUpdateHa.YellowHealthEvent|{entityName} became moderately degradedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters has failederrorvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterInstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) installation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterInstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) installation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters has failederrorvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUninstallationFailedEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) uninstallation of filters is successfulinfovSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster 
{computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} are successfulcom.vmware.vc.iofilter.FilterUninstallationSuccessEvent|vSphere APIs for I/O Filters (VAIO) uninstallation of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters has failederrorvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} and in datacenter {datacenter.name} has failedvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} has failedcom.vmware.vc.iofilter.FilterUpgradeFailedEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has failedExtendedEventvSphere APIs for I/O Filters (VAIO) upgrade of filters is successfulinfovSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} is successfulvSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} is successfulcom.vmware.vc.iofilter.FilterUpgradeSuccessEvent|vSphere APIs for I/O Filters (VAIO) upgrade of filters on cluster {computeResource.name} in datacenter {datacenter.name} has succeededEventExvSphere APIs for I/O Filters (VAIO) host vendor provider registration has failed.errorvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderRegistrationFailedEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} registration has failed. Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully registeredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredcom.vmware.vc.iofilter.HostVendorProviderRegistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully registeredEventExFailed to unregister vSphere APIs for I/O Filters (VAIO) host vendor provider.errorFailed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. Reason : {fault.msg}.com.vmware.vc.iofilter.HostVendorProviderUnregistrationFailedEvent|Failed to unregister vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name}. 
Reason : {fault.msg}.ExtendedEventvSphere APIs for I/O Filters (VAIO) host vendor provider has been successfully unregisteredinfovSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredvSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredcom.vmware.vc.iofilter.HostVendorProviderUnregistrationSuccessEvent|vSphere APIs for I/O Filters (VAIO) vendor provider on {host.name} has been successfully unregisteredExtendedEventIoFilterManager API invoked with untrusted certificate SSL trust policywarningIoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name}com.vmware.vc.iofilter.UntrustedCertificateEvent|IoFilterManager API invoked with untrusted certificate SSL trust policy for VIB URL {vibUrl} on cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventKey providers are backed up.infocom.vmware.vc.kms.crypto.AllBackedUp|All key providers are backed up.EventExKey creation failed on key provider.errorcom.vmware.vc.kms.crypto.KeyGenerateFail|Key creation failed on key provider {clusterName} with error code {errorCode}. Check log for details.EventExKey provider(s) are not backed up.errorcom.vmware.vc.kms.crypto.NotBackedUp|Key provider(s) {providerIds} are not backed up.EventExKey provider backup is suggested after it is updated.warningcom.vmware.vc.kms.crypto.NotBackedUpAfterUpdate|Key provider(s) {providerIds} are not backed up. Backup is suggested after updating a provider.EventExFailed to send keys because of key provider error.errorcom.vmware.vc.kms.crypto.SendKeyError.KMSClusterError|Failed to send keys {keys} because of KMS connection error.EventExFailed to send keys because keys are missing on key provider.errorcom.vmware.vc.kms.crypto.SendKeyError.KeyMissingOnKMS|Failed to send keys {keys} because of keys missing on key provider.EventExThe Trusted Key Provider is not available.warningcom.vmware.vc.kms.crypto.TrustAuthority.ClusterNotAvailable|The Trusted Key Provider {keyProviderId} is not available.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.ClusterUnhealthy|The Trusted Key Provider {keyProviderId} is unhealthy. Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExThe Trusted Key Provider is unhealthy.errorcom.vmware.vc.kms.crypto.TrustAuthority.KmsUnhealthy|The key server {serverName} in the Trusted Key Provider {keyProviderId} is unhealthy. 
Reasons: {errorMessage.@enum.com.vmware.vc.kms.crypto.TrustAuthority.UnhealthyReason}.EventExKey Management Server is unreachableerrorcom.vmware.vc.kms.crypto.Unreachable|Key Management Server {serverName}({address}) is unreachableEventExRetrieved Key Management Server vendor information.infocom.vmware.vc.kms.crypto.Vendor|Key Management Server {serverName}({address}) vendor: {vendor}EventExVirtual NIC entered passthrough modeinfoNetwork passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is active on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterEnteredPassthroughEvent|Network passthrough is active on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExVirtual NIC exited passthrough modeinfoNetwork passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name}Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name}Network passthrough is inactive on adapter {deviceLabel}com.vmware.vc.npt.VmAdapterExitedPassthroughEvent|Network passthrough is inactive on adapter {deviceLabel} of virtual machine {vm.name} on host {host.name} in {datacenter.name}EventExFailed to clone state for entity on extensionerrorFailed to clone state on extension {extensionName}com.vmware.vc.ovfconsumers.CloneOvfConsumerStateErrorEvent|Failed to clone state for the entity '{entityName}' on extension {extensionName}EventExFailed to retrieve OVF environment sections for VM on extensionerrorFailed to retrieve OVF environment sections from extension {extensionName}com.vmware.vc.ovfconsumers.GetOvfEnvironmentSectionsErrorEvent|Failed to retrieve OVF environment sections for VM '{vm.name}' from extension {extensionName}EventExUnable to power on VM after cloningerrorPowering on after cloning was blocked by an extension. Message: {description}com.vmware.vc.ovfconsumers.PowerOnAfterCloneErrorEvent|Powering on VM '{vm.name}' after cloning was blocked by an extension. 
Message: {description}EventExFailed to register entity on extensionerrorcom.vmware.vc.ovfconsumers.RegisterEntityErrorEvent|Failed to register entity '{entityName}' on extension {extensionName}EventExFailed to unregister entities on extensionerrorcom.vmware.vc.ovfconsumers.UnregisterEntitiesErrorEvent|Failed to unregister entities on extension {extensionName}EventExFailed to validate OVF descriptor on extensionerrorcom.vmware.vc.ovfconsumers.ValidateOstErrorEvent|Failed to validate OVF descriptor on extension {extensionName}ExtendedEventAnswer file exportedinfoAnswer file for host {host.name} has been exportedAnswer file for host {host.name} has been exportedAnswer file exportedcom.vmware.vc.profile.AnswerFileExportedEvent|Answer file for host {host.name} in datacenter {datacenter.name} has been exportedExtendedEventHost customization settings updatedinfoHost customization settings for host {host.name} has been updatedHost customization settings for host {host.name} has been updatedHost customization settings updatedcom.vmware.vc.profile.AnswerFileUpdatedEvent|Host customization settings for host {host.name} in datacenter {datacenter.name} has been updatedEventExResource pool renamedinfoResource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'Resource pool '{oldName}' has been renamed to '{newName}'com.vmware.vc.rp.ResourcePoolRenamedEvent|Resource pool '{oldName}' has been renamed to '{newName}'ExtendedEventDatastore maintenance mode operation canceledinfoThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledThe datastore maintenance mode operation has been canceledcom.vmware.vc.sdrs.CanceledDatastoreMaintenanceModeEvent|The datastore maintenance mode operation has been canceledExtendedEventDatastore cluster is healthyinfoDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthyDatastore cluster {objectName} is healthycom.vmware.vc.sdrs.ClearDatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} is healthyExtendedEventConfigured storage DRSinfoConfigured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}Configured storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.ConfiguredStorageDrsOnPodEvent|Configured storage DRS on datastore cluster {objectName}ExtendedEventDatastore cluster has datastores that belong to different SRM Consistency GroupswarningDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsDatastore cluster {objectName} has datastores that belong to different SRM Consistency Groupscom.vmware.vc.sdrs.ConsistencyGroupViolationEvent|Datastore cluster {objectName} has datastores that belong to different SRM Consistency GroupsExtendedEventDatastore entered maintenance modeinfoDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modeDatastore {ds.name} has entered maintenance modecom.vmware.vc.sdrs.DatastoreEnteredMaintenanceModeEvent|Datastore {ds.name} 
has entered maintenance modeExtendedEventDatastore is entering maintenance modeinfoDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modeDatastore {ds.name} is entering maintenance modecom.vmware.vc.sdrs.DatastoreEnteringMaintenanceModeEvent|Datastore {ds.name} is entering maintenance modeExtendedEventDatastore exited maintenance modeinfoDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modeDatastore {ds.name} has exited maintenance modecom.vmware.vc.sdrs.DatastoreExitedMaintenanceModeEvent|Datastore {ds.name} has exited maintenance modeEventExDatastore cluster has datastores shared across multiple datacenterswarningDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersDatastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacenterscom.vmware.vc.sdrs.DatastoreInMultipleDatacentersEvent|Datastore cluster {objectName} has one or more datastores {datastore} shared across multiple datacentersExtendedEventErrors encountered while datastore entering into maintenance modeerrorDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modeDatastore {ds.name} encountered errors while entering maintenance modecom.vmware.vc.sdrs.DatastoreMaintenanceModeErrorsEvent|Datastore {ds.name} encountered errors while entering maintenance modeExtendedEventStorage DRS disabledinfoDisabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}Disabled storage DRS on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsDisabledEvent|Disabled storage DRS on datastore cluster {objectName}EventExStorage DRS enabledinfoEnabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}com.vmware.vc.sdrs.StorageDrsEnabledEvent|Enabled storage DRS on datastore cluster {objectName} with automation level {behavior.@enum.storageDrs.PodConfigInfo.Behavior}ExtendedEventStorage DRS invocation failederrorStorage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}Storage DRS invocation failed on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsInvocationFailedEvent|Storage DRS invocation failed on datastore cluster {objectName}ExtendedEventNew storage DRS recommendation generatedinfoA new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster 
{objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}A new storage DRS recommendation has been generated on datastore cluster {objectName}com.vmware.vc.sdrs.StorageDrsNewRecommendationPendingEvent|A new storage DRS recommendation has been generated on datastore cluster {objectName}EventExDatastore cluster connected to host(s) that do not support storage DRSwarningDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSDatastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRScom.vmware.vc.sdrs.StorageDrsNotSupportedHostConnectedToPodEvent|Datastore cluster {objectName} is connected to one or more hosts {host} that do not support storage DRSExtendedEventPending storage recommendations were appliedinfoAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedAll pending recommendations on datastore cluster {objectName} were appliedcom.vmware.vc.sdrs.StorageDrsRecommendationApplied|All pending recommendations on datastore cluster {objectName} were appliedEventExStorage DRS migrated VM disksinfoStorage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStorageMigrationEvent|Storage DRS migrated disks of VM {vm.name} to datastore {ds.name}EventExStorage DRS placed VM disksinfoStorage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}Storage DRS placed disks of VM {vm.name} on datastore {ds.name}com.vmware.vc.sdrs.StorageDrsStoragePlacementEvent|Storage DRS placed disks of VM {vm.name} on datastore {ds.name}EventExDatastore cluster createdinfoCreated datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}Created datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodCreatedEvent|Created datastore cluster {objectName}EventExDatastore cluster deletedinfoRemoved datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}Removed datastore cluster {objectName}com.vmware.vc.sdrs.StoragePodDestroyedEvent|Removed datastore cluster {objectName}EventExSIOC: pre-4.1 host connected to SIOC-enabled datastorewarningSIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. 
This is an unsupported configuration.SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.com.vmware.vc.sioc.NotSupportedHostConnectedToDatastoreEvent|SIOC has detected that a host: {host} connected to a SIOC-enabled datastore: {objectName} is running an older version of ESX that does not support SIOC. This is an unsupported configuration.ExtendedEventESXi VASA client certificate provision has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateProvisionFailure|ESXi VASA client certificate provision has failedExtendedEventESXi VASA client certificate provision has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateProvisionSuccess|ESXi VASA client certificate provision has succeededExtendedEventESXi VASA client certificate register to some/all VP(s) has failederrorcom.vmware.vc.sms.EsxiVasaClientCertificateRegisterFailure|ESXi VASA client certificate register to some/all VP(s) has failedExtendedEventESXi VASA client certificate register to VP(s) has succeededinfocom.vmware.vc.sms.EsxiVasaClientCertificateRegisterSuccess|ESXi VASA client certificate register to VP(s) has succeededEventExSystem capability warning from storage providerwarningcom.vmware.vc.sms.LunCapabilityInitEvent|Storage provider [{providerName}] : system capability warning for {eventSubjectId} : {msgTxt}EventExSystem capability normal event from storage providerinfocom.vmware.vc.sms.LunCapabilityMetEvent|Storage provider [{providerName}] : system capability normal for {eventSubjectId}EventExSystem capability alert from storage providererrorcom.vmware.vc.sms.LunCapabilityNotMetEvent|Storage provider [{providerName}] : system capability alert for {eventSubjectId} : {msgTxt}EventExA Storage Alarm of type 'Object' cleared by the VASA providerinfocom.vmware.vc.sms.ObjectTypeAlarmClearedEvent|Storage provider [{providerName}] cleared a Storage Alarm of type 'Object' on {eventSubjectId} : {msgTxt}EventExAn alert on an object raised by the VASA providererrorcom.vmware.vc.sms.ObjectTypeAlarmErrorEvent|Storage provider [{providerName}] raised an alert type 'Object' on {eventSubjectId} : {msgTxt}EventExA warning on an object raised by the VASA providerwarningcom.vmware.vc.sms.ObjectTypeAlarmWarningEvent|Storage provider [{providerName}] raised a warning of type 'Object' on {eventSubjectId} : {msgTxt}EventExRegistering renewed VC Client Certificate failed for the VASA provider.errorcom.vmware.vc.sms.RegisterVcClientCertOnRenewalFailure|Registering renewed VC Client Certificate failed for VASA provider with url : {provider}.ExtendedEventRegistering renewed VC Client Certificate succeeded for all the VASA providers.infocom.vmware.vc.sms.RegisterVcClientCertOnRenewalSuccess|Registering renewed VC Client Certificate succeeded for all the VASA providers.EventExThin provisioning capacity threshold normal event from storage providerinfocom.vmware.vc.sms.ThinProvisionedLunThresholdClearedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold normal for {eventSubjectId}EventExThin provisioning capacity threshold alert from storage providererrorcom.vmware.vc.sms.ThinProvisionedLunThresholdCrossedEvent|Storage provider [{providerName}] : thin provisioning capacity threshold alert for {eventSubjectId}EventExThin provisioning capacity threshold warning from storage providerwarningcom.vmware.vc.sms.ThinProvisionedLunThresholdInitEvent|Storage provider [{providerName}] : thin 
provisioning capacity threshold warning for {eventSubjectId}EventExStorage provider certificate will expire very shortlyerrorcom.vmware.vc.sms.VasaProviderCertificateHardLimitReachedEvent|Certificate for storage provider {providerName} will expire very shortly. Expiration date : {expiryDate}EventExVASA Provider certificate is renewedinfocom.vmware.vc.sms.VasaProviderCertificateRenewalEvent|VASA Provider certificate for {providerName} is renewedEventExStorage provider certificate will expire soonwarningcom.vmware.vc.sms.VasaProviderCertificateSoftLimitReachedEvent|Certificate for storage provider {providerName} will expire soon. Expiration date : {expiryDate}EventExStorage provider certificate is validinfocom.vmware.vc.sms.VasaProviderCertificateValidEvent|Certificate for storage provider {providerName} is validEventExStorage provider is connectedinfocom.vmware.vc.sms.VasaProviderConnectedEvent|Storage provider {providerName} is connectedEventExStorage provider is disconnectederrorcom.vmware.vc.sms.VasaProviderDisconnectedEvent|Storage provider {providerName} is disconnectedEventExRefreshing CA certificates and CRLs failed for some VASA providerserrorcom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsFailure|Refreshing CA certificates and CRLs failed for VASA providers with url : {providerUrls}ExtendedEventRefreshing CA certificates and CRLs succeeded for all registered VASA providers.infocom.vmware.vc.sms.VasaProviderRefreshCACertsAndCRLsSuccess|Refreshing CA certificates and CRLs succeeded for all registered VASA providers.EventExOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate failed for the VASA 5.0 or greater VASA providers.errorcom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationFailure|On VMCA Root Certificate rotation, register and refresh certificates failed for VASA 5.0 or greater VASA provider : {provider}ExtendedEventOn VMCA Root Certificate rotation, register of vCenter client certificate and/or refresh of VASA VP certificate succeeded for all the VASA 5.0 or greater VASA providers.infocom.vmware.vc.sms.VcClientAndVpCertRefreshOnVmcaRootCertRotationSuccess|On VMCA Root Certificate rotation, register and refresh certificates succeeded for all the VASA 5.0 or greater VASA providers.EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vmName} connected to {datastore.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vmName} connected to datastore {datastore.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vmName} connected to {datastore.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vmName} connected to {datastore.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.datastore.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vmName} connected to {datastore.name} in 
{datacenter.name} compliance status is unknown from storage provider {providerName}.EventExHealth event from storage providerinfocom.vmware.vc.sms.provider.health.event|Storage provider [{providerName}] : health event for {eventSubjectId} : {msgTxt}EventExSystem event from storage providerinfocom.vmware.vc.sms.provider.system.event|Storage provider [{providerName}] : system event : {msgTxt}EventExVirtual disk bound to a policy profile is compliant backing object based storage.infoVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is compliant from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is compliant from storage provider {providerName}.EventExVirtual disk bound to a policy profile is non compliant backing object based storage.errorVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} is not compliant [{operationalStatus}] from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusNonCompliantEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} is not compliant [{operationalStatus}] from storage provider {providerName}.EventExVirtual disk bound to a policy profile is unknown compliance status backing object based storage.warningVirtual disk {diskKey} on {vm.name} on {host.name} in {computeResource.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} on {host.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.Virtual disk {diskKey} on {vm.name} compliance status is unknown from storage provider {providerName}.com.vmware.vc.sms.vm.ComplianceStatusUnknownEvent|Virtual disk {diskKey} on {vm.name} on {host.name} and {computeResource.name} in {datacenter.name} compliance status is unknown from storage provider {providerName}.EventExProfile association/dissociation failederrorProfile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}Profile association/dissociation failed for {entityName}com.vmware.vc.spbm.ProfileAssociationFailedEvent|Profile association/dissociation failed for {entityName}EventExConfiguring storage policy failed.errorConfiguring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}Configuring storage policy failed for VM {entityName}. Verify that SPBM service is healthy. Fault Reason : {errorMessage}com.vmware.vc.spbm.ServiceErrorEvent|Configuring storage policy failed for VM {entityName}. 
Verify that SPBM service is healthy. Fault Reason : {errorMessage}ExtendedEventQuick stats is not up-to-dateinfoQuick stats on {host.name} in {computeResource.name} is not up-to-dateQuick stats on {host.name} is not up-to-dateQuick stats on {host.name} is not up-to-datecom.vmware.vc.stats.HostQuickStatesNotUpToDateEvent|Quick stats on {host.name} in {computeResource.name} in {datacenter.name} is not up-to-date <EventLongDescription id="com.vmware.vc.stats.HostQuickStatesNotUpToDateEvent"> <description> Quick stats on the host is not up-to-date. </description> <cause> <description> Quickstats on the host are not up-to-date. This is expected if the host was recently added or reconnected or VC just started up. </description> <action> No specific action needs to be taken. </action> </cause> </EventLongDescription> EventExODBC errorerrorcom.vmware.vc.stats.StatsInsertErrorEvent|Stats insertion failed for entity {entity} due to ODBC error. <EventLongDescription id="com.vmware.vc.stats.StatsInsertErrorEvent"> <description> If a set of performance statistics data insertion fails due to database related issues, this event is logged. </description> <cause> <description>Usually an attempt to insert duplicate entries causes this event</description> <action>Usually it is transient and self-healing. If not then probably the database contains rogue entries. Manually deleting the data for the particular stat provider might fix the issue</action> </cause> </EventLongDescription> EventExRoot user password expired.errorcom.vmware.vc.system.RootPasswordExpiredEvent|Root user password has expired. Log in to https://{pnid}:5480 to update the root password.EventExRoot user password is about to expire.warningcom.vmware.vc.system.RootPasswordExpiryEvent|Root user password expires in {days} days. 
Log in to https://{pnid}:5480 to update the root password.ExtendedEventFT Disabled VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because the FT state is disabledHA VM Component Protection will protect this virtual machine as non-FT virtual machine because the FT state is disabledcom.vmware.vc.vcp.FtDisabledVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because the FT state is disabledExtendedEventFailover FT VM due to component failureinfoFT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} on host {host.name} is going to fail over to Secondary VM due to component failureFT Primary VM {vm.name} is going to fail over to Secondary VM due to component failureFT Primary VM is going to fail over to Secondary VM due to component failurecom.vmware.vc.vcp.FtFailoverEvent|FT Primary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} is going to fail over to Secondary VM due to component failure ExtendedEventFT VM failover failederrorFT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} failed to failover to secondaryFT virtual machine {vm.name} on host {host.name} failed to failover to secondaryFT virtual machine {vm.name} failed to failover to secondaryFT virtual machine failed to failover to secondarycom.vmware.vc.vcp.FtFailoverFailedEvent|FT virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to failover to secondaryExtendedEventRestarting FT secondary due to component failureinfoHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine {vm.name} due to component failureHA VM Component Protection is restarting FT secondary virtual machine due to component failurecom.vmware.vc.vcp.FtSecondaryRestartEvent|HA VM Component Protection is restarting FT secondary virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to component failureExtendedEventFT secondary VM restart failederrorFT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} failed to restartFT Secondary VM {vm.name} on host {host.name} failed to restartFT Secondary VM {vm.name} failed to restartFT Secondary VM failed to restartcom.vmware.vc.vcp.FtSecondaryRestartFailedEvent|FT Secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartExtendedEventNeed secondary VM protected as non-FT VMinfoHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine since it has been in the needSecondary 
state too longHA VM Component Protection protects virtual machine {vm.name} on host {host.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects virtual machine {vm.name} as non-FT virtual machine because it has been in the needSecondary state too longHA VM Component Protection protects this virtual machine as non-FT virtual machine because it has been in the needSecondary state too longcom.vmware.vc.vcp.NeedSecondaryFtVmTreatAsNonFtEvent|HA VM Component Protection protects virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} as non-FT virtual machine because it has been in the needSecondary state too longEventExVM Component Protection test endsinfoVM Component Protection test ends on host {host.name} in cluster {computeResource.name}VM Component Protection test ends on host {host.name}VM Component Protection test endscom.vmware.vc.vcp.TestEndEvent|VM Component Protection test ends on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}EventExVM Component Protection test startsinfoVM Component Protection test starts on host {host.name} in cluster {computeResource.name}VM Component Protection test starts on host {host.name}VM Component Protection test startscom.vmware.vc.vcp.TestStartEvent|VM Component Protection test starts on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventNo action on VMinfoHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} due to the feature configuration settingHA VM Component Protection did not take action on virtual machine {vm.name} due to the feature configuration settingHA VM Component Protection did not take action due to the feature configuration settingcom.vmware.vc.vcp.VcpNoActionEvent|HA VM Component Protection did not take action on virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} due to the feature configuration settingEventExVirtual machine lost datastore accesserrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {datastore}Virtual machine {vm.name} on host {host.name} lost access to {datastore}Virtual machine {vm.name} lost access to {datastore}Virtual machine lost access to {datastore}com.vmware.vc.vcp.VmDatastoreFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {datastore}EventExVirtual machine lost VM network accessibilityerrorVirtual machine {vm.name} on host {host.name} in cluster {computeResource.name} lost access to {network}Virtual machine {vm.name} on host {host.name} lost access to {network}Virtual machine {vm.name} lost access to {network}Virtual machine lost access to {network}com.vmware.vc.vcp.VmNetworkFailedEvent|Virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} lost access to {network}EventExVM power off hangerrorHA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} on 
host {host.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine {vm.name} successfully after trying {numTimes} times and will keep tryingHA VM Component Protection could not power off virtual machine successfully after trying {numTimes} times and will keep tryingcom.vmware.vc.vcp.VmPowerOffHangEvent|HA VM Component Protection could not power off virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} successfully after trying {numTimes} times and will keep tryingExtendedEventRestarting VM due to component failureinfoHA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name}HA VM Component Protection is restarting virtual machine {vm.name} due to component failureHA VM Component Protection is restarting virtual machine due to component failurecom.vmware.vc.vcp.VmRestartEvent|HA VM Component Protection is restarting virtual machine {vm.name} due to component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}ExtendedEventVirtual machine affected by component failure failed to restarterrorVirtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} failed to restartVirtual machine {vm.name} affected by component failure on host {host.name} failed to restartVirtual machine {vm.name} affected by component failure failed to restartVirtual machine affected by component failure failed to restartcom.vmware.vc.vcp.VmRestartFailedEvent|Virtual machine {vm.name} affected by component failure on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} failed to restartEventExNo candidate host to restarterrorHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for virtual machine {vm.name} after waiting {numSecWait} seconds and will keep tryingHA VM Component Protection could not find a destination host for this virtual machine after waiting {numSecWait} seconds and will keep tryingcom.vmware.vc.vcp.VmWaitForCandidateHostEvent|HA VM Component Protection could not find a destination host for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} after waiting {numSecWait} seconds and will keep tryingEventExCertificate will expire soon.warningcom.vmware.vc.vecs.CertExpirationEvent|Certificate '{subject}' from '{store}' expires on {expiryDate}EventExKMS Client Certificate will expire soon.warningcom.vmware.vc.vecs.KMSClientCertExpirationEvent|KMS Client Certificate '{subject}' expires on {expiryDate}EventExKMS Server Certificate will expire soon.warningcom.vmware.vc.vecs.KMSServerCertExpirationEvent|KMS Server Certificate '{subject}' expires on {expiryDate}EventExOperation on the SSD device failederrorConfiguration on disk {disk.path} failed. 
Reason : {fault.msg}com.vmware.vc.vflash.SsdConfigurationFailedEvent|Configuration on disk {disk.path} failed. Reason : {fault.msg}EventExVirtual machine is locked because an error occurred on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KMSClusterError|Virtual machine {vmName} is locked. Before unlocking the virtual machine, check the status of key provider(s) {errorCluster} and the key(s) {missingKeys} on the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the host.errorVirtual machine is locked because keys were missing on the host {host}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnHost|Virtual machine {vmName} is locked because keys were missing on the host {host}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because keys were missing on the key provider.errorVirtual machine is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}.com.vmware.vc.vm.Crypto.VMLocked.KeyMissingOnKMS|Virtual machine {vmName} is locked. Before unlocking the virtual machine, manually recover the missing key(s) {missingKeys} to the key provider(s) {kmsCluster}. Go to docs.vmware.com for detailed remediation steps.EventExVirtual machine is locked because the required Trusted Key Provider(s) is unavailable.errorVirtual machine is locked. Before unlocking, check the status of Trusted Key Provider(s) {kmsCluster} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).com.vmware.vc.vm.Crypto.VMLocked.TAKMSClusterUnavaliable|Virtual machine {vmName} is locked. 
Before unlocking, check the status of Trusted Key Provider(s) {keyProviderId} and the Trust Authority managed key(s) {thsKeys} on the Trusted Key Provider(s).EventExVirtual machine is locked because Trust Authority managed key(s) are missing on the required host.errorVirtual machine is locked because Trust Authority managed key(s) are missing on host {host}.com.vmware.vc.vm.Crypto.VMLocked.TAKeyMissingOnHost|Virtual machine {vmName} is locked because Trust Authority managed key(s) {missedkeys} are missing on the required host {host}.EventExVirtual machine is unlocked.infoVirtual machine is unlocked.com.vmware.vc.vm.Crypto.VMUnlocked|Virtual machine {vmName} is unlocked.EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}Virtual machine {vm.name} {newMoRef} was cloned from {oldMoRef}com.vmware.vc.vm.DstVmClonedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was cloned from {oldMoRef}EventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {newMoRef} in {computeResource.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} on host {host.name} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}Virtual machine {vm.name} {newMoRef} was migrated from {oldMoRef}com.vmware.vc.vm.DstVmMigratedEvent|Virtual machine {vm.name} {newMoRef} in {computeResource.name} in {datacenter.name} was migrated from {oldMoRef}ExtendedEventVirtual machine PMem bandwidth usage is normalinfoVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalVirtual machine {vm.name}'s PMem bandwidth usage is normalThe virtual machine's PMem bandwidth usage is normalcom.vmware.vc.vm.PMemBandwidthGreen|Virtual machine {vm.name}'s PMem bandwidth usage is normalExtendedEventVirtual machine PMem bandwidth usage is highwarningVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highVirtual machine {vm.name}'s PMem bandwidth usage is highThe virtual machine's PMem bandwidth usage is highcom.vmware.vc.vm.PMemBandwidthYellow|Virtual machine {vm.name}'s PMem bandwidth usage is highExtendedEventVirtual machine failed to power on after cloning.errorVirtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after cloning on host {host.name}.Virtual machine {vm.name} failed to power on after performing cloning operation on this host.Virtual machine failed to power on after cloning.com.vmware.vc.vm.PowerOnAfterCloneErrorEvent|Virtual machine {vm.name} failed to power on after cloning on host {host.name} in datacenter {datacenter.name}EventExVirtual machine clone failederrorVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}Virtual machine {vm.name} {oldMoRef} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}"Virtual machine on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in 
{destDatacenter.name}com.vmware.vc.vm.SrcVmCloneFailedEvent|Virtual machine {vm.name} {oldMoRef} on host {host.name} in {datacenter.name} failed to clone to {destHost.name}, {destPool.name} in {destDatacenter.name}EventExVirtual machine cloned successfullyinfoVirtual machine {vm.name} {oldMoRef} in {computeResource.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} on host {host.name} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}Virtual machine {vm.name} {oldMoRef} was cloned to {newMoRef}com.vmware.vc.vm.SrcVmClonedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} was cloned to {newMoRef}ExtendedEventVirtual machine failed to create instant clone childerrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childVirtual machine {vm.name} {oldMoRef} failed to create instant clone childcom.vmware.vc.vm.SrcVmForkFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to create instant clone childEventExVirtual machine migration failederrorVirtual machine {vm.name} {oldMoRef} in {computeResource.name} failed to migrateVirtual machine {vm.name} {oldMoRef} on host {host.name} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migrateVirtual machine {vm.name} {oldMoRef} failed to migratecom.vmware.vc.vm.SrcVmMigrateFailedEvent|Virtual machine {vm.name} {oldMoRef} in {computeResource.name} in {datacenter.name} failed to migrateEventExVirtual machine migrated successfullyinfoVirtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} on {host.name} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}Virtual machine {vm.name} {oldMoRef} was migrated to {newMoRef}com.vmware.vc.vm.SrcVmMigratedEvent|Virtual machine {vm.name} {oldMoRef} on {host.name}, {computeResource.name} in {datacenter.name} was migrated to {newMoRef}ExtendedEventTemplate converted to VMinfoTemplate {vm.name} converted to VM on {host.name}Template {vm.name} converted to VM on {host.name}Template {vm.name} converted to VMConverted to VM on {host.name}com.vmware.vc.vm.TemplateConvertedToVmEvent|Template {vm.name} converted to VM on {host.name} in {datacenter.name}ExtendedEventVirtual machine tier 1 bandwidth usage is normalinfoVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalVirtual machine {vm.name}'s tier 1 bandwidth usage is normalThe virtual machine's tier 1 bandwidth usage is normalcom.vmware.vc.vm.Tier1BandwidthGreen|Virtual machine {vm.name}'s tier 1 bandwidth usage is normalExtendedEventVirtual machine tier 1 bandwidth usage is highwarningVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highVirtual machine {vm.name}'s tier 1 bandwidth usage is highThe virtual machine's tier 1 bandwidth usage is highcom.vmware.vc.vm.Tier1BandwidthYellow|Virtual machine {vm.name}'s tier 1 bandwidth usage is highExtendedEventThe network adapter of VM successfully activate UPTinfoUPT on network adapter is activatedcom.vmware.vc.vm.Uptv2Active|The UPT is successfully activated on the network adapterEventExThe network adapter of VM fails to 
activate UPTwarningUPT on network adapter is not activatedcom.vmware.vc.vm.Uptv2Inactive|The UPT failed to activate on the network adapter.{details}EventExVirtual NIC reservation is not satisfiederrorReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is not satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is not satisfiedReservation of Virtual NIC {deviceLabel} is not satisfiedcom.vmware.vc.vm.VmAdapterResvNotSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is not satisfiedEventExVirtual NIC reservation is satisfiedinfoReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} is satisfiedReservation of Virtual NIC {deviceLabel} of machine {vm.name} on this host is satisfiedReservation of Virtual NIC {deviceLabel} is satisfiedcom.vmware.vc.vm.VmAdapterResvSatisfiedEvent|Reservation of Virtual NIC {deviceLabel} of machine {vm.name} on host {host.name} in datacenter {datacenter.name} is satisfiedExtendedEventVM marked as templateinfoVM {vm.name} marked as template on {host.name}VM {vm.name} marked as template on {host.name}VM {vm.name} marked as templateMarked as template on {host.name}com.vmware.vc.vm.VmConvertedToTemplateEvent|VM {vm.name} marked as template on {host.name} in {datacenter.name}ExtendedEventPromoted disks of virtual machine successfullyinfoPromoted disks of virtual machine {vm.name} in {computeResource.name}Promoted disks of virtual machine {vm.name} on host {host.name}Promoted disks of virtual machine {vm.name}Promoted disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotedEvent|Promoted disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventPromoting disks of virtual machineinfoPromoting disks of virtual machine {vm.name} in {computeResource.name}Promoting disks of virtual machine {vm.name} on host {host.name}Promoting disks of virtual machine {vm.name}Promoting disks of virtual machine {vm.name}com.vmware.vc.vm.VmDisksPromotingEvent|Promoting disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}EventExHot migrating virtual machine with encryptioninfoHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating {vm.name} on {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptionHot migrating from {host.name}, {ds.name} to {destHost}, {destDatastore} with encryptioncom.vmware.vc.vm.VmHotMigratingWithEncryptionEvent|Hot migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost}, {destDatastore} in {destDatacenter} with encryptionEventExcom.vmware.vc.vm.VmMigratingWithEncryptionEvent|ExtendedEventFailed to promote disks of virtual machineinfoFailed to promote disks of virtual machine {vm.name} in {computeResource.name}Failed to promote disks of virtual machine {vm.name} on host {host.name}Failed to promote disks of virtual machine {vm.name}Failed to promote disks of virtual machine {vm.name}com.vmware.vc.vm.VmPromoteDisksFailedEvent|Failed to promote disks of virtual machine {vm.name} in {computeResource.name} in {datacenter.name}ExtendedEventReconfigure VM failed 
for {VM} on shared diskwarningReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskReconfigure VM failed for {VM} on shared diskcom.vmware.vc.vm.VmReconfigureFailedonSharedDiskEvent|Reconfigure VM failed for {VM} on shared diskExtendedEventVirtual machine register failederrorVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on host {host.name} failedVirtual machine {vm.name} registration on this host failedVirtual machine registration failedcom.vmware.vc.vm.VmRegisterFailedEvent|Virtual machine {vm.name} registration on {host.name} in datacenter {datacenter.name} failedEventExFailed to revert the virtual machine state to a snapshoterrorFailed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} on host {host.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine {vm.name} to snapshot {snapshotName}, with ID {snapshotId}Failed to revert the execution state of the virtual machine to snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateFailedToRevertToSnapshot|Failed to revert the execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} to snapshot {snapshotName}, with ID {snapshotId}EventExThe virtual machine state has been reverted to a snapshotinfoThe execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} on host {host.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine {vm.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}The execution state of the virtual machine has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}com.vmware.vc.vm.VmStateRevertedToSnapshot|The execution state of the virtual machine {vm.name} on host {host.name}, in compute resource {computeResource.name} has been reverted to the state of snapshot {snapshotName}, with ID {snapshotId}EventExFault Tolerance virtual machine syncing to secondary with encryptioninfoFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionFault Tolerance VM syncing to secondary on {dstHost} with encryptioncom.vmware.vc.vm.VmSyncingWithEncryptionEvent|Fault Tolerance VM {vm.name} syncing to secondary on {dstHost} with encryptionExtendedEventVirtual machine termination requestedinfoVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine {vm.name} termination requestedVirtual machine termination requestedcom.vmware.vc.vm.VmTerminateEvent|Virtual machine {vm.name} termination requestedExtendedEventVirtual machine termination failederrorVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine {vm.name} termination failedVirtual machine termination failedcom.vmware.vc.vm.VmTerminateFailedEvent|Virtual 
machine {vm.name} termination failedEventExThe disk device is encrypted with mixed keys.warningThe disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.com.vmware.vc.vm.crypto.DiskchainUsingMixedKeys|The disk device {diskName} is encrypted with mixed keys. It's probably caused by rekey/re-encryption failure. Please retry.EventExCryptographic operation failed due to insufficient disk space on datastoreerrorCryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.com.vmware.vc.vm.crypto.NoDiskSpace|Cryptographic operation on virtual machine {vmName} failed due to insufficient disk space on datastore {datastore}.EventExcom.vmware.vc.vm.crypto.RekeyFail|ExtendedEventApplication Monitoring Is Not SupportedwarningApplication monitoring is not supported on {host.name} in cluster {computeResource.name}Application monitoring is not supported on {host.name}Application monitoring is not supportedcom.vmware.vc.vmam.AppMonitoringNotSupported|Application monitoring is not supported on {host.name} in cluster {computeResource.name} in {datacenter.name}EventExvSphere HA detected application heartbeat status changewarningvSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name}vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent|vSphere HA detected that the application heartbeat status changed to {status.@enum.VirtualMachine.AppHeartbeatStatusType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthMonitoringStateChangedEvent"> <description> Application monitoring state changes indicate a change in the health of the application being monitored or in the application-monitoring process. A transition from gray to green occurs when application heartbeat is being enabled from within the guest. A transition to red occurs after vSphere HA didn't receive any heartbeats within 30 seconds. A transition from red to green occurs if heartbeats begin again before vSphere HA can react. A transition to gray occurs after application heartbeating is disabled from within the guest. </description> <cause> <description> Either the user initiated action from inside the guest or vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> If the state transitions to red, investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. Frequent state transitions to or from gray may indicate a problem with the application-monitoring agent. If they occur, investigate whether the enabling/disabling of monitoring is expected. 
</action> </cause> </EventLongDescription> EventExvSphere HA detected application state changewarningvSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name}vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for this virtual machinecom.vmware.vc.vmam.VmAppHealthStateChangedEvent|vSphere HA detected that the application state changed to {state.@enum.vm.GuestInfo.AppStateType} for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmAppHealthStateChangedEvent"> <description> Application state changes indicate that an in-guest application has posted one of the two allowed values - appStateOk or appStateNeedReset. The former indicates that the monitored application is fine, the latter causes an immediate reset if Application Monitoring is enabled for this virtual machine. </description> <cause> <description> This is an in-guest initiated action. </description> <action> If vSphere HA and Application Monitoring are enabled for this virtual machine, it is reset if the state is appStateNeedReset. If the virtual machine is being migrated using vMotion the reset will be delayed until the virtual machine has reached its destination. Also, the reset will be delayed until the datastore connectivity issues are resolved. </action> </cause> </EventLongDescription> ExtendedEventvSphere HA detected application heartbeat failurewarningvSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA detected application heartbeat failure for {vm.name} on {host.name}vSphere HA detected application heartbeat failure for {vm.name}vSphere HA detected application heartbeat failure for this virtual machinecom.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent|vSphere HA detected application heartbeat failure for {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="com.vmware.vc.vmam.VmDasAppHeartbeatFailedEvent"> <description> vSphere HA has detected a heartbeat failure from the application-monitoring agent inside the guest. If application monitoring is enabled in vSphere the virtual machine will be reset. </description> <cause> <description> vSphere HA did not receive application heartbeats from the application-monitoring agent within a 30-second interval. </description> <action> Investigate why the application-monitoring agent stopped heartbeating. Missing heartbeats may be a result of the application failing or a problem with the application-monitoring agent. </action> </cause> </EventLongDescription> EventExvCenter server replication status has changed.infocom.vmware.vc.vmdir.ReplicationStatusChangeEvent|vCenter Server Replication Status : {replicationStatus} . 
{message}EventExvCenter server replication state has changedinfocom.vmware.vc.vmdir.StateChangeEvent|vCenter Server Replication State changed to '{newState}' from '{oldState}' cause: {reason}EventExvSAN datastore {datastoreName} does not have capacityerrorvSAN datastore {datastoreName} in cluster {computeResource.name} does not have capacityvSAN datastore {datastoreName} does not have capacitycom.vmware.vc.vsan.DatastoreNoCapacityEvent|vSAN datastore {datastoreName} in cluster {computeResource.name} in datacenter {datacenter.name} does not have capacity <EventLongDescription id="com.vmware.vc.vsan.DatastoreNoCapacityEvent"> <description> vSAN datastore does not have capacity. </description> <cause> <description> This might be because no disk is configured for vSAN, local disks configured for vSAN service become inaccessible or flash disks configured for vSAN service become inaccessible. </description> <action> Check if vSAN storage configuration is correct and if the local disks and flash disks configured for vSAN service are accessible. </action> </cause> </EventLongDescription> EventExHost cannot communicate with one or more other nodes in the vSAN enabled clustererrorHost {host.name} in cluster {computeResource.name} cannot communicate with all other nodes in the vSAN enabled clusterHost {host.name} cannot communicate with all other nodes in the vSAN enabled clusterHost cannot communicate with one or more other nodes in the vSAN enabled clustercom.vmware.vc.vsan.HostCommunicationErrorEvent|Host {host.name} in cluster {computeResource.name} in datacenter {datacenter.name} cannot communicate with all other nodes in the vSAN enabled cluster <EventLongDescription id="com.vmware.vc.vsan.HostCommunicationErrorEvent"> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. </description> <cause> <description> Host cannot communicate with one or more other nodes in the vSAN enabled cluster. This might be caused by network partition or misconfiguration. Each host needs at least one vmnic with vSAN enabled. Those vmnics need to be on the same physical network. The host should have the vSAN service enabled. </description> <action> Check the host for vSAN service configuration, vSAN network configuration and network connection. </action> </cause> </EventLongDescription> ExtendedEventHost with vSAN service enabled is not in the vCenter clustererror{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name}{host.name} with vSAN service enabled is not in the vCenter clusterHost with vSAN service enabled is not in the vCenter clustercom.vmware.vc.vsan.HostNotInClusterEvent|{host.name} with vSAN service enabled is not in the vCenter cluster {computeResource.name} in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.HostNotInClusterEvent"> <description> Host with the vSAN service enabled is not in the vCenter cluster. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN service is not enabled with the recommended interface. </description> <action> Add the host into the cluster or disable vSAN on the host. </action> </cause> </EventLongDescription> ExtendedEventHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. 
Please check recent tasks for more detailserror{host.name} is in a vSAN cluster {computeResource.name} but does not have vSAN service enabled{host.name} is in a vSAN cluster but does not have vSAN service enabledHost is in a vSAN cluster but does not have vSAN service enabled because of insufficient memory or other errors. Please check recent tasks for more detailscom.vmware.vc.vsan.HostNotInVsanClusterEvent|{host.name} is in a vSAN enabled cluster {computeResource.name} in datacenter {datacenter.name} but does not have vSAN service enabled <EventLongDescription id="com.vmware.vc.vsan.HostNotInVsanClusterEvent"> <description> Host is in a vSAN enabled cluster but does not have vSAN service enabled. </description> <cause> <description> vSAN service membership does not match vCenter cluster membership. This may happen if the vSAN is not enabled with the recommended interface or the vSAN configuration is not set up appropriately. </description> <action> Re-enable vSAN or check the vSAN configuration. </action> </cause> </EventLongDescription> EventExvSAN host vendor provider registration has failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationFailedEvent"> <description> Cannot deregister host vendor provider in Storage management service </description> <cause> <description>Host vendor provider deregistration failed</description> <action>Check if Storage management service is running</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider has been successfully unregisteredinfovSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredvSAN vendor provider {host.name} has been successfully unregisteredcom.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully unregistered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderDeregistrationSuccessEvent"> <description> Deregistered host vendor provider from Storage management service </description> </EventLongDescription> EventExvSAN host vendor provider registration failed.errorvSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}.com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent|vSAN vendor provider {host.name} registration has failed. Reason : {fault.msg}. 
<EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationFailedEvent"> <description> Cannot register host vendor provider in Storage management service </description> <cause> <description>Host vendor provider registration failed</description> <action>Check if Storage management service is running</action> <action>Check if the vendor provider on host is running</action> <action>Check if there are network connectivity issues between host and VC</action> </cause> </EventLongDescription> ExtendedEventvSAN host vendor provider registration succeededinfovSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredvSAN vendor provider {host.name} has been successfully registeredcom.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent|vSAN vendor provider {host.name} has been successfully registered <EventLongDescription id="com.vmware.vc.vsan.HostVendorProviderRegistrationSuccessEvent"> <description> Registered host vendor provider in Storage management service </description> </EventLongDescription> ExtendedEventvSAN network is not configurederrorvSAN network is not configured on {host.name} in cluster {computeResource.name}vSAN network is not configured on {host.name}vSAN network is not configuredcom.vmware.vc.vsan.NetworkMisConfiguredEvent|vSAN network is not configured on {host.name}, in cluster {computeResource.name}, and in datacenter {datacenter.name} <EventLongDescription id="com.vmware.vc.vsan.NetworkMisConfiguredEvent"> <description> vSAN network is not configured. </description> <cause> <description> vSAN network is not set up appropriately. vSAN datastore will not be formed as expected. </description> <action> Create at least one vmnic with vSAN enabled on the host. </action> </cause> </EventLongDescription> EventExFound another host participating in the vSAN service which is not a member of this host's vCenter clustererrorFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter cluster {computeResource.name}Found host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clusterFound host(s) {hostString} participating in the vSAN service which is not a member of this host's vCenter clustercom.vmware.vc.vsan.RogueHostFoundEvent|Found host(s) {hostString} participating in the vSAN service in cluster {computeResource.name} in datacenter {datacenter.name} is not a member of this host's vCenter cluster <EventLongDescription id="com.vmware.vc.vsan.RogueHostFoundEvent"> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. </description> <cause> <description> Found another host participating in the vSAN service which is not a member of this host's vCenter cluster. This might be caused by misconfiguration. </description> <action> Add the rogue host into the cluster or disable vSAN on the rogue host. </action> </cause> </EventLongDescription> EventExFailed to turn off the disk locator LEDerrorFailed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOffFailedEvent|Failed to turn off the locator LED of disk {disk.path}. Reason : {fault.msg}EventExFailed to turn on the disk locator LEDerrorFailed to turn on the locator LED of disk {disk.path}. Reason : {fault.msg}com.vmware.vc.vsan.TurnDiskLocatorLedOnFailedEvent|Failed to turn on the locator LED of disk {disk.path}. 
Reason : {fault.msg}EventExvSAN cluster needs disk format upgradewarningvSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationvSAN cluster has one or more hosts for which disk format upgrade is recommended: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationcom.vmware.vc.vsan.VsanHostNeedsUpgradeEvent|vSAN cluster {computeResource.name} has one or more hosts that need disk format upgrade: {host}. For more detailed information of vSAN upgrade, please see the 'vSAN upgrade procedure' section in the documentationEventExUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}errorUnable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}com.vmware.vc.vtpm.FailedProcessingVTpmCertsEvent|Unable to {operation} a Certificate for Virtual TPM device on Virtual Machine {vmName}. {fault.msg}ExtendedEventA compute policy has been createdinfocom.vmware.vcenter.compute.policies.createEvent|Compute policy {policyName} has been createdExtendedEventA compute policy has been deletedinfocom.vmware.vcenter.compute.policies.deleteEvent|Compute policy {policyName} has been deletedEventExDatabase replication state changed: sync, async or no replicationinfocom.vmware.vcha.DB.replication.state.changed|Database replication mode changed to {newState}EventExThe management interface (NIC0) IP address you specified as for the Passive node is different than the original IP address used to configure vCenter HA. You must use the same IP address.errorcom.vmware.vcha.cluster.differentFailoverIp|The management interface (NIC0) IP address you specified as {given} for the Passive node is different than the original IP address {original} used to configure vCenter HA. You must use the same IP address.EventExvCenter HA cluster mode changedinfocom.vmware.vcha.cluster.mode.changed|vCenter HA cluster mode changed to {clusterMode}ExtendedEventUnable to enable mode.errorcom.vmware.vcha.cluster.modeEnableFailed|Unable to enable mode.EventExThe hostname for a node does not map to the vCenter Server PNID.errorcom.vmware.vcha.cluster.pnidHostnameMismatch|The hostname for {nodeIp} does not map to the vCenter Server PNID. 
Review the hostname you specified during the VM clone customization step.ExtendedEventVerify if the Passive and the Witness nodes are up and reachable.errorcom.vmware.vcha.cluster.quorumNotCloned|Verify if the Passive and the Witness nodes are up and reachable.EventExUnable to SSH to a node.errorcom.vmware.vcha.cluster.sshConnectFailed|Unable to SSH to {nodeIp}.ExtendedEventvCenter HA cluster state is currently degradedwarningcom.vmware.vcha.cluster.state.degraded|vCenter HA cluster state is currently degradedExtendedEventvCenter HA cluster is destroyedinfocom.vmware.vcha.cluster.state.destroyed|vCenter HA cluster is destroyedExtendedEventvCenter HA cluster state is currently healthyinfocom.vmware.vcha.cluster.state.healthy|vCenter HA cluster state is currently healthyExtendedEventvCenter HA cluster state is currently isolatederrorcom.vmware.vcha.cluster.state.isolated|vCenter HA cluster state is currently isolatedExtendedEventUnable to get vpxd hostname.errorcom.vmware.vcha.cluster.vcFqdnUnavailable|Unable to get vpxd hostname.ExtendedEventFailover cannot proceed when cluster is in disabled modewarningcom.vmware.vcha.failover.failed.disabled.mode|Failover cannot proceed when cluster is in disabled modeExtendedEventFailover cannot proceed when cluster does not have all three nodes connectedwarningcom.vmware.vcha.failover.failed.node.lost|Failover cannot proceed when cluster does not have all three nodes connectedExtendedEventFailover cannot proceed when Passive node is not ready to takeoverwarningcom.vmware.vcha.failover.failed.passive.not.ready|Failover cannot proceed when vPostgres on Passive node is not ready to takeoverExtendedEventFailover did not succeed. Failed to flush the data to the Passive nodewarningcom.vmware.vcha.failover.flush.failed.degraded|Failover did not succeed. 
Failed to flush the data to the Passive nodeExtendedEventFailover failure is acknowledgedinfocom.vmware.vcha.failover.flush.failed.healthy|Failover failure is acknowledgedExtendedEventFailover status is unknowninfocom.vmware.vcha.failover.flush.failed.unknown|Failover status is unknownExtendedEventFailover succeededinfocom.vmware.vcha.failover.succeeded|Failover succeededEventExAppliance File replication state changedinfocom.vmware.vcha.file.replication.state.changed|Appliance {fileProviderType} is {state}EventExThis node was forcefully converted to the Active nodeinfocom.vmware.vcha.force.reset.active|Node {nodename} was forcefully converted to the Active nodeEventExOne node joined back to the clusterinfocom.vmware.vcha.node.joined|Node {nodeName} joined back to the clusterEventExOne node left the clusterwarningcom.vmware.vcha.node.left|Node {nodeName} left the clusterExtendedEventPSC HA state is currently degradedinfocom.vmware.vcha.psc.ha.health.degraded|PSC HA state is currently degradedExtendedEventPSC HA state is currently healthyinfocom.vmware.vcha.psc.ha.health.healthy|PSC HA state is currently healthyExtendedEventPSC HA state is not being monitoredinfocom.vmware.vcha.psc.ha.health.unknown|PSC HA is not monitored after vCenter HA cluster is destroyedExtendedEventVMware Directory Service health is currently degradedwarningcom.vmware.vcha.vmdir.health.degraded|VMware Directory Service health is currently degradedExtendedEventVMware Directory Service is currently healthyinfocom.vmware.vcha.vmdir.health.healthy|VMware Directory Service is currently healthyExtendedEventVMware Directory Service health is not being monitoredinfocom.vmware.vcha.vmdir.health.unknown|VMware Directory Service health is not being monitoredExtendedEventvSphere Cluster Services mode is system managed on cluster.infocom.vmware.vcls.cluster.DeploymentModeSystemManagedEvent|vSphere Cluster Services mode is system managed on cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.infocom.vmware.vcls.cluster.DrsDisabledHaDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-disabled and HA-disabled cluster.ExtendedEventvSphere Cluster Services mode is absent on DRS-enabled cluster.errorcom.vmware.vcls.cluster.DrsEnabledDeployModeAbsentEvent|vSphere Cluster Services mode is absent on DRS-enabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.DrsEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. DRS-enabled cluster waiting for VSAN VASA provider availability.ExtendedEventvSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.warningcom.vmware.vcls.cluster.HaEnabledDrsDisabledDeploymentModeAbsentEvent|vSphere Cluster Services mode is absent on HA-enabled and DRS-disabled cluster.ExtendedEventvSphere Cluster Services deployment in progress. HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.HaEnabledVsanProviderWaitingEvent|vSphere Cluster Services deployment in progress. 
HA-enabled and DRS-disabled cluster waiting for VSAN VASA provider availability.ExtendedEventVSAN VASA provider became available.infocom.vmware.vcls.cluster.VsanProviderAvailableEvent|VSAN VASA provider became available.ExtendedEventTimed out waiting for VSAN VASA provider availability.infocom.vmware.vcls.cluster.VsanProviderTimedoutEvent|Timed out waiting for VSAN VASA provider availability.EventExA Data Processing Unit is down.infoA Data Processing Unit is down.com.vmware.vim.dpu.down|The Data Processing Unit with id '{dpuId}' is down.EventExA Data Processing Unit has been removed from the system.infoA Data Processing Unit has been removed from the system.com.vmware.vim.dpu.removed|The Data Processing Unit with id '{dpuId}' has been removed from the system.EventExThe management state for a Data Processing Unit has changed.infoThe management state for a Data Processing Unit has changed.com.vmware.vim.dpu.state.changed|The management state for the Data Processing Unit with id '{dpuId}' has changed to '{state}'.EventExThe dpu failover ended on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.com.vmware.vim.dpuFailover.end|DPU failover from {fromDpu} to {toDpu} on vds {vds} has ended.EventExThe dpu failover started on host.infoDPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.com.vmware.vim.dpuFailover.start|DPU failover from {fromDpu} to {toDpu} on vds {vds} has been started.ExtendedEventInvalid UTF-8 string encountered.warningInvalid UTF-8 string encountered.com.vmware.vim.utf8filter.badvalue|Invalid UTF-8 string encountered.ExtendedEventSome of the disks of the virtual machine failed to load. The information present for them in the virtual machine configuration may be incompletewarningSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} on host {host.name} failed to load. The information present for them in the virtual machine configuration may be incompleteSome of the disks of the virtual machine {vm.name} failed to load. The information present for them in the virtual machine configuration may be incompletecom.vmware.vim.vm.DisksNotLoaded|Some of the disks of the virtual machine {vm.name} on host {host.name} failed to load. 
The information present for them in the virtual machine configuration may be incompleteExtendedEventSnapshot operations are not allowed due to some of the snapshot related objects failed to load.warningSnapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.com.vmware.vim.vm.SnapshotNotAllowed|Snapshot operations are not allowed on virtual machine {vm.name} due to some of the snapshot related objects failed to load.ExtendedEventVirtual machine reboot converted to power off because the rebootPowerOff option is enabledinfoReboot converted to power off on virtual machine {vm.name}.Reboot converted to power off.com.vmware.vim.vm.reboot.powerOff|Reboot converted to power off on virtual machine {vm.name} on {host.name} because the rebootPowerOff option is enabled.EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' bound to provider '{providerName}'EventExvService dependency boundinfocom.vmware.vim.vsm.dependency.bind.vm|vService dependency '{dependencyName}' on '{vm.name}' bound to provider '{providerName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vApp|Created vService dependency '{dependencyName}' with type '{dependencyType}' on vApp '{targetName}'EventExvService dependency createdinfocom.vmware.vim.vsm.dependency.create.vm|Created vService dependency '{dependencyName}' with type '{dependencyType}' on '{vm.name}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vApp|Destroyed vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency destroyedinfocom.vmware.vim.vsm.dependency.destroy.vm|Destroyed vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vApp|Reconfigured vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency reconfiguredinfocom.vmware.vim.vsm.dependency.reconfigure.vm|Reconfigured vService dependency '{dependencyName}' on '{vm.name}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vApp|vService dependency '{dependencyName}' on vApp '{targetName}' unbound from provider '{providerName}'EventExvService dependency unboundinfocom.vmware.vim.vsm.dependency.unbind.vm|vService dependency '{dependencyName}' on '{vm.name}' unbound from provider '{providerName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vApp|Updated vService dependency '{dependencyName}' on vApp '{targetName}'EventExvService dependency updatedinfocom.vmware.vim.vsm.dependency.update.vm|Updated vService dependency '{dependencyName}' on '{vm.name}'EventExvService provider registeredinfocom.vmware.vim.vsm.provider.register|vService provider '{providerName}' with type '{providerType}' registered for extension '{extensionKey}'EventExvService provider unregisteredinfocom.vmware.vim.vsm.provider.unregister|vService provider '{providerName}' with type '{providerType}' unregistered for extension '{extensionKey}'EventExvService provider 
updatedinfocom.vmware.vim.vsm.provider.update|Updating vService provider '{providerName}' registered for extension '{extensionKey}'EventExDeleting stale vdisks generated by FCD migration failed.errorcom.vmware.vslm.DeleteStaleDiskFailureEvent|Deleting stale vdisk {diskPath} and related files generated as part of FCD migration failed. Try to delete them manually.EventExRegistering vdisk as FCD at source failed during rollback of unsuccessful migration.errorcom.vmware.vslm.RegisterDiskFailed|Registering {fcdPath} with name {fcdName} as FCD at source failed during rollback of unsuccessful migration. Try to register it manually using RegisterDisk API.EventExUnregistering of vdisk at destination failed during rollback of unsuccessful migration.errorcom.vmware.vslm.UnRegisterDiskFailed|Unregistering of FCD {fcdId} failed at destination during rollback of unsuccessful migration. Reconcile of datastore {datastore} should fix inconsistencies if any.EventExConnectivity check completedinfocom.vmware.vsphere.client.security.ConnectivityCheckEvent|Connectivity check completed. Operation: {Operation}. Subscription status: {SubscriptionCheckResult}. Connectivity status: {ConnectivityCheckResult}. Access type: {AccessType}. User: {Username}ExtendedEventDatastore is accessible to all hosts under the cluster.infocom.vmware.wcp.Datastore.accessible|Datastore is accessible to all hosts under the clusterExtendedEventDatastore not accessible to all hosts under the cluster.warningcom.vmware.wcp.Datastore.inaccessible|Datastore not accessible to all hosts under the cluster.EventExRemote access for an ESXi local user account has been locked temporarilly due to multiple failed login attempts.warningesx.audit.account.locked|Remote access for ESXi local user account '{1}' has been locked for {2} seconds after {3} failed login attempts.EventExMultiple remote login failures detected for an ESXi local user account.warningesx.audit.account.loginfailures|Multiple remote login failures detected for ESXi local user account '{1}'.ExtendedEventRestoring factory defaults through DCUI.warningesx.audit.dcui.defaults.factoryrestore|The host has been restored to default factory settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventThe DCUI has been disabled.infoesx.audit.dcui.disabled|The DCUI has been disabled.ExtendedEventThe DCUI has been enabled.infoesx.audit.dcui.enabled|The DCUI has been enabled.ExtendedEventRebooting host through DCUI.warningesx.audit.dcui.host.reboot|The host is being rebooted through the Direct Console User Interface (DCUI).ExtendedEventShutting down host through DCUI.warningesx.audit.dcui.host.shutdown|The host is being shut down through the Direct Console User Interface (DCUI).ExtendedEventRestarting host agents through DCUI.infoesx.audit.dcui.hostagents.restart|The management agents on the host are being restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExLogin authentication on DCUI failederroresx.audit.dcui.login.failed|Authentication of user {1} has failed. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExDCUI login password changed.infoesx.audit.dcui.login.passwd.changed|Login password for user {1} has been changed. 
Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventFactory network settings restored through DCUI.warningesx.audit.dcui.network.factoryrestore|The host has been restored to factory network settings. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.EventExRestarting network through DCUI.infoesx.audit.dcui.network.restart|A management interface {1} has been restarted. Please consult ESXi Embedded and vCenter Server Setup Guide or follow the Ask VMware link for more information.ExtendedEventHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.esx.audit.entropy.available.low|Host is configured with external entropy source. Host is running low on entropy bits in its memory cache. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.warningHost is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.esx.audit.entropy.external.source.disconnected|Host is configured with external entropy source. The external entropy source is disconnected. Please refer to KB 89074 for more details.EventExPowering off host through esxcliwarningesx.audit.esxcli.host.poweroff.reason|The host is being powered off through esxcli. Reason for powering off: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.reboot.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExRebooting host through esxcliwarningesx.audit.esxcli.host.restart.reason|The host is being rebooted through esxcli. Reason for reboot: {1}, User: {2}.EventExHost acceptance level changedinfoesx.audit.esximage.hostacceptance.changed|Host acceptance level changed from {1} to {2}ExtendedEventUEFI Secure Boot enabled: Cannot skip signature checks.warningesx.audit.esximage.install.nobypasssigcheck|UEFI Secure Boot enabled: Cannot skip signature checks. Installing unsigned VIBs will prevent the system from booting. So the vib signature check will be enforced.ExtendedEventAttempting to install an image profile bypassing signing and acceptance level verification.warningesx.audit.esximage.install.nosigcheck|Attempting to install an image profile bypassing signing and acceptance level verification. This may pose a large security risk.ExtendedEventAttempting to install an image profile with validation disabled.warningesx.audit.esximage.install.novalidation|Attempting to install an image profile with validation disabled. This may result in an image with unsatisfied dependencies, file or package conflicts, and potential security violations.EventExSECURITY ALERT: Installing image profile.warningesx.audit.esximage.install.securityalert|SECURITY ALERT: Installing image profile '{1}' with {2}.EventExSuccessfully installed image profile.infoesx.audit.esximage.profile.install.successful|Successfully installed image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). 
Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully updated host to new image profile.infoesx.audit.esximage.profile.update.successful|Successfully updated host to image profile '{1}'. Installed {2} VIB(s), removed {3} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully changed software on host.infoesx.audit.esximage.software.apply.succeeded|Successfully installed {1} component(s) and removed {2} component(s) on host. To see more details about the transaction, run 'esxcli software profile get'.EventExSuccessfully installed VIBs.infoesx.audit.esximage.vib.install.successful|Successfully installed {1} VIB(s), removed {2} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExSuccessfully removed VIBsinfoesx.audit.esximage.vib.remove.successful|Successfully removed {1} VIB(s). Please use 'esxcli software profile get' or see log for more detail about the transaction.EventExDPU trust validation failederroresx.audit.esxtokend.dputrust.failed|DPU: {1} trust validation failedEventExDPU was removedwarningesx.audit.esxtokend.dputrust.removed|DPU:{1} was removed.EventExDPU trust validation succeededinfoesx.audit.esxtokend.dputrust.succeeded|DPU: {1} trust validation succeeded.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Warning tripped.EventExNVDIMM: Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.alarms.lifetime.warning|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Warning tripped.EventExNVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.spareblocks|NVDIMM (handle {1}, idString {2}): SpareBlocksPct ({3}) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.warningesx.audit.hardware.nvd.health.alarms.temperature|NVDIMM (handle {1}, idString {2}): Temperature ({3} C) has reached the pre-programmed threshold limit.EventExNVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).warningesx.audit.hardware.nvd.health.life.pctused|NVDIMM (handle {1}, idString {2}): Life Percentage Used ({3}) has reached the threshold limit ({4}).EventExNVDIMM Count of DRAM correctable ECC errors above threshold.infoesx.audit.hardware.nvd.health.module.ce|NVDIMM (handle {1}, idString {2}): Count of DRAM correctable ECC errors above threshold.EventExNVDIMM: Energy Source Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.lifetime.warning|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Warning tripped.EventExNVDIMM: Energy Source Temperature Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.es.temperature.warning|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Warning tripped.EventExNVDIMM: Module Lifetime Warning tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.lifetime.warning|NVDIMM (handle {1}, idString {2}): Module Lifetime Warning tripped.EventExNVDIMM: Module Temperature Warning 
tripped.warningesx.audit.hardware.nvd.health.vmw.alarms.module.temperature.warning|NVDIMM (handle {1}, idString {2}): Module Temperature Warning tripped.EventExNVDIMM: Maintenance needed.warningesx.audit.hardware.nvd.health.vmw.statusflags.maintNeeded|NVDIMM (handle {1}, idString {2}): Maintenance needed.EventExA physical disk has been inserted.infoA physical disk has been insertedesx.audit.hcm.event.disk.insertion|A physical disk has been inserted ({1}).EventExA physical disk has been removed.infoA physical disk has been removed.esx.audit.hcm.event.disk.removal|A physical disk has been removed ({1}).ExtendedEventHost has booted.infoesx.audit.host.boot|Host has booted.EventExHost experienced a crashinfoesx.audit.host.crash.reason|The crash at {1} occurred due to: {2}. More details will be available in the generated vmkernel-zdump.EventExThe host experienced a crashinfoesx.audit.host.crash.reason.available|The host experienced a crash. Reason: {1}.ExtendedEventHost experienced a crashinfoesx.audit.host.crash.reason.unavailable|Host experienced a crash. More details will be available in the generated vmkernel-zdump.EventExThe number of virtual machines registered on the host exceeded limit.warningThe number of virtual machines registered on host {host.name} in cluster {computeResource.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered on host {host.name} exceeded limit: {current} registered, {limit} is the maximum supported.The number of virtual machines registered exceeded limit: {current} registered, {limit} is the maximum supported.esx.audit.host.maxRegisteredVMsExceeded|The number of virtual machines registered on host {host.name} in cluster {computeResource.name} in {datacenter.name} exceeded limit: {current} registered, {limit} is the maximum supported.EventExThe host has been powered offinfoesx.audit.host.poweroff.reason.available|The host has been powered off. Reason for powering off: {1}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.management|The power off at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.subsystem|The power off at {1} was requested by {2} due to: {3}.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.timestamp|The power off at {1} was requested due to: {2}.ExtendedEventHost had been powered offinfoesx.audit.host.poweroff.reason.unavailable|Host had been powered off. The poweroff was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated power offinfoesx.audit.host.poweroff.reason.user|The power off at {1} was requested by user/entity {2} due to: {3}.EventExThe host experienced Quick Bootinfoesx.audit.host.quickboot.reason.available|The host experienced Quick Boot. 
Reason for reboot: {1}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.management|The Quick Boot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.subsystem|The Quick Boot at {1} was requested by {2} due to: {3}.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.timestamp|The Quick Boot at {1} was requested due to: {2}.ExtendedEventHost experienced Quick Bootinfoesx.audit.host.quickboot.reason.unavailable|Host experienced Quick Boot. The Quick Boot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated Quick Bootinfoesx.audit.host.quickboot.reason.user|The Quick Boot at {1} was requested by user/entity {2} due to: {3}.EventExThe host has been rebootedinfoesx.audit.host.reboot.reason.available|The host has been rebooted. Reason for reboot: {1}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.management|The reboot at {1} was requested by {2} by user/entity {3} due to: {4}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.subsystem|The reboot at {1} was requested by {2} due to: {3}.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.timestamp|The reboot at {1} was requested due to: {2}.ExtendedEventHost had been rebootedinfoesx.audit.host.reboot.reason.unavailable|Host had been rebooted. The reboot was not the result of a kernel error, deliberate reboot, or shut down. This could indicate a hardware issue. Hardware may reboot abruptly due to power outages, faulty components, and heating issues. To investigate further, engage the hardware vendor.EventExUser had initiated rebootinfoesx.audit.host.reboot.reason.user|The reboot at {1} was requested by user/entity {2} due to: {3}.ExtendedEventHost is rebooting.infoesx.audit.host.stop.reboot|Host is rebooting.ExtendedEventHost is shutting down.infoesx.audit.host.stop.shutdown|Host is shutting down.EventExPowering off host through hostdwarningesx.audit.hostd.host.poweroff.reason|The host is being powered off through hostd. Reason for powering off: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.reboot.reason|The host is being rebooted through hostd. Reason for reboot: {1}, User: {2}.EventExRebooting host through hostdwarningesx.audit.hostd.host.restart.reason|The host is being rebooted through hostd. 
Reason for reboot: {1}, User: {2}.ExtendedEventAdministrator access to the host has been enabled.infoesx.audit.lockdownmode.disabled|Administrator access to the host has been enabled.ExtendedEventAdministrator access to the host has been disabled.infoesx.audit.lockdownmode.enabled|Administrator access to the host has been disabled.ExtendedEventList of lockdown exception users has been changed.infoesx.audit.lockdownmode.exceptions.changed|List of lockdown exception users has been changed.ExtendedEventThe host has canceled entering maintenance mode.infoesx.audit.maintenancemode.canceled|The host has canceled entering maintenance mode.ExtendedEventThe host has entered maintenance mode.infoesx.audit.maintenancemode.entered|The host has entered maintenance mode.ExtendedEventThe host has begun entering maintenance mode.infoesx.audit.maintenancemode.entering|The host has begun entering maintenance mode.ExtendedEventThe host has exited maintenance mode.infoesx.audit.maintenancemode.exited|The host has exited maintenance mode.ExtendedEventThe host has failed entering maintenance mode.erroresx.audit.maintenancemode.failed|The host has failed entering maintenance mode.EventExFirewall configuration has changed.infoesx.audit.net.firewall.config.changed|Firewall configuration has changed. Operation '{1}' for rule set {2} succeeded.ExtendedEventFirewall has been disabled.warningesx.audit.net.firewall.disabled|Firewall has been disabled.EventExFirewall has been enabled for port.infoesx.audit.net.firewall.enabled|Firewall has been enabled for port {1}.EventExPort is now protected by Firewall.infoesx.audit.net.firewall.port.hooked|Port {1} is now protected by Firewall.EventExPort is no longer protected with Firewall.warningesx.audit.net.firewall.port.removed|Port {1} is no longer protected with Firewall.EventExLACP disabledinfoesx.audit.net.lacp.disable|LACP for VDS {1} is disabled.EventExLACP enabledinfoesx.audit.net.lacp.enable|LACP for VDS {1} is enabled.EventExuplink is connectedinfoesx.audit.net.lacp.uplink.connected|LACP info: uplink {1} on VDS {2} got connected.EventExThe host has canceled entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.canceled|The host has canceled entering '{1}'.EventExThe host has entered a partial maintenance mode.infoesx.audit.partialmaintenancemode.entered|The host has entered '{1}'.EventExThe host has begun entering a partial maintenance mode.infoesx.audit.partialmaintenancemode.entering|The host has begun entering '{1}'.EventExThe host has exited a partial maintenance mode.infoesx.audit.partialmaintenancemode.exited|The host has exited '{1}'.EventExThe host has failed entering a partial maintenance mode.erroresx.audit.partialmaintenancemode.failed|The host has failed entering '{1}'.ExtendedEventThe ESXi command line shell has been disabled.infoesx.audit.shell.disabled|The ESXi command line shell has been disabled.ExtendedEventThe ESXi command line shell has been enabled.infoesx.audit.shell.enabled|The ESXi command line shell has been enabled.ExtendedEventSSH access has been disabled.infoesx.audit.ssh.disabled|SSH access has been disabled.ExtendedEventSSH access has been enabled.infoesx.audit.ssh.enabled|SSH access has been enabled.EventExSSH session was closed.infoesx.audit.ssh.session.closed|SSH session was closed for '{1}@{2}'.EventExSSH login has failed.infoesx.audit.ssh.session.failed|SSH login has failed for '{1}@{2}'.EventExSSH session was opened.infoesx.audit.ssh.session.opened|SSH session was opened for '{1}@{2}'.EventExPowering off 
hostwarningesx.audit.subsystem.host.poweroff.reason|The host is being powered off. Reason for powering off: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.reboot.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.EventExRebooting hostwarningesx.audit.subsystem.host.restart.reason|The host is being rebooted. Reason for reboot: {1}, User: {2}, Subsystem: {3}.ExtendedEventSupershell session has been started by a user.warningSupershell session has been started by a user.esx.audit.supershell.access|Supershell session has been started by a user.EventExTest with an int argumenterroresx.audit.test.test1d|Test with {1}EventExTest with a string argumenterroresx.audit.test.test1s|Test with {1}ExtendedEventUSB configuration has changed.infoUSB configuration has changed on host {host.name} in cluster {computeResource.name}.USB configuration has changed on host {host.name}.USB configuration has changed.esx.audit.usb.config.changed|USB configuration has changed on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExEnforcement level changed for all security domains.warningesx.audit.uw.secpolicy.alldomains.level.changed|The enforcement level for all security domains has been changed to {1}. The enforcement level must always be set to enforcing.EventExEnforcement level changed for security domain.warningesx.audit.uw.secpolicy.domain.level.changed|The enforcement level for security domain {1} has been changed to {2}. The enforcement level must always be set to enforcing.ExtendedEventExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.warningesx.audit.uw.security.User.ExecInstalledOnly.disabled|ExecInstalledOnly has been disabled. This allows the execution of non-installed binaries on the host. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.Infoesx.audit.uw.security.User.ExecInstalledOnly.enabled|ExecInstalledOnly has been enabled. This prevents the execution of non-installed binaries on the host.EventExExecution of non-installed file prevented.warningesx.audit.uw.security.execInstalledOnly.violation|Execution of unknown (non VIB installed) binary '{1}' prevented. Unknown content can cause malware attacks similar to Ransomware.EventExExecution of non-installed file detected.warningesx.audit.uw.security.execInstalledOnly.warning|Execution of unknown (non VIB installed) binary '{1}'. Unknown content can cause malware attacks similar to Ransomware.ExtendedEventLVM device discovered.infoesx.audit.vmfs.lvm.device.discovered|One or more LVM devices have been discovered on this host.EventExRead IO performance may be impacted for diskinfoRead IO performance may be impacted for disk {1}: {2}Read IO performance may be impacted for disk {1}: {2}esx.audit.vmfs.sesparse.bloomfilter.disabled|Read IO performance may be impacted for disk {1}: {2}EventExFile system mounted.infoesx.audit.vmfs.volume.mounted|File system {1} on volume {2} has been mounted in {3} mode on this host.EventExLVM volume un-mounted.infoesx.audit.vmfs.volume.umounted|The volume {1} has been safely un-mounted. The datastore is no longer accessible on this host.EventExvSAN device is added back successfully after MEDIUM error.infovSAN device {1} is added back successfully after MEDIUM error. 
Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.devicerebuild|vSAN device {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExvSAN diskgroup is rebuilt successfully after MEDIUM error.infovSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.diskgrouprebuild|vSAN diskgroup {1} is rebuilt successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExFound components with invalid metadatawarning{1} components found with invalid metadata on disk {2} {3}esx.audit.vob.vsan.lsom.foundInvalidMetadataComp|{1} components found with invalid metadata on disk {2} {3}EventExvSAN storagepool is added back successfully after MEDIUM error.infovSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.esx.audit.vob.vsan.lsom.storagepoolrebuild|vSAN storagepool {1} is added back successfully after MEDIUM error. Old UUID {2} New UUID {3}.EventExTest with both int and string arguments.infoesx.audit.vobdtestcorrelator.test|Test with both string: {2} and int: {1}.ExtendedEventvSAN clustering services have been enabled.infovSAN clustering and directory services have been enabled.esx.audit.vsan.clustering.enabled|vSAN clustering and directory services have been enabled.ExtendedEventvSAN virtual NIC has been added.infovSAN virtual NIC has been added.esx.audit.vsan.net.vnic.added|vSAN virtual NIC has been added.ExtendedEventvSAN network configuration has been removed.errorvSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.esx.audit.vsan.net.vnic.deleted|vSAN network configuration has been removed. The host may experience problems communicating with other hosts in vSAN cluster.EventExvSAN RDMA changed for vmknic.infovSAN RDMA changed for vmknic {1}.esx.audit.vsan.rdma.changed|vSAN RDMA changed for vmknic {1}.ExtendedEventHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445warningHost detected weak SSL protocols and disabled them. Please refer to KB article: KB 2151445esx.audit.weak.ssl.protocol|Weak SSL protocols found and disabled. Please refer to KB article: KB 2151445ExtendedEventA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.infoA vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.esx.clear.coredump.configured|A vmkcore disk partition is available and/or a network coredump server has been configured. Host core dumps will be saved.ExtendedEventAt least one coredump target has been configured. Host core dumps will be saved.infoAt least one coredump target has been configured. Host core dumps will be saved.esx.clear.coredump.configured2|At least one coredump target has been configured. Host core dumps will be saved.EventExNVDIMM Energy Source is sufficiently charged.infoesx.clear.hardware.nvd.health.module.es.charged|NVDIMM (handle {1}, idString {2}): Energy Source is sufficiently charged.EventExRestored network connectivity to portgroupsinfoesx.clear.net.connectivity.restored|Network connectivity restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExRestored Network Connectivity to DVPortsinfoesx.clear.net.dvport.connectivity.restored|Network connectivity restored on DVPorts: {1}. 
Physical NIC {2} is up.EventExRestored Network Redundancy to DVPortsinfoesx.clear.net.dvport.redundancy.restored|Uplink redundancy restored on DVPorts: {1}. Physical NIC {2} is up recently.EventExlag transition upinfoesx.clear.net.lacp.lag.transition.up|LACP info: LAG {1} on VDS {2} is up.EventExuplink transition upinfoesx.clear.net.lacp.uplink.transition.up|LACP info: uplink {1} on VDS {2} is moved into link aggregation group.EventExuplink is unblockedinfoesx.clear.net.lacp.uplink.unblocked|LACP info: uplink {1} on VDS {2} is unblocked.EventExRestored uplink redundancy to portgroupsinfoesx.clear.net.redundancy.restored|Uplink redundancy restored on virtual switch {1}, portgroups: {2}. Physical NIC {3} is up.EventExLink state upinfoesx.clear.net.vmnic.linkstate.up|Physical NIC {1} linkstate is up.EventExStorage Device I/O Latency has improvedinfoesx.clear.psastor.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.psastor.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.psastor.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExScsi Device I/O Latency has improvedinfoesx.clear.scsi.device.io.latency.improved|Device {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExDevice has been turned on administratively.infoesx.clear.scsi.device.state.on|Device {1}, has been turned on administratively.EventExDevice that was permanently inaccessible is now online.infoesx.clear.scsi.device.state.permanentloss.deviceonline|Device {1}, that was permanently inaccessible is now online. No data consistency guarantees.EventExExited the All Paths Down stateinfoesx.clear.storage.apd.exit|Device or filesystem with identifier {1} has exited the All Paths Down state.EventExRestored connectivity to storage deviceinfoesx.clear.storage.connectivity.restored|Connectivity to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored path redundancy to storage deviceinfoesx.clear.storage.redundancy.restored|Path redundancy to storage device {1} (Datastores: {2}) restored. Path {3} is active again.EventExRestored connection to NFS serverinfoesx.clear.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExNFS volume I/O Latency has improvedinfoesx.clear.vmfs.nfs.volume.io.latency.improved|NFS volume {1} performance has improved. I/O latency reduced from {2} microseconds to {3} microseconds.EventExvSAN device has come online.infovSAN device {1} has come online.esx.clear.vob.vsan.pdl.online|vSAN device {1} has come online.EventExTest with both int and string arguments.infoesx.clear.vobdtestcorrelator.test|Test with both string: {1} {3} and int: {2}.ExtendedEventvSAN clustering services have now been enabled.infovSAN clustering and directory services have now been enabled.esx.clear.vsan.clustering.enabled|vSAN clustering and directory services have now been enabled.ExtendedEventvSAN now has at least one active network configuration.infovSAN now has a usable network configuration. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.network.available|vSAN now has a usable network configuration. 
Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExA previously reported vmknic now has a valid IP.infovmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.esx.clear.vsan.vmknic.ready|vmknic {1} now has an IP address. Earlier reported connectivity problems, if any, can now be ignored because they are resolved.EventExVVol container has come online.infoesx.clear.vvol.container.online|VVol container {1} has come online.EventExA 3rd party component on ESXi has reported an error.erroresx.problem.3rdParty.error|A 3rd party component, {1}, running on ESXi has reported an error. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.info|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported an informational event.infoesx.problem.3rdParty.information|A 3rd party component, {1}, running on ESXi has reported an informational event. If needed, please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA 3rd party component on ESXi has reported a warning.warningesx.problem.3rdParty.warning|A 3rd party component, {1}, running on ESXi has reported a warning related to a problem. Please follow the knowledge base link ({2}) to see the steps to remedy the problem as reported by {3}. The message reported is: {4}.EventExA corrected memory error occurrederroresx.problem.apei.bert.memory.error.corrected|A corrected memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA fatal memory error occurrederroresx.problem.apei.bert.memory.error.fatal|A fatal memory error occurred in the last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA recoverable memory error occurrederroresx.problem.apei.bert.memory.error.recoverable|A recoverable memory error occurred in last boot. The following details were reported. Physical Addr: {1}, Physical Addr Mask: {2}, Node: {3}, Card: {4}, Module: {5}, Bank: {6}, Device: {7}, Row: {8}, Column: {9} Error type: {10}EventExA corrected PCIe error occurrederroresx.problem.apei.bert.pcie.error.corrected|A corrected PCIe error occurred in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA fatal PCIe error occurrederroresx.problem.apei.bert.pcie.error.fatal|Platform encountered a fatal PCIe error in last boot. The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExA recoverable PCIe error occurrederroresx.problem.apei.bert.pcie.error.recoverable|A recoverable PCIe error occurred in last boot. 
The following details were reported. Port Type: {1}, Device: {2}, Bus #: {3}, Function: {4}, Slot: {5}, Device Vendor: {6}, Version: {7}, Command Register: {8}, Status Register: {9}.EventExAn application running on ESXi host has crashed and core file creation failed.warningesx.problem.application.core.dumpFailed|An application ({1}) running on ESXi host has crashed ({2} time(s) so far), but core dump creation failed.EventExAn application running on ESXi host has crashed and a core file was created.warningesx.problem.application.core.dumped|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). A core file might have been created at {3}.EventExAn application running on ESXi host has crashed and an encrypted core file was created.warningesx.problem.application.core.dumped.encrypted|An application ({1}) running on ESXi host has crashed ({2} time(s) so far). An encrypted core file using keyId {3} might have been created at {4}.ExtendedEventCritical failure detected during boot, please refer to KB 93107.errorA critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.esx.problem.boot.failure.detected|A critical failure was detected during system boot. The host cannot currently run workloads. Please refer to KB 93107 for more details.ExtendedEventSystem clock no longer synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.lostsync|system clock no longer synchronized to upstream time serversExtendedEventSystem clock synchronized to upstream time serverswarningesx.problem.clock.correction.adjtime.sync|system clock synchronized to upstream time serversExtendedEventSystem clock lost synchronization to upstream time serverswarningesx.problem.clock.correction.adjtime.unsync|system clock lost synchronization to upstream time serversEventExApplication system changed clock, synchronization lostwarningesx.problem.clock.correction.changed|{1} stepped system clock to {2}.{3}, synchronization lostEventExAllowed system clock update with large time changewarningesx.problem.clock.correction.delta.allowed|Clock stepped to {1}.{2}, but delta {3} > {4} secondsEventExFailed system clock update with large time changeerroresx.problem.clock.correction.delta.failed|Clock step to {1}.{2} failed, delta {3} > {4} seconds, number of large corrections > {5}EventExAllowed system clock update with large time change, but number of future updates limitedwarningesx.problem.clock.correction.delta.warning|Clock stepped to {1}.{2}, but delta {3} > {4} seconds, {5}/{6} large correctionsEventExSystem clock stepped, lost synchronizationwarningesx.problem.clock.correction.step.unsync|system clock stepped to {1}.{2}, lost synchronizationEventExSystem clock maximum number of large corrections changedwarningesx.problem.clock.parameter.set.maxLargeCorrections|system clock max number of correction set to {1}EventExSystem clock maximum negative phase correction changedwarningesx.problem.clock.parameter.set.maxNegPhaseCorrection|system clock max negative phase correction set to {1}EventExSystem clock maximum positive phase correction changedwarningesx.problem.clock.parameter.set.maxPosPhaseCorrection|system clock max positive phase correction set to {1}EventExSystem clock count of number of large corrections changedwarningesx.problem.clock.parameter.set.numLargeCorrections|system clock number of large correction set to {1}EventExSystem clock VOB report interval 
changedwarningesx.problem.clock.parameter.set.vobReportInterval|system clock max number of correction set to {1}ExtendedEventSystem clock state has been resetwarningesx.problem.clock.state.reset|system clock state has been resetEventExThe storage capacity of the coredump targets is insufficient to capture a complete coredump.warningThe storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.esx.problem.coredump.capacity.insufficient|The storage capacity of the coredump targets is insufficient to capture a complete coredump. Recommended coredump capacity is {1} MiB.EventExThe free space available in default coredump copy location is insufficient to copy new coredumps.warningThe free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.esx.problem.coredump.copyspace|The free space available in default coredump copy location is insufficient to copy new coredumps. Recommended free space is {1} MiB.EventExThe given partition has insufficient amount of free space to extract the coredump.warningThe given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.esx.problem.coredump.extraction.failed.nospace|The given partition has insufficient amount of free space to extract the coredump. At least {1} MiB is required.ExtendedEventNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.warningNo vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured|No vmkcore disk partition is available and no network coredump server has been configured. Host core dumps cannot be saved.ExtendedEventNo coredump target has been configured. Host core dumps cannot be saved.warningNo coredump target has been configured. Host core dumps cannot be saved.esx.problem.coredump.unconfigured2|No coredump target has been configured. Host core dumps cannot be saved.ExtendedEventDRAM ECC not enabled. Please enable it in BIOS.erroresx.problem.cpu.amd.mce.dram.disabled|DRAM ECC not enabled. Please enable it in BIOS.ExtendedEventNot all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. erroresx.problem.cpu.intel.ioapic.listing.error|Not all IO-APICs are listed in the DMAR. Not enabling interrupt remapping on this platform. ExtendedEventMCE monitoring will be disabled as an unsupported CPU was detected. Please consult the ESX HCL for information on supported hardware.erroresx.problem.cpu.mce.invalid|MCE monitoring will be disabled as an unsupported CPU was detected. 
Please consult the ESX HCL for information on supported hardware.EventExHigh number of corrected errors on a page.infoesx.problem.cpu.page.correctederrors.high|High number of corrected errors on host physical page number {1}EventExDisabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.erroresx.problem.cpu.smp.ht.invalid|Disabling HyperThreading due to invalid configuration: Number of threads: {1}, Number of PCPUs: {2}.EventExFound {1} PCPUs, but only using {2} of them due to specified limit.erroresx.problem.cpu.smp.ht.numpcpus.max|Found {1} PCPUs, but only using {2} of them due to specified limit.EventExDisabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.erroresx.problem.cpu.smp.ht.partner.missing|Disabling HyperThreading due to invalid configuration: HT partner {1} is missing from PCPU {2}.EventExError copying ConfigStore from backup.errorError copying ConfigStore from backup.esx.problem.cs.createstore.copy.backup.error|Error copying ConfigStore from backup {1}.ExtendedEventFailed an operation on the ConfigStore database.errorFailed an operation on the ConfigStore database.esx.problem.cs.db.operation.error|Failed an operation on the ConfigStore database.ExtendedEventFailed to setup desired configuration.errorFailed to setup desired configuration.esx.problem.cs.desired.config.error|Failed to setup desired configuration.ExtendedEventError cleaning up Datafile store.errorError cleaning up Datafile store.esx.problem.cs.dfs.cleanup.error|Error cleaning up Datafile store.ExtendedEventDataFile store cannot be restored.errorDataFile store cannot be restored.esx.problem.cs.dfs.restore.error|DataFile store cannot be restored.EventExError processing schema file.errorError processing schema file.esx.problem.cs.schema.file.error|Error processing schema file {1}.EventExInvalid metadata in schema file.errorInvalid metadata in schema file.esx.problem.cs.schema.metadata.error|Invalid metadata in schema file {1}.EventExVibId validation failed for schema file.errorVibId validation failed for schema file.esx.problem.cs.schema.validation.error|VibId validation failed for schema file {1}.EventExError in upgrading config.errorError in upgrading config.esx.problem.cs.upgrade.config.error|Error in upgrading config {1}.EventExUnable to obtain a DHCP lease.erroresx.problem.dhclient.lease.none|Unable to obtain a DHCP lease on interface {1}.EventExNo expiry time on offered DHCP lease.erroresx.problem.dhclient.lease.offered.noexpiry|No expiry time on offered DHCP lease from {1}.EventExThe maintenance mode state for some Data Processing Units may be out of sync with the host.warningThe maintenance mode state for some Data Processing Units may be out of sync with the host.esx.problem.dpu.maintenance.sync.failed|The maintenance mode state for Data Processing Units with ids '{dpus}' may be out of sync with the host.EventExSome drivers need special notice.warningDriver for device {1} is {2}. Please refer to KB article: {3}.esx.problem.driver.abnormal|Driver for device {1} is {2}. Please refer to KB article: {3}.EventExHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. Entropy daemon has become non functional because of cache size change. Please refer to KB 89074 for more details.esx.problem.entropy.config.error|Host is configured with external entropy source. 
Entropy daemon has become non functional because of an {1} change. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.empty|Host is configured with external entropy source. The entropy available in the memory cache and storage cache is exhausted. Please refer to KB 89074 for more details.ExtendedEventHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.errorHost is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.esx.problem.entropy.inmemory.empty|Host is configured with external entropy source. The entropy available in the memory cache is exhausted. Please refer to KB 89074 for more details.EventExCould not install image profile.erroresx.problem.esximage.install.error|Could not install image profile: {1}EventExHost doesn't meet image profile hardware requirements.erroresx.problem.esximage.install.invalidhardware|Host doesn't meet image profile '{1}' hardware requirements: {2}EventExCould not stage image profile.erroresx.problem.esximage.install.stage.error|Could not stage image profile '{1}': {2}ExtendedEventThe host can not support the applied EVC mode.warningesx.problem.evc.incompatible|The host can not support the applied EVC mode.EventExSkipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.device.invalid|Skipping interrupt routing entry with bad device number: {1}. This is a BIOS bug.EventExSkipping interrupt routing entry with bad device pin: {1}. This is a BIOS bug.erroresx.problem.hardware.acpi.interrupt.routing.pin.invalid|Skipping interrupt routing entry with bad device pin: {1}. 
This is a BIOS bug.EventExFPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.clear|FPIN FC congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.creditstall|FPIN FC credit stall congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.congestion.devicespecific|FPIN FC device specific congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.lostcredit|FPIN FC lost credit congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.congestion.oversubscription|FPIN FC oversubscription congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.devicespecific|FPIN FC device specific delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.timeout|FPIN FC delivery time out: Host WWPN {1}, target WWPN {2}.EventExFPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.delivery.unabletoroute|FPIN FC delivery unable to route: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.delivery.unknown|FPIN FC unknown delivery notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.linkintegrity.devicespecific|FPIN FC device specific link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidCRC|FPIN FC link invalid CRC: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.invalidtransmissionword|FPIN FC link invalid transmission word: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link failure: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.linkfailure|FPIN FC link failure: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsignal|FPIN FC link loss of signal: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.lossofsynchronization|FPIN FC link loss of synchronization: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.primitivesequenceprotocolerror|FPIN FC link primitive sequence protocol error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.linkintegrity.uncorrectableFECerror|FPIN FC link uncorrectable FEC error: Host WWPN {1}, target WWPN {2}.EventExFPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN 
{2}.infoesx.problem.hardware.fpin.fc.linkintegrity.unknown|FPIN FC unknown link integrity notification: Host WWPN {1}, target WWPN {2}.EventExFPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.clear|FPIN FC peer congestion clear: Host WWPN {1}, target WWPN {2}.EventExFPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.creditstall|FPIN FC credit stall peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.infoesx.problem.hardware.fpin.fc.peercongestion.devicespecific|FPIN FC device specific peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.lostcredit|FPIN FC lost credit peer congestion: Host WWPN {1}, target WWPN {2}.EventExFPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.warningesx.problem.hardware.fpin.fc.peercongestion.oversubscription|FPIN FC oversubscription peer congestion: Host WWPN {1}, target WWPN {2}.EventExIOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.erroresx.problem.hardware.ioapic.missing|IOAPIC Num {1} is missing. Please check BIOS settings to enable this IOAPIC.ExtendedEventFailed to communicate with the BMC. IPMI functionality will be unavailable on this system.erroresx.problem.hardware.ipmi.bmc.bad|Failed to communicate with the BMC. IPMI functionality will be unavailable on this system.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime ({3}) Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature ({3} C) Error tripped.EventExNVDIMM: Lifetime Error tripped.erroresx.problem.hardware.nvd.health.alarms.lifetime.error|NVDIMM (handle {1}, idString {2}): Lifetime ({3}) Error tripped.EventExNVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.erroresx.problem.hardware.nvd.health.lastshutdownstatus|NVDIMM (handle {1}, idString {2}): Last Shutdown Status ({3}) Not a clean Shutdown, there was either a platform or memory device-related failure while saving data targeted for this memory device.EventExNVDIMM Configuration error detected.erroresx.problem.hardware.nvd.health.module.config.error|NVDIMM (handle {1}, idString {2}): Configuration error detected.EventExNVDIMM Controller failure detected.erroresx.problem.hardware.nvd.health.module.ctlr.fail|NVDIMM (handle {1}, idString {2}): Controller failure detected. Access to the device and its capabilities are lost.EventExNVDIMM Controller firmware error detected.erroresx.problem.hardware.nvd.health.module.ctlr.fw.error|NVDIMM (handle {1}, idString {2}): Controller firmware error detected.EventExNVDIMM Energy Source still charging.warningesx.problem.hardware.nvd.health.module.es.charging|NVDIMM (handle {1}, idString {2}): Energy Source still charging but does not have sufficient charge to support a backup. 
Persistency is temporarily lost for the device.EventExNVDIMM Energy Source failure detected.erroresx.problem.hardware.nvd.health.module.es.fail|NVDIMM (handle {1}, idString {2}): Energy Source failure detected. Persistency is lost for the device.EventExNVDIMM Previous ARM operation failed.warningesx.problem.hardware.nvd.health.module.ops.arm.fail|NVDIMM (handle {1}, idString {2}): Previous ARM operation failed.EventExNVDIMM Previous ERASE operation failed.warningesx.problem.hardware.nvd.health.module.ops.erase.fail|NVDIMM (handle {1}, idString {2}): Previous ERASE operation failed.EventExThe Platform flush failed. The restored data may be inconsistent.erroresx.problem.hardware.nvd.health.module.ops.flush.fail|NVDIMM (handle {1}, idString {2}): The Platform flush failed. The restored data may be inconsistent.EventExNVDIMM Last RESTORE operation failed.erroresx.problem.hardware.nvd.health.module.ops.restore.fail|NVDIMM (handle {1}, idString {2}): Last RESTORE operation failed.EventExNVDIMM Previous SAVE operation failed.erroresx.problem.hardware.nvd.health.module.ops.save.fail|NVDIMM (handle {1}, idString {2}): Previous SAVE operation failed.EventExNVDIMM Count of DRAM uncorrectable ECC errors above threshold.warningesx.problem.hardware.nvd.health.module.uce|NVDIMM (handle {1}, idString {2}): Count of DRAM uncorrectable ECC errors above threshold.EventExNVDIMM Vendor specific error.erroresx.problem.hardware.nvd.health.module.vendor.error|NVDIMM (handle {1}, idString {2}): Vendor specific error.EventExNVDIMM: Energy Source Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.lifetime.error|NVDIMM (handle {1}, idString {2}): Energy Source Lifetime Error tripped.EventExNVDIMM: Energy Source Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.es.temperature.error|NVDIMM (handle {1}, idString {2}): Energy Source Temperature Error tripped.EventExNVDIMM: Module Lifetime Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.lifetime.error|NVDIMM (handle {1}, idString {2}): Module Lifetime Error tripped.EventExNVDIMM: Module Temperature Error tripped.erroresx.problem.hardware.nvd.health.vmw.alarms.module.temperature.error|NVDIMM (handle {1}, idString {2}): Module Temperature Error tripped.EventExNVDIMM: All data may be lost in the event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInPowerLoss|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of power loss.EventExNVDIMM: All data may be lost in the event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossInShutdown|NVDIMM (handle {1}, idString {2}): All data may be lost in the event of shutdown.EventExNVDIMM: Subsequent reads may fail or return invalid data and subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.allDataLossNow|NVDIMM (handle {1}, idString {2}): Subsequent reads may fail or return invalid data and subsequent writes may not persist.EventExNVDIMM: Performance degraded.erroresx.problem.hardware.nvd.health.vmw.statusflags.perfDegraded|NVDIMM (handle {1}, idString {2}): Performance degraded.EventExNVDIMM: Write persistency loss may happen in event of power loss.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInPowerLoss|NVDIMM (handle {1}, idString {2}): Write persistency loss may happen in event of power loss.EventExNVDIMM: Write persistency loss may happen in event of shutdown.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossInShutdown|NVDIMM (handle 
{1}, idString {2}): Write persistency loss may happen in event of shutdown.EventExNVDIMM: Subsequent writes may not persist.erroresx.problem.hardware.nvd.health.vmw.statusflags.wpLossNow|NVDIMM (handle {1}, idString {2}): Subsequent writes may not persist.ExtendedEventTPM 2.0 device detected but a connection cannot be established.warningesx.problem.hardware.tpm2.connection|TPM 2.0 device detected but a connection cannot be established.ExtendedEventTPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.nosha256|TPM 2.0 SHA-256 PCR bank not found to be active. Please activate it in the BIOS.ExtendedEventTPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.erroresx.problem.hardware.tpm2.notis|TPM 2.0 device does not have the TIS interface active. Please activate it in the BIOS.ExtendedEventUnable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.warningesx.problem.hardware.tpm2.ownership|Unable to acquire ownership of TPM 2.0 device. Please clear TPM through the BIOS.ExtendedEventesx.problem.hardware.tpm2.provisioning|EventExA physical disk has a predictive failure.warningA physical disk has a predictive failure.esx.problem.hcm.event.disk.predictive.failure|A physical disk has a predictive failure ({1}).ExtendedEventAn unread host kernel core dump has been found.warningesx.problem.host.coredump|An unread host kernel core dump has been found.EventExHostd crashed and a core file was created.warningesx.problem.hostd.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. This might have caused connections to the host to be dropped.EventExHostd crashed and an encrypted core file was created.warningesx.problem.hostd.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventThis host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.infoesx.problem.hyperthreading.unmitigated|This host is potentially vulnerable to issues described in CVE-2018-3646, please refer to https://kb.vmware.com/s/article/55636 for details and VMware recommendations.ExtendedEventSome of the config entries in the VM inventory were skipped because they are invalid.warningesx.problem.inventory.invalidConfigEntries|Some of the config entries in the VM inventory were skipped because they are invalid.EventExAn iofilter installed on the host has stopped functioning.errorIOFilter {1} has stopped functioning due to an unrecoverable error. Reason: {2}esx.problem.iofilter.disabled|IOFilter {1} has stopped functioning due to an unrecoverable error. 
Reason: {2}EventExStorage I/O Control version mismatchinfoesx.problem.iorm.badversion|Host {1} cannot participate in Storage I/O Control(SIOC) on datastore {2} because the version number {3} of the SIOC agent on this host is incompatible with number {4} of its counterparts on other hosts connected to this datastore.EventExUnmanaged workload detected on SIOC-enabled datastoreinfoesx.problem.iorm.nonviworkload|An unmanaged I/O workload is detected on a SIOC-enabled datastore: {1}.EventExThe metadata store has degraded on one of the hosts in the cluster.errorThe metadata store has degraded on host {1}.esx.problem.metadatastore.degraded|The metadata store has degraded on host {1}.ExtendedEventThe metadata store is healthy.infoThe metadata store is healthy.esx.problem.metadatastore.healthy|The metadata store is healthy.ExtendedEventFailed to create default migration heapwarningesx.problem.migrate.vmotion.default.heap.create.failed|Failed to create default migration heap. This might be the result of severe host memory pressure or virtual address space exhaustion. Migration might still be possible, but will be unreliable in cases of extreme host memory pressure.EventExError with migration listen socketerroresx.problem.migrate.vmotion.server.pending.cnx.listen.socket.shutdown|The ESXi host's vMotion network server encountered an error while monitoring incoming network connections. Shutting down listener socket. vMotion might not be possible with this host until vMotion is manually re-enabled. Failure status: {1}EventExThe max_vfs module option has been set for at least one module.warningSetting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.esx.problem.module.maxvfs.set|Setting the max_vfs option for module {1} may not work as expected. It may be overridden by per-device SRIOV configuration.EventExLost Network Connectivityerroresx.problem.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Connectivity to DVPortserroresx.problem.net.dvport.connectivity.lost|Lost network connectivity on DVPorts: {1}. Physical NIC {2} is down.EventExNetwork Redundancy Degraded on DVPortswarningesx.problem.net.dvport.redundancy.degraded|Uplink redundancy degraded on DVPorts: {1}. Physical NIC {2} is down.EventExLost Network Redundancy on DVPortswarningesx.problem.net.dvport.redundancy.lost|Lost uplink redundancy on DVPorts: {1}. Physical NIC {2} is down.EventExNo IPv6 TSO supporterroresx.problem.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid fenceId configuration on dvPorterroresx.problem.net.fence.port.badfenceid|VMkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: invalid fenceId.EventExMaximum number of fence networks or portserroresx.problem.net.fence.resource.limited|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: maximum number of fence networks or ports have been reached.EventExSwitch fence property is not seterroresx.problem.net.fence.switch.unavailable|Vmkernel failed to set fenceId {1} on distributed virtual port {2} on switch {3}. Reason: dvSwitch fence property is not set.EventExFirewall configuration operation failed. 
The changes were not applied.erroresx.problem.net.firewall.config.failed|Firewall configuration operation '{1}' failed. The changes were not applied to rule set {2}.EventExAdding port to Firewall failed.erroresx.problem.net.firewall.port.hookfailed|Adding port {1} to Firewall failed.EventExFailed to set gatewayerroresx.problem.net.gateway.set.failed|Cannot connect to the specified gateway {1}. Failed to set it.EventExNetwork memory pool thresholdwarningesx.problem.net.heap.belowthreshold|{1} free size dropped below {2} percent.EventExlag transition downwarningesx.problem.net.lacp.lag.transition.down|LACP warning: LAG {1} on VDS {2} is down.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse|LACP error: No peer response on uplink {1} for VDS {2}.EventExNo peer responseerroresx.problem.net.lacp.peer.noresponse.2|LACP error: No peer response on VDS {1}.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.incompatible|LACP error: Current teaming policy on VDS {1} is incompatible, supported is IP hash only.EventExCurrent teaming policy is incompatibleerroresx.problem.net.lacp.policy.linkstatus|LACP error: Current teaming policy on VDS {1} is incompatible, supported link failover detection is link status only.EventExuplink is blockedwarningesx.problem.net.lacp.uplink.blocked|LACP warning: uplink {1} on VDS {2} is blocked.EventExuplink is disconnectedwarningesx.problem.net.lacp.uplink.disconnected|LACP warning: uplink {1} on VDS {2} got disconnected.EventExuplink duplex mode is differenterroresx.problem.net.lacp.uplink.fail.duplex|LACP error: Duplex mode across all uplink ports must be full, VDS {1} uplink {2} has different mode.EventExuplink speed is differenterroresx.problem.net.lacp.uplink.fail.speed|LACP error: Speed across all uplink ports must be same, VDS {1} uplink {2} has different speed.EventExAll uplinks must be activeerroresx.problem.net.lacp.uplink.inactive|LACP error: All uplinks on VDS {1} must be active.EventExuplink transition downwarningesx.problem.net.lacp.uplink.transition.down|LACP warning: uplink {1} on VDS {2} is moved out of link aggregation group.EventExInvalid vmknic specified in /Migrate/Vmknicwarningesx.problem.net.migrate.bindtovmk|The ESX advanced configuration option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Update the configuration option with a valid vmknic. Alternatively, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExUnsupported vMotion network latency detectedwarningesx.problem.net.migrate.unsupported.latency|ESXi has detected {1}ms round-trip vMotion network latency between host {2} and {3}. High latency vMotion networks are supported only if both ESXi hosts have been configured for vMotion latency tolerance.EventExFailed to apply for free portserroresx.problem.net.portset.port.full|Portset {1} has reached the maximum number of ports ({2}). Cannot apply for any more free ports.EventExVlan ID of the port is invaliderroresx.problem.net.portset.port.vlan.invalidid|{1} VLANID {2} is invalid. 
VLAN ID must be between 0 and 4095.EventExTry to register an unsupported portset classwarningesx.problem.net.portset.unsupported.psclass|{1} is not a VMware supported portset class, the relevant module must be unloaded.EventExVirtual NIC connection to switch failedwarningesx.problem.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningesx.problem.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExLost Network Redundancywarningesx.problem.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExRSPAN src session conflict with teamingerroresx.problem.net.rspan.teaming.uplink.io.conflict|Failed to set RSPAN src session {1} on portset {2} due to it disallows uplink I/O which conflicts with {3} teaming policy {4}.EventExThe teaming policy has an invalid uplinkerroresx.problem.net.teaming.policy.invalid.uplink|Failed to update teaming policy {1} on portset {2} due to an invalid uplink {3} which disallows normal I/O.EventExFailed to set MTU on an uplinkwarningesx.problem.net.uplink.mtu.failed|VMkernel failed to set the MTU value {1} on the uplink {2}.EventExA duplicate IP address was detected on a vmknic interfacewarningesx.problem.net.vmknic.ip.duplicate|A duplicate IP address was detected for {1} on the interface {2}. The current owner is {3}.EventExLink state downwarningesx.problem.net.vmnic.linkstate.down|Physical NIC {1} linkstate is down.EventExLink state unstablewarningesx.problem.net.vmnic.linkstate.flapping|Taking down physical NIC {1} because the link is unstable.EventExNic Watchdog Resetwarningesx.problem.net.vmnic.watchdog.reset|Uplink {1} has recovered from a transient failure due to watchdog timeoutEventExNTP daemon stopped. Time correction out of bounds.erroresx.problem.ntpd.clock.correction.error|NTP daemon stopped. Time correction {1} > {2} seconds. Manually set the time and restart ntpd.EventExOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningOSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.osdata.partition.full|OSData is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212ExtendedEventConfigured OSData cannot be found. Please refer to KB article: KB 87212.warningConfigured OSData cannot be found. Please refer to KB article: KB 87212.esx.problem.osdata.path.notfound|Configured OSData cannot be found. 
Please refer to KB article: KB 87212.EventExVirtual machine killed as it kept using a corrupted memory page.erroresx.problem.pageretire.mce.injected|Killing virtual machine with config path {1} because at least {2} uncorrectable memory error machine check exceptions were injected for guest physical page {3} but the virtual machine's operating system kept using the page.EventExA virtual machine was killed as it kept using a corrupted memory page.errorThe virtual machine was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.esx.problem.pageretire.mce.injected.2|{1} was killed as it kept using a corrupted memory page {3} even though {2} uncorrectable memory machine check exceptions were injected.EventExMemory page retirement requested by platform firmware.infoesx.problem.pageretire.platform.retire.request|Memory page retirement requested by platform firmware. FRU ID: {1}. Refer to System Hardware Log: {2}EventExNumber of host physical memory pages that have been selected for retirement but could not yet be retired is high.warningesx.problem.pageretire.selectedbutnotretired.high|Number of host physical memory pages that have been selected for retirement but could not yet be retired is high: ({1})EventExNumber of host physical memory pages selected for retirement exceeds threshold.warningesx.problem.pageretire.selectedmpnthreshold.host.exceeded|Number of host physical memory pages that have been selected for retirement ({1}) exceeds threshold ({2}).ExtendedEventNo memory to allocate APD Eventwarningesx.problem.psastor.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExStorage Device close failed.warningesx.problem.psastor.device.close.failed|"Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.psastor.device.detach.failed|Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.psastor.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExStorage Device I/O Latency going highwarningesx.problem.psastor.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExPlugin's isLocal entry point failedwarningesx.problem.psastor.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.psastor.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.psastor.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.psastor.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExDevice has been turned off administratively.infoesx.problem.psastor.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. 
Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.psastor.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.psastor.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.psastor.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExMaximum number of storage pathserroresx.problem.psastor.psastorpath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.psastor.unsupported.plugin.type|Storage Device Allocation not supported for plugin type {1}EventExFailed to delete resource group.warningFailed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.Failed to delete resource groups with names '{rgnames}'.esx.problem.resourcegroup.delete.failed|Failed to delete resource groups with names '{rgnames}'.EventExFailed to Set the Virtual Machine's Latency Sensitivitywarningesx.problem.sched.latency.abort|Unable to apply latency-sensitivity setting to virtual machine {1}. No valid placement on the host.EventExNo Cache Allocation Resourcewarningesx.problem.sched.qos.cat.noresource|Unable to support cache allocation for virtual machine {1}. Out of resources.EventExNo Cache Allocation Supportwarningesx.problem.sched.qos.cat.notsupported|Unable to support L3 cache allocation for virtual machine {1}. No processor capabilities.EventExNo Cache Monitoring Resourcewarningesx.problem.sched.qos.cmt.noresource|Unable to support cache monitoring for virtual machine {1}. Out of resources.EventExNo Cache Monitoring Supportwarningesx.problem.sched.qos.cmt.notsupported|Unable to support L3 cache monitoring for virtual machine {1}. No processor capabilities.ExtendedEventScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.warningScratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.esx.problem.scratch.on.usb|Scratch is configured to SD-Card/USB device. This may result in system failure. Please add a secondary persistent device.EventExScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212warningScratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212esx.problem.scratch.partition.full|Scratch is low on available space ({1} MiB free). This may result in system failure. Please refer to KB article: KB 87212EventExSize of scratch partition is too small.warningSize of scratch partition {1} is too small. 
Recommended scratch partition size is {2} MiB.esx.problem.scratch.partition.size.small|Size of scratch partition {1} is too small. Recommended scratch partition size is {2} MiB.EventExNo scratch partition has been configured.warningNo scratch partition has been configured. Recommended scratch partition size is {} MiB.esx.problem.scratch.partition.unconfigured|No scratch partition has been configured. Recommended scratch partition size is {} MiB.ExtendedEventNo memory to allocate APD Eventwarningesx.problem.scsi.apd.event.descriptor.alloc.failed|No memory to allocate APD (All Paths Down) event subsystem.EventExScsi Device close failed.warningesx.problem.scsi.device.close.failed|"Failed to close the device {1} properly, plugin {2}.EventExDevice detach failedwarningesx.problem.scsi.device.detach.failed|Detach failed for device :{1}. Exceeded the number of devices that can be detached, please cleanup stale detach entries.EventExFailed to attach filter to device.warningesx.problem.scsi.device.filter.attach.failed|Failed to attach filters to device '%s' during registration. Plugin load failed or the filter rules are incorrect.EventExInvalid XCOPY request for devicewarningesx.problem.scsi.device.invalid.xcopy.request|Invalid XCOPY request for device {1}. Host {2}, Device {3}, Plugin {4}, {5} sense, sense.key = {6}, sense.asc = {7}, sense.ascq = {8}: {9}EventExPlugin trying to issue command to device does not have a valid storage plugin type.warningesx.problem.scsi.device.io.bad.plugin.type|Bad plugin type for device {1}, plugin {2}EventExFailed to obtain INQUIRY data from the devicewarningesx.problem.scsi.device.io.inquiry.failed|Failed to get standard inquiry for device {1} from Plugin {2}.ExtendedEventScsi device queue parameters incorrectly set.warningesx.problem.scsi.device.io.invalid.disk.qfull.value|QFullSampleSize should be bigger than QFullThreshold. LUN queue depth throttling algorithm will not function as expected. Please set the QFullSampleSize and QFullThreshold disk configuration values in ESX correctly.EventExScsi Device I/O Latency going highwarningesx.problem.scsi.device.io.latency.high|Device {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExQErr cannot be changed on device. Please change it manually on the device if possible.warningesx.problem.scsi.device.io.qerr.change.config|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. The system is not configured to change the QErr setting of device. The QErr value supported by system is 0x{3}. Please check the SCSI ChangeQErrSetting configuration value for ESX.EventExScsi Device QErr setting changedwarningesx.problem.scsi.device.io.qerr.changed|QErr set to 0x{1} for device {2}. This may cause unexpected behavior. 
The device was originally configured to the supported QErr setting of 0x{3}, but this has been changed and could not be changed back.EventExPlugin's isLocal entry point failedwarningesx.problem.scsi.device.is.local.failed|Failed to verify if the device {1} from plugin {2} is a local - not shared - deviceEventExPlugin's isPseudo entry point failedwarningesx.problem.scsi.device.is.pseudo.failed|Failed to verify if the device {1} from plugin {2} is a pseudo deviceEventExPlugin's isSSD entry point failedwarningesx.problem.scsi.device.is.ssd.failed|Failed to verify if the device {1} from plugin {2} is a Solid State Disk deviceEventExMaximum number of storage deviceserroresx.problem.scsi.device.limitreached|The maximum number of supported devices of {1} has been reached. A device from plugin {2} could not be created.EventExFailed to apply NMP SATP option during device discovery.warningesx.problem.scsi.device.nmp.satp.option.failed|Invalid config parameter: \"{1}\" provided in the nmp satp claimrule, this setting was not applied while claiming the path {2}EventExDevice has been turned off administratively.infoesx.problem.scsi.device.state.off|Device {1}, has been turned off administratively.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss|Device {1} has been removed or is permanently inaccessible. Affected datastores (if any): {2}.EventExPermanently inaccessible device has no more opens.infoesx.problem.scsi.device.state.permanentloss.noopens|Permanently inaccessible device {1} has no more opens. It is now safe to unmount datastores (if any) {2} and delete the device.EventExDevice has been plugged back in after being marked permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.pluggedback|Device {1} has been plugged back in after being marked permanently inaccessible. No data consistency guarantees.EventExDevice has been removed or is permanently inaccessible.erroresx.problem.scsi.device.state.permanentloss.withreservationheld|Device {1} has been removed or is permanently inaccessible, while holding a reservation. Affected datastores (if any): {2}.EventExThin Provisioned Device Nearing Capacitywarningesx.problem.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold. Affected datastores (if any): {2}.EventExToo many errors observed for devicewarningesx.problem.scsi.device.too.many.io.error|Too many errors observed for device {1} errPercentage {2}EventExvVol PE path going out of vVol-incapable adaptererroresx.problem.scsi.scsipath.badpath.unreachpe|Sanity check failed for path {1}. The path is to a vVol PE, but it goes out of adapter {2} which is not PE capable. Path dropped.EventExCannot safely determine vVol PEerroresx.problem.scsi.scsipath.badpath.unsafepe|Sanity check failed for path {1}. Could not safely determine if the path is to a vVol PE. Path dropped.EventExMaximum number of storage pathserroresx.problem.scsi.scsipath.limitreached|The maximum number of supported paths of {1} has been reached. Path {2} could not be added.EventExStorage plugin of unsupported type tried to register.warningesx.problem.scsi.unsupported.plugin.type|Scsi Device Allocation not supported for plugin type {1}ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. 
Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.infoesx.problem.sgx.addpackage|Support for Intel Software Guard Extensions (SGX) has been disabled because a new CPU package was added to the host. Please refer to VMware Knowledge Base article 71367 for more details and remediation steps.ExtendedEventSupport for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.infoesx.problem.sgx.htenabled|Support for Intel Software Guard Extensions (SGX) has been disabled because HyperThreading is used by the host. Please refer to VMware Knowledge Base article 71367 for more details.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.esx.problem.slp.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. SLP is enabled on the host. Please refer to KB 95798 for more details.EventExAll paths are downwarningesx.problem.storage.apd.start|Device or filesystem with identifier {1} has entered the All Paths Down state.EventExAll Paths Down timed out, I/Os will be fast failedwarningesx.problem.storage.apd.timeout|Device or filesystem with identifier {1} has entered the All Paths Down Timeout state after being in the All Paths Down state for {2} seconds. I/Os will now be fast failed.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.devicepor|Frequent PowerOn Reset Unit Attentions are occurring on device {1}. This might indicate a storage problem. Affected datastores: {2}EventExLost Storage Connectivityerroresx.problem.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExFrequent PowerOn Reset Unit Attention of Storage Pathwarningesx.problem.storage.connectivity.pathpor|Frequent PowerOn Reset Unit Attentions are occurring on path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExFrequent State Changes of Storage Pathinfoesx.problem.storage.connectivity.pathstatechanges|Frequent path state changes are occurring for path {1}. This might indicate a storage problem. Affected device: {2}. Affected datastores: {3}EventExiSCSI discovery target login connection problemerroresx.problem.storage.iscsi.discovery.connect.error|iSCSI discovery to {1} on {2} failed. The iSCSI Initiator could not establish a network connection to the discovery address.EventExiSCSI Discovery target login errorerroresx.problem.storage.iscsi.discovery.login.error|iSCSI discovery to {1} on {2} failed. The Discovery target returned a login error of: {3}.EventExiSCSI iSns Discovery errorerroresx.problem.storage.iscsi.isns.discovery.error|iSCSI iSns discovery to {1} on {2} failed. 
({3} : {4}).EventExiSCSI Target login connection problemerroresx.problem.storage.iscsi.target.connect.error|Login to iSCSI target {1} on {2} failed. The iSCSI initiator could not establish a network connection to the target.EventExiSCSI Target login errorerroresx.problem.storage.iscsi.target.login.error|Login to iSCSI target {1} on {2} failed. Target returned login error of: {3}.EventExiSCSI target permanently removederroresx.problem.storage.iscsi.target.permanently.lost|The iSCSI target {2} was permanently removed from {1}.EventExiSCSI target was permanently removederroresx.problem.storage.iscsi.target.permanently.removed|The iSCSI target {1} was permanently removed from {2}.EventExDegraded Storage Path Redundancywarningesx.problem.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. Affected datastores: {3}.EventExLost Storage Path Redundancywarningesx.problem.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningSystem swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.esx.problem.swap.systemSwap.isPDL.cannot.remove|System swap at path {1} was affected by the PDL of its datastore and was removed. System swap has been reconfigured.EventExSystem swap was affected by the PDL of its datastore and was removed. System swap has been reconfigured.warningesx.problem.swap.systemSwap.isPDL.cannot.remove.2|System swap was affected by the PDL of {1} and was removed. System swap has been reconfigured.EventExSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningSystem swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.esx.problem.swap.systemSwap.isPDL.removed.reconfig.failure|System swap at path {1} was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.EventExSystem swap was affected by the PDL of its datastore. It was removed but the subsequent reconfiguration failed.warningesx.problem.swap.systemSwap.isPDL.removed.reconfig.failure.2|System swap was affected by the PDL of {1}. It was removed but the subsequent reconfiguration failed.ExtendedEventSystem logging is not configured.warningSystem logging is not configured on host {host.name}.esx.problem.syslog.config|System logging is not configured on host {host.name}. Please check Syslog options for the host under Configuration -> Software -> Advanced Settings in vSphere client.ExtendedEventSystem logs are stored on non-persistent storage.warningSystem logs on host {host.name} are stored on non-persistent storage.esx.problem.syslog.nonpersistent|System logs on host {host.name} are stored on non-persistent storage. 
Consult product documentation to configure a syslog server or a scratch partition.ExtendedEventTest with no argumentserroresx.problem.test.test0|Test with no argumentsEventExTest with both int and string argumentserroresx.problem.test.test2|Test with both {1} and {2}ExtendedEventUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.warningUpgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.esx.problem.unsupported.tls.protocols|Upgrade detected unsupported TLS protocols, resetting option /UserVars/ESXiVPsDisabledProtocols to default. From vSphere 8.0 onwards, protocols prior to tlsv1.2 are no longer supported and must remain disabled.EventExA VFAT filesystem is full.erroresx.problem.vfat.filesystem.full.other|The VFAT filesystem {1} (UUID {2}) is full.EventExA VFAT filesystem, being used as the host's scratch partition, is full.erroresx.problem.vfat.filesystem.full.scratch|The host's scratch partition, which is the VFAT filesystem {1} (UUID {2}), is full.EventExConfigstore is reaching its critical size limit. Please refer to the KB 93362 for more details.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left. Please refer to the KB 93362 for more details.EventExA ramdisk has a very high usage. Please refer to the KB 93362 for more details.warningRamdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.esx.problem.visorfs.configstore.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left. Please refer to the KB 93362 for more details.ExtendedEventAn operation on the root filesystem has failed.erroresx.problem.visorfs.failure|An operation on the root filesystem has failed.EventExThe root filesystem's file table is full.erroresx.problem.visorfs.inodetable.full|The root filesystem's file table is full. As a result, the file {1} could not be created by the application '{2}'.EventExA ramdisk is full.erroresx.problem.visorfs.ramdisk.full|The ramdisk '{1}' is full. 
As a result, the file {2} could not be written.EventExA ramdisk's file table is full.erroresx.problem.visorfs.ramdisk.inodetable.full|The file table of the ramdisk '{1}' is full. As a result, the file {2} could not be created by the application '{3}'.EventExConfig store is reaching its critical size limit.errorRamdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.error|Ramdisk '{1}' is reaching its critical size limit. Approx {2}% space left.EventExA ramdisk has a very high usage.warningRamdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.Ramdisk '{1}' usage is very high. Approx {2}% space left.esx.problem.visorfs.ramdisk.usage.warning|Ramdisk '{1}' usage is very high. Approx {2}% space left.EventExA VM could not fault in a page. The VM is terminated as further progress is impossible.erroresx.problem.vm.kill.unexpected.fault.failure|The VM using the config file {1} could not fault in a guest physical page from the hypervisor level swap file at {2}. The VM is terminated as further progress is impossible.EventExA virtual machine could not fault in a page. It is terminated as further progress is impossible.errorThe virtual machine could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleesx.problem.vm.kill.unexpected.fault.failure.2|{1} could not fault in a guest physical page from the hypervisor level swap file on {2}. The VM is terminated as further progress is impossibleEventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.forcefulPageRetire.64|The VM using the config file {1} contains the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the VM is forcefully powered off.EventExA virtual machine contained a host physical page that was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.errorThe virtual machine contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.esx.problem.vm.kill.unexpected.forcefulPageRetire.64.2|{1} contained the host physical page {2} which was scheduled for immediate retirement. To avoid system instability the virtual machine is forcefully powered off.EventExA VM did not respond to swap actions and is forcefully powered off to prevent system instability.erroresx.problem.vm.kill.unexpected.noSwapResponse|The VM using the config file {1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA virtual machine did not respond to swap actions. 
It is terminated as further progress is impossible.errorThe virtual machine did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.esx.problem.vm.kill.unexpected.noSwapResponse.2|{1} did not respond to {2} swap actions in {3} seconds and is forcefully powered off to prevent system instability.EventExA VM is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.erroresx.problem.vm.kill.unexpected.vmtrack|The VM using the config file {1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.errorThe virtual machine is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.esx.problem.vm.kill.unexpected.vmtrack.2|{1} is allocating too many pages while system is critically low in free memory. It is forcefully terminated to prevent system instability.EventExA user world daemon of a virtual machine could not fault in the a page. The VM is terminated as further progress is impossible.errorThe user world daemon of this virtual machine could not fault in a page. The virtual machine is terminated as further progress is impossible.esx.problem.vm.kill.unexpected.vmx.fault.failure.2|The user world daemon of {1} could not fault in a page. The virtual machine is terminated as further progress is impossible.EventExMulti-extent ATS-only VMFS Volume unable to use ATSerroresx.problem.vmfs.ats.incompatibility.detected|Multi-extent ATS-only volume '{1}' ({2}) is unable to use ATS because HardwareAcceleratedLocking is disabled on this host: potential for introducing filesystem corruption. Volume should not be used from other hosts.EventExDevice Backing VMFS has lost ATS Supporterroresx.problem.vmfs.ats.support.lost|ATS-Only VMFS volume '{1}' not mounted. Host does not support ATS or ATS initialization has failed.EventExVMFS Locked By Remote Hosterroresx.problem.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.erroresx.problem.vmfs.extent.offline|An attached device {1} may be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system came onlineinfoesx.problem.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Heartbeat Corruption Detected.erroresx.problem.vmfs.heartbeat.corruptondisk|At least one corrupt on-disk heartbeat region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Volume Connectivity Restoredinfoesx.problem.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfoesx.problem.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. 
Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterroresx.problem.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerroresx.problem.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Volume will remain in read-only metadata mode with limited write support until journal can be created.EventExTrying to acquire lock on an already locked file. - File descriptionerror{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}esx.problem.vmfs.lock.busy.filedesc|{1} Lock(s) held on a file on volume {2}. numHolders: {3}. gblNumHolders: {4}. Locking Host(s) MAC: {5}EventExTrying to acquire lock on an already locked file. FilenameerrorLock(s) held on file {1} by other host(s).esx.problem.vmfs.lock.busy.filename|Lock(s) held on file {1} by other host(s).EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExVMFS Lock Corruption Detectederroresx.problem.vmfs.lock.corruptondisk.v2|At least one corrupt on-disk lock was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected.errorInconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.lockmode.inconsistency.detected|Inconsistent lockmode change detected for VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. Protocol error during ATS transition. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExFailed to mount NFS volumeerroresx.problem.vmfs.nfs.mount.failed|NFS mount failed for {1}:{2} volume {3}. Status: {4}EventExLost connection to NFS servererroresx.problem.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExvmknic configured for NFS has been removedwarningesx.problem.vmfs.nfs.vmknic.removed|vmknic {1} removed, NFS{2} datastore {3} configured with the vmknic will be inaccessible.EventExNFS volume average I/O Latency has exceeded configured threshold for the current configured periodwarningesx.problem.vmfs.nfs.volume.io.latency.exceed.threshold.period|NFS volume {1} average I/O latency {2}(us) has exceeded threshold {3}(us) for last {4} minutesEventExNFS volume I/O Latency going highwarningesx.problem.vmfs.nfs.volume.io.latency.high|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds.EventExNFS volume I/O Latency exceeding thresholdwarningesx.problem.vmfs.nfs.volume.io.latency.high.exceed.threshold|NFS volume {1} performance has deteriorated. I/O latency increased from average value of {2} microseconds to {3} microseconds. 
Exceeded threshold {4} microsecondsEventExNo space on NFS volume.warningesx.problem.vmfs.nfs.volume.no.space|{1}: No space on NFS volume.EventExVMFS Resource Corruption Detectederroresx.problem.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExInconsistent VMFS lockmode detected on spanned volume.errorInconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanned.lockmode.inconsistency.detected|Inconsistent lockmode change detected for spanned VMFS volume '{1} ({2})': volume was configured for {3} lockmode at time of open and now it is configured for {4} lockmode but this host is not using {5} lockmode. All operations on this volume will fail until this host unmounts and remounts the volume.EventExIncompatible VMFS span state detected.errorIncompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.esx.problem.vmfs.spanstate.incompatibility.detected|Incompatible span change detected for VMFS volume '{1} ({2})': volume was not spanned at time of open but now it is, and this host is using ATS-only lockmode but the volume is not ATS-only. Volume descriptor refresh operations will fail until this host unmounts and remounts the volume.EventExRemote logging host has become unreachable.erroresx.problem.vmsyslogd.remote.failure|The host "{1}" has become unreachable. Remote logging to this host has stopped.ExtendedEventLogging to storage has failed.erroresx.problem.vmsyslogd.storage.failure|Logging to storage has failed. Logs are no longer being stored locally on this host.EventExThe configured log directory cannot be used. The default directory will be used instead.erroresx.problem.vmsyslogd.storage.logdir.invalid|The configured log directory {1} cannot be used. The default directory {2} will be used instead.EventExLog daemon has failed for an unexpected reason.erroresx.problem.vmsyslogd.unexpected|Log daemon has failed for an unexpected reason: {1}EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.errorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk group {2}.EventExvSAN detected LSN mismatch in mirrorswarningvSAN detected LSN mismatch in mirrors for object {1}.esx.problem.vob.vsan.dom.lsnmismatcherror|vSAN detected LSN mismatch in mirrors for object {1}.EventExResync encountered no space errorwarningResync encountered no space error for component {1} on disk {2}.esx.problem.vob.vsan.dom.nospaceduringresync|Resync encountered no space error for component {1} on disk {2}. Resync will resume once space is freed up on this disk. 
Need around {3}MB to resync the component on this diskEventExResync is delayed.warningResync is delayed for component {1} on disk {2} for object {3}.esx.problem.vob.vsan.dom.resyncdecisiondelayed|Resync is delayed for component {1} on disk {2} until data availability is regained for object {3} on the remote site.EventExResync timed outwarningResync timed out for component {2} on disk {3}.esx.problem.vob.vsan.dom.resynctimeout|Resync timed out as no progress was made in {1} minute(s) for component {2} on disk {3}. Resync will be tried again for this component. The remaining resync is around {4}MB.EventExvSAN detected and fixed a medium or checksum error.warningvSAN detected and fixed a medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskerrorfixed|vSAN detected and fixed a medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.esx.problem.vob.vsan.dom.singlediskunrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk {2}.EventExvSAN detected an unrecoverable medium or checksum error.warningvSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.esx.problem.vob.vsan.dom.unrecoverableerror|vSAN detected an unrecoverable medium or checksum error for component {1} on disk group {2}.EventExNVMe critical health warning for disk. The disk's backup device has failed.errorNVMe critical health warning for disk {1}. The disk's backup device has failed.esx.problem.vob.vsan.lsom.backupfailednvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's backup device has failed.EventExOffline event on component.warningOffline event issued for component: {1}, flag: {2}, reason: {3}.esx.problem.vob.vsan.lsom.componentoffline|Offline event issued for component: {1}, flag: {2}, reason: {3}.EventExvSAN Node: Near node component count limit.warningvSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).esx.problem.vob.vsan.lsom.componentthreshold|vSAN Node: {1} reached threshold of {2} %% opened components ({3} of {4}).EventExEvacuation has failed for device and it will be retried by DDH.errorEvacuation has failed for device {1} and it will be retried by DDH.esx.problem.vob.vsan.lsom.ddhEvacFailed|Evacuation has failed for device {1} and it will be retried by DDH.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.devicerepair|vSAN device {1} is being repaired due to I/O failures, and will be out of service until the repair is complete. If the device is part of a dedup disk group, the entire disk group will be out of service until the repair is complete.EventExvSAN device has high latency. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithhighlatency|vSAN device {1} has high latency. It will be evacuated and unmounted, consider replacing it.EventExvSAN device smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.errorvSAN device {1} smart health status is impending failure. 
It will be evacuated and unmounted, consider replacing it.esx.problem.vob.vsan.lsom.devicewithsmartfailure|vSAN device {1} smart health status is impending failure. It will be evacuated and unmounted, consider replacing it.EventExvSAN device is under permanent failure.errorvSAN device {1} is under permanent failure.esx.problem.vob.vsan.lsom.diskerror|vSAN device {1} is under permanent failure.EventExFailed to create a new disk group.errorFailed to create new disk group {1}. The system has reached the maximum amount of disks groups allowed {2} for the current amount of memory {3}. Add more memory.esx.problem.vob.vsan.lsom.diskgrouplimit|Failed to create new disk group {1}. The system has reached the maximum amount of disks groups allowed {2} for the current amount of memory {3}. Add more memory.EventExvSAN diskgroup log is congested.errorvSAN diskgroup {1} log is congestedesx.problem.vob.vsan.lsom.diskgrouplogcongested|vSAN diskgroup {1} log is congested.EventExvSAN disk group is under congestion. It will be remediated. No action is needed.warningvSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.esx.problem.vob.vsan.lsom.diskgroupundercongestion|vSAN disk group {1} is under {2} congestion. It will be remediated. No action is needed.EventExFailed to add disk to disk group.errorFailed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.esx.problem.vob.vsan.lsom.disklimit2|Failed to add disk {1} to disk group. The system has reached the maximum amount of disks allowed {2} for the current amount of memory {3} GB. Add more memory.EventExvSAN device is under propagated error.errorvSAN device {1} is under propagated erroresx.problem.vob.vsan.lsom.diskpropagatederror|vSAN device {1} is under propagated error.EventExvSAN device is under propagated permanent error.errorvSAN device {1} is under propagated permanent erroresx.problem.vob.vsan.lsom.diskpropagatedpermerror|vSAN device {1} is under propagated permanent error.EventExvSAN device is unhealthy.errorvSAN device {1} is unhealthyesx.problem.vob.vsan.lsom.diskunhealthy|vSAN device {1} is unhealthy.EventExEvacuation failed for device due to insufficient resources and it will be retried.errorEvacuation failed for device {1} due to insufficient resources and it will be retried.esx.problem.vob.vsan.lsom.evacFailedInsufficientResources|Evacuation failed for device {1} due to insufficient resources and it will be retried. Please make resources available for evacuation.EventExDeleted invalid metadata component.warningDeleted invalid metadata component: {1}.esx.problem.vob.vsan.lsom.invalidMetadataComponent|Deleted invalid metadata component: {1}.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.metadataURE|vSAN device {1} encountered an unrecoverable read error. This disk will be evacuated and rebuilt. If the device is part of a dedup disk group, the entire disk group will be evacuated and rebuilt.EventExNVMe disk critical health warning for disk. Disk is now read only.errorNVMe critical health warning for disk {1}. Disk is now read only.esx.problem.vob.vsan.lsom.readonlynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1} is: The NVMe disk has become read only.EventExNVMe critical health warning for disk. 
The disk has become unreliable.errorNVMe critical health warning for disk {1}. The disk has become unreliable.esx.problem.vob.vsan.lsom.reliabilitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk has become unreliable.EventExNVMe critical health warning for disk. The disk's spare capacity is below threshold.errorNVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.esx.problem.vob.vsan.lsom.sparecapacitynvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's spare capacity is below threshold.EventExvSAN device is being evacuated and rebuilt due to an unrecoverable read error.errorvSAN device {1} is being evacuated and rebuilt due to an unrecoverable read error.esx.problem.vob.vsan.lsom.storagepoolURE|vSAN device {1} encountered an unrecoverable read error. This disk will be rebuilt.EventExvSAN device is being repaired due to I/O failures.errorvSAN device {1} is being repaired due to I/O failures.esx.problem.vob.vsan.lsom.storagepoolrepair|vSAN device {1} is being repaired due to I/O failures and will be out of service until the repair is complete.EventExNo response for I/O on vSAN device.errorNo response for I/O on vSAN device {1}.esx.problem.vob.vsan.lsom.storagepoolstuckio|No response for I/O on vSAN device {1}.EventExvSAN device detected suspended I/Os.errorvSAN device {1} detected suspended I/Os.esx.problem.vob.vsan.lsom.stuckio|vSAN device {1} detected suspended I/Os. Taking the host out of service to avoid affecting the vSAN cluster.EventExvSAN device detected stuck I/O error.errorvSAN device {1} detected stuck I/O error.esx.problem.vob.vsan.lsom.stuckiooffline|vSAN device {1} detected stuck I/O error. Marking the device as offline.EventExvSAN device is under propagated stuck I/O error.errorvSAN device {1} is under propagated stuck I/O error.esx.problem.vob.vsan.lsom.stuckiopropagated|vSAN device {1} is under propagated stuck I/O error. Marking the device as offline.EventExvSAN device detected I/O timeout error.errorvSAN device {1} detected I/O timeout error.esx.problem.vob.vsan.lsom.stuckiotimeout|vSAN device {1} detected I/O timeout error. This may lead to stuck I/O.EventExNVMe critical health warning for disk. The disk's temperature is beyond threshold.errorNVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.esx.problem.vob.vsan.lsom.temperaturenvmediskhealthcriticalwarning|NVMe critical health warning for disk {1}. The disk's temperature is beyond threshold.EventExvSAN device has gone offline.errorvSAN device {1} has gone offline.esx.problem.vob.vsan.pdl.offline|vSAN device {1} has gone offline.EventExA ZDOM object is paused due to continuous fail-stops.warningZDOM object {1} is paused on host {2}, numFailStops={3}.esx.problem.vob.vsan.zdom.failstoppaused|ZDOM object {1} is paused on host {2}, numFailStops={3}.ExtendedEventTest with no arguments.infoesx.problem.vobdtestcorrelator.test.0|Test with no argumentsEventExTest with int argument.infoesx.problem.vobdtestcorrelator.test.1d|Test with int argument: {1}EventExTest with string argument.infoesx.problem.vobdtestcorrelator.test.1s|Test with string argument: {1}EventExTest with huge string argument.infoesx.problem.vobdtestcorrelator.test.hugestr|Test with huge string argument: {1}EventExVpxa crashed and a core file was created.warningesx.problem.vpxa.core.dumped|{1} crashed ({2} time(s) so far) and a core file might have been created at {3}. 
This might have caused connections to the host to be dropped.EventExVpxa crashed and an encrypted core file was created.warningesx.problem.vpxa.core.dumped.encrypted|{1} crashed ({2} time(s) so far) and an encrypted core file using keyId {3} might have been created at {4}. This might have caused connections to the host to be dropped.ExtendedEventvSAN clustering services have been disabled.warningvSAN clustering and directory services have been disabled thus will be no longer available.esx.problem.vsan.clustering.disabled|vSAN clustering and directory services have been disabled thus will be no longer available.EventExData component found on witness host.warningData component {1} found on witness host is ignored.esx.problem.vsan.dom.component.datacomponent.on.witness.host|Data component {1} found on witness host is ignored.EventExvSAN Distributed Object Manager failed to initializewarningvSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.esx.problem.vsan.dom.init.failed.status|vSAN Distributed Object Manager failed to initialize. While the ESXi host might still be part of the vSAN cluster, some of the vSAN related services might fail until this problem is resolved. Failure Status: {1}.EventExOne or more disks exceed its/their warning usage of estimated endurance threshold.infoOne or more disks exceed its/their warning usage of estimated endurance threshold.esx.problem.vsan.health.ssd.endurance|Disks {Disk Name} in Cluster {Cluster Name} have exceeded warning usage of their estimated endurance threshold {Disk Percentage Threshold}, currently at {Disk Percentage Used} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds the estimated endurance threshold.errorOne of the disks exceeds the estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.error|Disks {1} have exceeded their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks exceeds 90% of its estimated endurance threshold.warningOne of the disks exceeds 90% of its estimated endurance threshold.esx.problem.vsan.health.ssd.endurance.warning|Disks {1} have exceeded 90 percent usage of their estimated endurance threshold, currently at {2} percent usage (respectively), based on SMART data. The percentage usage ranges from 0 to 255, inclusive. Instances where the usage exceeds 100 percent are uncommon.EventExOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.errorOne of the disks is detected with PDL in vSAN ESA Cluster. Please check the host for further details.esx.problem.vsan.health.vsanesa.pdl|Disk {1} is detected with PDL in vSAN ESA Cluster. Please check the host for further details.EventExvSAN device Memory/SSD congestion has changed.infoLSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.esx.problem.vsan.lsom.congestionthreshold|LSOM {1} Congestion State: {2}. Congestion Threshold: {3} Current Congestion: {4}.EventExA vmknic added to vSAN network configuration doesn't have valid IP. 
Network is not ready.errorvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configurations and therefore the vSAN node doesn't have network connectivity.esx.problem.vsan.net.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. There are no other active network configurations and therefore the vSAN node doesn't have network connectivity.ExtendedEventvSAN doesn't have any redundancy in its network configuration.warningvSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.lost|vSAN network configuration doesn't have any redundancy. This might be a problem if further network configuration is removed.ExtendedEventvSAN is operating on reduced network redundancy.warningvSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.esx.problem.vsan.net.redundancy.reduced|vSAN network configuration redundancy has been reduced. This might be a problem if further network configuration is removed.ExtendedEventvSAN doesn't have any network configuration for use.errorvSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.esx.problem.vsan.no.network.connectivity|vSAN doesn't have any network configuration. This can severely impact several objects in the vSAN datastore.EventExA vmknic added to vSAN network configuration doesn't have valid IP. It will not be in use.warningvmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed that may cause problems.esx.problem.vsan.vmknic.not.ready|vmknic {1} that is currently configured to be used with vSAN doesn't have an IP address yet. However, there are other network configurations which are active. If those configurations are removed that may cause problems.EventEx Failed to add shared virtual disk. Maximum count reachederroresx.problem.vscsi.shared.vmdk.add.failure.max.count|Failed to add shared virtual disk. Maximum number of shared vmdks supported per ESX host are {1}EventExNo free slots availableerroresx.problem.vscsi.shared.vmdk.no.free.slot.available|No Free slot available. Maximum number of virtual machines supported in MSCS cluster are {1}EventExFailed to power on virtual machines on shared VMDK with running virtual machineerroresx.problem.vscsi.shared.vmdk.virtual.machine.power.on.failed|Two or more virtual machines (\"{1}\" and \"{2}\") sharing same virtual disk are not allowed to be Powered-On on same host.EventExVVol container has gone offline.erroresx.problem.vvol.container.offline|VVol container {1} has gone offline: isPEAccessible {2}, isVPAccessible {3}.ExtendedEventCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. 
Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated|CIM service on ESXi is deprecated and will be removed in the next major release. CIM is enabled on the host. Please refer to KB 95798 for more details.EventExCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.warningCIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. Please refer to KB 95798 for more details.esx.problem.wbem.deprecated.thirdPartyProv|CIM service on ESXi is deprecated and will be removed in the next major release. There are 3rd party CIM providers ({1}) installed on the host. Please reach out to the 3rd party CIM vendor for a replacement solution. 
Please refer to KB 95798 for more details.EventExApplication consistent sync completed.infoApplication consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Application consistent sync completed for virtual machine {vm.name} on host {host.name}.Application consistent sync completed for virtual machine {vm.name}.Application consistent sync completed.hbr.primary.AppQuiescedDeltaCompletedEvent|Application consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)ExtendedEventConnection to VR Server restored.infoConnection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Connection to VR Server restored for virtual machine {vm.name} on host {host.name}.Connection to VR Server restored for virtual machine {vm.name}.Connection to VR Server restored.hbr.primary.ConnectionRestoredToHbrServerEvent|Connection to VR Server restored for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExSync stopped.warningSync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}Sync stopped: {reason.@enum.hbr.primary.ReasonForDeltaAbort}hbr.primary.DeltaAbortedEvent|Sync stopped for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForDeltaAbort}EventExSync completed.infoSync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync completed for virtual machine {vm.name} on host {host.name}.Sync completed for virtual machine {vm.name}.Sync completed.hbr.primary.DeltaCompletedEvent|Sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventSync started.infoSync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Sync started by {userName} for virtual machine {vm.name} on host {host.name}.Sync started by {userName} for virtual machine {vm.name}.Sync started by {userName}.hbr.primary.DeltaStartedEvent|Sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFile system consistent sync completed.infoFile system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.File system consistent sync completed for virtual machine {vm.name} on host {host.name}.File system consistent sync completed for virtual machine {vm.name}.File system consistent sync completed.hbr.primary.FSQuiescedDeltaCompletedEvent|File system consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred)EventExFailed to start sync.errorFailed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name} on host {host.name}: 
{reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartDeltaEvent|Failed to start sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExFailed to start full sync.errorFailed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name} on host {host.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync for virtual machine {vm.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}Failed to start full sync: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}hbr.primary.FailedToStartSyncEvent|Failed to start full sync for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.fault.ReplicationVmFault.ReasonForFault}EventExDisk replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}Replication configuration is invalid for disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}hbr.primary.InvalidDiskReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}, disk {diskKey}: {reasonForFault.@enum.fault.ReplicationDiskConfigFault.ReasonForFault}EventExVirtual machine replication configuration is invalid.errorReplication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name} on host {host.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid for virtual machine {vm.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}Replication configuration is invalid: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}hbr.primary.InvalidVmReplicationConfigurationEvent|Replication configuration is invalid for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reasonForFault.@enum.fault.ReplicationVmConfigFault.ReasonForFault}ExtendedEventVR Server does not support network compression.warningVR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support network compression for virtual machine {vm.name} on host {host.name}.VR Server does not support network compression for virtual machine {vm.name}.VR Server does not support network 
compression.hbr.primary.NetCompressionNotOkForServerEvent|VR Server does not support network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server supports network compression.infoVR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server supports network compression for virtual machine {vm.name} on host {host.name}.VR Server supports network compression for virtual machine {vm.name}.VR Server supports network compression.hbr.primary.NetCompressionOkForServerEvent|VR Server supports network compression for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExNo connection to VR Server.warningNo connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}No connection to VR Server: {reason.@enum.hbr.primary.ReasonForNoServerConnection}hbr.primary.NoConnectionToHbrServerEvent|No connection to VR Server for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerConnection}EventExVR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}errorVR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name} on host {host.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error for virtual machine {vm.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}VR Server error: {reason.@enum.hbr.primary.ReasonForNoServerProgress}hbr.primary.NoProgressWithHbrServerEvent|VR Server error for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}: {reason.@enum.hbr.primary.ReasonForNoServerProgress}ExtendedEventPrepare Delta Time exceeds configured RPO.warningPrepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name}.Prepare Delta Time exceeds configured RPO for virtual machine {vm.name}.Prepare Delta Time exceeds configured RPO.hbr.primary.PrepareDeltaTimeExceedsRpoEvent|Prepare Delta Time exceeds configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventQuiescing is not supported for this virtual machine.warningQuiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing is not supported for virtual machine {vm.name} on host {host.name}.Quiescing is not supported for virtual machine {vm.name}.Quiescing is not supported for this virtual machine.hbr.primary.QuiesceNotSupported|Quiescing is not supported for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server is compatible with the configured RPO.infoVR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in 
cluster {computeResource.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server is compatible with the configured RPO for virtual machine {vm.name}.VR Server is compatible with the configured RPO.hbr.primary.RpoOkForServerEvent|VR Server is compatible with the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventVR Server does not support the configured RPO.warningVR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name}.VR Server does not support the configured RPO for virtual machine {vm.name}.VR Server does not support the configured RPO.hbr.primary.RpoTooLowForServerEvent|VR Server does not support the configured RPO for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExFull sync completed.infoFull sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync completed for virtual machine {vm.name} on host {host.name}.Full sync completed for virtual machine {vm.name}.Full sync completed.hbr.primary.SyncCompletedEvent|Full sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).ExtendedEventFull sync started.infoFull sync started for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Full sync started for virtual machine {vm.name} on host {host.name}.Full sync started for virtual machine {vm.name}.Full sync started.hbr.primary.SyncStartedEvent|Full sync started by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.ExtendedEventReplication paused.infoReplication paused for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Replication paused for virtual machine {vm.name} on host {host.name}.Replication paused for virtual machine {vm.name}.Replication paused.hbr.primary.SystemPausedReplication|Replication paused by {userName} for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}.EventExQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.warningQuiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed for virtual machine {vm.name}.Quiescing failed or the virtual machine is powered off. Unquiesced crash consistent sync completed.hbr.primary.UnquiescedDeltaCompletedEvent|Quiescing failed or the virtual machine is powered off. 
Unquiesced crash consistent sync completed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({bytes} bytes transferred).EventExReplication configuration changed.infoReplication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} on host {host.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed for virtual machine {vm.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).Replication configuration changed ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).hbr.primary.VmReplicationConfigurationChangedEvent|Replication configuration changed for virtual machine {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} ({numDisks} disks, {rpo} minutes RPO, VR Server is {vrServerAddress}:{vrServerPort}).AccountCreatedEventAccount createdinfoAn account was createdAccount {spec.id} was created on host {host.name} <EventLongDescription id="vim.event.AccountCreatedEvent"> <description> An account has been created on the host </description> </EventLongDescription> AccountRemovedEventAccount removedinfoAccount {account} was removedAccount {account} was removed on host {host.name} <EventLongDescription id="vim.event.AccountRemovedEvent"> <description> An account has been removed from the host </description> </EventLongDescription> AccountUpdatedEventAccount updatedinfoAccount {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated, the description was changed from '{prevDescription}' to '{spec.description}'Account {spec.id} was updated on host {host.name}, the description was changed from '{prevDescription}' to '{spec.description}' <EventLongDescription id="vim.event.AccountUpdatedEvent"> <description> An account has been updated on the host </description> </EventLongDescription> AdminPasswordNotChangedEventAdministrator password not changedinfoThe default password for the root user has not been changedThe default password for the root user on the host {host.name} has not been changed <EventLongDescription id="vim.event.AdminPasswordNotChangedEvent"> <description> The default password for the Administrator user on the host has not been changed </description> <cause> <description> You have not changed the password for the Administrator user on the host so the default password is still active </description> <action> Change the password for the Administrator user on the host </action> </cause> </EventLongDescription> AlarmAcknowledgedEventAlarm acknowledgedinfoAcknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}' on {entity.name}Acknowledged alarm '{alarm.name}'Acknowledged alarm '{alarm.name}' on {entity.name}AlarmActionTriggeredEventAlarm action triggeredinfoAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarm '{alarm.name}' on {entity.name} triggered an actionAlarmClearedEventAlarm clearedinfoManually cleared alarm 
'{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' from {from.@enum.ManagedEntity.Status}Manually cleared alarm '{alarm.name}' on {entity.name} from {from.@enum.ManagedEntity.Status}AlarmCreatedEventAlarm createdinfoCreated alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}' on {entity.name}Created alarm '{alarm.name}'Created alarm '{alarm.name}' on {entity.name}AlarmEmailCompletedEventAlarm email sentinfoAlarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}Alarm '{alarm.name}' sent email to {to}Alarm '{alarm.name}' on {entity.name} sent email to {to}AlarmEmailFailedEventCannot send alarm emailerrorAlarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to}Alarm '{alarm.name}' cannot send email to {to}Alarm '{alarm.name}' on {entity.name} cannot send email to {to} <EventLongDescription id="vim.event.AlarmEmailFailedEvent"> <description> An error occurred while sending email notification of a triggered alarm </description> <cause> <description>Failed to send email for a triggered alarm</description> <action>Check the vCenter Server SMTP settings for sending email notifications</action> </cause> </EventLongDescription> AlarmEvent<Alarm Event>info<internal>AlarmReconfiguredEventAlarm reconfiguredinfoReconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured alarm '{alarm.name}'Reconfigured alarm '{alarm.name}' on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}. <EventLongDescription id="vim.event.AlarmReconfiguredEvent"> <description> An alarm has been reconfigured </description> <cause> <description>A user has reconfigured an alarm</description> </cause> </EventLongDescription> AlarmRemovedEventAlarm removedinfoRemoved alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}' on {entity.name}Removed alarm '{alarm.name}'Removed alarm '{alarm.name}' on {entity.name}AlarmScriptCompleteEventAlarm script completedinfoAlarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}Alarm '{alarm.name}' ran script {script}Alarm '{alarm.name}' on {entity.name} ran script {script}AlarmScriptFailedEventAlarm script not completederrorAlarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg}Alarm '{alarm.name}' did not complete script: {reason.msg}Alarm '{alarm.name}' on {entity.name} did not complete script: {reason.msg} <EventLongDescription id="vim.event.AlarmScriptFailedEvent"> <description> The vCenter Server logs this event if an error occurs while running a script after an alarm triggers. </description> <cause> <description>There was an error running the script</description> <action>Fix the script or failure condition</action> </cause> </EventLongDescription> AlarmSnmpCompletedEventAlarm SNMP trap sentinfoAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarm '{alarm.name}': an SNMP trap was sentAlarm '{alarm.name}': an SNMP trap for entity {entity.name} was sentAlarmSnmpFailedEventAlarm SNMP trap not senterrorAlarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' did not send SNMP trap: {reason.msg}Alarm '{alarm.name}' on entity {entity.name} did not send SNMP trap: {reason.msg} <EventLongDescription id="vim.event.AlarmSnmpFailedEvent"> <description> The vCenter Server logs this event if an error occurs while sending an SNMP trap when an alarm triggers. </description> <cause> <description>An SNMP trap could not be sent for a triggered alarm</description> <action>Check the vCenter Server SNMP settings. 
Make sure that the vCenter Server network can handle SNMP packets.</action> </cause> </EventLongDescription> AlarmStatusChangedEventAlarm status changedinfoAlarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}Alarm '{alarm.name}' on {entity.name} changed from {from.@enum.ManagedEntity.Status} to {to.@enum.ManagedEntity.Status}AllVirtualMachinesLicensedEventAll virtual machines are licensedinfoAll running virtual machines are licensedAlreadyAuthenticatedSessionEventAlready authenticatedinfoUser cannot logon since the user is already logged onAuthorizationEvent<Authorization Event>info<internal>BadUsernameSessionEventInvalid user nameerrorCannot login {userName}@{ipAddress} <EventLongDescription id="vim.event.BadUsernameSessionEvent"> <description> A user attempted to log in with an unknown or invalid username </description> <cause> <description> The username is unknown to the system </description> <action> Use a username that is included in the system user directory </action> <action> On Linux, verify that the user directory is correctly configured </action> <action> If you are using Active Directory, check the health of the domain controller </action> </cause> <cause> <description> The user provided an invalid password </description> <action> Supply the correct password </action> </cause> </EventLongDescription> CanceledHostOperationEventCanceled host operationinfoThe operation performed on host {host.name} was canceledThe operation performed on host {host.name} was canceledThe operation was canceledThe operation performed on host {host.name} in {datacenter.name} was canceled <EventLongDescription id="vim.event.CanceledHostOperationEvent"> <description> An operation performed on the host was canceled </description> <cause> <description> A previous event in the sequence of events will provide more information about the cause of this cancellation </description> </cause> </EventLongDescription> ClusterComplianceCheckedEventChecked cluster for complianceinfoChecked cluster {computeResource.name} for complianceCluster was checked for compliance with profile {profile.name}Checked cluster for compliance <EventLongDescription id="vim.event.ClusterComplianceCheckedEvent"> <description> The cluster was checked for compliance with a cluster profile </description> <cause> <description> The user initiated a compliance check on the cluster against a cluster profile </description> </cause> <cause> <description> A scheduled task has initiated a compliance check for the cluster against a cluster profile </description> </cause> </EventLongDescription> ClusterCreatedEventCluster createdinfoCreated cluster {computeResource.name}Created in folder {parent.name}Created cluster {computeResource.name} in {datacenter.name}ClusterDestroyedEventCluster deletedinfoRemoved cluster {computeResource.name}Removed clusterRemoved cluster {computeResource.name} in datacenter {datacenter.name}ClusterEvent<Cluster Event>info<internal>ClusterOvercommittedEventCluster overcommittederrorInsufficient capacity in cluster {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource 
configurationInsufficient capacity in cluster {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.ClusterOvercommittedEvent"> <description> The cumulative CPU and/or memory resources of all hosts in the cluster are not adequate to satisfy the resource reservations of all virtual machines in the cluster </description> <cause> <description>You attempted to power on a virtual machine bypassing vCenter Server. This condition occurs when you attempt the power on using the vSphere Client directly connected to the host.</description> <action>In a DRS cluster, do not power on virtual machines bypassing vCenter Server</action> </cause> <cause> <description>A host was placed in Maintenance, Standby, or Disconnected Mode</description> <action>Bring any host in Maintenance, Standby, or Disconnected mode out of these modes</action> </cause> </EventLongDescription> ClusterReconfiguredEventCluster reconfiguredinfoReconfigured cluster {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Cluster reconfiguredReconfigured cluster {computeResource.name} in datacenter {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ClusterReconfiguredEvent"> <description> The cluster configuration was changed. The cluster configuration includes information about the DRS, DPM, EVC and vSphere HA settings of the cluster. All DRS rules are also stored in the cluster configuration. Editing the cluster configuration may trigger an invocation of DRS and/or enabling/disabling of vSphere HA on each host in the cluster. </description> </EventLongDescription> ClusterStatusChangedEventCluster status changedinfoConfiguration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on cluster {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.ClusterStatusChangedEvent"> <description> The cluster status has changed. This status is the status of the root resource pool that encompasses the entire cluster. A cluster status change may be accompanied by the removal of a configuration issue if one was previously detected. A cluster status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The cluster status changed to yellow</description> <action>Add more resources (more hosts), or reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The cluster status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> CustomFieldDefAddedEventCustom field definition addedinfoCreated new custom field definition {name}CustomFieldDefEvent<Custom Field Definition Event>info<internal>CustomFieldDefRemovedEventCustom field definition removedinfoRemoved field definition {name}CustomFieldDefRenamedEventCustom field definition renamedinfoRenamed field definition from {name} to {newName}CustomFieldEvent<Custom Field Event>info<internal>CustomFieldValueChangedEventCustom field value changedinfoChanged custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} from '{prevState}' to '{value}'Changed custom field {name} from '{prevState}' to '{value}'Changed custom field {name} on {entity.name} in {datacenter.name} from '{prevState}' to '{value}'CustomizationEvent<Customization Event>info<internal>CustomizationFailed<An error occurred during customization>infoAn error occurred during customization, Reason: {reason.@enum.CustomizationFailed.ReasonCode}An error occurred during customization on VM {vm.name}, Reason: {reason.@enum.CustomizationFailed.ReasonCode}. 
See customization log at {logLocation} on the guest OS for details.CustomizationLinuxIdentityFailedCustomization Linux Identity FailederrorAn error occurred while setting up Linux identity. See log file '{logLocation}' on guest OS for details. <EventLongDescription id="vim.event.CustomizationLinuxIdentityFailed"> <description> The guest operating system Linux distribution is not supported by the customization scripts. Please refer to the VMware vSphere Compatibility Matrix for the list of the supported Linux distributions. </description> <cause> <description> Customization of the target guest operating system Linux distribution is not supported. </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> CustomizationNetworkSetupFailedCannot complete customization network setuperrorAn error occurred while setting up network properties of the guest OS. See the log file {logLocation} in the guest OS for details. <EventLongDescription id="vim.event.CustomizationNetworkSetupFailed"> <description> The customization scripts failed to set the parameters in the corresponding configuration files for Linux or in the Windows registry </description> <cause> <description> The Customization Specification contains an invalid host name or domain name </description> <action> Review the guest operating system log files for this event for more details </action> <action> Provide a valid host name for the target guest operating system. The name must comply with the host name and domain name definitions in RFC 952, 1035, 1123, 2181. </action> </cause> <cause> <description> Could not find a NIC with the MAC address specified in the Customization Package </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that there was no change in the virtual NIC MAC address between the creation of the Customization Package and its deployment. Deployment occurs during the first boot of the virtual machine after customization has been scheduled. </action> </cause> <cause> <description> The customization code needs read/write permissions for certain configuration files. These permissions were not granted to the 'root' account on Linux or to the account used by the VMware Tools Service on the Windows guest operating system. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Grant read/write permissions to the 'root' account for Linux or to the account used by the VMware Tools Service on the Windows guest operating system and the registry keys that need to be modified by the customization code </action> </cause> </EventLongDescription> CustomizationStartedEventStarted customizationinfoStarted customization of VM {vm.name}. Customization log located at {logLocation} in the guest OS.CustomizationSucceededCustomization succeededinfoCustomization of VM {vm.name} succeeded. Customization log located at {logLocation} in the guest OS.CustomizationSysprepFailedCannot complete customization SyspreperrorThe version of Sysprep {sysprepVersion} provided for customizing VM {vm.name} does not match the version of guest OS {systemVersion}. See the log file {logLocation} in the guest OS for more information. 
<EventLongDescription id="vim.event.CustomizationSysprepFailed"> <description> The sysprep files in the folder corresponding to the selected target guest operating system are not compatible with the actual version of the guest operation system </description> <cause> <description> The sysprep files in the folder corresponding to the target guest operating system (for example Windows XP) are for a different guest operating system (for example Windows 2003) </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> <cause> <description> The sysprep files in the folder corresponding to the guest operating system are for a different Service Pack, for example the guest operating system is Windows XP SP2 and but the sysprep files are for Windows XP SP1. </description> <action> On the machine running vCenter Server, place the correct sysprep files in the folder corresponding to the target guest operating system </action> </cause> </EventLongDescription> CustomizationUnknownFailureUnknown customization errorerrorAn error occurred while customizing VM {vm.name}. For details reference the log file {logLocation} in the guest OS. <EventLongDescription id="vim.event.CustomizationUnknownFailure"> <description> The customization component failed to set the required parameters inside the guest operating system </description> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the registry keys used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on registry keys used by the customization code </action> </cause> <cause> <description> On Windows, the user account under which the customization code runs has no read/write permissions for the files and folders used by the customization code. Customization code is usually run under the 'Local System' account but you can change this by selecting a different account for VMware Tools Service execution. </description> <action> Review the guest operating system log files for this event for more details </action> <action> Determine which user account is selected for VMware Tools Service execution and confirm that this account has read/write permissions on the files and folders used by the customization code </action> </cause> <cause> <description> On Linux, an invalid or unsupported time zone is passed to the customization scripts and the time zone configuration failed as a result </description> <action> Review the guest operating system log files for this event for more details </action> <action> Confirm that a supported time zone is passed in Customization Specification. 
</action> </cause> <cause> <description> On Linux, the guest operating system 'root' account does not have read/write permissions for the configuration files that the customization scripts need to modify ('/etc/hosts') </description> <action> Grant read/write permissions for the configuration files to the guest operating system 'root' account </action> </cause> <cause> <description> To enable guest customization on Linux, in case open-vm-tools are used, you must also install the deployPkg plug-in. </description> <action> Follow kb.vmware.com/kb/2075048 to install the open-vm-tools deployPkg plug-in. </action> </cause> <cause> <description> Customization of the target guest operating system is not supported </description> <action> Consult with VMware on when the specific Linux distribution will be supported. If the Linux distribution is already supported in a newer release, consider upgrading. </action> </cause> </EventLongDescription> DVPortgroupCreatedEventdvPort group createdinfodvPort group {net.name} was added to switch {dvs}.dvPort group {net.name} in {datacenter.name} was added to switch {dvs.name}.DVPortgroupDestroyedEventdvPort group deletedinfodvPort group {net.name} was deleted.dvPort group {net.name} in {datacenter.name} was deleted.DVPortgroupEventdvPort group eventinfodvPort group eventdvPort group eventDVPortgroupReconfiguredEventdvPort group reconfiguredinfodvPort group {net.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}dvPort group {net.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DVPortgroupRenamedEventdvPort group renamedinfodvPort group {oldName} was renamed to {newName}.dvPort group {oldName} in {datacenter.name} was renamed to {newName}DasAdmissionControlDisabledEventvSphere HA admission control disabledinfovSphere HA admission control disabled for cluster {computeResource.name}vSphere HA admission control disabledvSphere HA admission control disabled for cluster {computeResource.name} in {datacenter.name}DasAdmissionControlEnabledEventvSphere HA admission control enabledinfovSphere HA admission control enabled for cluster {computeResource.name}vSphere HA admission control enabledvSphere HA admission control enabled for cluster {computeResource.name} in {datacenter.name}DasAgentFoundEventvSphere HA agent foundinfoRe-established contact with a primary host in this vSphere HA clusterDasAgentUnavailableEventvSphere HA agent unavailableerrorUnable to contact a primary vSphere HA agent in cluster {computeResource.name}Unable to contact a primary vSphere HA agentUnable to contact a primary vSphere HA agent in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasAgentUnavailableEvent"> <description> vCenter Server is not able to contact any good primary hosts in the vSphere HA cluster. vSphere HA protection may not be available for virtual machines running in the cluster. In addition, you cannot enable or reconfigure vSphere HA on hosts in the cluster until contact between vCenter Server and a good primary host is restored. </description> <cause> <description> There was a network outage, and all hosts show up in the inventory as "not responding" </description> <action>Restore the network</action> </cause> <cause> <description>All the primary hosts in the cluster failed</description> <action> If the failed primary hosts cannot be restored, disable vSphere HA on the cluster, wait for the Unconfigure vSphere HA tasks to complete on all hosts, and re-enable vSphere HA on the cluster </action> </cause> </EventLongDescription> DasClusterIsolatedEventAll vSphere HA hosts isolatederrorAll hosts in the vSphere HA cluster {computeResource.name} were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster were isolated from the network. Check the network configuration for proper network redundancy in the management networkAll hosts in the vSphere HA cluster {computeResource.name} in {datacenter.name} were isolated from the network. Check the network configuration for proper network redundancy in the management network.DasDisabledEventvSphere HA disabled for clusterinfovSphere HA disabled for cluster {computeResource.name}vSphere HA disabled for this clustervSphere HA disabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasDisabledEvent"> <description> vSphere HA has been disabled on this host due to a user action. vSphere HA is disabled when a host is disconnected from vCenter Server or placed into maintenance or standby mode. Virtual machines on other hosts in the cluster will not be failed over to this host in the event of a host failure. In addition, if the host is disconnected, any virtual machines running on this host will not be failed if the host fails. 
Further, no attempt will be made by vSphere HA VM and Application Monitoring to reset VMs. </description> </EventLongDescription> DasEnabledEventvSphere HA enabled for clusterinfovSphere HA enabled for cluster {computeResource.name}vSphere HA enabled for this clustervSphere HA enabled for cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasEnabledEvent"> <description> vSphere HA has been enabled on this host due to a user action. vSphere HA is enabled when a host is added to or moved into a vSphere HA cluster or when vSphere HA is enabled on a cluster. If the host was already in a vSphere HA cluster, vSphere HA will be enabled when the host is reconnected to vCenter Server or brought out of maintenance or standby mode. vSphere HA will attempt to protect any VMs that are running on the host at the time that HA is enabled on it. </description> </EventLongDescription> DasHostFailedEventvSphere HA host failederrorA possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name}A possible host failure has been detected by vSphere HA on {failedHost.name} in cluster {computeResource.name} in {datacenter.name}DasHostIsolatedEventvSphere HA host isolatedwarningHost {isolatedHost.name} has been isolated from cluster {computeResource.name}Host {isolatedHost.name} has been isolatedHost has been isolated from clusterHost {isolatedHost.name} has been isolated from cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DasHostIsolatedEvent"> <description> vSphere HA detected that the host is network isolated. When a host is in this state, vSphere HA applies the power-off or shutdown host isolation response to virtual machines running on the host, and continues to monitor the virtual machines that are left powered on. While a host is in this state, vSphere HA's ability to restart virtual machines after a failure is impacted. vSphere HA only powers off or shuts down a virtual machine if the agent on the host determines that a master agent is responsible for the virtual machine. </description> <cause> <description> A host is network isolated if both of the following conditions are met: (1) isolation addresses have been configured and the host is unable to ping them; (2) the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <action> Resolve the networking problem that is preventing the host from pinging its isolation addresses and communicating with other hosts. Ensure that there is redundancy in the management networks used by vSphere HA. With redundancy, vSphere HA is able to communicate over more than one path thus reducing the chance of a host becoming isolated. 
</action> </cause> </EventLongDescription> DatacenterCreatedEventDatacenter createdinfoCreated in folder {parent.name}Created datacenter {datacenter.name}Created datacenter {datacenter.name} in folder {parent.name}DatacenterEvent<Datacenter Event>info<internal>DatacenterRenamedEventDatacenter renamedinfoRenamed datacenterRenamed datacenter from {oldName} to {newName}Renamed datacenter from {oldName} to {newName}DatastoreCapacityIncreasedEventDatastore capacity increasedinfoDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytesDatastore {datastore.name} increased in capacity from {oldCapacity} bytes to {newCapacity} bytes in {datacenter.name}DatastoreDestroyedEventDatastore deletedinfoRemoved unconfigured datastore {datastore.name}Removed unconfigured datastore {datastore.name}DatastoreDiscoveredEventDatastore discoveredinfoDiscovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name} on {host.name}Discovered datastore {datastore.name}Discovered datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreDiscoveredEvent"> <description> A datastore was discovered on a host </description> <cause> <description> A host that has access to this datastore was added to the datacenter </description> </cause> <cause> <description> The storage backing this datastore was unmasked to a host in the datacenter </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host </description> </cause> <cause> <description> A user or system action caused this datastore to be created on a host and the datastore was visible on at least one other host in the datacenter prior to this operation. </description> </cause> </EventLongDescription> DatastoreDuplicatedEventDatastore duplicatederrorMultiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detected on host {host.name}Multiple datastores named {datastore} detectedMultiple datastores named {datastore} detected on host {host.name} in {datacenter.name}DatastoreEvent<Datastore Event>info<internal>DatastoreFileCopiedEventFile or directory copied to datastoreinfoCopy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Copy of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileDeletedEventFile or directory deletedinfoDeletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Deletion of file or directory {targetFile} from {datastore.name} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreFileEvent<Datastore File Event>info<internal>DatastoreFileMovedEventFile or directory moved to datastoreinfoMove of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from '{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'Move of file or directory {sourceFile} from {sourceDatastore.name} to {datastore.name} as {targetFile} was initiated from 
'{sourceOfOperation}' and completed with status '{succeeded.@enum.DatastoreFileEvent.Succeeded}'DatastoreIORMReconfiguredEventReconfigured Storage I/O Control on datastoreinfoReconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}Reconfigured Storage I/O Control on datastore {datastore.name}DatastorePrincipalConfiguredDatastore principal configuredinfoConfigured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal} on host {host.name}Configured datastore principal {datastorePrincipal}Configured datastore principal {datastorePrincipal} on host {host.name} in {datacenter.name}DatastoreRemovedOnHostEventDatastore removed from hostinfoRemoved datastore {datastore.name} from {host.name}Removed datastore {datastore.name}Removed datastore {datastore.name} from {host.name} in {datacenter.name}DatastoreRenamedEventDatastore renamedinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name}DatastoreRenamedOnHostEventDatastore renamed from hostinfoRenamed datastore from {oldName} to {newName}Renamed datastore from {oldName} to {newName} in {datacenter.name} <EventLongDescription id="vim.event.DatastoreRenamedOnHostEvent"> <description> A datastore was renamed on a host managed by vCenter Server </description> <cause> <description> vCenter Server discovered datastore on a host and renamed the datastore because it already exists in the vCenter Server inventory under a different name. vCenter Server might also have renamed the datastore because the name conflicts with another datastore in the same datacenter. </description> </cause> </EventLongDescription> DrsDisabledEventDRS disabledinfoDisabled DRS on cluster {computeResource.name}Disabled DRSDisabled DRS on cluster {computeResource.name} in datacenter {datacenter.name}DrsEnabledEventDRS enabledinfoEnabled DRS on cluster {computeResource.name} with automation level {behavior}Enabled DRS with automation level {behavior}Enabled DRS on {computeResource.name} with automation level {behavior} in {datacenter.name}DrsEnteredStandbyModeEventDRS entered standby modeinfoDRS put {host.name} into standby modeDRS put {host.name} into standby modeDRS put the host into standby modeDRS put {host.name} into standby modeDrsEnteringStandbyModeEventDRS entering standby modeinfoDRS is putting {host.name} into standby modeDRS is putting {host.name} into standby modeDRS is putting the host into standby modeDRS is putting {host.name} into standby modeDrsExitStandbyModeFailedEventDRS cannot exit the host out of standby modeerrorDRS cannot move {host.name} out of standby modeDRS cannot move {host.name} out of standby modeDRS cannot move the host out of standby modeDRS cannot move {host.name} out of standby mode <EventLongDescription id="vim.event.DrsExitStandbyModeFailedEvent"> <description> DPM failed to power on a host in standby mode. DPM tried to power on a host using IPMI, iLO or Wake-on-LAN protocol, but the host did not power on. 
</description> <cause> <description>DPM could not communicate with the BMC on the host</description> <action>Verify the IPMI/iLO credentials entered in vCenter Server</action> <action>Verify that LAN access is enabled in the BMC</action> </cause> <cause> <description>The vMotion NIC on the host does not support Wake-on-LAN</description> <action>Select a vMotion NIC that supports Wake-on-LAN</action> </cause> </EventLongDescription> DrsExitedStandbyModeEventDRS exited standby modeinfoDRS moved {host.name} out of standby modeDRS moved {host.name} out of standby modeDRS moved the host out of standby modeDRS moved {host.name} out of standby modeDrsExitingStandbyModeEventDRS exiting standby modeinfoDRS is moving {host.name} out of standby modeDRS is moving {host.name} out of standby modeDRS is moving the host out of standby modeDRS is moving {host.name} out of standby modeDrsInvocationFailedEventDRS invocation not completederrorDRS invocation not completedDRS invocation not completedDRS invocation not completed <EventLongDescription id="vim.event.DrsInvocationFailedEvent"> <description> A DRS invocation failed to complete successfully. This condition can occur for a variety of reasons, some of which may be transient. </description> <cause> <description>An error was encountered during a DRS invocation</description> <action>Disable and re-enable DRS</action> </cause> </EventLongDescription> DrsRecoveredFromFailureEventDRS has recovered from the failureinfoDRS has recovered from the failureDRS has recovered from the failureDRS has recovered from the failureDrsResourceConfigureFailedEventCannot complete DRS resource configurationerrorUnable to apply DRS resource settings on host. {reason.msg}. This can significantly reduce the effectiveness of DRS.Unable to apply DRS resource settings on host {host.name} in {datacenter.name}. {reason.msg}. This can significantly reduce the effectiveness of DRS. <EventLongDescription id="vim.event.DrsResourceConfigureFailedEvent"> <description> The DRS resource settings could not be successfully applied to a host in the cluster. This condition is typically transient. </description> <cause> <description>DRS resource settings could not be applied to a host.</description> <action>DRS generates resource settings that map the cluster values to the host. However, in this case, the values could not be successfully applied to the host. This is typically a transient error caused by delayed synchronization from DRS to the host. If this condition persists, enable debug logging in vpxa and contact VMware Support. 
</action> </cause> </EventLongDescription> DrsResourceConfigureSyncedEventDRS resource configuration synchronizedinfoResource configuration specification returns to synchronization from previous failureResource configuration specification returns to synchronization from previous failure on host '{host.name}' in {datacenter.name}DrsRuleComplianceEventVM is now compliant with DRS VM-Host affinity rulesinfo{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} is now compliant with DRS VM-Host affinity rulesvirtual machine on {host.name} is now compliant with DRS VM-Host affinity rules{vm.name} on {host.name} in {datacenter.name} is now compliant with DRS VM-Host affinity rulesDrsRuleViolationEventVM is violating a DRS VM-Host affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} is violating a DRS VM-Host affinity rule{vm.name} is violating a DRS VM-Host affinity rulevirtual machine on {host.name} is violating a DRS VM-Host affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host affinity ruleDrsSoftRuleViolationEventThe VM is violating a DRS VM-Host soft affinity ruleinfo{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} is violating a DRS VM-Host soft affinity rulevirtual machine on {host.name} is violating a DRS VM-Host soft affinity rule{vm.name} on {host.name} in {datacenter.name} is violating a DRS VM-Host soft affinity ruleDrsVmMigratedEventDRS VM migratedinfoDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name}DRS migrated {vm.name} from {sourceHost.name} to {host.name}DRS migrated {vm.name} from {sourceHost.name}Migrated from {sourceHost.name} to {host.name} by DRSDRS migrated {vm.name} from {sourceHost.name} to {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmMigratedEvent"> <description> A virtual machine was migrated based on a DRS recommendation. The recommendation might have been made to achieve better load balancing in the cluster or to evacuate a host in the cluster that is being put into Standby or Maintenance Mode. 
</description> <cause> <description>DRS recommended the migration of a virtual machine</description> </cause> </EventLongDescription> DrsVmPoweredOnEventDRS VM powered oninfoDRS powered on {vm.name} on {host.name}DRS powered on {vm.name} on {host.name}DRS powered on {vm.name}DRS powered on the virtual machine on {host.name}DRS powered on {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.DrsVmPoweredOnEvent"> <description> A virtual machine was powered on by the user and DRS chose a host for the virtual machine based on the current cluster load distribution combined with the virtual machine's resource requirements </description> <cause> <description>DRS chose a host for a virtual machine that was being powered on</description> </cause> </EventLongDescription> DuplicateIpDetectedEventDuplicate IP detectedinfoVirtual machine {macAddress} has a duplicate IP {duplicateIP}Virtual machine {macAddress} on host {host.name} has a duplicate IP {duplicateIP}DvpgImportEventImport Operation eventinfoImport operation with type {importType} was performed on {net.name}Import operation with type {importType} was performed on {net.name}DvpgRestoreEventRestore Operation eventinfoRestore operation was performed on {net.name}Restore operation was performed on {net.name}DvsCreatedEventvSphere Distributed Switch createdinfoA vSphere Distributed Switch {dvs.name} was createdA vSphere Distributed Switch {dvs.name} was created in {datacenter.name}.DvsDestroyedEventvSphere Distributed Switch deletedinfovSphere Distributed Switch {dvs.name} was deleted.vSphere Distributed Switch {dvs.name} in {datacenter.name} was deleted.DvsEventvSphere Distributed Switch eventinfovSphere Distributed Switch eventvSphere Distributed Switch eventDvsHealthStatusChangeEventHealth check status of the switch changed.infoHealth check status changed in vSphere Distributed Switch {dvs.name} on host {host.name}Health check status changed in vSphere Distributed Switch {dvs.name}Health check status was changed in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}DvsHostBackInSyncEventThe vSphere Distributed Switch configuration on the host was synchronized with that of the vCenter Server.infoThe vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host was synchronized with that of the vCenter Server.DvsHostJoinedEventHost joined the vSphere Distributed SwitchinfoThe host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name}.The host {hostJoined.name} joined the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostLeftEvent Host left vSphere Distributed SwitchinfoThe host {hostLeft.name} left the vSphere Distributed Switch {dvs.name}.The host {hostLeft.name} left the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostStatusUpdatedHost status changed on the vSphere Distributed SwitchinfoThe host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name}.The host {hostMember.name} changed status on the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsHostWentOutOfSyncEventThe vSphere Distributed Switch configuration on the host differed from that of the vCenter Server.warningThe vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server.The vSphere Distributed Switch {dvs.name} configuration on the host differed from that of the vCenter Server. 
<EventLongDescription id="vim.event.DvsHostWentOutOfSyncEvent"> <description> The vSphere Distributed Switch configuration on the host differed from that of the vCenter Server </description> <cause> <description> The host was not connected to the vCenter Server when updates were sent </description> </cause> <cause> <description> vCenter Server failed to push the vSphere Distributed Switch configuration to the host in the past</description> </cause> </EventLongDescription> DvsImportEventImport Operation eventinfoImport operation with type {importType} was performed on {dvs.name}Import operation with type {importType} was performed on {dvs.name}DvsMergedEventvSphere Distributed Switch mergedinfovSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name}.vSphere Distributed Switch {srcDvs.name} was merged into {dstDvs.name} in {datacenter.name}.DvsPortBlockedEventdvPort blockedinfoThe dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was blocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortConnectedEventdvPort connectedinfoThe dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was connected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortCreatedEventdvPort createdinfoNew ports were created in the vSphere Distributed Switch {dvs.name}.New ports were created in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDeletedEventdvPort deletedinfoPorts were deleted in the vSphere Distributed Switch {dvs.name}.Deleted ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortDisconnectedEventdvPort disconnectedinfoThe dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was disconnected in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortEnteredPassthruEventdvPort in passthrough modeinfoThe dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortExitedPassthruEventdvPort not in passthrough modeinfoThe dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was not in passthrough mode in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortJoinPortgroupEventA dvPort was moved into the dvPort group.infoThe dvPort {portKey} was moved into the dvPort group {portgroupName}.The dvPort {portKey} was moved into the dvPort group {portgroupName} in {datacenter.name}.DvsPortLeavePortgroupEventA dvPort was moved out of the dvPort group.infoThe dvPort {portKey} was moved out of the dvPort group {portgroupName}.The dvPort {portKey} was moved out of the dvPort group {portgroupName} in {datacenter.name}.DvsPortLinkDownEventdvPort link was downinfoThe dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was down in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortLinkUpEventdvPort link was upinfoThe dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} link was up in the vSphere Distributed Switch {dvs.name} in {datacenter.name}DvsPortReconfiguredEventdvPort reconfiguredinfoPorts were reconfigured in the vSphere Distributed Switch {dvs.name}.
Ports changed {portKey}.
Changes are {configChanges}Reconfigured ports in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.
Ports changed {portKey}.
Changes are {configChanges}DvsPortRuntimeChangeEventdvPort runtime information changed.infoThe dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} runtime information changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsPortUnblockedEventdvPort unblockedinfoThe dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} was unblocked in the vSphere Distributed Switch {dvs.name} in {datacenter.name}. It was in {prevBlockState.@enum.DvsEvent.PortBlockState} state before.DvsPortVendorSpecificStateChangeEventdvPort vendor specific state changed.infoThe dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name}.The dvPort {portKey} vendor specific state changed in the vSphere Distributed Switch {dvs.name} in {datacenter.name}.DvsReconfiguredEventvSphere Distributed Switch reconfiguredinfoThe vSphere Distributed Switch {dvs.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}The vSphere Distributed Switch {dvs.name} in {datacenter.name} was reconfigured.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}DvsRenamedEventvSphere Distributed Switch renamedinfoThe vSphere Distributed Switch {oldName} was renamed to {newName}.The vSphere Distributed Switch {oldName} in {datacenter.name} was renamed to {newName}.DvsRestoreEventRestore Operation eventinfoRestore operation was performed on {dvs.name}Restore operation was performed on {dvs.name}DvsUpgradeAvailableEventAn upgrade for the vSphere Distributed Switch is available.infoAn upgrade for vSphere Distributed Switch {dvs.name} is available. An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is available.DvsUpgradeInProgressEventAn upgrade for the vSphere Distributed Switch is in progress.infoAn upgrade for vSphere Distributed Switch {dvs.name} is in progress.An upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} is in progress.DvsUpgradeRejectedEventCannot complete the upgrade for the vSphere Distributed SwitchinfoAn upgrade for vSphere Distributed Switch {dvs.name} was rejected.Cannot complete an upgrade for the vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name}DvsUpgradedEventThe vSphere Distributed Switch was upgraded.infovSphere Distributed Switch {dvs.name} was upgraded.vSphere Distributed Switch {dvs.name} in datacenter {datacenter.name} was upgraded.EnteredMaintenanceModeEventEntered maintenance modeinfoHost {host.name} in {datacenter.name} has entered maintenance modeHost {host.name} in {datacenter.name} has entered maintenance modeEnter maintenance mode completed. All virtual machine operations are disabledHost {host.name} in {datacenter.name} has entered maintenance modeEnteredStandbyModeEventEntered standby modeinfoEntered standby modeThe host {host.name} is in standby modeEnteringMaintenanceModeEventEntering maintenance modeinfoHost {host.name} has started to enter maintenance modeHost {host.name} has started to enter maintenance modeStarted to enter maintenance mode. 
Waiting for virtual machines to shut down, suspend, or migrateHost {host.name} in {datacenter.name} has started to enter maintenance modeEnteringStandbyModeEventEntering standby modeinfoEntering standby modeThe host {host.name} is entering standby modeErrorUpgradeEventUpgrade errorerror{message} <EventLongDescription id="vim.event.ErrorUpgradeEvent"> <description> An error occurred during agent upgrade </description> </EventLongDescription> Event<Event>info<internal>ExitMaintenanceModeEventExit maintenance modeinfoHost {host.name} has exited maintenance modeHost {host.name} has exited maintenance modeExited maintenance modeHost {host.name} in {datacenter.name} has exited maintenance modeExitStandbyModeFailedEventCannot exit standby modeerrorCould not exit standby modeThe host {host.name} could not exit standby modeExitedStandbyModeEventExited standby modeinfoExited standby modeThe host {host.name} is no longer in standby modeExitingStandbyModeEventExiting standby modeinfoExiting standby modeThe host {host.name} is exiting standby modeFailoverLevelRestoredvSphere HA failover resources are sufficientinfoSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name}Sufficient resources are available to satisfy vSphere HA failover levelSufficient resources are available to satisfy vSphere HA failover level in cluster {computeResource.name} in {datacenter.name}GeneralEventGeneral eventinfoGeneral event: {message}GeneralHostErrorEventHost errorerrorError detected on {host.name}: {message}Error detected on {host.name}: {message}{message}Error detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostErrorEvent"> <description> An error occurred on the host </description> <cause> <description> The agent cannot send heartbeats because of a networking related failure on host </description> </cause> <cause> <description> The agent failed to update the configuration file on host </description> </cause> <cause> <description> The agent failed to save the configuration file to disk on host </description> </cause> <cause> <description> The provisioning module failed to load. As a result, all provisioning operations will fail on host. 
</description> </cause> </EventLongDescription> GeneralHostInfoEventHost informationinfoIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostInfoEvent"> <description> A general information event occurred on the host </description> </EventLongDescription> GeneralHostWarningEventHost warningwarningIssue detected on {host.name}: {message}Issue detected on {host.name}: {message}{message}Issue detected on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralHostWarningEvent"> <description> A general warning event occurred on the host </description> <cause> <description> Virtual machine creation might fail because the agent was unable to retrieve virtual machine creation options from the host </description> </cause> </EventLongDescription> GeneralUserEventUser eventuserUser logged event: {message} <EventLongDescription id="vim.event.GeneralUserEvent"> <description> A general user event occurred on the host </description> <cause> <description> A user initiated an action on the host </description> </cause> </EventLongDescription> GeneralVmErrorEventVM errorerrorError detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message}Error detected for {vm.name}: {message}{message} on {host.name}Error detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmErrorEvent"> <description> An error occurred on the virtual machine </description> </EventLongDescription> GeneralVmInfoEventVM informationinfoIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmInfoEvent"> <description> A general information event occurred on the virtual machine </description> </EventLongDescription> GeneralVmWarningEventVM warningwarningIssue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message}Issue detected for {vm.name}: {message}{message} on {host.name}Issue detected for {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.GeneralVmWarningEvent"> <description> A general warning event occurred on the virtual machine </description> </EventLongDescription> GhostDvsProxySwitchDetectedEventThe vSphere Distributed Switch corresponding to the proxy switches on the host does not exist in vCenter Server or does not contain this host.infoThe vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host does not exist in vCenter Server or does not contain this host.The vSphere Distributed Switch corresponding to the proxy switches {switchUuid} on the host {host.name} does not exist in vCenter Server or does not contain this host. 
<EventLongDescription id="vim.event.GhostDvsProxySwitchDetectedEvent"> <description> vCenter Server found a vSphere Distributed Switch proxy switch on the host that does not match any vSphere Distributed Switch in vCenter Server </description> <cause> <description> The vSphere Distributed Switch corresponding to the vSphere Distributed Switch proxy switch on the host was deleted while host was disconnected from the vCenter Server </description> </cause> <cause> <description> The host is no longer a member of the vSphere Distributed Switch that the proxy switch in the host corresponds to </description> </cause> </EventLongDescription> GhostDvsProxySwitchRemovedEventA ghost proxy switch on the host was resolved.infoA ghost proxy switch {switchUuid} on the host was resolved.A ghost proxy switch {switchUuid} on the host {host.name} was resolved.GlobalMessageChangedEventMessage changedinfoThe message changed: from '{prevMessage}' to '{message}'HealthStatusChangedEventStatus changeinfo{componentName} status changed from {oldStatus} to {newStatus}HostAddFailedEventCannot add hosterrorCannot add host {hostname}Cannot add host {hostname}Cannot add host {hostname} to datacenter {datacenter.name} <EventLongDescription id="vim.event.HostAddFailedEvent"> <description> Adding a host failed </description> </EventLongDescription> HostAddedEventHost AddedinfoAdded host {host.name}Added host {host.name}Added host {host.name} to datacenter {datacenter.name}HostAdminDisableEventHost administrator access disabledwarningAdministrator access to the host is disabledAdministrator access to the host {host.name} is disabled <EventLongDescription id="vim.event.HostAdminDisableEvent"> <description> Host permissions have been changed so that only the account used for vCenter Server operations has Administrator permissions </description> <cause> <description> This condition occurs when vCenter Server removes all other Administrator access to the host because the host has been placed in Lockdown Mode. The host can be managed by vCenter Server only and Only vCenter Server can re-enable Administrator access for other accounts. 
</description> </cause> </EventLongDescription> HostAdminEnableEventHost administrator access enabledwarningAdministrator access to the host has been restoredAdministrator access to the host {host.name} has been restored <EventLongDescription id="vim.event.HostAdminEnableEvent"> <description> vCenter Server has restored Administrator permissions for host user accounts whose permissions were disabled by Lockdown Mode </description> <cause> <description> This condition occurs when vCenter Server restores Administrator access to host user accounts that lost their Administrator permissions when the host was placed in Lockdown Mode </description> </cause> </EventLongDescription> HostCnxFailedAccountFailedEventCannot connect host and configure management accounterrorCannot connect {host.name}: cannot configure management accountCannot connect {host.name}: cannot configure management accountCannot connect: cannot configure management accountCannot connect {host.name} in {datacenter.name}: cannot configure management account <EventLongDescription id="vim.event.HostCnxFailedAccountFailedEvent"> <description> Could not connect to the host because setting up a management account failed </description> <cause> <description> The account used by vCenter Server to manage the host could not be configured </description> </cause> </EventLongDescription> HostCnxFailedAlreadyManagedEventCannot connect host - already managederrorCannot connect {host.name}: already managed by {serverName}Cannot connect {host.name}: already managed by {serverName}Cannot connect: already managed by {serverName}Cannot connect {host.name} in {datacenter.name}: already managed by {serverName} <EventLongDescription id="vim.event.HostCnxFailedAlreadyManagedEvent"> <description> Could not connect to the host because it is already being managed by a different vCenter Server instance. 
</description> <cause> <description> The host is already being managed by a different vCenter Server instance </description> <action> Remove the host from the inventory for the other vCenter Server instance </action> <action> Force the addition of the host to the current vCenter Server instance </action> </cause> </EventLongDescription> HostCnxFailedBadCcagentEventCannot connect host - incorrect CcagenterrorCannot connect {host.name} : server agent is not respondingCannot connect {host.name} : server agent is not respondingCannot connect: server agent is not respondingCannot connect host {host.name} in {datacenter.name} : server agent is not responding <EventLongDescription id="vim.event.HostCnxFailedBadCcagentEvent"> <description> Could not connect to the host because the host agent did not respond </description> <cause> <description> No response was received from the host agent </description> <action> Restart the host agent on the ESX/ESXi host </action> </cause> </EventLongDescription> HostCnxFailedBadUsernameEventCannot connect host - incorrect user nameerrorCannot connect {host.name}: incorrect user name or passwordCannot connect {host.name}: incorrect user name or passwordCannot connect: incorrect user name or passwordCannot connect {host.name} in {datacenter.name}: incorrect user name or password <EventLongDescription id="vim.event.HostCnxFailedBadUsernameEvent"> <description> Could not connect to the host due to an invalid username and password combination </description> <cause> <description> Invalid username and password combination </description> <action> Use the correct username and password </action> </cause> </EventLongDescription> HostCnxFailedBadVersionEventCannot connect host - incompatible versionerrorCannot connect {host.name}: incompatible versionCannot connect {host.name}: incompatible versionCannot connect: incompatible versionCannot connect {host.name} in {datacenter.name}: incompatible version <EventLongDescription id="vim.event.HostCnxFailedBadVersionEvent"> <description> Could not connect to the host due to an incompatible vSphere Client version </description> <cause> <description> The version of the vSphere Client is incompatible with the ESX/ESXi host so the connection attempt failed </description> <action> Download and use a compatible vSphere Client version to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedCcagentUpgradeEventCannot connect host - Ccagent upgradeerrorCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect host {host.name}: did not install or upgrade vCenter agent serviceCannot connect: did not install or upgrade vCenter agent serviceCannot connect host {host.name} in {datacenter.name}. Did not install or upgrade vCenter agent service. 
<EventLongDescription id="vim.event.HostCnxFailedCcagentUpgradeEvent"> <description> Could not connect to the host because a host agent upgrade or installation is in process </description> <cause> <description> The host agent is being upgraded or installed on the host </description> <action> Wait for the host agent upgrade or installation to complete </action> </cause> </EventLongDescription> HostCnxFailedEventCannot connect hosterrorCannot connect host {host.name}: error connecting to hostCannot connect host {host.name}: error connecting to hostCannot connect: error connecting to hostCannot connect {host.name} in {datacenter.name}: error connecting to host <EventLongDescription id="vim.event.HostCnxFailedEvent"> <description> Could not connect to the host due to an unspecified condition </description> <cause> <description> Unknown cause of failure </description> </cause> </EventLongDescription> HostCnxFailedNetworkErrorEventCannot connect host - network errorerrorCannot connect {host.name}: network errorCannot connect {host.name}: network errorCannot connect: network errorCannot connect {host.name} in {datacenter.name}: network error <EventLongDescription id="vim.event.HostCnxFailedNetworkErrorEvent"> <description> Could not connect to the host due to a network error </description> <cause> <description> A Network error occurred while connecting to the host </description> <action> Verify that host networking is configured correctly </action> </cause> </EventLongDescription> HostCnxFailedNoAccessEventCannot connect host - no accesserrorCannot connect {host.name}: account has insufficient privilegesCannot connect {host.name}: account has insufficient privilegesCannot connect: account has insufficient privilegesCannot connect host {host.name} in {datacenter.name}: account has insufficient privileges <EventLongDescription id="vim.event.HostCnxFailedNoAccessEvent"> <description> Could not connect to the host due to insufficient account privileges </description> <cause> <description> The account used to connect to host does not have host access privileges </description> <action> Use an account that has sufficient privileges to connect to the host </action> </cause> </EventLongDescription> HostCnxFailedNoConnectionEventCannot connect host - no connectionerrorCannot connect {host.name}Cannot connect {host.name}Cannot connect to hostCannot connect host {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostCnxFailedNoConnectionEvent"> <description> Could not connect to the host because the host is not in the network </description> <cause> <description> The host that you are attempting to connect to is not present in the network </description> <action> Verify that host networking is configured correctly and the host is connected to the same network as vCenter Server </action> </cause> </EventLongDescription> HostCnxFailedNoLicenseEventCannot connect host - no licenseerrorCannot connect {host.name}: not enough CPU licensesCannot connect {host.name}: not enough CPU licensesCannot connect: not enough CPU licensesCannot connect {host.name} in {datacenter.name}: not enough CPU licenses <EventLongDescription id="vim.event.HostCnxFailedNoLicenseEvent"> <description> Could not connect to the host due to a licensing issue </description> <cause> <description> There are not enough licenses to add the host to the vCenter Server inventory. This event is accompanied by a fault that specifies the missing licenses required to add the host successfully. 
</description> <action> Add the necessary licenses to vCenter Server and try adding the host again </action> </cause> </EventLongDescription> HostCnxFailedNotFoundEventCannot connect host - host not founderrorCannot connect {host.name}: incorrect host nameCannot connect {host.name}: incorrect host nameCannot connect: incorrect host nameCannot connect {host.name} in {datacenter.name}: incorrect host name <EventLongDescription id="vim.event.HostCnxFailedNotFoundEvent"> <description> Could not connect to the host because vCenter Server could not resolve the host name </description> <cause> <description> Unable to resolve the host name of the host </description> <action> Verify that the correct host name has been supplied for the host </action> <action> Configure the host to use a known-good (resolvable) host name </action> <action> Add the host name to the DNS server </action> </cause> </EventLongDescription> HostCnxFailedTimeoutEventCannot connect host - time-outerrorCannot connect {host.name}: time-out waiting for host responseCannot connect {host.name}: time-out waiting for host responseCannot connect: time-out waiting for host responseCannot connect {host.name} in {datacenter.name}: time-out waiting for host response <EventLongDescription id="vim.event.HostCnxFailedTimeoutEvent"> <description> Could not connect to the host because the connection attempt timed out </description> <cause> <description> A timeout occurred while attempting to connect to the host </description> </cause> </EventLongDescription> HostComplianceCheckedEventChecked host for complianceinfoHost {host.name} checked for compliance with profile {profile.name}Host {host.name} checked for compliance with profile {profile.name}Checked host for compliance with profile {profile.name}Host {host.name} checked for compliance. 
<EventLongDescription id="vim.event.HostComplianceCheckedEvent"> <description> The host was checked for compliance with a host profile </description> <cause> <description> The user initiated a compliance check on the host against a host profile </description> </cause> <cause> <description> A scheduled task initiated a compliance check for the host against a host profile </description> </cause> </EventLongDescription> HostCompliantEventHost compliant with profileinfoHost is in compliance with the attached profile.Host {host.name} is in compliance with the attached profileHostConfigAppliedEventHost configuration changes applied to hostinfoHost configuration changes applied to {host.name}Host configuration changes applied to {host.name}Host configuration changes applied.Host configuration changes applied.HostConnectedEventHost connectedinfoConnected to {host.name}Connected to {host.name}Established a connectionConnected to {host.name} in {datacenter.name}HostConnectionLostEventHost connection losterrorHost {host.name} is not respondingHost {host.name} is not respondingHost is not respondingHost {host.name} in {datacenter.name} is not responding <EventLongDescription id="vim.event.HostConnectionLostEvent"> <description> Connection to the host has been lost </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostDasDisabledEventvSphere HA agent disabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is disabledvSphere HA agent on {host.name} is disabledvSphere HA agent on this host is disabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is disabledHostDasDisablingEventDisabling vSphere HAinfovSphere HA is being disabled on {host.name}vSphere HA is being disabled on {host.name}Disabling vSphere HAvSphere HA is being disabled on {host.name} in cluster {computeResource.name} in datacenter {datacenter.name}HostDasEnabledEventvSphere HA agent enabled on hostinfovSphere HA agent on {host.name} in cluster {computeResource.name} is enabledvSphere HA agent on {host.name} is enabledvSphere HA agent on this host is enabledvSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} is enabledHostDasEnablingEventEnabling host vSphere HA agentwarningEnabling vSphere HA agent on {host.name}Enabling vSphere HA agent on {host.name}Enabling vSphere HA agentEnabling vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.HostDasEnablingEvent"> <description> vSphere HA is being enabled on this host. 
</description> </EventLongDescription> HostDasErrorEventvSphere HA agent errorerrorvSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on host {host.name} has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent has an error {message} : {reason.@enum.HostDasErrorEvent.HostDasErrorReason}vSphere HA agent on {host.name} in cluster {computeResource.name} in {datacenter.name} has an error {message}: {reason.@enum.HostDasErrorEvent.HostDasErrorReason}HostDasEvent<Host vSphere HA Event>info<internal>HostDasOkEventvSphere HA agent configuredinfovSphere HA agent on host {host.name} is configured correctlyvSphere HA agent on host {host.name} is configured correctlyvSphere HA agent is configured correctlyvSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} is configured correctlyHostDisconnectedEventHost disconnectedinfoDisconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from host. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}Disconnected from {host.name} in {datacenter.name}. Reason: {reason.@enum.HostDisconnectedEvent.ReasonCode}HostEnableAdminFailedEventCannot restore administrator permissions to hosterrorCannot restore some administrator permissions to the hostCannot restore some administrator permissions to the host {host.name}HostEvent<Host Event>info<internal>HostExtraNetworksEventHost has extra vSphere HA networkserrorHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} has the following extra networks not used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostExtraNetworksEvent"> <description> The host being added to the vSphere HA cluster has more management networks than existing hosts in the cluster. When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, be on the same subnets. This helps to facilitate the pairing up of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnet of the joining member that are not present on the existing member. </description> <cause> <description> The host has extra networks missing on an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. 
For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for the cluster. </action> </cause> </EventLongDescription> HostGetShortNameFailedEventCannot get short host nameerrorCannot complete command 'hostname -s' or returned incorrect name formatCannot complete command 'hostname -s' on host {host.name} or returned incorrect name format <EventLongDescription id="vim.event.HostGetShortNameFailedEvent"> <description> The hostname -s command has failed on the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostInAuditModeEventHost is in audit mode.infoHost is running in audit mode.Host {host.name} is running in audit mode. The host's configuration will not be persistent across reboots.HostInventoryFullEventHost inventory fullerrorMaximum ({capacity}) number of hosts allowed for this edition of vCenter Server has been reached <EventLongDescription id="vim.event.HostInventoryFullEvent"> <description> The vCenter Server Foundation license key currently allows only three hosts to be added to the inventory. Adding extra hosts results in errors and the logging of this event. </description> <cause> <description>Attempting to add more hosts than the number allowed by the license key assigned to vCenter Server</description> <action>Assign vCenter Server a license key that allows more hosts or has no host limit</action> </cause> </EventLongDescription> HostInventoryUnreadableEventHost Inventory UnreadableinfoThe virtual machine inventory file is damaged or unreadable.The virtual machine inventory file on host {host.name} is damaged or unreadable.HostIpChangedEventHost IP changedinfoIP address changed from {oldIP} to {newIP}IP address of the host {host.name} changed from {oldIP} to {newIP} <EventLongDescription id="vim.event.HostIpChangedEvent"> <description> The IP address of the host was changed </description> <cause> <description> The IP address of the host was changed through vCenter Server </description> </cause> <cause> <description> The IP address of the host was changed through the host </description> </cause> </EventLongDescription> HostIpInconsistentEventHost IP inconsistenterrorConfiguration of host IP address is inconsistent: address resolved to {ipAddress} and {ipAddress2}Configuration of host IP address is inconsistent on host {host.name}: address resolved to {ipAddress} and {ipAddress2}HostIpToShortNameFailedEventHost IP to short name not completederrorCannot resolve IP address to short nameCannot resolve IP address to short name on host {host.name} <EventLongDescription id="vim.event.HostIpToShortNameFailedEvent"> <description> The host's IP address could not be resolved to a short name </description> <cause> <description>The host or DNS records are improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostIsolationIpPingFailedEventvSphere HA isolation address unreachableerrorvSphere HA agent on host {host.name} in cluster {computeResource.name} could not reach isolation address: 
{isolationIp}vSphere HA agent on host {host.name} could not reach isolation address: {isolationIp}vSphere HA agent on this host could not reach isolation address: {isolationIp}vSphere HA agent on host {host.name} in cluster {computeResource.name} in {datacenter.name} could not reach isolation address: {isolationIp} <EventLongDescription id="vim.event.HostIsolationIpPingFailedEvent"> <description> vSphere HA was unable to ping one or more of the isolation IP addresses. The inability to ping the addresses may cause HA to incorrectly declare the host as network isolated. A host is declared as isolated if it cannot ping the configured isolation addresses and the vSphere HA agent on the host is unable to access any of the agents running on the other cluster hosts. </description> <cause> <description>Could not ping the isolation address</description> <action>Correct the cause of the failure to ping the address</action> <action> Use advanced options to change the addresses used by vSphere HA for determining if a host is network isolated. By default, the isolation address is the default gateway of the management network. You can override the default using advanced options, or specify additional addresses to use for determining if a host is network isolated. Set the das.useDefaultIsolationAddress advanced option to "false" if you prefer that vSphere HA not use the default gateway as the isolation address. Specify the das.isolationAddress[X] advanced option for each isolation address that you want to specify. The new values take effect when vSphere HA is reconfigured for each host. </action> </cause> </EventLongDescription> HostLicenseExpiredEventHost license expirederrorA host license for {host.name} has expired <EventLongDescription id="vim.event.HostLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of host licenses on the license server and uses this event to notify you of any host licenses that are about to expire </description> <cause> <description>Host licenses on the license server are about to expire</description> <action>Update the license server to get a new version of the host license</action> </cause> </EventLongDescription> HostLocalPortCreatedEventA host local port is created to recover from management network connectivity loss.infoA host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic}.A host local port {hostLocalPort.portKey} is created on vSphere Distributed Switch {hostLocalPort.switchUuid} to recover from management network connectivity loss on virtual NIC device {hostLocalPort.vnic} on the host {host.name}.HostMissingNetworksEventHost is missing vSphere HA networkserrorHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usageHost {host.name} does not have the following networks used by other hosts for vSphere HA communication:{ips}. Consider using vSphere HA advanced option das.allowNetwork to control network usage <EventLongDescription id="vim.event.HostMissingNetworksEvent"> <description> The host being added to the vSphere HA cluster has fewer management networks than existing hosts in the cluster. 
When vSphere HA is being configured for a host, an existing host in the cluster is examined for the networks used by vSphere HA for heartbeats and other vSphere HA communication. The joining host is expected to have the same number of management networks, and optimally, have common subnets. This helps facilitate the pairing of source/destination pairs for heartbeats. If common subnets are not detected (using the IP address/subnet mask) between the member being added and the existing members of the cluster, this event is generated and the configuration task fails. The event details report the subnets of the existing member that are not present on the joining member. </description> <cause> <description> The host does not have networks compatible with an existing cluster member </description> <action> Change the host's network configuration to enable vSphere HA traffic on the same subnets as existing hosts in the cluster. vSphere HA will use the Service Console port groups on ESX and, on ESXi hosts, the port groups with the "Management Traffic" checkbox selected. After you change the host's network configuration, reconfigure vSphere HA for this host. </action> <action> Use advanced options to override the default port group selection for vSphere HA cluster communication. You can use the das.allowNetwork[X] advanced option to tell vSphere HA to use the port group specified in this option. For each port group name that should be used, specify one das.allowNetwork[X] advanced option. The vSphere HA configuration examines the host being added for port groups that match the name specified. The configuration task also examines an existing member whose port groups match the name specified. The number of matched port group names must be the same on each host. After setting the advanced options, re-enable vSphere HA for this cluster. </action> </cause> </EventLongDescription> HostMonitoringStateChangedEventvSphere HA host monitoring state changedinfovSphere HA host monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'vSphere HA host monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.ServiceState}' to '{state.@enum.DasConfigInfo.ServiceState}'HostNoAvailableNetworksEventHost has no available networks for vSphere HA communicationerrorHost {host.name} in cluster {computeResource.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}This host currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips}Host {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no available networks for vSphere HA Communication. The following networks are currently used by HA: {ips} <EventLongDescription id="vim.event.HostNoAvailableNetworksEvent"> <description> The host being added to the vSphere HA cluster has no management networks available for vSphere HA cluster communication. The advanced option das.allowNetwork[X] is set, but no port group names match the advanced option for this host. 
</description> <cause> <description> The host has no port groups that match the names used by the advanced options to control which port groups vSphere HA uses </description> <action> Delete the advanced options das.allowNetwork[X] to allow vSphere HA to select the default management port groups </action> <action> Correct the names of the port groups specified in the advanced options to match those to be used by vSphere HA for this host </action> <action> Specify additional das.allowNetwork[X] advanced options to match the port group names for this host </action> </cause> </EventLongDescription> HostNoHAEnabledPortGroupsEventHost has no port groups enabled for vSphere HAerrorHost {host.name} in cluster {computeResource.name} has no port groups enabled for vSphere HA communication.Host {host.name} has no port groups enabled for vSphere HA communication.This host has no port groups enabled for vSphere HA communication.Host {host.name} in cluster {computeResource.name} in {datacenter.name} has no port groups enabled for vSphere HA communication. <EventLongDescription id="vim.event.HostNoHAEnabledPortGroupsEvent"> <description> vSphere HA has determined that there are no management networks available on the host for vSphere HA inter-agent communication. </description> <cause> <description> The host has no vSphere HA management networks available </description> <action> If this event is observed when the host is being added to a vSphere HA cluster, change the host's network configuration to enable vSphere HA traffic on one or more port groups. By default, vSphere HA will use the Service Console port groups on ESX and ESXi hosts, the port groups with the Management Traffic checkbox selected. If vSphere HA was already configured on the host, it is possible that the host's network settings have changed and invalidated the management network configuration. Review the settings to make sure the port groups configured for management network still exist on the host and for ESXi the Management Traffic option is enabled. Reconfigure vSphere HA on the host after fixing any configuration issues. </action> </cause> </EventLongDescription> HostNoRedundantManagementNetworkEventNo redundant management network for hostwarningHost {host.name} in cluster {computeResource.name} currently has no management network redundancyHost {host.name} currently has no management network redundancyThis host currently has no management network redundancyHost {host.name} in cluster {computeResource.name} in {datacenter.name} currently has no management network redundancy <EventLongDescription id="vim.event.HostNoRedundantManagementNetworkEvent"> <description> vSphere HA has determined that there is only one path for vSphere HA management traffic, resulting in a single point of failure. Best practices require more than one path for vSphere HA to use for heartbeats and cluster communication. A host with a single path is more likely to be declared dead, network partitioned or isolated after a network failure. If declared dead, vSphere HA will not respond if the host subsequently actually fails, while if declared isolated, vSphere HA may apply the isolation response thus impacting the uptime of the virtual machines running on it. 
</description> <cause> <description>There is only one port group available for vSphere HA communication</description> <action>Configure another Service Console port group on the ESX host</action> <action> Configure another port group on the ESXi host by selecting the "Management Traffic" check box </action> <action> Use NIC teaming on the management port group to allow ESX or ESXi to direct management traffic out of more than one physical NIC in case of a path failure </action> <action> If you accept the risk of not having redundancy for vSphere HA communication, you can eliminate the configuration issue by setting the das.ignoreRedundantNetWarning advanced option to "true" </action> </cause> </EventLongDescription> HostNonCompliantEventHost non-compliant with profileerrorHost is not in compliance with the attached profile.Host {host.name} is not in compliance with the attached profile <EventLongDescription id="vim.event.HostNonCompliantEvent"> <description> The host does not comply with the host profile </description> <cause> <description> The host is not in compliance with the attached profile </description> <action> Check the Summary tab for the host in the vSphere Client to determine the possible cause(s) of noncompliance </action> </cause></EventLongDescription> HostNotInClusterEventHost not in clustererrorNot a cluster member in {datacenter.name}Host {host.name} is not a cluster member in {datacenter.name}HostOvercommittedEventHost resource overcommittederrorInsufficient capacity in host {computeResource.name} to satisfy resource configurationInsufficient capacity to satisfy resource configurationInsufficient capacity in host {computeResource.name} to satisfy resource configuration in {datacenter.name} <EventLongDescription id="vim.event.HostOvercommittedEvent"> <description> A host does not have sufficient CPU and/or memory capacity to satisfy its resource configuration. The host has its own admission control, so this condition should never occur. 
</description> <cause> <description>A host has insufficient capacity for its resource configuration</description> <action>If you encounter this condition, contact VMware Support </action> </cause> </EventLongDescription> HostPrimaryAgentNotShortNameEventHost primary agent not specified as short nameerrorPrimary agent {primaryAgent} was not specified as a short namePrimary agent {primaryAgent} was not specified as a short name to host {host.name} <EventLongDescription id="vim.event.HostPrimaryAgentNotShortNameEvent"> <description> The primary agent is not specified in short name format </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> </cause> </EventLongDescription> HostProfileAppliedEventHost profile appliedinfoProfile configuration applied to the hostProfile is applied on the host {host.name}HostReconnectionFailedEventCannot reconnect hosterrorCannot reconnect to {host.name}Cannot reconnect to {host.name}Cannot reconnectCannot reconnect to {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostReconnectionFailedEvent"> <description> Could not reestablish a connection to the host </description> <cause> <description> The host is not in a state where it can respond </description> </cause> </EventLongDescription> HostRemovedEventHost removedinfoRemoved host {host.name}Removed host {host.name}Removed from inventoryRemoved host {host.name} in {datacenter.name}HostShortNameInconsistentEventHost short name inconsistenterrorHost names {shortName} and {shortName2} both resolved to the same IP address. Check the host's network configuration and DNS entries <EventLongDescription id="vim.event.HostShortNameInconsistentEvent"> <description> The name resolution check on the host returns different names for the host </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShortNameToIpFailedEventHost short name to IP not completederrorCannot resolve short name {shortName} to IP addressCannot resolve short name {shortName} to IP address on host {host.name} <EventLongDescription id="vim.event.HostShortNameToIpFailedEvent"> <description> The short name of the host can not be resolved to an IP address </description> <cause> <description>The host network is improperly configured</description> <action>Check the host network configuration</action> <action>Check the DNS configuration</action> </cause> </EventLongDescription> HostShutdownEventHost shut downinfoShut down of {host.name}: {reason}Shut down of {host.name}: {reason}Shut down of host: {reason}Shut down of {host.name} in {datacenter.name}: {reason}HostSpecificationChangedEventHost specification is changed on vCenterinfoHost specification of host {host.name} is changed on vCenter.Host specification of host {host.name} is changed on vCenter.Host specification is changed.Host specification of host {host.name} is changed on vCenter.HostSpecificationRequireEventPull host specification from host to vCenterinfoPull host specification of host {host.name} to vCenter.Pull host specification of host {host.name} to vCenter.Pull host specification to vCenter.Pull host specification of host {host.name} to vCenter.HostSpecificationUpdateEventHost specification is changed on hostinfoHost specification is changed on host {host.name}.Host specification is changed on host {host.name}.Host 
specification is changed.Host specification is changed on host {host.name}.HostStatusChangedEventHost status changedinfoConfiguration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status}Configuration status on host {computeResource.name} changed from {oldStatus.@enum.ManagedEntity.Status} to {newStatus.@enum.ManagedEntity.Status} in {datacenter.name} <EventLongDescription id="vim.event.HostStatusChangedEvent"> <description> The host status has changed. This status is the status of the root resource pool that encompasses the entire host. A host status change may be accompanied by the removal of a configuration issue if one was previously detected. A host status of green indicates that everything is fine. A yellow status indicates that the root resource pool does not have the resources to meet the reservations of its children. A red status means that a node in the resource pool has children whose reservations exceed the configuration of the node. </description> <cause> <description>The host status changed to yellow</description> <action>Reduce the reservation of the resource pools directly under the root to match the new capacity</action> </cause> <cause> <description>The host status changed to red</description> <action>Change the resource settings on the resource pools that are red so that they can accommodate their child virtual machines. If this is not possible, lower the virtual machine reservations. If this is not possible either, power off some virtual machines.</action> </cause> </EventLongDescription> HostSubSpecificationDeleteEventDelete host sub specification {subSpecName}infoDelete host sub specification {subSpecName} of host {host.name}.Delete host sub specification {subSpecName} of host {host.name}.Delete host sub specification.Delete host sub specification {subSpecName} of host {host.name}.HostSubSpecificationUpdateEventHost sub specification {hostSubSpec.name} is changed on hostinfoHost sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed on host {host.name}.Host sub specification {hostSubSpec.name} is changed.Host sub specification {hostSubSpec.name} is changed on host {host.name}.HostSyncFailedEventCannot synchronize hosterrorCannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. {reason.msg}Cannot synchronize host {host.name}. 
{reason.msg} <EventLongDescription id="vim.event.HostSyncFailedEvent"> <description> Failed to sync with the vCenter Agent on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> HostUpgradeFailedEventHost upgrade failederrorCannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name}Cannot install or upgrade vCenter agent service on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.HostUpgradeFailedEvent"> <description> Failed to connect to the host due to an installation or upgrade issue </description> </EventLongDescription> HostUserWorldSwapNotEnabledEventThe userworld swap is not enabled on the hostwarningThe userworld swap is not enabled on the hostThe userworld swap is not enabled on the host {host.name}HostVnicConnectedToCustomizedDVPortEventSome host vNICs were reconfigured to use dvPorts with port level configuration, which might be different from the dvPort group.infoHost vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.Host {host.name} vNIC {vnic.vnic} was reconfigured to use dvPort {vnic.port.portKey} with port level configuration, which might be different from the dvPort group. It was using dvPort '{prevPortKey}' before.HostWwnChangedEventHost WWN changedwarningWWNs are changedWWNs are changed for {host.name}HostWwnConflictEventHost WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {host.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.HostWwnConflictEvent"> <description> The WWN (World Wide Name) of this host conflicts with the WWN of another host or virtual machine </description> <cause> <description> The WWN of this host conflicts with WWN of another host </description> </cause> <cause> <description> The WWN of this host conflicts with WWN of another virtual machine</description> </cause> </EventLongDescription> IncorrectHostInformationEventIncorrect host informationerrorInformation needed to acquire the correct set of licenses not providedHost {host.name} did not provide the information needed to acquire the correct set of licenses <EventLongDescription id="vim.event.IncorrectHostInformationEvent"> <description> The host did not provide the information needed to acquire the correct set of licenses </description> <cause> <description> The cpuCores, cpuPackages or hostType information on the host is not valid </description> </cause> <cause> <description> The host information is not available because host was added as disconnected </description> </cause> </EventLongDescription> InfoUpgradeEventInformation upgradeinfo{message}InsufficientFailoverResourcesEventvSphere HA failover resources are insufficienterrorInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name}Insufficient resources to satisfy vSphere HA failover levelInsufficient resources to satisfy vSphere HA failover level on cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.InsufficientFailoverResourcesEvent"> <description> The cluster does not have enough unreserved capacity to satisfy the level configured for vSphere HA admission control. 
Failovers may still be performed by vSphere HA but will be on a best effort basis. </description> <cause> <description> If the "number of host failures to tolerate" policy is configured and a few virtual machines have a much higher CPU or memory reservation than the other virtual machines, vSphere HA admission control can be excessively conservative to ensure that there are enough unfragmented resources if a host fails. </description> <action> Use similar CPU and memory reservations for all virtual machines in the cluster. If this is not possible, consider using a different vSphere HA admission control policy, such as reserving a percentage of cluster resource for failover. Alternatively, you can use advanced options to specify a cap for the slot size. See the vSphere Availability Guide for details. </action> </cause> <cause> <description> Hosts with vSphere HA agent errors are not good candidates for providing failover capacity in the cluster, and their resources are not considered for vSphere HA admission control purposes. If many hosts have an vSphere HA agent error, vCenter Server generates this event. </description> <action> Check the event log of the hosts to determine the cause of the vSphere HA agent errors. After addressing any configuration issues, reconfigure vSphere HA on the affected hosts or on the cluster. </action> </cause> </EventLongDescription> InvalidEditionEventInvalid editionerrorThe license edition '{feature}' is invalid <EventLongDescription id="vim.event.InvalidEditionEvent"> <description> vCenter Server attempted to acquire an undefined feature from the license server </description> <cause> <description>Any operation that requires a feature license such as vMotion, DRS, vSphere HA might result in this event if that feature is not defined on the license server</description> <action>Verify that the feature in question is present on the license server</action> </cause> </EventLongDescription> EventExLicense downgradewarningLicense downgradeLicense downgradeLicense downgradevim.event.LicenseDowngradedEvent|License downgrade: {licenseKey} removes the following features: {lostFeatures} <EventLongDescription id="vim.event.LicenseDowngradedEvent"> <description> The installed license reduces the set of available features. Some of the features, previously available, will not be accessible with the new license. </description> <cause> <description>The license has been replaced.</description> <action>Revert to the license previously installed if it is not already expired.</action> <action>Contact VMware in order to obtain new license with the required features.</action> </cause> </EventLongDescription> LicenseEvent<License Event>info<internal>LicenseExpiredEventLicense expirederrorLicense {feature.featureName} has expiredLicenseNonComplianceEventInsufficient licenses.errorLicense inventory is not compliant. Licenses are overused <EventLongDescription id="vim.event.LicenseNonComplianceEvent"> <description> vCenter Server does not strictly enforce license usage. Instead, it checks for license overuse periodically. If vCenter Server detects overuse, it logs this event and triggers an alarm. 
</description> <cause> <description>Overuse of licenses</description> <action>Check the license reports through the vSphere Client and reduce the number of entities using the license key or add a new license key with a greater capacity</action> </cause> </EventLongDescription> LicenseRestrictedEventUnable to acquire licenses due to a restriction on the license servererrorUnable to acquire licenses due to a restriction in the option file on the license server. <EventLongDescription id="vim.event.LicenseRestrictedEvent"> <description> vCenter Server logs this event if it is unable to check out a license from the license server due to restrictions in the license file </description> <cause> <description>License file in the license server has restrictions that prevent check out</description> <action>Check the license file and remove any restrictions that you can</action> </cause> </EventLongDescription> LicenseServerAvailableEventLicense server availableinfoLicense server {licenseServer} is availableLicenseServerUnavailableEventLicense server unavailableerrorLicense server {licenseServer} is unavailable <EventLongDescription id="vim.event.LicenseServerUnavailableEvent"> <description> vCenter Server tracks the license server state and logs this event if the license server has stopped responding. </description> <cause> <description>License server is not responding and not available to vCenter Server</description> <action>Verify that the license server is running. If it is, check the connectivity between vCenter Server and the license server.</action> </cause> </EventLongDescription> LocalDatastoreCreatedEventLocal datastore createdinfoCreated local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl}) on {host.name}Created local datastore {datastore.name} ({datastoreUrl})Created local datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}LocalTSMEnabledEventESXi Shell is enabledinfoESXi Shell for the host has been enabledESXi Shell for the host {host.name} has been enabledLockerMisconfiguredEventLocker misconfiguredwarningDatastore {datastore} which is configured to back the locker does not existLockerReconfiguredEventLocker reconfiguredinfoLocker was reconfigured from {oldDatastore} to {newDatastore} datastoreMigrationErrorEventMigration errorerrorUnable to migrate {vm.name} from {host.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate {vm.name}: {fault.msg}Unable to migrate from {host.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine from the source host failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationEvent<Migration Event>info<internal>MigrationHostErrorEventMigration host errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to host {dstHost.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name}: {fault.msg}Unable to migrate {vm.name} from {host.name} to 
{dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostErrorEvent"> <description> A virtual machine failed to migrate because it did not meet all compatibility criteria </description> <cause> <description> Migrating a virtual machine to the destination host or datastore failed because the virtual machine did not meet all the compatibility criteria </description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationHostWarningEventMigration host warningwarningMigration of {vm.name} from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} to {dstHost.name}: {fault.msg}Migration from {host.name} to {dstHost.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationHostWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination host or datastore is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceErrorEventMigration resource errorerrorUnable to migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Unable to migrate from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Cannot migrate {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceErrorEvent"> <description> A virtual machine failed to migrate due to incompatibilities with target resource pool </description> <cause> <description>Migrating a virtual machine to the destination host or datastore is not possible due to incompatibilities with the target resource pool. 
</description> <action> Use the vSphere Client to check for errors at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationResourceWarningEventMigration resource warningwarningMigration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration from {host.name} to {dstHost.name} and resource pool {dstPool.name}: {fault.msg}Migration of {vm.name} from {host.name} to {dstHost.name} and resource pool {dstPool.name} in {datacenter.name}: {fault.msg} <EventLongDescription id="vim.event.MigrationResourceWarningEvent"> <description> The virtual machine can be migrated but might lose some functionality after migration is complete </description> <cause> <description> Migrating the virtual machine to the destination resource pool is likely to succeed but some functionality might not work correctly afterward because the virtual machine did not meet all the compatibility criteria. </description> <action> Use the vSphere Client to check for warnings at the time of the failure so that you can identify possible reasons for this problem </action> </cause> </EventLongDescription> MigrationWarningEventMigration warningwarningMigration of {vm.name} from {host.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration of {vm.name}: {fault.msg}Migration from {host.name}: {fault.msg}Migration of {vm.name} from {host.name} in {datacenter.name}: {fault.msg}MtuMatchEventThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to the physical NIC.infoThe MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch matches the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}MtuMismatchEventThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to the physical NIC.errorThe MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}The MTU configured in the vSphere Distributed Switch does not match the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}NASDatastoreCreatedEventNAS datastore createdinfoCreated NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created NAS datastore {datastore.name} ({datastoreUrl})Created NAS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}NetworkRollbackEventNetwork configuration on the host {host.name} is rolled back as it disconnects the 
host from vCenter server.errorNetwork configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.Network configuration on the host {host.name} is rolled back as it disconnects the host from vCenter server.NoAccessUserEventNo access for usererrorCannot login user {userName}@{ipAddress}: no permission <EventLongDescription id="vim.event.NoAccessUserEvent"> <description> A user could not log in due to insufficient access permission </description> <cause> <description> The user account has insufficient access permission </description> <action> Log in with a user account that has the necessary access permissions or grant additional access permissions to the current user </action> </cause> </EventLongDescription> NoDatastoresConfiguredEventNo datastores configuredinfoNo datastores have been configuredNo datastores have been configured on the host {host.name}NoLicenseEventNo licenseerrorA required license {feature.featureName} is not reserved <EventLongDescription id="vim.event.NoLicenseEvent"> <description> vCenter Server logs this event if it fails to acquire a feature from the license server for an unknown reason. </description> <cause> <description>Acquiring a feature license fails for an unknown reason</description> <action>Verify that the license server has the license for the feature</action> </cause> </EventLongDescription> NoMaintenanceModeDrsRecommendationForVMNo maintenance mode DRS recommendation for the VMinfoUnable to automatically migrate {vm.name}Unable to automatically migrate from {host.name}Unable to automatically migrate {vm.name} from {host.name} <EventLongDescription id="vim.event.NoMaintenanceModeDrsRecommendationForVM"> <description> DRS failed to generate a vMotion recommendation for a virtual machine on a host entering Maintenance Mode. This condition typically occurs because no other host in the DRS cluster is compatible with the virtual machine. Unless you manually migrate or power off this virtual machine, the host will be unable to enter Maintenance Mode. </description> <cause> <description>DRS failed to evacuate a powered on virtual machine</description> <action>Manually migrate the virtual machine to another host in the cluster</action> <action>Power off the virtual machine</action> <action>Bring any hosts in Maintenance Mode out of that mode</action> <action>Cancel the task that is making the host enter Maintenance Mode </action> </cause> </EventLongDescription> NonVIWorkloadDetectedOnDatastoreEventUnmanaged workload detected on SIOC-enabled datastoreinfoAn unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.An unmanaged I/O workload is detected on a SIOC-enabled datastore: {datastore.name}.NotEnoughResourcesToStartVmEventInsufficient resources for vSphere HA to start the VM. Reason: {reason.@enum.fdm.placementFault}warningInsufficient resources to fail over {vm.name} in {computeResource.name}. vSphere HA will retry the fail over when enough resources are available. 
Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over this virtual machine. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault}Insufficient resources to fail over {vm.name} in {computeResource.name} that resides in {datacenter.name}. vSphere HA will retry the fail over when enough resources are available. Reason: {reason.@enum.fdm.placementFault} <EventLongDescription id="vim.event.NotEnoughResourcesToStartVmEvent"> <description> This event is issued by vSphere HA when the master agent was not able to fail over a virtual machine to one of its compatible hosts. This condition is due to one or more of the causes listed below. When this condition occurs, vSphere HA will report a cause for it in the event summary, but note that additional causes might exist. It is more likely to occur if vSphere HA admission control is disabled or more hosts fail than were provisioned for. When a virtual machine cannot be placed, vSphere HA will retry placing it when the cluster state changes. Also, if vSphere DRS is enabled, it will be invoked to try to defragment the cluster or bring hosts out of Standby Mode. </description> <cause> <description> A virtual machine has bandwidth reservations for CPU, memory, vFlash cache, or virtual NICs. There was no compatible host that had enough resources to satisfy the reservations. </description> <action>Decrease the virtual machine resource reservation</action> <action>Add more host(s) to cluster</action> <action>Bring online any failed hosts or resolve a network partition if one exists</action> <action>If DRS is in manual mode, look for any pending recommendations and approve them so that vSphere HA failover can proceed</action> </cause> <cause> <description> The cluster has vSAN enabled, and one or more hosts that contribute storage to the cluster is inaccessible, preventing vSphere HA from powering on the virtual machine. This applies to virtual machines that have one or more files on a vSAN datastore. </description> <action>Bring online any failed hosts or resolve a network partition if one exists that involves hosts that contribute storage to the vSAN cluster</action> </cause> <cause> <description>One or more datastores that are associated with a virtual machine are inaccessible by any compatible host in the cluster.</description> <action>Bring online any non-responding host that mounts the virtual machine datastores</action> <action>Fix the all-paths-down (APD) or permanent-device-loss (PDL) issues.</action> </cause> <cause> <description>vSphere HA is enforcing virtual machine to virtual machine anti-affinity rules, and the rule cannot be satisfied. </description> <action>Add more hosts to cluster</action> <action>Bring online any non-responding host or resolve a network partition if one exists</action> <action>Remove any anti-affinity rules that are restricting the placement</action> </cause> <cause> <description>The number of VMs that can run on each host is limited. 
There is no host that can power on the VM without exceeding the limit.</description> <action>Increase the limit if you have set the limitVmsPerESXHost HA advanced option.</action> <action>Bring online any non-responding host or add new hosts to the cluster</action> </cause> </EventLongDescription> OutOfSyncDvsHostThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.warningThe vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.The vSphere Distributed Switch configuration on some hosts differed from that of the vCenter Server.PermissionAddedEventPermission addedinfoPermission created for {principal} on {entity.name}, role is {role.name}, propagation is {propagate.@enum.auth.Permission.propagate}PermissionEvent<Permission Event>info<internal>PermissionRemovedEventPermission removedinfoPermission rule removed for {principal} on {entity.name}PermissionUpdatedEventPermission updatedinfoPermission changed for '{principal}' on '{entity.name}'.
Role changed from '{prevRole.name}' to role '{role.name}'. Propagate changed from '{prevPropagate.@enum.auth.Permission.propagate}' to '{propagate.@enum.auth.Permission.propagate}'.ProfileAssociatedEventProfile attached to hostinfoProfile {profile.name} has been attached.Profile {profile.name} has been attached.Profile {profile.name} has been attached with the host.Profile {profile.name} attached.ProfileChangedEventProfile was changedinfoProfile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.Profile {profile.name} was changed.ProfileCreatedEventProfile createdinfoProfile is created.ProfileDissociatedEventProfile detached from hostinfoProfile {profile.name} has been detached.Profile {profile.name} has been detached. Profile {profile.name} has been detached from the host.Profile {profile.name} detached.ProfileEventinfo<internal>ProfileReferenceHostChangedEventThe profile reference host was changedinfoProfile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.Profile {profile.name} reference host was changed from '{prevReferenceHostName}' to '{referenceHostName}'.ProfileRemovedEventProfile removedinfoProfile {profile.name} was removed.Profile {profile.name} was removed.Profile was removed.RecoveryEventRecovery completed on the host.infoThe host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the virtual management NIC {vnic}. A new port {portKey} was created on vSphere Distributed Switch {dvsUuid}.The host {hostName} network connectivity was recovered on the management virtual NIC {vnic} by connecting to a new port {portKey} on the vSphere Distributed Switch {dvsUuid}.RemoteTSMEnabledEventSSH is enabledinfoSSH for the host has been enabledSSH for the host {host.name} has been enabledResourcePoolCreatedEventResource pool createdinfoCreated resource pool {resourcePool.name} in compute-resource {computeResource.name}Created resource pool {resourcePool.name}Created resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}ResourcePoolDestroyedEventResource pool deletedinfoRemoved resource pool {resourcePool.name} on {computeResource.name}Removed resource pool {resourcePool.name}Removed resource pool {resourcePool.name} on {computeResource.name} in {datacenter.name}ResourcePoolEvent<Resource Pool Event>info<internal>ResourcePoolMovedEventResource pool movedinfoMoved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name}Moved resource pool {resourcePool.name} from {oldParent.name} to {newParent.name} on {computeResource.name} in {datacenter.name}ResourcePoolReconfiguredEventResource pool reconfiguredinfoUpdated configuration for {resourcePool.name} in compute-resource {computeResource.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration on {resourcePool.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Updated configuration for {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name}
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted} <EventLongDescription id="vim.event.ResourcePoolReconfiguredEvent"> <description> The resource pool configuration changed. The resource pool configuration includes information about the resource reservations of the resource pool and the resource reservations of its children. </description> </EventLongDescription> ResourceViolatedEventResource usage exceeds configurationerrorResource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name}Resource usage exceeds configuration on resource pool {resourcePool.name}Resource usage exceeds configuration for resource pool {resourcePool.name} in compute-resource {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.ResourceViolatedEvent"> <description> The cumulative CPU and/or memory resource consumption of all virtual machines in the resource pool exceeds the resource pool configuration </description> <cause> <description>You attempted to move a virtual machine from one resource pool into another bypassing vCenter Server. This condition occurs when you attempt the move using the vSphere Client directly connected to the host. </description> <action>In a DRS cluster, do not move and power on a virtual machine bypassing vCenter Server</action> </cause> </EventLongDescription> RoleAddedEventRole addedinfoNew role {role.name} createdRoleEvent<Role Event>info<internal>RoleRemovedEventRole removedinfoRole {role.name} removedRoleUpdatedEventRole updatedinfoRole modified.
Previous name: {prevRoleName}, new name: {role.name}.
Added privileges: {privilegesAdded}.
Removed privileges: {privilegesRemoved}.RollbackEventHost Network operation rolled backinfoThe Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.The operation {methodName} on the host {hostName} disconnected the host and was rolled back.The Network API {methodName} on this entity caused the host {hostName} to be disconnected from the vCenter Server. The configuration change was rolled back on the host.ScheduledTaskCompletedEventScheduled task completedinfoTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} on {entity.name} completed successfullyTask {scheduledTask.name} completed successfullyTask {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} completed successfullyScheduledTaskCreatedEventScheduled task createdinfoCreated task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name} on {entity.name}Created task {scheduledTask.name}Created task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskEmailCompletedEventSent scheduled task emailinfoTask {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} sent email to {to}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} sent email to {to}ScheduledTaskEmailFailedEventScheduled task email not senterrorTask {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} cannot send email to {to}: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot send email to {to}: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskEmailFailedEvent"> <description> An error occurred while sending email notification that a scheduled task is running </description> <cause> <description>Failed to send email for the scheduled task</description> <action>Check the vCenter Server SMTP settings for sending emails</action> </cause> </EventLongDescription> ScheduledTaskEvent<Scheduled Task Event>info<internal>ScheduledTaskFailedEventCannot complete scheduled taskerrorTask {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} cannot be completed: {reason.msg}Task {scheduledTask.name} cannot be completed: {reason.msg}Task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name} cannot be completed: {reason.msg} <EventLongDescription id="vim.event.ScheduledTaskFailedEvent"> <description> An error occurred while running a scheduled task </description> <cause> <description>Failed to run a scheduled task</description> <action>Correct the failure condition</action> </cause> </EventLongDescription> ScheduledTaskReconfiguredEventScheduled task reconfiguredinfoReconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name} on {entity.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.Reconfigured task {scheduledTask.name}Reconfigured task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}.ScheduledTaskRemovedEventScheduled task removedinfoRemoved task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name} on {entity.name}Removed task {scheduledTask.name}Removed task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ScheduledTaskStartedEventScheduled task startedinfoRunning task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name} on {entity.name}Running task {scheduledTask.name}Running task {scheduledTask.name} on {entity.name} in datacenter {datacenter.name}ServerLicenseExpiredEventServer license expirederrorA vCenter Server license has expiredServerStartedSessionEventServer started sessioninfovCenter startedSessionEvent<Session Event>info<internal>SessionTerminatedEventSession stoppedinfoA session for user '{terminatedUsername}' has stopped <EventLongDescription id="vim.event.SessionTerminatedEvent"> <description> A session has been terminated </description> </EventLongDescription> ExtendedEventThe time-limited license on the host has expired.warningThe time-limited license on host {host.name} has expired.The time-limited license on host {host.name} has expired.The time-limited license on the host has expired.vim.event.SubscriptionLicenseExpiredEvent|The time-limited license on host {host.name} has expired. To comply with the EULA, renew the license at http://my.vmware.comTaskEventTask eventinfoTask: {info.descriptionId}TaskTimeoutEventTask time-outinfoTask: {info.descriptionId} time-out <EventLongDescription id="vim.event.TaskTimeoutEvent"> <description> A task has been cleaned up because it timed out </description> </EventLongDescription> TeamingMatchEventTeaming configuration in the vSphere Distributed Switch matches the physical switch configurationinfoTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} matches the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} matches the physical switch configuration in {datacenter.name}. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TeamingMisMatchEventTeaming configuration in the vSphere Distributed Switch does not match the physical switch configurationerrorTeaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} does not match the physical switch configuration. Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}Teaming configuration in the vSphere Distributed Switch {dvs.name} on host {host.name} does not match the physical switch configuration in {datacenter.name}. 
Detail: {healthResult.summary.@enum.dvs.VmwareDistributedVirtualSwitch.TeamingMatchStatus}TemplateBeingUpgradedEventUpgrading templateinfoUpgrading template {legacyTemplate}TemplateUpgradeEvent<Template Upgrade Event>info<internal>TemplateUpgradeFailedEventCannot upgrade templateinfoCannot upgrade template {legacyTemplate} due to: {reason.msg}TemplateUpgradedEventTemplate upgradedinfoTemplate {legacyTemplate} upgrade completedTimedOutHostOperationEventHost operation timed outwarningThe operation performed on host {host.name} timed outThe operation performed on host {host.name} timed outThe operation timed outThe operation performed on {host.name} in {datacenter.name} timed out <EventLongDescription id="vim.event.TimedOutHostOperationEvent"> <description> An operation performed on the host has timed out </description> <cause> <description> A previous event in the sequence of events will provide information on the reason for the timeout </description> </cause> </EventLongDescription> UnlicensedVirtualMachinesEventUnlicensed virtual machinesinfoThere are {unlicensed} unlicensed virtual machines on host {host} - there are only {available} licenses availableUnlicensedVirtualMachinesFoundEventUnlicensed virtual machines foundinfo{unlicensed} unlicensed virtual machines found on host {host}UpdatedAgentBeingRestartedEventRestarting updated agentinfoThe agent is updated and will soon restartThe agent on host {host.name} is updated and will soon restartUpgradeEvent<Upgrade Event>info<internal>UplinkPortMtuNotSupportEventNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.errorNot all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortMtuSupportEventAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass.infoAll VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.All VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.All VLAN MTU settings on the external physical switch allow the vSphere Distributed Switch maximum MTU size packets to pass on the uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanTrunkedEventThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch.infoThe configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.
The configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.The configured VLAN in the vSphere Distributed Switch was trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UplinkPortVlanUntrunkedEventNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch.errorNot all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name}.Not all the configured VLANs in the vSphere Distributed Switch were trunked by the physical switch connected to uplink port {healthResult.uplinkPortKey} in vSphere Distributed Switch {dvs.name} on host {host.name} in {datacenter.name}.UserAssignedToGroupUser assigned to groupinfoUser {userLogin} was added to group {group}UserLoginSessionEventUser logininfoUser {userName}@{ipAddress} logged in as {userAgent}UserLogoutSessionEventUser logoutinfoUser {userName}@{ipAddress} logged out (login time: {loginTime}, number of API invocations: {callCount}, user agent: {userAgent})UserPasswordChangedUser password changedinfoPassword was changed for account {userLogin}Password was changed for account {userLogin} on host {host.name}UserUnassignedFromGroupUser removed from groupinfoUser {userLogin} removed from group {group}UserUpgradeEventUser upgradeuser{message} <EventLongDescription id="vim.event.UserUpgradeEvent"> <description> A general user event occurred due to an upgrade </description> </EventLongDescription> VMFSDatastoreCreatedEventVMFS datastore createdinfoCreated VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name}Created VMFS datastore {datastore.name} ({datastoreUrl})Created VMFS datastore {datastore.name} ({datastoreUrl}) on {host.name} in {datacenter.name}VMFSDatastoreExpandedEventVMFS datastore expandedinfoExpanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name} on {host.name}Expanded VMFS datastore {datastore.name}Expanded VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExpandedEvent"> <description> An existing extent in a VMFS volume was grown to increase its capacity </description> <cause> <description> A user or system action caused an extent of an existing VMFS datastore to be grown. Only extents with free space immediately after them are expandable. As a result, the action filled the available adjacent capacity on the LUN.
</description> </cause> </EventLongDescription> VMFSDatastoreExtendedEventVMFS datastore extendedinfoExtended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name} on {host.name}Extended VMFS datastore {datastore.name}Extended VMFS datastore {datastore.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VMFSDatastoreExtendedEvent"> <description> An existing VMFS volume was extended to increase its capacity </description> <cause> <description> A user or system action caused the datastore to be extended with a partition on a LUN to increase its capacity. </description> </cause> </EventLongDescription> VMotionLicenseExpiredEventvMotion license expirederrorA vMotion license for {host.name} has expired <EventLongDescription id="vim.event.VMotionLicenseExpiredEvent"> <description> vCenter Server tracks the expiration times of vMotion licenses on the license server and uses this event to notify you of any vMotion licenses that are about to expire </description> <cause> <description>vMotion licenses on the license server are about to expire</description> <action>Update the license server to get a fresher version of the vMotion license</action> </cause> </EventLongDescription> VcAgentUninstallFailedEventCannot uninstall vCenter agenterrorCannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot uninstall vCenter agent from {host.name} in {datacenter.name}. {reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUninstallFailedEvent"> <description> An attempt to uninstall the vCenter Agent failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUninstalledEventvCenter agent uninstalledinfovCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalled from {host.name}vCenter agent has been uninstalledvCenter agent has been uninstalled from {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUninstalledEvent"> <description> The vCenter Agent has been uninstalled from host </description> </EventLongDescription> VcAgentUpgradeFailedEventCannot complete vCenter agent upgradeerrorCannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name}. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent. {reason.@enum.fault.AgentInstallFailed.Reason}Cannot upgrade vCenter agent on {host.name} in {datacenter.name}. 
{reason.@enum.fault.AgentInstallFailed.Reason} <EventLongDescription id="vim.event.VcAgentUpgradeFailedEvent"> <description> A vCenter Agent upgrade attempt failed on the host </description> <cause> <description> The event contains details on why this failure occurred </description> </cause> </EventLongDescription> VcAgentUpgradedEventvCenter agent upgradedinfovCenter agent has been upgraded on {host.name}vCenter agent has been upgraded on {host.name}vCenter agent has been upgradedvCenter agent has been upgraded on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VcAgentUpgradedEvent"> <description> The vCenter Agent has been upgraded on the host </description> </EventLongDescription> VimAccountPasswordChangedEventVIM account password changedinfoVIM account password changedVIM account password was changed on host {host.name} <EventLongDescription id="vim.event.VimAccountPasswordChangedEvent"> <description> The password for the Vim account user on the host has been changed. This account is created by vCenter Server and used to manage the host. </description> <cause> <description> vCenter Server periodically changes the password of the Vim account that it uses to manage the host </description> </cause> </EventLongDescription> VmAcquiredMksTicketEventVM acquired MKS ticketinfoRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} on {host.name} has been openedRemote console to {vm.name} has been openedRemote console has been opened for this virtual machine on {host.name}Remote console to {vm.name} on {host.name} in {datacenter.name} has been opened <EventLongDescription id="vim.event.VmAcquiredMksTicketEvent"> <description> Successfully acquired MKS Ticket for the virtual machine </description> <cause> <description> The MKS Ticket used to connect to the virtual machine remote console has been successfully acquired. </description> </cause> </EventLongDescription> VmAcquiredTicketEventVM acquired ticketinfoA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} has been acquiredA ticket of type {ticketType.@enum.VirtualMachine.TicketType} has been acquired.A ticket for {vm.name} of type {ticketType.@enum.VirtualMachine.TicketType} on {host.name} in {datacenter.name} has been acquiredVmAutoRenameEventVM auto renameinfoInvalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name}. Renamed from {oldName} to {newName}Invalid name for {vm.name}. Renamed from {oldName} to {newName}Conflicting or invalid virtual machine name detected. Renamed from {oldName} to {newName}Invalid name for {vm.name} on {host.name} in {datacenter.name}. Renamed from {oldName} to {newName} <EventLongDescription id="vim.event.VmAutoRenameEvent"> <description> The virtual machine was renamed because of possible name conflicts with another virtual machine </description> <cause> <description>The virtual machine might have been added to the vCenter Server inventory while scanning the datastores of hosts added to the inventory. During such an action, the newly-added virtual machine's name might have been found to be in conflict with a virtual machine name already in the inventory. To resolve this, vCenter Server renames the newly-added virtual machine. 
</description> </cause> </EventLongDescription> VmBeingClonedEventVM being clonedinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destDatacenter.name}VmBeingClonedNoFolderEventVM being cloned to a vAppinfoCloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Being cloned to {destName} on {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}Cloning {vm.name} on host {host.name}, {ds.name} in {datacenter.name} to {destName} on host {destHost.name}, {destDatastore.name} in a vApp under {destDatacenter.name}VmBeingCreatedEventCreating VMinfoCreating {vm.name} on {host.name}, {ds.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {ds.name} in {datacenter.name}Creating VM on {host.name}, {ds.name} in {datacenter.name}Creating {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmBeingDeployedEventDeploying VMinfoDeploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} from template {srcTemplate.name}Deploying {vm.name} from template {srcTemplate.name}Deploying VM on host {host.name} from template {srcTemplate.name}Deploying {vm.name} on host {host.name} in {datacenter.name} from template {srcTemplate.name} <EventLongDescription id="vim.event.VmBeingDeployedEvent"> <description> A virtual machine is being created from a template </description> <cause> <description> A user action prompted a virtual machine to be created from this template. 
</description> </cause> </EventLongDescription> VmBeingHotMigratedEventVM is hot migratinginfoMigrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Migrating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingHotMigratedEvent"> <description> A powered-on virtual machine is being migrated with vMotion </description> <cause> <description> A user action might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> <cause> <description> A DRS recommendation might have caused a powered-on virtual machine to be migrated with vMotion </description> </cause> </EventLongDescription> VmBeingMigratedEventVM migratinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingMigratedEvent"> <description> Changing the host on which the virtual machine is executing </description> <cause> <description> A user action caused the virtual machine to be migrated to a different host </description> </cause> </EventLongDescription> VmBeingRelocatedEventVM relocatinginfoRelocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating VM from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Relocating {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmBeingRelocatedEvent"> <description> The virtual machine execution and/or storage is being relocated </description> <cause> <description> A user action might have caused the virtual machine's execution and/or storage to be changed </description> </cause> </EventLongDescription> VmCloneEvent<VM Clone Event>info<internal><internal><internal><internal><internal>VmCloneFailedEventCannot complete VM cloneerrorFailed to clone {vm.name} on {host.name}, {ds.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone to {destName} on {destHost.name}, {destDatastore.name} in {destFolder.name} in {destDatacenter.name}Failed to clone {vm.name} on {host.name}, {ds.name} in {datacenter.name} to {destName} on {destHost.name}, {destDatastore.name} in 
{destFolder.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmCloneFailedEvent"> <description> Cloning a virtual machine failed </description> <cause> <description> An error prevented the virtual machine from being cloned </description> </cause> </EventLongDescription> VmClonedEventVM clonedinfo{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {ds.name} in {datacenter.name}{sourceVm.name} cloned to {host.name}, {ds.name} in {datacenter.name}{sourceVm.name} cloned to {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmConfigMissingEventVM configuration missinginfoConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} on {host.name} cannot be foundConfiguration file for {vm.name} cannot be foundConfiguration file cannot be foundConfiguration file for {vm.name} on {host.name} in {datacenter.name} cannot be found <EventLongDescription id="vim.event.VmConfigMissingEvent"> <description> One or more configuration files for the virtual machine cannot be found </description> <cause> <description> The datastore on which this virtual machine resides may be inaccessible </description> <action> Check the connectivity of the datastore on which this virtual machine resides. If the datastore has a backing LUN, check to see if there are any transient disk failures. </action> </cause> </EventLongDescription> VmConnectedEventVM connectedinfoHost is connectedVirtual machine {vm.name} is connected <EventLongDescription id="vim.event.VmConnectedEvent"> <description> The virtual machine is in a connected state in the inventory and vCenter Server can access it </description> <cause> <description> A user or system action that resulted in operations such as creating, registering, cloning or deploying a virtual machine gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> A user or system action that resulted in operations such as adding or reconnecting a host gave vCenter Server access to the virtual machine </description> </cause> <cause> <description> The state of the virtual machine's host changed from Not Responding to Connected and the host gave vCenter Server access to the virtual machine </description> </cause> </EventLongDescription> VmCreatedEventVM createdinfoNew virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {host.name}, {ds.name} in {datacenter.name}New virtual machine {vm.name} created on {ds.name} in {datacenter.name}Virtual machine created on {host.name}, {ds.name} in {datacenter.name}Created virtual machine {vm.name} on {host.name}, {ds.name} in {datacenter.name}VmDasBeingResetEventvSphere HA is resetting VMinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}.This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. 
Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode} <EventLongDescription id="vim.event.VmDasBeingResetEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA has been configured, the virtual machine might be reset because the VMware Tools heartbeat or application heartbeat status turned red. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasBeingResetWithScreenshotEventvSphere HA enabled VM reset with screenshotinfo{vm.name} on {host.name} in cluster {computeResource.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} on {host.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}.{vm.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}This virtual machine reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}{vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} reset by vSphere HA. Reason: {reason.@enum.VmDasBeingResetEvent.ReasonCode}. A screenshot is saved at {screenshotFilePath}. <EventLongDescription id="vim.event.VmDasBeingResetWithScreenshotEvent"> <description> The virtual machine was reset by vSphere HA. Depending on how vSphere HA is configured, this condition can occur because the VMware Tools heartbeat or the application heartbeat status turned red. The event contains the location of the screenshot taken of the guest console before it was reset. You can use this information to determine the cause of the heartbeat failure. </description> <cause> <description> The VMware Tools heartbeat turned red. This condition can occur if the operating system failed with a blue screen or becomes unresponsive. It also can occur because VMware Tools failed or was shut down. </description> <action> Check the screenshot image to see if the cause was a guest operating system failure. If the virtual machine is reset frequently, check for a persistent problem with the operating system that requires attention. Consider configuring the cluster so that vSphere HA waits for a longer period after heartbeats are lost before taking action. Specifying a longer period helps avoid triggering resets for transient problems. 
You can force a longer period by decreasing the "monitoring sensitivity" in the VM Monitoring section of the Edit Cluster wizard. </action> </cause> <cause> <description> The application heartbeat turned red. This condition can occur if the application that is configured to send heartbeats failed or became unresponsive. </description> <action> Determine if the application stopped sending heartbeats because of a configuration error and remediate the problem. </action> </cause> </EventLongDescription> VmDasResetFailedEventvSphere HA cannot reset VMwarningvSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name}vSphere HA cannot reset {vm.name} on {host.name}vSphere HA cannot reset {vm.name}vSphere HA cannot reset this virtual machinevSphere HA cannot reset {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmDasResetFailedEvent"> <description> vSphere HA attempted to reset the virtual machine because of a heartbeat failure from VMware Tools or a guest application, depending on how vSphere HA was configured. However, the reset operation failed. </description> <cause> <description> The most likely reason for the reset failure is that the virtual machine was running another task at the time the reset was initiated. </description> <action>Check to see whether the virtual machine requires attention and reset it manually if necessary.</action> </cause> </EventLongDescription> VmDasUpdateErrorEventVM vSphere HA update errorerrorUnable to update vSphere HA agents given the state of {vm.name}VmDasUpdateOkEventCompleted VM DAS updateinfovSphere HA agents have been updated with the current state of the virtual machineVmDateRolledBackEventVM date rolled backerrorDisconnecting all hosts as the date of virtual machine {vm.name} has been rolled backVmDeployFailedEventCannot deploy VM from templateerrorFailed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}Failed to deploy VM from template {vm.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmDeployFailedEvent"> <description> Failed to deploy a virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine failed to deploy. This condition can occur if there is not enough disk space, the host or virtual machine loses its network connection, the host is disconnected, and so on. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. 
</action> </cause> </EventLongDescription> VmDeployedEventVM deployedinfoTemplate {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name}Template {srcTemplate.name} deployed on {host.name}, {ds.name}Template {srcTemplate.name} deployed to {vm.name} on {host.name}, {ds.name} <EventLongDescription id="vim.event.VmDeployedEvent"> <description> A virtual machine has been created from the specified template </description> <cause> <description> A user action caused a virtual machine to be created from the template </description> </cause> <cause> <description> A scheduled task caused a virtual machine to be created from the template </description> </cause> </EventLongDescription> VmDisconnectedEventVM disconnectedinfo{vm.name} on host {host.name} is disconnected{vm.name} on host {host.name} is disconnected{vm.name} is disconnected{host.name} is disconnected{vm.name} on host {host.name} in {datacenter.name} is disconnectedVmDiscoveredEventVM discoveredinfoDiscovered {vm.name} on {host.name}Discovered {vm.name} on {host.name}Discovered {vm.name}Discovered on {host.name}Discovered {vm.name} on {host.name} in {datacenter.name}VmDiskFailedEventCannot create VM diskerrorCannot create virtual disk {disk} <EventLongDescription id="vim.event.VmDiskFailedEvent"> <description> Failed to create a virtual disk for the virtual machine for reasons described in the event message </description> <cause> <description> A virtual disk was not created for the virtual machine. This condition can occur if the operation failed to access the disk, the disk did not have enough space, you do not have permission for the operation, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that disk is accessible, has enough space, and that the permission settings allow the operation. </action> </cause> </EventLongDescription> VmEmigratingEventVM emigratinginfoMigrating {vm.name} off host {host.name}Migrating {vm.name} off host {host.name}Migrating {vm.name} off hostMigrating off host {host.name}Migrating {vm.name} off host {host.name} in {datacenter.name}VmEndRecordingEventEnd a recording sessioninfoEnd a recording sessionEnd a recording session on {vm.name}VmEndReplayingEventEnd a replay sessioninfoEnd a replay sessionEnd a replay session on {vm.name}VmEvent<VM Event>info<internal>VmFailedMigrateEventCannot migrate VMerrorCannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} to {destHost.name}, {destDatastore.name}Cannot migrate to {destHost.name}, {destDatastore.name}Cannot migrate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmFailedMigrateEvent"> <description> Failed to migrate the virtual machine for reasons described in the event message </description> <cause> <description> The virtual machine did not migrate. This condition can occur if vMotion IPs are not configured, the source and destination hosts are not accessible, and so on. </description> <action> Check the reason in the event message to find the cause of the failure. Ensure that the vMotion IPs are configured on source and destination hosts, the hosts are accessible, and so on. 
</action> </cause> </EventLongDescription> VmFailedRelayoutEventCannot complete VM relayout.errorCannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name}: {reason.msg}Cannot complete relayout {vm.name}: {reason.msg}Cannot complete relayout for this virtual machine on {host.name}: {reason.msg}Cannot complete relayout {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedRelayoutEvent"> <description> Failed to lay out a virtual machine </description> <cause> <description> An attempt to lay out a virtual machine on disk failed for reasons described in the event message. This condition can occur for any of several reasons, for example inability to access the disk. </description> <action> Check the reason in the event message to find the cause of the failure and correct the problem. </action> </cause> </EventLongDescription> VmFailedRelayoutOnVmfs2DatastoreEventCannot complete VM relayout on Vmfs2 datastoreerrorCannot complete relayout due to disks on a VMFS2 volumeCannot complete relayout for virtual machine {vm.name} which has disks on a VMFS2 volume. <EventLongDescription id="vim.event.VmFailedRelayoutOnVmfs2DatastoreEvent"> <description> Failed to migrate a virtual machine on VMFS2 datastore </description> <cause> <description> An attempt to migrate a virtual machine failed because the virtual machine still has disk(s) on a VMFS2 datastore. VMFS2 datastores are read-only for ESX 3.0 and later hosts. </description> <action> Upgrade the datastore(s) from VMFS2 to VMFS3 </action> </cause> </EventLongDescription> VmFailedStartingSecondaryEventvCenter cannot start the Fault Tolerance secondary VMerrorvCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason}vCenter cannot start the Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}. Reason: {reason.@enum.VmFailedStartingSecondaryEvent.FailureReason} <EventLongDescription id="vim.event.VmFailedStartingSecondaryEvent"> <description> vCenter Server could not start the Secondary VM because of an error </description> <cause> <description> The remote host is incompatible for Secondary VM. For instance, this condition can occur when the host does not have access to the virtual machine's network or datastore. </description> <action>Ensure that the hosts in the cluster are compatible for FT</action> </cause> <cause> <description>Login to a remote host failed. If the host has been newly added to the inventory or just rebooted, it might take some time for SSL thumbprints to be propagated to the hosts. 
</description> <action>If the problem persists, disconnect and re-connect the host.</action> </cause> <cause> <description>Registration of the Secondary VM on the remote host failed</description> <action>Determine whether the remote host has access to the datastore that the FT virtual machine resides on</action> </cause> <cause> <description>An error occurred while starting the Secondary VM</description> <action>Determine the cause of the migration error. vCenter Server will try to restart the Secondary VM if it can.</action> </cause> </EventLongDescription> VmFailedToPowerOffEventCannot power off the VM.errorCannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name} on {host.name}. {reason.msg}Cannot power off {vm.name}. {reason.msg}Cannot power off: {reason.msg}Cannot power off {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOffEvent"> <description>The virtual machine failed to power off</description> <cause> <description> The virtual machine might be performing concurrent operations </description> <action>Complete the concurrent operations and retry the power-off operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility. </description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToPowerOnEventCannot power on the VM.errorCannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name}: {reason.msg}Cannot power on {vm.name} on {host.name}: {reason.msg}Cannot power on {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToPowerOnEvent"> <description> The virtual machine failed to power on </description> <cause> <description> Virtual machine power-on attempts can fail because the virtual machine is already in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the power-on failure and fix the problem. </action> </cause> </EventLongDescription> VmFailedToRebootGuestEventVM cannot reboot the guest OS.errorCannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot Guest OS. {reason.msg}Cannot reboot the guest OS for {vm.name} on {host.name} in {datacenter.name}. {reason.msg} <EventLongDescription id="vim.event.VmFailedToRebootGuestEvent"> <description> The guest operating system on the virtual machine failed to reboot. </description> <cause> <description> Guest operating system reboot failures can occur because the virtual machine is not in a powered-on state, concurrent operations are running on the virtual machine, and so on. </description> <action> Check the reason in the event message to find the cause of the reboot failure and fix the problem. 
</action> </cause> </EventLongDescription> VmFailedToResetEventCannot reset VMerrorCannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name}: {reason.msg}Cannot suspend: {reason.msg}Cannot suspend {vm.name} on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToResetEvent"> <description>The virtual machine failed to reset</description> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere client and respond to the question or prompt </action> </cause> <cause> <description>There might not be enough available licenses to perform this operation.</description> <action> Obtain the required licenses and retry the reset operation </action> </cause> <cause> <description> Concurrent operations might be executing on the virtual machine </description> <action>Complete the concurrent operations and retry the reset operation</action> </cause> <cause> <description> The host on which the virtual machine is running is entering maintenance mode </description> <action> Wait until the host exits maintenance mode and retry the operation </action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToShutdownGuestEventCannot shut down the guest OSerrorCannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}Cannot shut down the guest OS. {reason.msg}{vm.name} cannot shut down the guest OS on {host.name} in {datacenter.name}: {reason.msg} <EventLongDescription id="vim.event.VmFailedToShutdownGuestEvent"> <description> Guest operating system shutdown failed for the virtual machine </description> <cause> <description> Guest operating system shutdown can fail if VMware Tools is not installed in the virtual machine. </description> <action>Install VMware Tools.</action> </cause> <cause> <description> The virtual machine might be waiting for a response to a question or prompt</description> <action> Go to the Summary tab for the virtual machine in vSphere Client and respond to the question or prompt </action> </cause> <cause> <description> Concurrent operations might be running on the virtual machine </description> <action>Complete the concurrent operations and retry the shutdown operation</action> </cause> <cause> <description>The virtual machine is in an invalid state. Virtual machines can enter an invalid state for many reasons, for example datastore inaccessibility.</description> <action> Identify the reason that the virtual machine entered an invalid state, correct the problem, and retry the operation. </action> </cause> </EventLongDescription> VmFailedToStandbyGuestEventVM cannot standby the guest OSerrorCannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. {reason.msg}Cannot standby the guest OS. 
{reason.msg}{vm.name} cannot standby the guest OS on {host.name} in {datacenter.name}: {reason.msg}VmFailedToSuspendEventCannot suspend VMerrorCannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name} on {host.name}: {reason.msg}Cannot suspend {vm.name}: {reason.msg}Cannot suspend: {reason.msg}Cannot suspend {vm.name} on {host.name} in {datacenter.name}: {reason.msg}VmFailedUpdatingSecondaryConfigvCenter cannot update the Fault Tolerance secondary VM configurationerrorvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name}vCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name}vCenter cannot update the Fault Tolerance secondary VM configurationvCenter cannot update the Fault Tolerance secondary VM configuration for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFailedUpdatingSecondaryConfig"> <description> After a failover, the new Primary VM failed to update the configuration of the Secondary VM </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFailoverFailedvSphere HA virtual machine failover unsuccessfulwarningvSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over this virtual machine. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg}vSphere HA unsuccessfully failed over {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name}. vSphere HA will retry if the maximum number of attempts has not been exceeded. Reason: {reason.msg} <EventLongDescription id="vim.event.VmFailoverFailed"> <description> vSphere HA did not failover this virtual machine. The event includes the details of the fault that was generated when vSphere HA attempted the failover. vSphere HA will retry the failover on another host unless the maximum number of failover attempts have been exceeded. In many cases, the retry will succeed. </description> <cause> <description> The failover did not succeed because a problem occurred while vSphere HA was trying to restart the virtual machine. Possible problems include the inability to register or reconfigure the virtual machine on the new host because another operation on the same virtual machine is already in progress, or because the virtual machine is still powered on. It may also occur if the configuration file of the virtual machine is corrupt. </description> <action> If vSphere HA is unable to failover the virtual machine after repeated attempts, investigate the error reported by each occurrence of this event, or trying powering on the virtual machine and investigate any returned errors. 
</action> <action> If the error reports that a file is locked, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. If any hosts have been declared dead, investigate whether a networking/storage issue may be the cause. </action> <action> If, however, the error reports that the virtual machine is in an invalid state, there may be an in-progress operation that is preventing access to the virtual machine's files. Investigate whether there are in-progress operations, such as a clone operation that is taking a long time to complete. </action> </cause> </EventLongDescription> VmFaultToleranceStateChangedEventVM Fault Tolerance state changedinfoFault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state on {vm.name} on host {host.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState}Fault Tolerance state of {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} changed from {oldState.@enum.VirtualMachine.FaultToleranceState} to {newState.@enum.VirtualMachine.FaultToleranceState} <EventLongDescription id="vim.event.VmFaultToleranceStateChangedEvent"> <description> The Fault Tolerance state of the virtual machine changed </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceTurnedOffEventVM Fault Tolerance turned offinfoFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name}Fault Tolerance protection has been turned off for {vm.name} on host {host.name}Fault Tolerance protection has been turned off for {vm.name}Fault Tolerance protection has been turned off for this virtual machineFault Tolerance protection has been turned off for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmFaultToleranceTurnedOffEvent"> <description> All Secondary VMs have been removed and Fault Tolerance protection is turned off for this virtual machine. </description> <cause> <description> </description> <action></action> </cause> </EventLongDescription> VmFaultToleranceVmTerminatedEventFault Tolerance VM terminatedinfoThe Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM has been terminated. {reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason}The Fault Tolerance VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} has been terminated. 
{reason.@enum.VmFaultToleranceVmTerminatedEvent.TerminateReason} <EventLongDescription id="vim.event.VmFaultToleranceVmTerminatedEvent"> <description> A Primary VM or Secondary VM became inactive </description> <cause> <description> The Secondary VM became inactive because its operations are no longer synchronized with those of the Primary VM</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Secondary VM became inactive because a hardware or network failure caused the Primary VM to lose the Primary-to-Secondary connection</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> The Fault Tolerant VM became inactive due to a partial hardware failure on the physical host</description> <action>vSphere HA will attempt to restart the Secondary VM</action> </cause> <cause> <description> A user stopped the Fault Tolerant VM</description> <action>The remaining Fault Tolerant VM takes over as the Primary VM. vSphere HA will attempt to restart the Secondary VM.</action> </cause> </EventLongDescription> VmGuestOSCrashedEventGuest operating system crashederror{vm.name} on {host.name}: Guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.{vm.name}: Guest operating system has crashed.This virtual machine's guest operating system has crashed.{vm.name} on {host.name}: Guest operating system has crashed.VmGuestRebootEventGuest rebootinfoGuest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name} on {host.name}Guest OS reboot for {vm.name}Guest OS rebootGuest OS reboot for {vm.name} on {host.name} in {datacenter.name}VmGuestShutdownEventGuest OS shut downinfoGuest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name} on {host.name}Guest OS shut down for {vm.name}Guest OS shut downGuest OS shut down for {vm.name} on {host.name} in {datacenter.name}VmGuestStandbyEventGuest standbyinfoGuest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name} on {host.name}Guest OS standby for {vm.name}Guest OS standbyGuest OS standby for {vm.name} on {host.name} in {datacenter.name}VmHealthMonitoringStateChangedEventvSphere HA VM monitoring state changedinfovSphere HA VM monitoring state in {computeResource.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'vSphere HA VM monitoring state in {computeResource.name} in {datacenter.name} changed from '{prevState.@enum.DasConfigInfo.VmMonitoringState}' to '{state.@enum.DasConfigInfo.VmMonitoringState}'VmInstanceUuidAssignedEventAssign a new instance UUIDinfoAssign a new instance UUID ({instanceUuid})Assign a new instance UUID ({instanceUuid}) to {vm.name} <EventLongDescription id="vim.event.VmInstanceUuidAssignedEvent"> <description>The virtual machine was assigned a new vCenter Server-specific instance UUID </description> <cause> <description> The user who created the virtual machine did not specify a vCenter Server-specific instance UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmInstanceUuidChangedEventInstance UUID ChangedinfoThe instance UUID has been changed from ({oldInstanceUuid}) to ({newInstanceUuid})The instance UUID of {vm.name} has been changed from ({oldInstanceUuid}) to ({newInstanceUuid}) <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine has changed </description> <cause> <description> A user action resulted in a change to the vCenter Server-specific instance UUID of the virtual machine </description> </cause> <cause> <description> vCenter Server changed the instance UUID of the virtual machine because it detected a conflict </description> </cause> </EventLongDescription> VmInstanceUuidConflictEventInstance UUIDs conflicterrorThe instance UUID ({instanceUuid}) conflicts with the instance UUID assigned to {conflictedVm.name}The instance UUID ({instanceUuid}) of {vm.name} conflicts with the instance UUID assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmInstanceUuidChangedEvent"> <description> The vCenter Server-specific instance UUID of the virtual machine conflicted with that of another virtual machine. </description> <cause> <description> Virtual machine instance UUID conflicts can occur if you copy virtual machine files manually without using vCenter Server. </description> </cause> </EventLongDescription> VmMacAssignedEventVM MAC assignedinfoNew MAC address ({mac}) assigned to adapter {adapter}New MAC address ({mac}) assigned to adapter {adapter} for {vm.name}VmMacChangedEventVM MAC changedwarningChanged MAC address from {oldMac} to {newMac} for adapter {adapter}Changed MAC address from {oldMac} to {newMac} for adapter {adapter} for {vm.name} <EventLongDescription id="vim.event.VmMacChangedEvent"> <description> The virtual machine MAC address has changed </description> <cause> <description> A user action changed the virtual machine MAC address </description> </cause> <cause> <description> vCenter changed the virtual machine MAC address because it detected a MAC address conflict </description> </cause> </EventLongDescription> VmMacConflictEventVM MAC conflicterrorThe MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name} <EventLongDescription id="vim.event.VmMacConflictEvent"> <description> The virtual machine MAC address conflicts with that of another virtual machine </description> <cause> <description> This virtual machine's MAC address is the same as that of another virtual machine. Refer to the event details for more information on the virtual machine that caused the conflict. 
</description> </cause> </EventLongDescription> VmMaxFTRestartCountReachedvSphere HA reached maximum Secondary VM restart count.warningvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart Secondary VM {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxFTRestartCountReached"> <description> The system reached the maximum restart limit in its attempt to restart a Secondary VM </description> <cause> <description>The system exceeded the number of allowed restart attempts for the Secondary VM when it tried to reestablish Fault Tolerance</description> <action>Check the causes for the restart failures and fix them. Then disable and re-enable Fault Tolerance protection.</action> </cause> </EventLongDescription> VmMaxRestartCountReachedvSphere HA reached maximum VM restart countwarningvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name}because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} because the maximum VM restart count was reachedvSphere HA stopped trying to restart this VM because the maximum VM restart count was reachedvSphere HA stopped trying to restart {vm.name} on {host.name} in cluster {computeResource.name} in {datacenter.name} because the maximum VM restart count was reached <EventLongDescription id="vim.event.VmMaxRestartCountReached"> <description> vSphere HA has reached the maximum number of failover attempts for this virtual machine and has not been able to restart it. No further failover attempts will be made. By default vSphere HA attempts to failover a virtual machine 5 times. </description> <cause> <description> Failover can fail for a number of reasons including that the configuration file of the virtual machine is corrupt or one or more of the virtual machines datastores are not accessible by any host in the cluster due to an all paths down condition. In addition, the VM may be powered on a host that the vSphere HA master agent can no longer monitor using the management network or heartbeat datastores, or it may have been powered on by a user on a host outside of the cluster. </description> <action> To determine why previous failover attempts failed, search the events that are logged for the VM for occurrences of the event vSphere HA reports when a failover fails. These events will report the reason for the failed failover. vSphere HA events can be located by searching for the phrase 'vSphere HA'. To determine whether any issues still exist, try to manually power on the virtual machine. If power-on fails, investigate the error that is returned. But, if the power-on remains pending for a long time, investigate whether an all paths down condition exists. Also, if any hosts have been declared dead, investigate whether a networking or storage issue may be the cause. 
</action> </cause> </EventLongDescription> VmMessageErrorEventVM error messageerrorError message on {vm.name} on {host.name}: {message}Error message on {vm.name} on {host.name}: {message}Error message on {vm.name}: {message}Error message from {host.name}: {message}Error message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageErrorEvent"> <description> An error message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this error occurred </description> </cause> </EventLongDescription> VmMessageEventVM information messageinfoMessage on {vm.name} on {host.name}: {message}Message on {vm.name} on {host.name}: {message}Message on {vm.name}: {message}Message from {host.name}: {message}Message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageEvent"> <description> An information message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on the messages from the virtual machine </description> </cause> </EventLongDescription> VmMessageWarningEventVM warning messagewarningWarning message on {vm.name} on {host.name}: {message}Warning message on {vm.name} on {host.name}: {message}Warning message on {vm.name}: {message}Warning message from {host.name}: {message}Warning message on {vm.name} on {host.name} in {datacenter.name}: {message} <EventLongDescription id="vim.event.VmMessageWarningEvent"> <description> A warning message listing a collection of observations has been reported by the virtual machine </description> <cause> <description> The event contains details on why this warning was issued </description> </cause> </EventLongDescription> VmMigratedEventVM migratedinfoVirtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} was migrated from host {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmMigratedEvent"> <description> The virtual machine's host was changed successfully </description> <cause> <description> A user action caused the virtual machine to be successfully migrated to a different host </description> </cause> </EventLongDescription> VmNoCompatibleHostForSecondaryEventNo compatible host for the Fault Tolerance secondary VMerrorNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name}No compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name}No compatible host for the Fault Tolerance secondary VM {vm.name}No compatible host for the Fault Tolerance secondary VMNo compatible host for the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription 
id="vim.event.VmNoCompatibleHostForSecondaryEvent"> <description> No compatible host was found when trying to place a Secondary VM </description> <cause> <description>There was no compatible host available to place a Secondary VM</description> <action>Resolve the incompatibilities and retry the operation</action> </cause> </EventLongDescription> VmNoNetworkAccessEventVM No Network AccesswarningNot all networks are accessible by {destHost.name}Not all networks for {vm.name} are accessible by {destHost.name}VmOrphanedEventVM orphanedwarning{vm.name} does not exist on {host.name}{vm.name} does not exist on {host.name}{vm.name} does not existVirtual machine does not exist on {host.name}{vm.name} does not exist on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmOrphanedEvent"> <description> The virtual machine does not exist on the host with which it is associated </description> <cause> <description> The virtual machine was deleted while its host was disconnected from vCenter Server. </description> </cause> </EventLongDescription> VmPowerOffOnIsolationEventvSphere HA powered off VM on isolated hostinfovSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name}vSphere HA powered off this virtual machine on the isolated host {isolatedHost.name}vSphere HA powered off {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name} <EventLongDescription id="vim.event.VmPowerOffOnIsolationEvent"> <description> vSphere HA powered off this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmPoweredOffEventVM powered offinfo{vm.name} on {host.name} is powered off{vm.name} on {host.name} is powered off{vm.name} is powered offVirtual machine on {host.name} is powered off{vm.name} on {host.name} in {datacenter.name} is powered offVmPoweredOnEventVM powered oninfo{vm.name} on {host.name} has powered on{vm.name} on {host.name} has powered on{vm.name} has powered onVirtual machine on {host.name} has powered on{vm.name} on {host.name} in {datacenter.name} has powered onVmPoweringOnWithCustomizedDVPortEventVirtual machine powered on with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.infoVirtual machine powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.Virtual machine {vm.name} powered On with vNICs connected to dvPorts that have a port level configuration, which might be different from the dvPort group configuration.VmPrimaryFailoverEventFault Tolerance VM failovererrorFault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM failed over to {host.name}. {reason.@enum.VirtualMachine.NeedSecondaryReason}Fault Tolerance VM ({vm.name}) failed over to {host.name} in cluster {computeResource.name} in {datacenter.name}. 
{reason.@enum.VirtualMachine.NeedSecondaryReason}VmReconfiguredEventVM reconfiguredinfoReconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured virtual machine.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}Reconfigured {vm.name} on {host.name} in {datacenter.name}.
Modified:
{configChanges.modified} Added:
{configChanges.added} Deleted:
{configChanges.deleted}VmRegisteredEventVM registeredinfoRegistered {vm.name} on {host.name}Registered {vm.name} on {host.name} in {datacenter.name}Registered {vm.name}Registered VM on {host.name} in {datacenter.name}Registered {vm.name} on {host.name} in {datacenter.name}VmRelayoutSuccessfulEventVM relayout completedinfoRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} on {host.name} completedRelayout of {vm.name} completedRelayout of the virtual machine completedRelayout of {vm.name} on {host.name} in {datacenter.name} completedVmRelayoutUpToDateEventVM relayout up-to-dateinfo{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} on {host.name} is in the correct format and relayout is not necessary{vm.name} is in the correct format and relayout is not necessaryIn the correct format and relayout is not necessary{vm.name} on {host.name} in {datacenter.name} is in the correct format and relayout is not necessaryVmReloadFromPathEventVirtual machine reloaded from pathinfo{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.{vm.name} reloaded from new configuration {configPath}.Virtual machine on {host.name} reloaded from new configuration {configPath}.{vm.name} on {host.name} reloaded from new configuration {configPath}.VmReloadFromPathFailedEventVirtual machine not reloaded from patherror{vm.name} on {host.name} could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from path {configPath}.{vm.name} could not be reloaded from {configPath}.This virtual machine could not be reloaded from {configPath}.{vm.name} on {host.name} could not be reloaded from {configPath}. <EventLongDescription id="vim.event.VmReloadFromPathFailedEvent"> <description> Reloading the virtual machine from a new datastore path failed </description> <cause> <description>The destination datastore path was inaccessible or invalid </description> <action>Use a valid destination datastore path </action> </cause> <cause> <description>The virtual machine is in an invalid state </description> <action>Check the virtual machine power state. 
If the virtual machine is powered on, power it off </action> </cause> <cause> <description>The virtual machine is enabled for Fault Tolerance </description> <action>Disable Fault Tolerance for the virtual machine and retry the operation </action> </cause> </EventLongDescription> VmRelocateFailedEventFailed to relocate VMerrorFailed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate from {host.name}, {ds.name} to {destHost.name}, {destDatastore.name}Failed to relocate {vm.name} from {host.name}, {ds.name} in {datacenter.name} to {destHost.name}, {destDatastore.name} in {destDatacenter.name} <EventLongDescription id="vim.event.VmRelocateFailedEvent"> <description> Virtual machine relocation to a different host or datastore failed </description> <cause> <description> Virtual machine relocation can fail for a number of reasons, including network outages, insufficient disk space, and so on </description> <action> Consider the task related to this event, evaluate the failure reason, and take action accordingly </action> </cause> </EventLongDescription> VmRelocateSpecEvent<VM Relocate Spec Event>info<internal><internal><internal><internal><internal>VmRelocatedEventVM relocatedinfoVirtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name}Virtual machine {vm.name} relocated from {sourceHost.name}, {sourceDatastore.name} in {sourceDatacenter.name} to {host.name}, {ds.name} in {datacenter.name} <EventLongDescription id="vim.event.VmRelocatedEvent"> <description> The virtual machine execution and/or storage was successfully relocated </description> <cause> <description> A user action caused the virtual machine's execution and/or storage to be successfully changed </description> </cause> </EventLongDescription> VmRemoteConsoleConnectedEventVM remote console connectedinfoRemote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name} on host {host.name}Remote console connected to {vm.name}Remote console connectedRemote console connected to {vm.name} on host {host.name}VmRemoteConsoleDisconnectedEventVM remote console disconnectedinfoRemote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name} on host {host.name}Remote console disconnected from {vm.name}Remote console connectedRemote console disconnected from {vm.name} on host {host.name}VmRemovedEventVM removedinfoRemoved {vm.name} on {host.name}Removed {vm.name} on {host.name}Removed {vm.name}RemovedRemoved {vm.name} on {host.name} from {datacenter.name}VmRenamedEventVM renamedwarningRenamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName}Renamed from {oldName} to {newName}Renamed {vm.name} from {oldName} to {newName} in 
{datacenter.name}VmRequirementsExceedCurrentEVCModeEventVirtual machine is using features that exceed the capabilities of the host's current EVC mode.warningFeature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.Feature requirements of {vm.name} exceed capabilities of this host's current EVC mode.Feature requirements of this virtual machine exceed capabilities of this host's current EVC mode.Feature requirements of {vm.name} exceed capabilities of {host.name}'s current EVC mode.VmResettingEventVM resettinginfo{vm.name} on {host.name} is reset{vm.name} on {host.name} is reset{vm.name} is resetVirtual machine on {host.name} is reset{vm.name} on {host.name} in {datacenter.name} is resetVmResourcePoolMovedEventVM resource pool movedinfoMoved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name}Moved from resource pool {oldParent.name} to {newParent.name}Moved {vm.name} from resource pool {oldParent.name} to {newParent.name} in {datacenter.name}VmResourceReallocatedEventVM resource reallocatedinfoResource allocation changed
Modified:
{configChanges.modified}Changed resource allocation for {vm.name}
Modified:
{configChanges.modified}VmRestartedOnAlternateHostEventVM restarted on alternate hostinfoVirtual machine {vm.name} was restarted on this host since {sourceHost.name} failedVirtual machine was restarted on {host.name} since {sourceHost.name} failedVirtual machine {vm.name} was restarted on {host.name} since {sourceHost.name} failedVmResumingEventVM resuminginfo{vm.name} on {host.name} is resuming{vm.name} on {host.name} is resuming{vm.name} is resumingVirtual machine on {host.name} is resuming{vm.name} on {host.name} in {datacenter.name} is resumingVmSecondaryAddedEventFault Tolerance secondary VM addedinfoA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name}A Fault Tolerance secondary VM has been added for {vm.name} on host {host.name}A Fault Tolerance secondary VM has been added for {vm.name}A Fault Tolerance secondary VM has been added for this VMA Fault Tolerance secondary VM has been added for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryDisabledBySystemEventvCenter disabled Fault ToleranceerrorvCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance because the Secondary VM could not be powered On.vCenter disabled Fault Tolerance on VM {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name} because the Secondary VM could not be powered On. <EventLongDescription id="vim.event.VmSecondaryDisabledBySystemEvent"> <description> vCenter Server disabled a Secondary VM because it could not power on the Secondary VM </description> <cause> <description>vCenter Server failed to power on the Secondary VM </description> <action>Check the reason in the event message for more details, fix the failure, and re-enable Fault Tolerance protection to power on the Secondary VM.</action> </cause> </EventLongDescription> VmSecondaryDisabledEventDisabled Fault Tolerance secondary VMinfoDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Disabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Disabled Fault Tolerance secondary VM for {vm.name}Disabled Fault Tolerance secondary VM for this virtual machineDisabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryEnabledEventEnabled Fault Tolerance secondary VMinfoEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Enabled Fault Tolerance secondary VM for {vm.name} on host {host.name}Enabled Fault Tolerance secondary VM for {vm.name}Enabled Fault Tolerance secondary VM for this VMEnabled Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmSecondaryStartedEventStarted Fault Tolerance secondary VMinfoStarted Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Started Fault Tolerance secondary VM for {vm.name} on host {host.name}Started Fault Tolerance secondary VM for {vm.name}Started Fault Tolerance secondary VM for this virtual machineStarted Fault Tolerance secondary VM for {vm.name} 
on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmShutdownOnIsolationEventvSphere HA shut down VM on isolated hostinfovSphere HA shut down {vm.name} on the isolated host {isolatedHost.name} in cluster {computeResource.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down this virtual machine on the isolated host {isolatedHost.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation}vSphere HA shut down {vm.name} was shut down on the isolated host {isolatedHost.name} in cluster {computeResource.name} in {datacenter.name}: {shutdownResult.@enum.VmShutdownOnIsolationEvent.Operation} <EventLongDescription id="vim.event.VmShutdownOnIsolationEvent"> <description> vSphere HA shut down this virtual machine because the host it was running on was isolated from the management network. </description> </EventLongDescription> VmStartRecordingEventStart a recording sessioninfoStart a recording sessionStart a recording session on {vm.name}VmStartReplayingEventStart a replay sessioninfoStart a replay sessionStart a replay session on {vm.name}VmStartingEventVM startinginfo{vm.name} on {host.name} is starting{vm.name} on {host.name} is starting{vm.name} is startingVirtual machine is starting{vm.name} on {host.name} in {datacenter.name} is startingVmStartingSecondaryEventStarting Fault Tolerance secondary VMinfoStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name}Starting Fault Tolerance secondary VM for {vm.name} on host {host.name} in clusterStarting Fault Tolerance secondary VM for {vm.name}Starting Fault Tolerance secondary VM for this virtual machineStarting Fault Tolerance secondary VM for {vm.name} on host {host.name} in cluster {computeResource.name} in {datacenter.name}VmStaticMacConflictEventVM static MAC conflicterrorThe static MAC address ({mac}) conflicts with MAC assigned to {conflictedVm.name}The static MAC address ({mac}) of {vm.name} conflicts with MAC assigned to {conflictedVm.name}VmStoppingEventVM stoppinginfo{vm.name} on {host.name} is stopping{vm.name} on {host.name} is stopping{vm.name} is stoppingVirtual machine is stopping{vm.name} on {host.name} in {datacenter.name} is stoppingVmSuspendedEventVM suspendedinfo{vm.name} on {host.name} is suspended{vm.name} on {host.name} is suspended{vm.name} is suspendedVirtual machine is suspended{vm.name} on {host.name} in {datacenter.name} is suspendedVmSuspendingEventVM being suspendedinfo{vm.name} on {host.name} is being suspended{vm.name} on {host.name} is being suspended{vm.name} is being suspendedVirtual machine is being suspended{vm.name} on {host.name} in {datacenter.name} is being suspendedVmTimedoutStartingSecondaryEventStarting the Fault Tolerance secondary VM timed outerrorStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} timed out within {timeout} msStarting the Fault Tolerance secondary VM timed out within {timeout} msStarting the Fault Tolerance secondary VM {vm.name} on host {host.name} in cluster {computeResource.name} in 
{datacenter.name} timed out within {timeout} ms <EventLongDescription id="vim.event.VmTimedoutStartingSecondaryEvent"> <description> An attempt to start a Secondary VM timed out. </description> <cause> <description>A user attempted to turn on or enable Fault Tolerance, triggering the start of the Secondary VM. The start operation timed out and, as a result, vCenter Server disables Fault Tolerance. </description> <action>Fix any problems and re-enable Fault Tolerance protection</action> </cause> <cause> <description>The secondary VM was started in response to a failure, but the start attempt timed out</description> <action> vSphere HA will attempt to power on the Secondary VM</action> </cause> </EventLongDescription> VmUnsupportedStartingEventVM unsupported guest OS is startingwarningUnsupported guest OS {guestId} for {vm.name}Unsupported guest OS {guestId} for {vm.name} on {host.name}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name}Unsupported guest OS {guestId}Unsupported guest OS {guestId} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUnsupportedStartingEvent"> <description> Attempting to power on a virtual machine that has an unsupported guest operating system </description> <cause> <description> A user action initiated a virtual machine power-on operation, but the virtual machine has an unsupported guest operating system. </description> </cause> </EventLongDescription> VmUpgradeCompleteEventVM upgrade completeinfoVirtual machine compatibility upgraded to {version.@enum.vm.hwVersion}VmUpgradeFailedEventCannot upgrade VMerrorCannot upgrade virtual machine compatibility.VmUpgradingEventUpgrading VMinfoUpgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility to {version.@enum.vm.hwVersion}Upgrading virtual machine compatibility of {vm.name} in {datacenter.name} to {version.@enum.vm.hwVersion} <EventLongDescription id="vim.event.VmUpgradingEvent"> <description>The virtual hardware on this virtual machine is being upgraded</description> <cause> <description>A user-initiated action triggered an upgrade of the virtual machine hardware</description> </cause> <cause> <description>A scheduled task started an upgrade of the virtual machine hardware</description> </cause> </EventLongDescription> VmUuidAssignedEventVM UUID assignedinfoAssigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name}Assigned new BIOS UUID ({uuid}) to {vm.name}Assigned new BIOS UUID ({uuid})Assigned new BIOS UUID ({uuid}) to {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidAssignedEvent"> <description>The virtual machine was assigned a new BIOS UUID</description> <cause> <description>The user who created the virtual machine did not specify a BIOS UUID at creation time. vCenter Server generated a new UUID and assigned it to the virtual machine. 
</description> </cause> </EventLongDescription> VmUuidChangedEventVM UUID ChangedwarningChanged BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name}BIOS UUID was changed from {oldUuid} to {newUuid}Changed BIOS UUID from {oldUuid} to {newUuid} for {vm.name} on {host.name} in {datacenter.name} <EventLongDescription id="vim.event.VmUuidChangedEvent"> <description>The virtual machine BIOS UUID has changed</description> <cause> <description> A user changed the virtual machine BIOS UUID directly on the host </description> </cause> </EventLongDescription> VmUuidConflictEventVM UUID ConflicterrorBIOS ID ({uuid}) conflicts with that of {conflictedVm.name}BIOS ID ({uuid}) of {vm.name} conflicts with that of {conflictedVm.name}VmVnicPoolReservationViolationClearEventVirtual NIC Network Resource Pool Reservation Violation Clear eventinfoThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedThe reservation violation on the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is clearedVmVnicPoolReservationViolationRaiseEventVirtual NIC Network Resource Pool Reservation Violation eventinfoThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedThe reservation allocated to the virtual NIC network resource pool {vmVnicResourcePoolName} with key {vmVnicResourcePoolKey} on {dvs.name} is violatedVmWwnAssignedEventVM WWN assignedinfoNew WWNs assignedNew WWNs assigned to {vm.name} <EventLongDescription id="vim.event.VmWwnAssignedEvent"> <description> The virtual machine was assigned a WWN (World Wide Name) </description> <cause> <description>The virtual machine was assigned a WWN because it was created with an RDM (Raw Device Mappings) disk or was reconfigured to access an RDM disk </description> </cause> </EventLongDescription> VmWwnChangedEventVM WWN changedwarningWWNs are changedWWNs are changed for {vm.name} <EventLongDescription id="vim.event.VmWwnChangedEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine was changed </description> <cause> <description>The virtual machine was assigned a new WWN, possibly due to a conflict caused by another virtual machine being assigned the same WWN </description> </cause> </EventLongDescription> VmWwnConflictEventVM WWN conflicterrorThe WWN ({wwn}) conflicts with the currently registered WWNThe WWN ({wwn}) of {vm.name} conflicts with the currently registered WWN <EventLongDescription id="vim.event.VmWwnConflictEvent"> <description> The WWN (World Wide Name) assigned to the virtual machine has a conflict </description> <cause> <description>The WWN assigned to this virtual machine was the same as that of a different virtual machine. </description> <action> Check the event details for more information on the conflict and correct the problem. </action> </cause> </EventLongDescription> WarningUpgradeEventWarning upgradewarning{message}IScsiBootFailureEventBoot from iSCSI failed.warningBooting from iSCSI failed.Booting from iSCSI failed with an error. See the VMware Knowledge Base for information on configuring iBFT networking.EventExLost Network Connectivityerrorvprob.net.connectivity.lost|Lost network connectivity on virtual switch {1}. Physical NIC {2} is down. 
Affected portgroups:{3}.EventExNo IPv6 TSO supporterrorvprob.net.e1000.tso6.notsupported|Guest-initiated IPv6 TCP Segmentation Offload (TSO) packets ignored. Manually disable TSO inside the guest operating system in virtual machine {1}, or use a different virtual adapter.EventExInvalid vmknic specified in /Migrate/Vmknicwarningvprob.net.migrate.bindtovmk|The ESX advanced config option /Migrate/Vmknic is set to an invalid vmknic: {1}. /Migrate/Vmknic specifies a vmknic that vMotion binds to for improved performance. Please update the config option with a valid vmknic or, if you do not want vMotion to bind to a specific vmknic, remove the invalid vmknic and leave the option blank.EventExVirtual NIC connection to switch failedwarningvprob.net.proxyswitch.port.unavailable|Virtual NIC with hardware address {1} failed to connect to distributed virtual port {2} on switch {3}. There are no more ports available on the host proxy switch.EventExNetwork Redundancy Degradedwarningvprob.net.redundancy.degraded|Uplink redundancy degraded on virtual switch {1}. Physical NIC {2} is down. {3} uplinks still up. Affected portgroups:{4}.EventExLost Network Redundancywarningvprob.net.redundancy.lost|Lost uplink redundancy on virtual switch {1}. Physical NIC {2} is down. Affected portgroups:{3}.EventExThin Provisioned Device Nearing Capacitywarningvprob.scsi.device.thinprov.atquota|Space utilization on thin-provisioned device {1} exceeded configured threshold.EventExLost Storage Connectivityerrorvprob.storage.connectivity.lost|Lost connectivity to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExDegraded Storage Path Redundancywarningvprob.storage.redundancy.degraded|Path redundancy to storage device {1} degraded. Path {2} is down. {3} remaining active paths. Affected datastores: {4}.EventExLost Storage Path Redundancywarningvprob.storage.redundancy.lost|Lost path redundancy to storage device {1}. Path {2} is down. Affected datastores: {3}.EventExVMFS Locked By Remote Hosterrorvprob.vmfs.error.volume.is.locked|Volume on device {1} is locked, possibly because some remote host encountered an error during a volume operation and could not recover.EventExDevice backing an extent of a file system is offline.errorvprob.vmfs.extent.offline|An attached device {1} might be offline. The file system {2} is now in a degraded state. While the datastore is still available, parts of data that reside on the extent that went offline might be inaccessible.EventExDevice backing an extent of a file system is online.infovprob.vmfs.extent.online|Device {1} backing file system {2} came online. This extent was previously offline. All resources on this device are now available.EventExVMFS Volume Connectivity Restoredinfovprob.vmfs.heartbeat.recovered|Successfully restored access to volume {1} ({2}) following connectivity issues.EventExVMFS Volume Connectivity Degradedinfovprob.vmfs.heartbeat.timedout|Lost access to volume {1} ({2}) due to connectivity issues. Recovery attempt is in progress and outcome will be reported shortly.EventExVMFS Volume Connectivity Losterrorvprob.vmfs.heartbeat.unrecoverable|Lost connectivity to volume {1} ({2}) and subsequent recovery attempts have failed.EventExNo Space To Create VMFS Journalerrorvprob.vmfs.journal.createfailed|No space for journal on volume {1} ({2}). Opening volume in read-only metadata mode with limited write support.EventExVMFS Lock Corruption Detectederrorvprob.vmfs.lock.corruptondisk|At least one corrupt on-disk lock was detected on volume {1} ({2}). 
Other regions of the volume may be damaged too.EventExLost connection to NFS servererrorvprob.vmfs.nfs.server.disconnect|Lost connection to server {1} mount point {2} mounted as {3} ({4}).EventExRestored connection to NFS serverinfovprob.vmfs.nfs.server.restored|Restored connection to server {1} mount point {2} mounted as {3} ({4}).EventExVMFS Resource Corruption Detectederrorvprob.vmfs.resource.corruptondisk|At least one corrupt resource metadata region was detected on volume {1} ({2}). Other regions of the volume might be damaged too.EventExCopied Library Iteminfocom.vmware.cl.CopyLibraryItemEvent|Copied Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}({sourceLibraryItemId}), source Library {sourceLibraryName}.EventExFailed to copy Library Itemerrorcom.vmware.cl.CopyLibraryItemFailEvent|Failed to copy Library Item {targetLibraryItemName} to Library {targetLibraryName}. Source Library Item {sourceLibraryItemName}, source Library {sourceLibraryName}.EventExCreated Libraryinfocom.vmware.cl.CreateLibraryEvent|Created Library {libraryName}EventExFailed to create Libraryerrorcom.vmware.cl.CreateLibraryFailEvent|Failed to create Library {libraryName}EventExCreated Library Iteminfocom.vmware.cl.CreateLibraryItemEvent|Created Library Item {libraryItemName} in Library {libraryName}.EventExFailed to create Library Itemerrorcom.vmware.cl.CreateLibraryItemFailEvent|Failed to create Library Item {libraryItemName} in Library {libraryName}.EventExDeleted Libraryinfocom.vmware.cl.DeleteLibraryEvent|Deleted Library {libraryName}EventExFailed to delete Libraryerrorcom.vmware.cl.DeleteLibraryFailEvent|Failed to delete Library {libraryName}EventExDeleted Library Iteminfocom.vmware.cl.DeleteLibraryItemEvent|Deleted Library Item {libraryItemName} in Library {libraryName}.EventExFailed to delete Library Itemerrorcom.vmware.cl.DeleteLibraryItemFailEvent|Failed to delete Library Item {libraryItemName} in Library {libraryName}.EventExPublished Libraryinfocom.vmware.cl.PublishLibraryEvent|Published Library {libraryName}EventExFailed to publish Libraryerrorcom.vmware.cl.PublishLibraryFailEvent|Failed to publish Library {libraryName}EventExPublished Library Iteminfocom.vmware.cl.PublishLibraryItemEvent|Published Library Item {libraryItemName} in Library {libraryName}EventExFailed to publish Library Itemerrorcom.vmware.cl.PublishLibraryItemFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName}EventExPublished Library Item to Subscriptioninfocom.vmware.cl.PublishLibraryItemSubscriptionEvent|Published Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library Item to Subscriptionerrorcom.vmware.cl.PublishLibraryItemSubscriptionFailEvent|Failed to publish Library Item {libraryItemName} in Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExPublished Library to Subscriptioninfocom.vmware.cl.PublishLibrarySubscriptionEvent|Published Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExFailed to publish Library to Subscriptionerrorcom.vmware.cl.PublishLibrarySubscriptionFailEvent|Failed to publish Library {libraryName} to Subscribed Library {subscribedLibraryName} on vCenter Server {subscriberVcenterHostname}EventExCreated 
Subscriptioninfocom.vmware.cl.SubscriptionCreateEvent|Created subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to create Subscriptionerrorcom.vmware.cl.SubscriptionCreateFailEvent|Failed to create subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExDeleted Subscriptioninfocom.vmware.cl.SubscriptionDeleteEvent|Deleted subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to delete Subscriptionerrorcom.vmware.cl.SubscriptionDeleteFailEvent|Failed to delete subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExUpdated Subscriptioninfocom.vmware.cl.SubscriptionUpdateEvent|Updated subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExFailed to update Subscriptionerrorcom.vmware.cl.SubscriptionUpdateFailEvent|Failed to update subscription {subscriptionId} of published library {publishedLibraryId} for subscribed library {subscribedLibraryId}.EventExSynchronized Libraryinfocom.vmware.cl.SyncLibraryEvent|Synchronized Library {libraryName}EventExFailed to Synchronize Libraryerrorcom.vmware.cl.SyncLibraryFailEvent|Failed to Synchronize Library {libraryName}EventExSynchronized Library Iteminfocom.vmware.cl.SyncLibraryItemEvent|Synchronized Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Itemerrorcom.vmware.cl.SyncLibraryItemFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}EventExFailed to Synchronize Library Iteminfocom.vmware.cl.SyncNfcFailEvent|Failed to Synchronize Library Item {libraryItemName} in Library {libraryName}. 
Failure may be due to a network error or a host entering maintenance mode.EventExUpdated Libraryinfocom.vmware.cl.UpdateLibraryEvent|Updated Library {libraryName}EventExFailed to update Libraryerrorcom.vmware.cl.UpdateLibraryFailEvent|Failed to update Library {libraryName}EventExUpdated Library Iteminfocom.vmware.cl.UpdateLibraryItemEvent|Updated Library Item {libraryItemName} in Library {libraryName}.EventExFailed to update Library Itemerrorcom.vmware.cl.UpdateLibraryItemFailEvent|Failed to update Library Item {libraryItemName} in Library {libraryName}.EventExCould not locate Library Item file on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryItemFileOnRestoreEvent|File '{fileName}' in Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library Item folder on the storage backing after restorecom.vmware.cl.restore.DeletedLibraryItemOnRestoreEvent|Folder for Library Item '{libraryItemName}' could not be located on the storage backing after restoreEventExCould not locate Library folder on the storage backing after restorewarningcom.vmware.cl.restore.DeletedLibraryOnRestoreEvent|Library '{libraryName}' folder could not be located on the storage backing after restoreEventExCould not locate Library Item content after restorecom.vmware.cl.restore.MissingLibraryItemContentOnRestoreEvent|The content of Library Item '{libraryItemName}' could not be located on storage after restoreEventExNew Library Item file found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemFileOnRestoreEvent|New Library Item file '{fileName}' found on the storage backing for Library Item '{libraryItemName}' after restore. Path to the file on storage: '{filePath}'EventExNew Library Item folder found on the storage backing after restorewarningcom.vmware.cl.restore.NewLibraryItemOnRestoreEvent|New Library Item folder '{itemFolderName}' found on the storage backing for Library '{libraryName}' after restore. 
Path to the item folder on storage: '{itemFolderPath}'ExtendedEventCancel LWD snapshotinfoCancelling LWD snapshotcom.vmware.dp.events.cancelsnapshot|Cancelling LWD snapshotExtendedEventLWD snapshot is cancelledinfoLWD snapshot is cancelledcom.vmware.dp.events.cancelsnapshotdone|LWD snapshot is cancelledExtendedEventFailed to cancel LWD snapshoterrorFailed to cancel LWD snapshotcom.vmware.dp.events.cancelsnapshotfailed|Failed to cancel LWD snapshotExtendedEventPerform 'commit' phase of LWD-based restoreinfoPerforming 'commit' phase of LWD-based restorecom.vmware.dp.events.commitrestore|Performing 'commit' phase of LWD-based restoreExtendedEvent'commit' phase of LWD-based restore is completedinfo'commit' phase of LWD-based restore is completedcom.vmware.dp.events.commitrestoredone|'commit' phase of LWD-based restore is completedExtendedEvent'commit' phase of LWD-based restore failederror'commit' phase of LWD-based restore failedcom.vmware.dp.events.commitrestorefailed|'commit' phase of LWD-based restore failedExtendedEventEnabling protection services on hosts in the clusterinfoEnabling protection services on hosts in the clusterEnabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservices|Enabling protection services on hosts in the clusterExtendedEventFinished enabling protection services on hosts in the clusterinfoFinished enabling protection services on hosts in the clusterFinished enabling protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesdone|Finished enabling protection services on hosts in the clusterExtendedEventFailed to enable protection services on hosts in the clustererrorFailed to enable protection services on hosts in the clusterFailed to enable protection services on hosts in the clustercom.vmware.dp.events.enableprotectionservicesfailed|Failed to enable protection services on hosts in the clusterExtendedEventPerform 'prepare' phase of LWD-based restoreinfoPerforming 'prepare' phase of LWD-based restorecom.vmware.dp.events.preparerestore|Perform 'prepare' phase of LWD restoreExtendedEvent'prepare' phase of LWD-based restore is completedinfo'prepare' phase of LWD-based restore is completedcom.vmware.dp.events.preparerestoredone|'prepare' phase of LWD-based restore is completedExtendedEvent'prepare' phase of LWD-based restore failederror'prepare' phase of LWD-based restore failedcom.vmware.dp.events.preparerestorefailed|'prepare' phase of LWD-based restore failedExtendedEventEnable LWD data protectioninfoEnabling LWD data protectioncom.vmware.dp.events.protect|Enabling LWD data protectionExtendedEventLWD data protection enabledinfoLWD data protection enabledcom.vmware.dp.events.protectdone|LWD data protection enabledExtendedEventFailed to enable LWD data protectionerrorFailed to enable LWD data protectioncom.vmware.dp.events.protectfailed|Failed to enable LWD data protectionExtendedEventQuerying entity for protection infoinfoQuerying entity for protection infocom.vmware.dp.events.queryprotectedentityinfo|Querying entity for protection infoExtendedEventFinished querying entity for protection infoinfoFinished querying entity for protection infocom.vmware.dp.events.queryprotectedentityinfodone|Finished querying entity for protection infoExtendedEventFailed to query entity for protection infoerrorFailed to query entity for protection infocom.vmware.dp.events.queryprotectedentityinfofailed|Failed to query entity for protection infoExtendedEventRetire LWD snapshotinfoRetiring LWD 
snapshotcom.vmware.dp.events.retiresnapshot|Retiring LWD snapshotExtendedEventLWD snapshot is retiredinfoLWD snapshot is retiredcom.vmware.dp.events.retiresnapshotdone|LWD snapshot is retiredExtendedEventFailed to retire LWD snapshoterrorFailed to retire LWD snapshotcom.vmware.dp.events.retiresnapshotfailed|Failed to retire LWD snapshotExtendedEventTake LWD application-consistent snapshotinfoTaking LWD application-consistent snapshotcom.vmware.dp.events.snapshot.applicationconsistent|Taking LWD application-consistent snapshotExtendedEventTake LWD crash-consistent snapshotinfoTaking LWD crash-consistent snapshotcom.vmware.dp.events.snapshot.crashconsistent|Taking LWD crash-consistent snapshotExtendedEventTake LWD metadata-only snapshotinfoTaking LWD metadata-only snapshotcom.vmware.dp.events.snapshot.metadataonly|Taking LWD metadata-only snapshotExtendedEventTake LWD VSS application-consistent snapshotinfoTaking LWD VSS application-consistent snapshotcom.vmware.dp.events.snapshot.vssappconsistent|Taking LWD VSS application-consistent snapshotExtendedEventLWD application-consistent snapshot takeninfoLWD application-consistent snapshot takencom.vmware.dp.events.snapshotdone.applicationconsistent|LWD application-consistent snapshot takenExtendedEventLWD crash-consistent snapshot takeninfoLWD crash-consistent snapshot takencom.vmware.dp.events.snapshotdone.crashconsistent|LWD crash-consistent snapshot takenExtendedEventLWD metadata-only snapshot takeninfoLWD metadata-only snapshot takencom.vmware.dp.events.snapshotdone.metadataonly|LWD metadata-only snapshot takenExtendedEventLWD VSS application-consistent snapshot takeninfoLWD VSS application-consistent snapshot takencom.vmware.dp.events.snapshotdone.vssappconsistent|LWD VSS application-consistent snapshot takenExtendedEventLWD application-consistent snapshot failederrorLWD application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.applicationconsistent|LWD application-consistent snapshot failedExtendedEventLWD crash-consistent snapshot failederrorLWD crash-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.crashconsistent|LWD crash-consistent snapshot failedExtendedEventLWD metadata-only snapshot failederrorLWD metadata-only snapshot failedcom.vmware.dp.events.snapshotfailed.metadataonly|LWD metadata-only snapshot failedExtendedEventLWD VSS application-consistent snapshot failederrorLWD VSS application-consistent snapshot failedcom.vmware.dp.events.snapshotfailed.vssappconsistent|LWD VSS application-consistent snapshot failedExtendedEventPerform LWD snapshot syncinfoPerforming LWD snapshot synccom.vmware.dp.events.sync|Performing LWD snapshot syncExtendedEventLWD snapshot sync is completedinfoLWD snapshot sync is completedcom.vmware.dp.events.syncdone|LWD snapshot sync is completedExtendedEventLWD snapshot sync failederrorLWD snapshot sync failedcom.vmware.dp.events.syncfailed|LWD snapshot sync failedExtendedEventDisable LWD data protectioninfoDisabling LWD data protectioncom.vmware.dp.events.unprotect|Disabling LWD data protectionExtendedEventLWD data protection disabledinfoLWD data protection disabledcom.vmware.dp.events.unprotectdone|LWD data protection disabledExtendedEventFailed to disable LWD data protectionerrorFailed to disable LWD data protectioncom.vmware.dp.events.unprotectfailed|Failed to disable LWD data protectionEventExDeployed entity from Content Libraryinfocom.vmware.ovfs.DeployEvent|Deployed entity from Library Item {libraryItemName} in Library {libraryName}EventExFailed to deploy entity from 
Content Libraryerrorcom.vmware.ovfs.DeployFailEvent|Failed to deploy entity from Library Item {libraryItemName} in Library {libraryName}EventExCloned entity to Content Libraryinfocom.vmware.ovfs.ExportEvent|Cloned entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExFailed to clone entity to Content Libraryerrorcom.vmware.ovfs.ExportFailEvent|Failed to clone entity {entityName} to Library Item {libraryItemName} in Library {libraryName}EventExinfocom.vmware.rbd.activateRuleSet|Activate Rule SetEventExwarningcom.vmware.rbd.fdmPackageMissing|A host in a HA cluster does not have the 'vmware-fdm' package in its image profileEventExwarningcom.vmware.rbd.hostProfileRuleAssocEvent|A host profile associated with one or more active rules was deleted.EventExerrorcom.vmware.rbd.hostScriptFailure|An error encountered while running a user defined script: {scriptName} on the host: {ip}. Status: {status}EventExwarningcom.vmware.rbd.ignoreMachineIdentity|Ignoring the AutoDeploy.MachineIdentity event, since the host is already provisioned through Auto DeployEventExinfocom.vmware.rbd.pxeBootNoImageRule|Unable to PXE boot host since it does not match any rulesEventExinfocom.vmware.rbd.pxeBootUnknownHost|PXE Booting unknown hostEventExinfocom.vmware.rbd.pxeProfileAssoc|Attach PXE ProfileEventExinfocom.vmware.rbd.scriptBundleAssoc|Script Bundle Name: {name} attached to moref {moref}, entity-id {entity-id}EventExerrorcom.vmware.rbd.vmcaCertGenerationFailureEvent|Failed to generate host certificates using VMCAEventExCreated Harbor registryinfocom.vmware.registry.HarborCreateEvent|Created Harbor registry {registryName} on cluster {clusterId}.EventExFailed to create Harbor registryerrorcom.vmware.registry.HarborCreateFailEvent|Failed to create Harbor registry {registryName} on cluster {clusterId}.EventExDeleted Harbor registryinfocom.vmware.registry.HarborDeleteEvent|Deleted Harbor registry {registryName} on cluster {clusterId}.EventExFailed to delete Harbor registryerrorcom.vmware.registry.HarborDeleteFailEvent|Failed to delete Harbor registry {registryName} on cluster {clusterId}.EventExCreated Harbor projectinfocom.vmware.registry.HarborProjectCreateEvent|Created Harbor project {projectName} for registry {registryId}.EventExFailed to create Harbor projecterrorcom.vmware.registry.HarborProjectCreateFailEvent|Failed to create Harbor project {projectName} for registry {registryId}.EventExDeleted Harbor projectinfocom.vmware.registry.HarborProjectDeleteEvent|Deleted Harbor project {projectName} for registry {registryId}.EventExFailed to delete Harbor projecterrorcom.vmware.registry.HarborProjectDeleteFailEvent|Failed to delete Harbor project {projectName} for registry {registryId}.EventExCreated Harbor project memberinfocom.vmware.registry.HarborProjectMemberCreateEvent|Created Harbor project member {memberName} for project {projectName}.EventExFailed to create Harbor project membererrorcom.vmware.registry.HarborProjectMemberCreateFailEvent|Failed to create Harbor project member {memberName} for project {projectName}.EventExDeleted Harbor project memberinfocom.vmware.registry.HarborProjectMemberDeleteEvent|Deleted Harbor project member {memberName} from project {projectName}.EventExFailed to delete Harbor project membererrorcom.vmware.registry.HarborProjectMemberDeleteFailEvent|Failed to delete Harbor project member {memberName} from project {projectName}.EventExUpdated Harbor project memberinfocom.vmware.registry.HarborProjectMemberUpdateEvent|Updated Harbor project member 
{memberName} for project {projectName}.EventExFailed to update Harbor project membererrorcom.vmware.registry.HarborProjectMemberUpdateFailEvent|Failed to update Harbor project member {memberName} for project {projectName}.EventExPurged Harbor projectinfocom.vmware.registry.HarborProjectPurgeEvent|Purged Harbor project {projectName} for registry {registryId}.EventExFailed to purge Harbor projecterrorcom.vmware.registry.HarborProjectPurgeFailEvent|Failed to purge Harbor project {projectName} for registry {registryId}.EventExRestoring Harbor registryinfocom.vmware.registry.HarborRestoreEvent|Restoring Harbor registry {registryName} on cluster {clusterId}.EventExFailed to restore Harbor registryerrorcom.vmware.registry.HarborRestoreFailEvent|Failed to restore Harbor registry {registryName} on cluster {clusterId}.EventExRestored Harbor registryinfocom.vmware.registry.HarborRestoreSuccessEvent|Restored Harbor registry {registryName} on cluster {clusterId}.ExtendedEventProactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!errorcom.vmware.vc.proactivehdw.DbError|Proactive hardware management: Database errors encountered in an internal operation. Please check vSAN health logs for more details and resolve the underlying issue as soon as possible!EventExProactive hardware management: Host is disabled with proactive hardware management.warningcom.vmware.vc.proactivehdw.Disabled|Host is disabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: Host is enabled with proactive hardware management.infocom.vmware.vc.proactivehdw.Enabled|Host is enabled with proactive hardware management with HSM from vendor: {VendorDisplayName}.EventExProactive hardware management: received a failure health update from vendor.errorcom.vmware.vc.proactivehdw.Failure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: Polled health updates from HSM are discarded due to health update response content size limit being exceeded.warningcom.vmware.vc.proactivehdw.HealthUpdatesResponseLimitExceed|Proactive hardware management: Polled health updates from HSM {VendorDisplayName} are discarded due to health update response content size limit being exceeded. Refer to vSAN health logs for more details.EventExProactive hardware management: Some health updates from HSM are discarded due to validation failures.warningcom.vmware.vc.proactivehdw.HealthUpdatesValidationFail|Proactive hardware management: Some health updates from HSM {VendorDisplayName} are discarded due to validation failures. Refer to vSAN health logs for more details.EventExProactive hardware management: Error occurred when posting host-level event for unregistration of HSMerrorcom.vmware.vc.proactivehdw.HostEventPostFailed|Proactive hardware management: After HSM {VendorDisplayName} was unregistered an internal error prevented a host event from posting. 
The following hosts are affected: {AffectedHosts}.EventExProactive hardware management: Failed to contact an HSMerrorcom.vmware.vc.proactivehdw.HsmCommunicationError|Proactive hardware management: Failed to contact HSM with vendor: {VendorDisplayName}.EventExProactive hardware management: Error occured in poll HSM requesterrorcom.vmware.vc.proactivehdw.HsmRequestError|Proactive hardware management: Internal error occurred during polling HSM from vendor {VendorDisplayName}.EventExProactive hardware management: HSM is unregistered.infocom.vmware.vc.proactivehdw.HsmUnregistration|Proactive hardware management: HSM is unregistered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: received a predictive failure health update from vendor.warningcom.vmware.vc.proactivehdw.PredictiveFailure|Proactive hardware management received a health update from vendor: {VendorDisplayName} with ID: {HealthUpdateId} and Info ID: {HealthUpdateInfoId}, targeted at a hardware component identified by vSphere ID: {TargetComponentVSphereId} and hardware ID: {TargetComponentVendorId}. In case the target hardware component is a vSAN disk, more details are available at vSAN storage vendor reported drive health page.EventExProactive hardware management: HSM is unregistered but with a failure in removing resource bundle.errorcom.vmware.vc.proactivehdw.ResourceBundleCleanupError|Proactive hardware management: HSM from {VendorDisplayName} is unregistered but with a failure in removing resource bundle - likely the resource bundle is currently in use. Please refer to vSAN health logs for the underlying cause and perform manual clean up on the resource bundle.EventExProactive hardware management: Failed to create/update subscription for HSM due to a communication error with HSMerrorcom.vmware.vc.proactivehdw.SubscriptionHsmCommError|Proactive hardware management: Failed to create/update subscription for HSM {VendorDisplayName} due to a communication error with HSM.EventExProactive hardware management: Failed to create/update subscription for HSM due to internal errorerrorcom.vmware.vc.proactivehdw.SubscriptionInternalError|Proactive hardware management: Failed to perform subscription create/update for HSM {VendorDisplayName} due to an internal error. Please refer to the vSAN health logs for more details.EventExProactive hardware management: A new HSM is registered.infocom.vmware.vc.proactivehdw.registration.NewRegistration|Proactive hardware management: A new HSM is registered from vendor: '{VendorDisplayName}'.EventExProactive hardware management: HSM registration is updated.infocom.vmware.vc.proactivehdw.registration.UpdateSuccess|Proactive hardware management: The registration information on the following HSM: '{VendorDisplayName}' has been updated. Here are its supported health update infos: '{EnabledHealthUpdateInfos}'ExtendedEventinfocom.vmware.vcIntegrity.CancelTask|Canceling task on [data.name].ExtendedEventinfocom.vmware.vcIntegrity.CheckNotification|Successfully downloaded notifications. New notifications: [data.Notifications]ExtendedEventerrorcom.vmware.vcIntegrity.CheckNotificationFailed|Could not download notifications.ExtendedEventerrorcom.vmware.vcIntegrity.CheckPXEBootHostFailure|Cannot determine whether host {host.name} is PXE booted. 
The host will be excluded for the current operation.ExtendedEventwarningcom.vmware.vcIntegrity.ClusterConfigurationOutOfCompliance|Hosts in Cluster [data.resource] are out of compliance.ExtendedEventerrorcom.vmware.vcIntegrity.ClusterOperationCancelledDueToCertRefresh|In-flight VUM task on Cluster [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventwarningcom.vmware.vcIntegrity.CriticallyLowDiskSpace|VMware vSphere Lifecycle Manager is critically low on storage space! Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.DisableToolsRemediateOnReboot|Successfully disabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.DisableToolsRemediateOnRebootFailed|Could not disable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.DownloadAlert|VMware vSphere Lifecycle Manager download alert (critical/total): ESX [data.esxCritical]/[data.esxTotal]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadFailedPatchBinary|Could not download patch packages for following patches: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestPackage|Successfully downloaded guest patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestPackageFailed|Could not download guest patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixPackage|Successfully downloaded guest patch packages for UNIX. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixPackageFailed|Could not download guest patch packages for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUnixUpdate|Successfully downloaded guest patch definitions for UNIX. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUnixUpdateFailed|Could not download guest patch definitions for UNIX.ExtendedEventinfocom.vmware.vcIntegrity.DownloadGuestUpdate|Successfully downloaded guest patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadGuestUpdateFailed|Could not download guest patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostPackage|Successfully downloaded host patch packages. New packages: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostPackageFailed|Could not download host patch packages.ExtendedEventinfocom.vmware.vcIntegrity.DownloadHostUpdate|Successfully downloaded host patch definitions. New patches: [data.Updates]ExtendedEventerrorcom.vmware.vcIntegrity.DownloadHostUpdateFailed|Could not download host patch definitions.ExtendedEventinfocom.vmware.vcIntegrity.EnableToolsRemediateOnReboot|Successfully enabled the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.EnableToolsRemediateOnRebootFailed|Could not enable the option for VMware Tools upgrade on VM power cycle for [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.FailToLock|There are running tasks for the entity [data.name] that cannot finish within a specific time. The operation will stop.ExtendedEventcom.vmware.vcIntegrity.FtFailedEvent|ExtendedEventerrorcom.vmware.vcIntegrity.GADvdMountError|VMware vSphere Lifecycle Manager Guest Agent could not access the DVD drive on {vm.name}. 
Verify that a DVD drive is available and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAError|An internal error occurred in communication with VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Verify that the VM is powered on and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GAInstallFailed|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name}. Make sure that the VM is powered on.ExtendedEventinfocom.vmware.vcIntegrity.GAInstalled|VMware vSphere Lifecycle Manager Guest Agent successfully installed on {vm.name}.ExtendedEventerrorcom.vmware.vcIntegrity.GARuntimeError|An unknown internal error occurred during the required operation on {vm.name}. Check the logs for more details and retry the operation.ExtendedEventerrorcom.vmware.vcIntegrity.GATimeout|VMware vSphere Lifecycle Manager Guest Agent could not respond in time on {vm.name}. Verify that the VM is powered on and that the Guest Agent is running.ExtendedEventwarningcom.vmware.vcIntegrity.HostConfigurationOutOfCompliance|Configuration of Host [data.resource] is out of compliance.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallClose|Close [data.name] firewall ports.ExtendedEventinfocom.vmware.vcIntegrity.HostFirewallOpen|Open [data.name] firewall ports.ExtendedEventerrorcom.vmware.vcIntegrity.HostOperationCancelledDueToCertRefresh|In-flight VUM task on Host [data.name] is cancelled due to VC TLS certificate replacement. For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportCancelled|Host patch offline bundle upload is canceled by user.ExtendedEventinfocom.vmware.vcIntegrity.HostPatchBundleImportSuccess|[data.numBulletins] new bulletins uploaded successfully through offline bundle.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchBundleImportUnknownError|Host patch offline bundle upload did not succeed.ExtendedEventcom.vmware.vcIntegrity.HostPatchInputRecalledFailure|ExtendedEventcom.vmware.vcIntegrity.HostPatchPrerequisiteRecalledFailure|ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be remediated. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry remediation operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchRemediateInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be remediated. Remove either of the patch from the baseline and retry the remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageHostConflict|Host patch [data.patch] conflicts with the package [data.conflictPackage] installed on the host and cannot be staged. Remove the patch from the baseline or include any suggested additional patches in the baseline and retry stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.HostPatchStageInputConflict|Host patch [data.patch] conflicts with patch [data.conflictPatch] included in the baseline and cannot be staged. 
Remove either of the patch from the baseline and retry the stage operation.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmFtEvent|Cannot remediate host {host.name} because it is a part of a VMware DPM enabled cluster and contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmScanEvent|Cannot scan host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedDpmStageEvent|Cannot stage host {host.name} because it is a part of a VMware DPM enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtDiffPatchesEvent|Host {host.name} has FT enabled VMs. If you apply different patches to hosts in a cluster, FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtEvent|Cannot remediate host {host.name} because it contains one or more Primary or Secondary VMs on which FT is enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedFtPairEvent|Host {host.name} has FT enabled VMs. The host on which the Secondary VMs reside is not selected for remediation. As a result FT cannot be re-enabled.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedHacEvent|Cannot remediate host {host.name} because it is a part of a HA admission control enabled cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedPxeUpgradeUnsupported|Upgrade operations are not supported on host {host.name} because it is PXE booted.ExtendedEventwarningcom.vmware.vcIntegrity.HostSkippedRemovableDeviceEvent|Cannot remediate host {host.name} because it has VMs with a connected removable device. Disconnect all removable devices before remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorEsxFileDownload|Host [data.name] cannot download files from the VMware vSphere Lifecycle Manager patch store. Check the network connectivity and firewall setup, and verify that the host can access the configured patch store.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorNotInstallable|The selected patches [data.arg1] cannot be installed on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateConflictDependencies|The patches selected for remediation on the host [data.name] depend on other patches that have conflicts.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDefault|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateDeviceAttached|Remediation did not succeed for [data.name]. The host has virtual machines [data.arg1] with connected removable media devices. This prevents the host from entering maintenance mode. Disconnect the removable devices and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateEnterMmode|Remediation did not succeed for [data.name]. The host could not enter maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateExitMmode|Remediation did not succeed for [data.name]. The host could not exit maintenance mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReboot|Remediation did not succeed for [data.name]. The host did not reboot after remediation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRebootReconnect|Remediation did not succeed for [data.name]. 
VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect after a reboot.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostReconnect|Remediation did not succeed for [data.name]. VMware vSphere Lifecycle Manager timed out waiting for the host to reconnect.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateHostRestoreVm|Remediation did not succeed for [data.name]. Restoring the power state or device connection state for one or more virtual machines on the host did not succeed.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateMetadataCorrupt|Remediation did not succeed for [data.name]. The patch metadata is corrupted. This might be caused by an invalid format of metadata content. You can try to re-download the patches.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpdateErrorRemediateVibDownload|Remediation did not succeed for [data.name]. There were errors while downloading one or more software packages. Check the VMware vSphere Lifecycle Manager network connectivity settings.ExtendedEventcom.vmware.vcIntegrity.HostUpdateErrorVsanHealthCheckFailed|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeAgentDeployFailure|Cannot deploy upgrade agent on host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailBootDiskSize|The boot disk has a size of [data.found] MiB, the minimum requirement of the upgrade image is [data.expected] MiB.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailConflictingVibs|The upgrade contains conflicting VIBs. Remove the conflicting VIBs or use Image Builder to create a custom upgrade ISO image that contains the newer versions of the conflicting VIBs, and try to upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreakageUnsure|Cannot determine whether the upgrade breaks Cisco Nexus 1000V virtual network switch feature on the host. If the host does not have the feature, you can ignore this warning.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailDvsBreaks|Cisco Nexus 1000V virtual network switch feature installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailEESXInsufficientSpaceForImage|Cannot create a ramdisk of size [data.expected]MB to store the upgrade image. Check if the host has sufficient memory.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailESXInsufficientSpaceForImage|Upgrade requires at least [data.expected]MB free space on boot partition to store the upgrade image, only [data.found]MB found. Retry after freeing up sufficient space or perform a CD-based installation.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailHostHardwareMismatch|The upgrade is not supported on the host hardware. The upgrade ISO image contains VIBs that failed the host hardware compatibility check.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPInImage|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V software package [data.expected] installed on the host. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleDPUSupportedHost|The host is managing a DPU(s) and is a part of vLCM baselines-managed cluster, which is not supported. 
Move the host to vLCM image-managed cluster and try again.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleHostAcceptance|The upgrade package is not compatible with the host. Use an upgrade package that meets the host's acceptance level or change the host's acceptance level to match that of the upgrade package.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePartitionLayout|The host cannot be upgraded due to incompatible partition layout.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatiblePasswords|The passwords cannot be migrated because the password encryption scheme is incompatible.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleSphereletVersion|Spherelet version [data.found] is not compatible with ESXi 8.0 and later version. Please upgrade your WCP cluster to install a compatible Spherelet version, or remove Spherelet if the host is not in a WCP cluster.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailIncompatibleWithDvsCP|Cisco Nexus 1000V virtual network switch software package [data.found] in the upgrade image is incompatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientEntropyCache|Storage entropy cache is not full. A full entropy cache is required for upgrade. Refer to KB 89854 for steps on how to refill the cache.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientMemory|Insufficient memory found on the host: [data.expected]MB required, [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailInsufficientSpaceForConfig|Upgrade requires at least [data.expected]MB free space on a local VMFS datastore, only [data.found]MB found.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailLockerSpaceAvail|The system has insufficient locker space for the image profile.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPBreaksDvsCP|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image that is compatible with the Cisco Nexus 1000V VSM. Upgrading the host will remove the feature from the host.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingDPInImage|There is no Cisco Nexus 1000V virtual network switch software package in the upgrade image [data.found]. Upgrading the host will remove the feature from the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailMissingGunzipChecksumVibs|These VIB(s) on the host do not have the required sha-256 gunzip checksum for their payloads: [data.found]. This will prevent VIB security verification and secure boot from functioning properly. Please remove these VIBs and check with your vendor for a replacement of these VIBs.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeBootBank|The system image on the attached iso lacks a storage driver for the installed bootbank.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNativeNic|The system image on the attached iso lacks a NIC driver for the management network traffic.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMD5RootPassword|The root password is not using MD5 hashing, causing it to be authenticated up to only 8 characters. 
For instructions on how to correct this, see VMware KB 1024500 at http://kb.vmware.com/kb/1024500.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoMinCpuCores|New ESXi version requires a minimum of [data.expected] processor cores.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNoVt|Processor does not support hardware virtualization or it is disabled in BIOS. Virtual machine performance may be slow.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNonVmwareSoftware|The software modules [data.found] found on the host are not part of the upgrade image. These modules will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNvdsToCvdsMigration|ESXi host is not ready for NSX-T vSphere Distributed Switch (VDS) migration included with this ESXi upgrade. Please run Upgrade Readiness Tool (URT) from the NSX-T Manager managing this host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailNxbitEnabled|No eXecute (NX) bit is not enabled on the host. New ESXi version requires a CPU with NX/XD bit supported and enabled.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPendingReboot|Host software configuration requires a reboot. Reboot the host and try upgrade again.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailPowerPathBreaks|EMC PowerPath module [data.found] installed on the host will be removed during upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailRRFTVMsPresent|Legacy FT is not compatible with upgraded version. Disable legacy FT.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailScriptInitFailed|Host upgrade validity checks are not successful.ExtendedEventcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailTbootRequired|ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnknown|The upgrade precheck script returned unknown error.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedConfig|Error in ESX configuration file (esx.conf).ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedDevices|Unsupported devices [data.found] found on the host.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedHostVersion|Host version [data.found] is not supported for upgrade.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedLongMode|Host CPU is unsupported. New ESXi version requires a 64-bit CPU with support for LAHF/SAHF instructions in long mode.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedSHA1Cert|SHA-1 signature found in host certificate {data.cert} with subject {data.subject}. Support for certificates with weak signature algorithm SHA-1 has been removed in ESXi 8.0. To proceed with upgrade, replace it with a SHA-2 signature based certificate. Refer to release notes and KB 89424 for more details.ExtendedEventwarningcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailUnsupportedTPMVersion|TPM 1.2 device detected. Support for TPM version 1.2 is discontinued. Installation may proceed, but may cause the system to behave unexpectedly.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradePrecheckTestFailVFATCorruption|A problem with one or more vFAT bootbank partitions was detected. 
Please refer to KB 91136 and run dosfsck on bootbank partitions.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeProgressAborted|Host upgrade installer stopped.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressAuth|Host upgrade in progress: Configuring authentication.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressBootloader|Host upgrade in progress: Boot setup.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressClearpart|Host upgrade in progress: Clearing partitions.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressComplete|Host upgrade installer completed.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressKeyboard|Host upgrade in progress: Setting keyboard.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressLanguage|Host upgrade in progress: Setting language.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMigrating|Host upgrade in progress: Migrating ESX v3 configuration to ESX v4.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressMount|Host upgrade in progress: Mounting file systems.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressNetworking|Host upgrade in progress: Installing network configuration.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPackages|Host upgrade in progress: Installing packages.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartphys|Host upgrade in progress: Partitioning physical hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPartvirt|Host upgrade in progress: Partitioning virtual hard drives.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressPostscript|Host upgrade in progress: Running postinstallation script.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressRootpass|Host upgrade in progress: Setting root passwordExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressTimezone|Host upgrade in progress: Setting timezone.ExtendedEventinfocom.vmware.vcIntegrity.HostUpgradeProgressUnknown|Host upgrade in progress.ExtendedEventerrorcom.vmware.vcIntegrity.HostUpgradeRunScriptFailure|Cannot run upgrade script on host.ExtendedEventerrorcom.vmware.vcIntegrity.ImageRecommendationGenerationError|The image recommendation generation failed.ExtendedEventinfocom.vmware.vcIntegrity.ImageRecommendationGenerationFinished|The image recommendation generation finished.ExtendedEventerrorcom.vmware.vcIntegrity.IncompatibleTools|Could not install VMware vSphere Lifecycle Manager Guest Agent on {vm.name} because VMware Tools is not installed or is of an incompatible version. 
The required version is [data.requiredVersion] and the installed version is [data.installedVersion].ExtendedEventinfocom.vmware.vcIntegrity.InstallAddOnUpdate|The following additional patches are included to resolve a conflict for installation on [data.entityName]: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestion|To resolve a conflict for installation on [data.entityName], the following additional patches might need to be included in the baseline: [data.message].ExtendedEventinfocom.vmware.vcIntegrity.InstallSuggestionNotFound|VMware vSphere Lifecycle Manager could not find patches to resolve the conflict for installation on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdate|Installation of patches [data.updateId] started on host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.InstallUpdateComplete|Installation of patches succeeded on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.InstallUpdateError|Could not install patches on [data.entityName].ExtendedEventerrorcom.vmware.vcIntegrity.LinuxOffLineScanNotSupported|Cannot scan [data.name] for patches. Scan of powered off or suspended Linux VMs is not supported.ExtendedEventwarningcom.vmware.vcIntegrity.LowDiskSpace|VMware vSphere Lifecycle Manager is running out of storage space. Location: [data.Volume]. Available space: [data.FreeSpace]MB.ExtendedEventinfocom.vmware.vcIntegrity.MetadataCorrupted|Patch definition for [data.name] is corrupt. Check the logs for more details. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.MetadataNotFound|Patch definitions for [data.name] are missing. Download patch definitions first.ExtendedEventerrorcom.vmware.vcIntegrity.NoRequiredLicense|There is no VMware vSphere Lifecycle Manager license for [data.name] for the required operation.ExtendedEventinfocom.vmware.vcIntegrity.NotificationCriticalInfoAlert|VMware vSphere Lifecycle Manager informative notification (critical) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationDownloadAlert|VMware vSphere Lifecycle Manager notification download alertExtendedEventinfocom.vmware.vcIntegrity.NotificationImportantInfoAlert|VMware vSphere Lifecycle Manager informative notification (important) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationModerateInfoAlert|VMware vSphere Lifecycle Manager informative notification (moderate) alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallAlert|VMware vSphere Lifecycle Manager recall alertExtendedEventinfocom.vmware.vcIntegrity.NotificationRecallFixAlert|VMware vSphere Lifecycle Manager recall fix alertExtendedEventerrorcom.vmware.vcIntegrity.OperationCancelledDueToCertRefresh|In-flight VUM task on [data.name] is cancelled due to VC TLS certificate replacement. 
For more details, please refer to https://kb.vmware.com/s/article/90842.ExtendedEventcom.vmware.vcIntegrity.PXEBootedHostEvent|ExtendedEventinfocom.vmware.vcIntegrity.PackageImport|Package [data.name] is successfully imported.ExtendedEventerrorcom.vmware.vcIntegrity.PackageImportFailure|Import of package: [data.name] did not succeed.ExtendedEventinfocom.vmware.vcIntegrity.RebootHostComplete|Host [data.entityName] is successfully rebooted.ExtendedEventerrorcom.vmware.vcIntegrity.RebootHostError|Cannot reboot host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostStart|Start rebooting host [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.RebootHostWait|Waiting for host [data.entityName] to reboot.ExtendedEventerrorcom.vmware.vcIntegrity.ReconfigureClusterFailedEvent|VMware vSphere Lifecycle Manager could not restore HA admission control/DPM settings for cluster {computeResource.name} to their original values. These settings have been changed for patch installation. Check the cluster settings and restore them manually.ExtendedEventinfocom.vmware.vcIntegrity.Remediate|Remediation succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedHost|Could not remediate {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDisconnectedVm|Could not remediate {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateDpmDisableHost|Could not remediate host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.RemediateFailed|Remediation did not succeed for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostInvalidPowerState|Cannot remediate the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateHostOnUnsupportedHost|Could not remediate {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediateOrphanedVm|Could not remediate orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.RemediateStart|Remediating object [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.RemediateVmOnUnsupportedHost|Could not remediate {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.RemediationStatusEvent|Current progress of remediation: [data.noOfSucceededHosts] hosts completed successfully, [data.noOfFailedHosts] hosts completed with errors, [data.noOfHostsBeingRemediatedCurrently] hosts are being remediated, [data.noOfWaitingHosts] hosts are waiting to start remediation, and [data.noOfRetryHosts] hosts could not enter maintenance mode and are waiting to retry.ExtendedEventinfocom.vmware.vcIntegrity.Scan|Successfully scanned [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanCancelled|Scanning of [data.name] is canceled by user.ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedHost|Could not scan {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDisconnectedVm|Could not scan {vm.name} because the virtual machine has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanDpmDisableHost|Could not scan host {host.name} because its power state is invalid. 
The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.ScanFailed|Could not scan [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostInvalidPowerState|Cannot scan the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.ScanHostOnUnsupportedHost|Could not scan {host.name} for patches because it is of unsupported version [data.version].ExtendedEventwarningcom.vmware.vcIntegrity.ScanMissingUpdate|Found a missing patch: [data.message] when scanning [data.name]. Re-downloading patch definitions might resolve this problem.ExtendedEventinfocom.vmware.vcIntegrity.ScanOrphanedVm|Could not scan orphaned VM {vm.name}.ExtendedEventinfocom.vmware.vcIntegrity.ScanStart|Scanning object [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.ScanUnsupportedVolume|{vm.name} contains an unsupported volume [data.volumeLabel]. Scan results for this VM might be incomplete.ExtendedEventerrorcom.vmware.vcIntegrity.ScanVmOnUnsupportedHost|Could not scan {vm.name} because host {host.name} is of unsupported version [data.version].ExtendedEventerrorcom.vmware.vcIntegrity.SequentialRemediateFailedEvent|An error occured during the sequential remediation of hosts in cluster {computeResource.name}. Check the related events for more details.ExtendedEventinfocom.vmware.vcIntegrity.SkipSuspendedVm|Suspended VM {vm.name} has been skipped.ExtendedEventinfocom.vmware.vcIntegrity.Stage|Staging succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.StageDisconnectedHost|Could not stage patches to {host.name} because the host has an invalid connection state: [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageDpmDisableHost|Could not stage patches to host {host.name} because its power state is invalid. The host is in standby mode and the individual VMware DPM settings of the host are set to Disabled or Manual.ExtendedEventerrorcom.vmware.vcIntegrity.StageFailed|Staging did not succeed for [data.name][data.message].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostInvalidPowerState|Cannot stage patches to the host {host.name} because its power state is [data.state].ExtendedEventerrorcom.vmware.vcIntegrity.StageHostOnUnsupportedHost|Could not stage patches to {host.name} because it is of unsupported version [data.version].ExtendedEventinfocom.vmware.vcIntegrity.StageStart|Staging patches to host [data.name].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdate|Started staging of patches [data.updateId] on [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.StageUpdateComplete|Staging of patch to [data.entityName] succeeded.ExtendedEventerrorcom.vmware.vcIntegrity.StageUpdateError|Cannot stage patch [data.updateId] to [data.entityName].ExtendedEventinfocom.vmware.vcIntegrity.SysprepDisabled|Sysprep is disabled during the remediation.ExtendedEventinfocom.vmware.vcIntegrity.SysprepEnabled|Sysprep settings are restored.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepHandleFailure|Cannot access the sysprep settings for VM {vm.name}. Retry the operation after disabling sysprep for the VM.ExtendedEventerrorcom.vmware.vcIntegrity.SysprepNotFound|Cannot locate the sysprep settings for VM {vm.name}. For Windows 7 and Windows 2008 R2, offline VM remediation is supported only if the system volume is present in the primary disk partition. 
Retry the operation after disabling sysprep for the VM.ExtendedEventinfocom.vmware.vcIntegrity.ToolsRemediate|VMware Tools upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.ToolsRemediateFailed|VMware Tools upgrade did not succeed for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.ToolsScan|Successfully scanned [data.name] for VMware Tools upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.ToolsScanFailed|Could not scan [data.name] for VMware Tools upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsScanInstallNotSupported|VMware Tools is not installed on [data.name]. VMware vSphere Lifecycle Manager supports upgrading only an existing VMware Tools installation.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeRemediateSkippedOnHost|VMware Tools upgrade was not performed on {vm.name}. VMware Tools upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade is not supported for virtual appliances.ExtendedEventwarningcom.vmware.vcIntegrity.ToolsUpgradeScanSkippedOnHost|VMware Tools upgrade scan was not performed on {vm.name}. VMware Tools upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. VMware Tools upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedHostRemediateSpecialVMEvent|The host [data.name] has a VM [data.vm] with VMware vSphere Lifecycle Manager or VMware vCenter Server installed. The VM must be moved to another host for the remediation to proceed.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedLinuxAction|Action is not supported for Linux VM/VA {vm.name}. VMware Tools is not installed or the machine cannot start.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedOs|Scan or remediation is not supported on [data.name] because of unsupported OS [data.os].ExtendedEventinfocom.vmware.vcIntegrity.UnsupportedPXEBootHost|Scanning, remediation, and staging are not supported on PXE booted ESXi hosts.ExtendedEventerrorcom.vmware.vcIntegrity.UnsupportedSpecialVMEvent|VM [data.name] has either VMware vSphere Lifecycle Manager or VMware vCenter Server installed. This VM will be ignored for scan and remediation.ExtendedEventwarningcom.vmware.vcIntegrity.UnsupportedVaAction|Action is not supported for offline or suspended virtual appliance {vm.name}. ExtendedEventerrorcom.vmware.vcIntegrity.VAAutoUpdateOn|Auto update is set to ON for virtual appliance [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADiscovery|Successfully discovered virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADiscoveryFailed|Could not discover virtual appliance [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadGenericFailure|Could not download virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadInvalidUrl|[data.name] is not a valid virtual appliance download URL.ExtendedEventerrorcom.vmware.vcIntegrity.VADownloadMetadataFailure|Could not download virtual appliance upgrade metadata for [data.name].ExtendedEventinfocom.vmware.vcIntegrity.VADownloadSuccess|Successfully downloaded virtual appliance upgrade metadata.ExtendedEventerrorcom.vmware.vcIntegrity.VARepositoryAddressNotSet|No repository address is set for virtual appliance [data.name]. 
The appliance does not support updates by vCenter Server.ExtendedEventinfocom.vmware.vcIntegrity.VAScan|Successfully scanned [data.name] for VA upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VAScanFailed|Could not scan [data.name] for VA upgrades.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeRemediate|Virtual Hardware upgrade succeeded for [data.name].ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateFailed|Could not perform Virtual Hardware upgrade on [data.name].ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeRemediateSkippedOnHost|Virtual Hardware upgrade was not performed for {vm.name}. Virtual Hardware upgrade is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade is not supported for virtual appliances.ExtendedEventinfocom.vmware.vcIntegrity.VMHardwareUpgradeScan|Successfully scanned [data.name] for Virtual Hardware upgrades.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeScanFailed|Could not scan [data.name] for Virtual Hardware upgrades.ExtendedEventwarningcom.vmware.vcIntegrity.VMHardwareUpgradeScanSkippedOnHost|Virtual Hardware upgrade scan was not performed for {vm.name}. Virtual Hardware upgrade scan is supported only for VMs that run on ESX/ESXi 4.0 and higher. Virtual Hardware upgrade scan is not supported for virtual appliances.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotInstalled|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not installed. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsNotLatest|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools is not the latest version supported by the host. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnknown|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is unknown. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMHardwareUpgradeToolsUnmanaged|Virtual Hardware upgrade did not succeed for {vm.name}, because VMware Tools state is not managed by VMware vSphere. To upgrade virtual hardware, VMware Tools must be the latest version.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsAutoUpgradeUnsupported|The version of VMware Tools installed in {vm.name} does not support automatic upgrade. Upgrade VMware Tools manually.ExtendedEventerrorcom.vmware.vcIntegrity.VMToolsNotRunning|Error while waiting for VMware Tools to respond. Verify that VMware Tools is running in VM {vm.name}.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForInstall|Patch [data.inputBulletin] was excluded from the remediation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the remediation. You can also add the baselines to a baseline group for convenience and perform the remediation.ExtendedEventwarningcom.vmware.vcIntegrity.VibPrerequisitesMissingForStage|Patch [data.inputBulletin] was excluded from the stage operation because its prerequisite [data.missingPrereq] is neither installed on the host nor included in the baseline. Include the prerequisites in a Patch or Extension baseline and retry the stage operation. 
You can also add the baselines to a baseline group for convenience and perform the stage operation.ExtendedEventerrorcom.vmware.vcIntegrity.VmDevicesRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original removable device connection settings for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the settings for the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmMigrationFailedEvent|Cannot migrate VM {vm.name} from [data.srcHost] to [data.destHost].ExtendedEventerrorcom.vmware.vcIntegrity.VmPowerRestoreFailedEvent|VMware vSphere Lifecycle Manager could not restore the original power state for all VMs in cluster {computeResource.name}. These settings have been changed for patch installation. You can manually restore the original power state of the VMs.ExtendedEventerrorcom.vmware.vcIntegrity.VmotionCompatibilityCheckFailedEvent|Cannot check compatibility of the VM {vm.name} for migration with vMotion to host [data.hostName].EventExAgency createdinfocom.vmware.vim.eam.agency.create|{agencyName} created by {ownerName}EventExAgency destroyedinfocom.vmware.vim.eam.agency.destroyed|{agencyName} removed from the vSphere ESX Agent ManagerEventExAgency state changedinfocom.vmware.vim.eam.agency.goalstate|{agencyName} changed goal state from {oldGoalState} to {newGoalState}EventExAgency status changedinfocom.vmware.vim.eam.agency.statusChanged|Agency status changed from {oldStatus} to {newStatus}EventExAgency reconfiguredinfocom.vmware.vim.eam.agency.updated|Configuration updated {agencyName}EventExCluster Agent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterPowerOn|Cluster Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailableAfterProvisioning|Cluster Agent VM {vm.name} has been provisioned. Mark agent as available to resume agent workflow ({agencyName}) .EventExCluster Agent VM is about to be powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.cluster.markAgentVmAsAvailablePrePowerOn|Cluster Agent VM {vm.name} is about to be powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent added to hostinfocom.vmware.vim.eam.agent.created|Agent added to host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyed|Agent removed from host {host.name} ({agencyName})EventExAgent removed from hostinfocom.vmware.vim.eam.agent.destroyedNoHost|Agent removed from host ({agencyName})EventExAgent VM has been powered on. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterPowerOn|Agent VM {vm.name} has been powered on. Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent VM has been provisioned. Mark agent as available to resume agent workflow.infocom.vmware.vim.eam.agent.markAgentVmAsAvailableAfterProvisioning|Agent VM {vm.name} has been provisioned. 
Mark agent as available to resume agent workflow ({agencyName}) .EventExAgent status changedinfocom.vmware.vim.eam.agent.statusChanged|Agent status changed from {oldStatus} to {newStatus}EventExAgent VM is deletedinfocom.vmware.vim.eam.agent.task.deleteVm|Agent VM {vmName} is deleted on host {host.name} ({agencyName})EventExAgent VM is provisionedinfocom.vmware.vim.eam.agent.task.deployVm|Agent VM {vm.name} is provisioned on host {host.name} ({agencyName})EventExAgent VM powered offinfocom.vmware.vim.eam.agent.task.powerOffVm|Agent VM {vm.name} powered off, on host {host.name} ({agencyName})EventExAgent VM powered oninfocom.vmware.vim.eam.agent.task.powerOnVm|Agent VM {vm.name} powered on, on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibInstalled|Agent installed VIB {vib} on host {host.name} ({agencyName})EventExVIB installedinfocom.vmware.vim.eam.agent.task.vibUninstalled|Agent uninstalled VIB {vib} on host {host.name} ({agencyName})EventExwarningcom.vmware.vim.eam.issue.agencyDisabled|Agency is disabledEventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentOVF|Unable to access agent OVF package at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cannotAccessAgentVib|Unable to access agent VIB module at {url} ({agencyName})EventExcom.vmware.vim.eam.issue.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.certificateNotTrusted|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInMaintenanceMode|EventExcom.vmware.vim.eam.issue.cluster.agent.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterResources|Cluster Agent VM cannot be powered on due to insufficient resources on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.insufficientClusterSpace|Cluster Agent VM on cluster {computeResource.name} cannot be provisioned due to insufficient space on cluster datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.invalidConfig|Cluster Agent VM {vm.name} on cluster {computeResource.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmDatastore|Cluster Agent VM datastore(s) {customAgentVmDatastoreName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.missingClusterVmNetwork|Cluster Agent VM network(s) {customAgentVmNetworkName} not available in cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.ovfInvalidProperty|OVF environment used to provision cluster Agent VM on cluster {computeResource.name} has one or more invalid properties ({agencyName})EventExcom.vmware.vim.eam.issue.cluster.agent.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotDeployed|Cluster Agent VM is missing on cluster {computeResource.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmNotRemoved|Cluster Agent VM {vm.name} is provisioned when it should be removed ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOff|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmPoweredOn|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.cluster.agent.vmSuspended|Cluster Agent VM {vm.name} on cluster {computeResource.name} is expected to be 
powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostInMaintenanceMode|Agent cannot complete an operation since the host {host.name} is in maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.hostInPartialMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.hostInStandbyMode|Agent cannot complete an operation since the host {host.name} is in standby mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.hostNotReachable|Host {host.name} must be powered on and connected to complete agent operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.immediateHostRebootRequired|Host {host.name} must be rebooted immediately to unblock agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.incompatibleHostVersion|Agent is not deployed due to incompatible host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientResources|Agent cannot be provisioned due to insufficient resources on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.insufficientSpace|Agent on {host.name} cannot be provisioned due to insufficient space on datastore ({agencyName})EventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotDeleteSoftware|Cannot remove the Baseline associated with agency {agencyName} from VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.cannotStageSoftware|The software defined by agency {agencyName} cannot be staged in VMware Update ManagerEventExerrorcom.vmware.vim.eam.issue.integrity.agency.vUMUnavailable|VMware Update Manager was unavailable during agency {agencyName} operationsEventExerrorcom.vmware.vim.eam.issue.invalidConfig|Agent VM {vm.name} on host {host.name} has an invalid configuration ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmDatastore|No agent datastore configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noAgentVmNetwork|No agent network configuration on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmDatastore|Agent datastore(s) {customAgentVmDatastoreName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noCustomAgentVmNetwork|Agent network(s) {customAgentVmNetworkName} not available on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmDatastore|Agent datastore cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.noDiscoverableAgentVmNetwork|Agent network(s) cannot be discovered on host {host.name} as per selection policy ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidFormat|OVF used to provision agent on host {host.name} has invalid format ({agencyName})EventExerrorcom.vmware.vim.eam.issue.ovfInvalidProperty|OVF environment used to provision agent on host {host.name} has one or more invalid properties ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotConfigureSolutions|The required solutions defined by agency {agencyName} cannot be configured in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.cannotUploadDepot|Software defined by agency {agencyName} cannot be uploaded in vSphere Lifecycle ManagerEventExerrorcom.vmware.vim.eam.issue.personality.agency.inaccessibleDepot|Unable to access software defined by agency {agencyName}EventExerrorcom.vmware.vim.eam.issue.personality.agency.invalidDepot|Software defined by agency {agencyName} contains invalid vSphere Lifecycle Manager related 
metadataEventExerrorcom.vmware.vim.eam.issue.personality.agency.pMUnavailable|vSphere Lifecycle Manager was unavailable during agency {agencyName} operationsEventExinfocom.vmware.vim.eam.issue.personality.agent.awaitingPMRemediation|Agent requires application of configured solutions through vSphere Lifecycle Manager on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.personality.agent.blockedByAgencyOperation|Agency issues related to vSphere Lifecycle Manager require resolution to unblock host {host.name} ({agencyName})EventExinfocom.vmware.vim.eam.issue.resolved|Issue {type} resolved (key {key})EventExerrorcom.vmware.vim.eam.issue.vibCannotPutHostInMaintenanceMode|Cannot put host into maintenance mode ({agencyName})EventExcom.vmware.vim.eam.issue.vibCannotPutHostOutOfMaintenanceMode|EventExerrorcom.vmware.vim.eam.issue.vibDependenciesNotMetByHost|VIB module dependencies for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibInvalidFormat|Invalid format for VIB module at {url} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibNotInstalled|VIB module for agent is not installed/removed on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequirementsNotMetByHost|VIB system requirements for agent are not met by host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostInMaintenanceMode|Host must be put into maintenance mode to complete agent VIB operation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresHostReboot|Host {host.name} must be rebooted to complete agent VIB installation ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualInstallation|VIB {vib} requires manual installation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vibRequiresManualUninstallation|VIB {vib} requires manual uninstallation on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmCorrupted|Agent VM {vm.name} on host {host.name} is corrupted ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmDeployed|Agent VM {vm.name} is provisioned on host {host.name} when it should be removed ({agencyName})EventExcom.vmware.vim.eam.issue.vmInaccessible|EventExerrorcom.vmware.vim.eam.issue.vmNotDeployed|Agent VM is missing on host {host.name} ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmOrphaned|Orphaned agent VM {vm.name} on host {host.name} detected ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOff|Agent VM {vm.name} on host {host.name} is expected to be powered on ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmPoweredOn|Agent VM {vm.name} on host {host.name} is expected to be powered off ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmRequiresHostOutOfMaintenanceMode|Agent cannot deploy Agent VM since the host {host.name} is in maintenance mode ({agencyName})EventExerrorcom.vmware.vim.eam.issue.vmSuspended|Agent VM {vm.name} on host {host.name} is expected to be powered on but is suspended ({agencyName})ExtendedEventInvalid loginwarningcom.vmware.vim.eam.login.invalid|Failed login to vSphere ESX Agent ManagerEventExSuccessful login to vSphere ESX Agent Managerinfocom.vmware.vim.eam.login.succeeded|Successful login by {user} into vSphere ESX Agent ManagerEventExUser logged out of vSphere ESX Agent Managerinfocom.vmware.vim.eam.logout|User {user} logged out of vSphere ESX Agent Manager by logging out of the vCenter serverEventExUnauthorized access in vSphere ESX Agent 
Managerwarningcom.vmware.vim.eam.unauthorized.access|Unauthorized access by {user} in vSphere ESX Agent ManagerEventExChecked in virtual machine into a virtual machine template iteminfocom.vmware.vmtx.LibraryItemCheckInEvent|Checked in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to check in virtual machine into a virtual machine template itemerrorcom.vmware.vmtx.LibraryItemCheckInFailEvent|Failed to check in virtual machine '{vmName}' into the library item '{libraryItemName}' in library '{libraryName}'EventExDeleted the virtual machine checked out from the VM template iteminfocom.vmware.vmtx.LibraryItemCheckOutDeleteEvent|Deleted the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExFailed to delete the virtual machine checked out from the VM template itemerrorcom.vmware.vmtx.LibraryItemCheckOutDeleteFailEvent|Failed to delete the virtual machine '{vmName}' checked out from the VM template item '{libraryItemName}' in library '{libraryName}'EventExChecked out virtual machine template item as a virtual machineinfocom.vmware.vmtx.LibraryItemCheckOutEvent|Checked out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExFailed to check out virtual machine template item as a virtual machineerrorcom.vmware.vmtx.LibraryItemCheckOutFailEvent|Failed to check out library item '{libraryItemName}' in library '{libraryName}' as a virtual machine '{vmName}'EventExA virtual machine checked out from the VM template item was orphaned after restorewarningcom.vmware.vmtx.LibraryItemCheckoutOrphanedOnRestoreEvent|A virtual machine (ID: {vmId}) checked out from the VM template item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was orphaned after restoreEventExCloned virtual machine to Content Library as VM templateinfocom.vmware.vmtx.LibraryItemCreateEvent|Cloned virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExFailed to clone virtual machine to Content Library as VM templateerrorcom.vmware.vmtx.LibraryItemCreateFailEvent|Failed to clone virtual machine '{vmName}' to library item '{libraryItemName}' in library '{libraryName}'EventExDeleted a version of the virtual machine template iteminfocom.vmware.vmtx.LibraryItemDeleteVersionEvent|Deleted VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'ExtendedEventFailed to delete a version of the virtual machine template itemerrorcom.vmware.vmtx.LibraryItemDeleteVersionFailEvent|Failed to delete VM template '{vmName}' of the library item '{libraryItemName}' in library '{libraryName}'EventExDeployed virtual machine from Content Libraryinfocom.vmware.vmtx.LibraryItemDeployEvent|Deployed virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExFailed to deploy virtual machine from Content Libraryerrorcom.vmware.vmtx.LibraryItemDeployFailEvent|Failed to deploy virtual machine '{vmName}' from library item '{libraryItemName}' in library '{libraryName}'EventExRolled back virtual machine template item to a previous versioninfocom.vmware.vmtx.LibraryItemRollbackEvent|Rolled back library item '{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'ExtendedEventFailed to roll back virtual machine template item to a previous versionerrorcom.vmware.vmtx.LibraryItemRollbackFailEvent|Failed to roll back library item 
'{libraryItemName}' in library '{libraryName}' to VM template '{vmName}'EventExA virtual machine template managed by Content Library was converted to a virtual machineerrorcom.vmware.vmtx.LibraryItemTemplateConvertedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was converted to a virtual machineEventExA virtual machine template managed by Content Library was converted to a virtual machine after restorewarningcom.vmware.vmtx.LibraryItemTemplateConvertedOnRestoreEvent|The virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was found converted to a virtual machine after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will be deleted because the virtual machine template (ID: {vmId}) that the item manages was deletedEventExCould not locate a virtual machine template managed by Content Library after restorewarningcom.vmware.vmtx.LibraryItemTemplateDeletedOnRestoreEvent|Could not locate the virtual machine template (ID: {vmId}) of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) after restoreEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplateLatestVersionDeletedEvent|Library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) is rolled back to the previous version because the latest VM template (ID: {vmId}) was deletedEventExA virtual machine template managed by Content Library was deletederrorcom.vmware.vmtx.LibraryItemTemplatePreviousVersionDeletedEvent|Previous VM template (ID: {vmId}) of the library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) was deletedEventExA virtual machine template managed by Content Library was renamedwarningcom.vmware.vmtx.LibraryItemTemplateRenamedEvent|The name of library item '{libraryItemName}' (ID: {libraryItemId}) in library '{libraryName}' (ID: {libraryId}) will change to '{newItemName}' because the virtual machine template (ID: {vmId}) that the item manages was renamedExtendedEventAdded witness host to the cluster.infoAdded witness host to the cluster.com.vmware.vsan.clusterconfig.events.witnessadditiondone|Added witness host to the cluster.ExtendedEventRemoved witness host from the cluster.infoRemoved witness host from the cluster.com.vmware.vsan.clusterconfig.events.witnessremovaldone|Removed witness host from the cluster.ExtendedEventAdd disk group back to the vSAN cluster.infoAdd disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.adddisks|Add disk group back to the vSAN cluster on host {host.name}.ExtendedEventFailed to add disk group back to the vSAN cluster.errorFailed to add disk group back to the vSAN cluster.com.vmware.vsan.diskconversion.events.addfail|Failed to add disk group back to the vSAN cluster on host {host.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is done.com.vmware.vsan.diskconversion.events.formatdone|Disk format conversion is done on cluster {computeResource.name}.ExtendedEventDisk format conversion is done.infoDisk format conversion is 
done.com.vmware.vsan.diskconversion.events.formathostdone|Disk format conversion is done on host {host.name}.ExtendedEventFailed to migrate vsanSparse objects.errorFailed to migrate vsanSparse objects.com.vmware.vsan.diskconversion.events.migrationfail|Failed to migrate vsanSparse objects on cluster {computeResource.name}.ExtendedEventNo disk conversion performed, all mounted disk groups on host are compliantinfoNo disk conversion performed, all mounted disk groups on host are compliant.com.vmware.vsan.diskconversion.events.noneed|No disk conversion performed, all mounted disk groups on host {host.name} are already compliant.ExtendedEventCheck existing objects on the vSAN cluster.infoCheck existing objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objectcheck|Check existing objects on the vSAN cluster.ExtendedEventObject conversion is done.infoObject conversion is done.com.vmware.vsan.diskconversion.events.objectdone|Object conversion is done.ExtendedEventFailed to convert objects on the vSAN cluster.errorFailed to convert objects on the vSAN cluster.com.vmware.vsan.diskconversion.events.objecterror|Failed to convert objects on the vSAN cluster.ExtendedEventRemove disk group from the vSAN cluster.infoRemove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removedisks|Remove disk group from the vSAN cluster on host {host.name}.ExtendedEventFailed to remove disk group from the vSAN cluster.errorFailed to remove disk group from the vSAN cluster.com.vmware.vsan.diskconversion.events.removefail|Failed to remove disk group on host {host.name} from the vSAN cluster.ExtendedEventRestore disk group from last break point.infoRestore disk group from last break point.com.vmware.vsan.diskconversion.events.restore|Restore disk group from last break point.ExtendedEventNo disk conversion performed, host has no mounted disk groups.infoNo disk conversion performed, host has no mounted disk groups.com.vmware.vsan.diskconversion.events.skiphost|No disk conversion performed, host {host.name} has no mounted disk groups.ExtendedEventCheck cluster status for disk format conversion.infoCheck cluster status for disk format conversion.com.vmware.vsan.diskconversion.events.statuscheck|Check status of cluster {computeResource.name} for disk format conversion.ExtendedEventcom.vmware.vsan.diskconversion.events.syncingtimeout|ExtendedEventUpdate the vSAN cluster system settings.infoUpdate the vSAN cluster system settings.com.vmware.vsan.diskconversion.events.updatesetting|Update the vSAN cluster system settings on host {host.name}.ExtendedEventDisk format conversion failed in what if upgrade.infoDisk format conversion failed in what if upgrade check.com.vmware.vsan.diskconversion.events.whatifupgradefailed|Disk format conversion failed in what if upgrade check.EventExMark ssd(s) as capacity flash.infoMark {disks} as capacity flash.com.vmware.vsan.diskmgmt.events.tagcapacityflash|Mark {disks} as capacity flash.EventExMark ssd as hdd.infoMark ssd {disk} as hdd.com.vmware.vsan.diskmgmt.events.taghdd|Mark ssd {disk} as hdd.EventExMark remote disk as local disk.infoMark remote disk {disk} as local disk.com.vmware.vsan.diskmgmt.events.taglocal|Mark remote disk {disk} as local disk.EventExMark hdd as ssd.infoMark hdd {disk} as ssd.com.vmware.vsan.diskmgmt.events.tagssd|Mark hdd {disk} as ssd.EventExRemove capacity flash mark from ssd(s).infoRemove capacity flash mark from {disks}.com.vmware.vsan.diskmgmt.events.untagcapacityflash|Remove capacity flash mark from 
{disks}.EventExAdvisorvSAN Health Test 'Advisor' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.advisor.event|vSAN Health Test 'Advisor' changed from '{prestatus}' to '{curstatus}'EventExAudit CEIP Collected DatavSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.auditceip.event|vSAN online health test 'Audit CEIP Collected Data' status changed from '{prestatus}' to '{curstatus}'EventExCNS Critical Alert - Patch available with important fixesvSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.cnspatchalert.event|vSAN online health test 'CNS Critical Alert - Patch available with important fixes' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.controllercacheconfig.event|vSAN online health test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExCoredump partition size checkvSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.coredumpartitionsize.event|vSAN online health test 'Coredump partition size check' status changed from '{prestatus}' to '{curstatus}'EventExUpgrade vSphere CSI driver with cautionvSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.csidriver.event|vSAN online health test 'Upgrade vSphere CSI driver with caution' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.diskusage.event|vSAN online health test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dualencryption.event|vSAN online health test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExProper vSAN network traffic shaping policy is configuredvSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.dvsportspeedlimit.event|vSAN online health test 'Proper vSAN network traffic shaping policy is configured' status changed from '{prestatus}' to '{curstatus}'EventExEnd of general support for lower vSphere versionvSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.eoscheck.event|vSAN online health test 'End of general support for lower vSphere version' status changed from '{prestatus}' to '{curstatus}'EventExImportant patch available for vSAN issuevSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.fsvlcmpatchalert.event|vSAN online health test 'Important patch available for vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based 
controllervSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.h730.event|vSAN online health test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExHPE SAS Solid State DrivevSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.hpesasssd.event|vSAN online health test 'HPE SAS Solid State Drive' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration check for large scale clustervSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.largescalecluster.event|vSAN online health test 'vSAN configuration check for large scale cluster' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lavenderalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExvSAN critical alert regarding a potential data inconsistencyvSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilacdeltacomponenttest.event|vSAN online health test 'vSAN critical alert regarding a potential data inconsistency' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Critical Alert - Patch available for critical vSAN issuevSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.lilypatchalert.event|vSAN online health test 'vSAN Critical Alert - Patch available for critical vSAN issue' status changed from '{prestatus}' to '{curstatus}'EventExUrgent patch available for vSAN ESAvSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.marigoldalert.event|vSAN online health test 'Urgent patch available for vSAN ESA' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmode.event|vSAN online health test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.mixedmodeh730.event|vSAN online health test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvSAN storage policy compliance up-to-datevSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.objspbm.event|vSAN online health test 'vSAN storage policy compliance up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN online health test 'vSAN Hosts with new patch 
available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.patchalert.event|vSAN online health test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.pnicconsistent.event|vSAN online health test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExVM storage policy is not-recommendedvSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.policyupdate.event|vSAN online health test 'VM storage policy is not-recommended' status changed from '{prestatus}' to '{curstatus}'EventExMaximum host number in vSAN over RDMAvSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.rdmanodesalert.event|vSAN online health test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExESXi system logs stored outside vSAN datastorevSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.scratchconfig.event|vSAN online health test 'ESXi system logs stored outside vSAN datastore' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.smalldiskstest.event|vSAN online health test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExThick-provisioned VMs on vSANvSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.thickprovision.event|vSAN online health test 'Thick-provisioned VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExFix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabledvSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.unmaptest.event|vSAN online health test 'Fix is available for a critical vSAN software defect with Guest Trim/Unmap configuration enabled' status changed from '{prestatus}' to '{curstatus}'EventExvSAN v1 disk in usevSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.v1diskcheck.event|vSAN online health test 'vSAN v1 disk in use' status changed from '{prestatus}' to '{curstatus}'EventExvCenter Server up to datevSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vcuptodate.event|vSAN online health test 'vCenter Server up to date' status changed from '{prestatus}' to '{curstatus}'EventExMultiple VMs share the same vSAN home namespacevSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vmns.event|vSAN online health test 'Multiple VMs share the same vSAN home namespace' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Support InsightvSAN Support Insight's 
status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanenablesupportinsight.event|vSAN Support Insight's status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpefwtest.event|vSAN online health test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExCustomer advisory for HPE Smart ArrayvSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanhpesmartarraytest.event|vSAN online health test 'Customer advisory for HPE Smart Array' status changed from '{prestatus}' to '{curstatus}'EventExvSAN management service resource checkvSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.vsanmgmtresource.event|vSAN online health test 'vSAN management server system resource check' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.witnesshw.event|vSAN online health test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Advanced Configuration Check for Urgent vSAN ESA PatchvSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cloudhealth.zdomadvcfgenabled.event|vSAN online health test 'vSAN Advanced Configuration Check for Urgent vSAN ESA Patch' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all of the hosts in a vSAN cluster have consistent advanced configuration options.vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.advcfgsync.event|vSAN Health Test 'Advanced vSAN configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN host daemon liveness.vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clomdliveness.event|vSAN Health Test 'vSAN host daemon liveness' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSphere cluster members match vSAN cluster members.vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.clustermembership.event|vSAN Health Test 'vSphere cluster members match vSAN cluster members' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster configuration consistencyvSAN Health Test 'vSAN cluster configuration consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.consistentconfig.event|vSAN Health Test 'vSAN configuration consistency' status changed from '{prestatus}' to '{curstatus}'EventExESA prescriptive disk claimvSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.ddsconfig.event|vSAN Health Test 'ESA prescriptive disk claim' status changed from '{prestatus}' to 
'{curstatus}'EventExvSAN disk group layoutvSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.dglayout.event|vSAN Health Test 'vSAN disk group layout' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN disk balance statusvSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.diskbalance.event|vSAN Health Test 'vSAN disk balance' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Conversion HealthvSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.esaconversionhealth.event|vSAN Health Test 'vSAN ESA Conversion Health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN extended configuration in syncvSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.extendedconfig.event|vSAN Health Test 'vSAN extended configuration in sync' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Managed disk claimvSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.hcldiskclaimcheck.event|vSAN Health Test 'vSAN Managed disk claim' status changed from '{prestatus}' to '{curstatus}'EventExCheck host maintenance mode is in sync with vSAN node decommission state.vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.mmdecominsync.event|vSAN Health Test 'Host maintenance mode is in sync with vSAN node decommission state' status changed from '{prestatus}' to '{curstatus}'EventExvSAN optimal datastore default policy configurationvSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.optimaldsdefaultpolicy.event|vSAN Health Test 'vSAN optimal datastore default policy configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN with RDMA supports up to 32 hosts.vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.rdmanodes.event|vSAN Health Test 'Maximum host number in vSAN over RDMA' status changed from '{prestatus}' to '{curstatus}'EventExResync operations throttlingvSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.resynclimit.event|vSAN Health Test 'Resync operations throttling' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN Cluster time sync status among hosts and VCvSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.timedrift.event|vSAN Health Test 'Time is synchronized across hosts and VC' status changed from '{prestatus}' to '{curstatus}'EventExvSAN disk format statusvSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradelowerhosts.event|vSAN Health Test 'Disk format version' status changed from '{prestatus}' to '{curstatus}'EventExSoftware version compatibilityvSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.upgradesoftware.event|vSAN Health Test 'Software version compatibility' status changed from '{prestatus}' to 
'{curstatus}'EventExVMware vCenter state is authoritativevSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vcauthoritative.event|vSAN Health Test 'vCenter state is authoritative' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Direct homogeneous disk claimingvSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsandconfigconsistency.event|vSAN Health Test 'vSAN Direct homogeneous disk claiming' status changed from '{prestatus}' to '{curstatus}'EventExvSphere Lifecycle Manager (vLCM) configurationvSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.cluster.vsanesavlcmcheck.event|vSAN Health Test 'vSphere Lifecycle Manager (vLCM) configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks the object format status of all vSAN objects.vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objectformat.event|vSAN Health Test 'vSAN object format health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health status of all vSAN objects.vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.data.objecthealth.event|vSAN Health Test 'vSAN object health' status changed from '{prestatus}' to '{curstatus}'EventExpNic RX/TX PauseRX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.pausecount.event|RX/TX Pause rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX CRC ErrorRX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxcrcerr.event|RX CRC error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Generic ErrorRX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxerr.event|RX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX FIFO ErrorRX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxfifoerr.event|RX FIFO error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Missed ErrorRX missed error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxmisserr.event|RX missed error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic RX Buffer Overflow ErrorRX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.rxoverr.event|RX buffer overflow error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Carrier ErrorTX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txcarerr.event|TX Carrier error rate reaches {value}‰ on Physical Adapter {nicname}.EventExpNic TX Generic ErrorTX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.vsan.health.test.diagnostics.pnic.txerr.event|TX Generic error rate reaches {value}‰ on Physical Adapter {nicname}.EventExRDT Checksum Mismatch ErrorRDT Checksum Mismatch count reaches {value}. (warning threshold: {yellowThreshold}, critical threshold: {redThreshold})vsan.health.test.diagnostics.rdt.checksummismatchcount.event|RDT Checksum Mismatch count reaches {value}. 
(warning threshold: {yellowThreshold}, critical threshold: {redThreshold})EventExData-in-transit encryption configuration checkvSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.ditencryption.ditconfig.event|vSAN Health Test 'Data-in-transit encryption configuration check' status changed from '{prestatus}' to '{curstatus}'EventExDual encryption applied to VMs on vSANvSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.dualencryption.event|vSAN Health Test 'Dual encryption applied to VMs on vSAN' status changed from '{prestatus}' to '{curstatus}'EventExChecks if CPU AES-NI is disabled on hostsvSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.hostcpuaesni.event|vSAN Health Test 'CPU AES-NI is enabled on hosts' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VMware vCenter or any hosts are not connected to Key Management ServersvSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.encryption.kmsconnection.event|vSAN Health Test 'vCenter and all hosts are connected to Key Management Servers' status changed from '{prestatus}' to '{curstatus}'EventExvSAN ESA Prescriptive Disk Claim ConfigurationsHost {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. Please add host with relevant disks or update disk claim specsvsan.health.test.esaprescriptivediskclaim.noeligibledisk|Host {hostName} has no eligible disks to satisfy any of the vSAN ESA prescriptive disk claim specs. 
Please add host with relevant disks or update disk claim specsEventExCheck vSAN File Service host file server agent vm state.vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.fileserver.event|vSAN Health Test 'vSAN File Service host file system health' status changed from '{prestatus}' to '{curstatus}'EventExInfrastructure HealthvSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.host.event|vSAN Health Test 'Infrastructure Health' status changed from '{prestatus}' to '{curstatus}'EventExFile Share HealthvSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.fileservice.sharehealth.event|vSAN Health Test 'File Share Health' status changed from '{prestatus}' to '{curstatus}'EventExVDS compliance check for hyperconverged cluster configurationvSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.dvshciconfig.event|vSAN Health Test 'VDS compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExHost compliance check for hyperconverged cluster configurationvSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcicluster.hosthciconfig.event|vSAN Health Test 'Host compliance check for hyperconverged cluster configuration' status changed from '{prestatus}' to '{curstatus}'EventExvSAN health alarm enablement statusvSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hciskip.event|vSAN health alarm enablement status changed from '{prestatus}' to '{curstatus}'EventExvSAN HCL DB Auto UpdatevSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.autohclupdate.event|vSAN Health Test 'vSAN HCL DB Auto Update' status changed from '{prestatus}' to '{curstatus}'EventExRAID controller configurationvSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllercacheconfig.event|vSAN Health Test 'RAID controller configuration' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN disk group type (All-Flash or Hybrid) is VMware certified for the used SCSI controllervSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdiskmode.event|vSAN Health Test 'Controller disk group mode is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller driver is VMware certified.vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerdriver.event|vSAN Health Test 'Controller driver is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller firmware is VMware certified.vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerfirmware.event|vSAN Health Test 'Controller firmware is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the controller is compatible with the VMWARE Compatibility GuidevSAN Health Test 'SCSI 
controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controlleronhcl.event|vSAN Health Test 'SCSI controller is VMware certified' status changed from '{prestatus}' to '{curstatus}'EventExDisplays information about whether there is any driver supported for a given controller in the release of ESXi installed.vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.controllerreleasesupport.event|vSAN Health Test 'Controller is VMware certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExvSAN configuration for LSI-3108 based controllervSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.h730.event|vSAN Health Test 'vSAN configuration for LSI-3108 based controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks the age of the VMware Hardware Compatibility Guid database.vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hcldbuptodate.event|vSAN Health Test 'vSAN HCL DB up-to-date' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any host failed to return its hardware information.vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hclhostbadstate.event|vSAN Health Test 'Host issues retrieving hardware info' status changed from '{prestatus}' to '{curstatus}'EventExHost physical memory compliance checkvSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.hostmemcheck.event|vSAN Health Test 'Host physical memory compliance check' status changed from '{prestatus}' to '{curstatus}'EventExController with pass-through and RAID disksvSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmode.event|vSAN Health Test 'Controller with pass-through and RAID disks' status changed from '{prestatus}' to '{curstatus}'EventExvSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 drivervSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.mixedmodeh730.event|vSAN Health Test 'vSAN and VMFS datastores on a Dell H730 controller with the lsi_mr3 driver' status changed from '{prestatus}' to '{curstatus}'EventExvsan.health.test.hcl.nvmeonhcl.event|EventExNetwork Interface Cards (NICs) used in vSAN hosts must meet certain requirements. These NIC requirements assume that the packet loss is not more than 0.0001% in the hyper-converged environments. It's recommended to use NIC which link speed can meet the minimum requirement. 
Otherwise, there can be a drastic impact on the vSAN performance.vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.pniclinkspeed.event|vSAN Health Test 'Physical NIC link speed meets requirements' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NICs used in this RDMA enabled vSAN cluster are certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmaniciscertified.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the RDMA NIC's driver and firmware combination is certified by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportdriverfirmware.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) driver/firmware is vSAN certified' status changed from '{prestatus}' to '{curstatus}'EventExCheck whether the current ESXi release is certified for the RDMA NIC by the VMware Compatibility Guide (VCG)vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.rdmanicsupportesxrelease.event|vSAN Health Test 'Network (RDMA NIC: RoCE v2) is certified for ESXi release' status changed from '{prestatus}' to '{curstatus}'EventExHPE NVMe Solid State Drives - critical firmware upgrade requiredvSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.hcl.vsanhpefwtest.event|vSAN Health Test 'HPE NVMe Solid State Drives - critical firmware upgrade required' status changed from '{prestatus}' to '{curstatus}'EventExHome objectvSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsihomeobjectstatustest.event|vSAN Health Test 'Home object of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExLUN runtime healthvSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsilunruntimetest.event|vSAN Health Test 'LUN runtime health of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExNetwork configurationvSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicenetworktest.event|vSAN Health Test 'Network configuration of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExService runtime statusvSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.iscsi.iscsiservicerunningtest.event|vSAN Health Test 'Service runtime status of iSCSI target service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN cluster claimed capacity is more than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.error.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. 
Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExvSAN cluster claimed capacity is less than the entitled capacity.vSAN cluster claimed capacity is less than the entitled capacity.vsan.health.test.licensecapacityusage.green.event|vSAN cluster claimed capacity is less than the entitled capacity.EventExvSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vSAN cluster claimed capacity is more than 100% but less than 110% of the entitled capacity.vsan.health.test.licensecapacityusage.warn.event|vSAN cluster claimed capacity is more than {percentage} percentage of the entitled capacity. Current total claimed capacity per core: {claimedCapPerCore} GB; licensed entitlement: 100 GB. Refer to KB article for details: https://kb.vmware.com/s/article/96100EventExChecks the vSAN cluster storage space utilizationvSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.diskspace.event|vSAN Health Test 'Storage space' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN component limits, disk space and RC reservations assuming one host failure.vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.limit1hf.event|vSAN Health Test 'After 1 additional host failure' status changed from '{prestatus}' to '{curstatus}'EventExChecks the component utilization for the vSAN cluster and each host in the cluster.vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.nodecomponentlimit.event|vSAN Health Test 'Cluster component utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks the vSAN cluster read cache utilizationvSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.limits.rcreservation.event|vSAN Health Test 'Cluster read cache utilization' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the vSAN cluster is partitioned due to a network issue.vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.clusterpartition.event|vSAN Health Test 'vSAN cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExCheck if there are duplicate IP addresses configured for vmknic interfaces.vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.duplicateip.event|vSAN Health Test 'Hosts with duplicate IP addresses' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a connectivity check for vSAN Max Client Network by checking the heartbeats from each host to all other hosts in server clustervSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.externalconnectivity.event|vSAN Health Test 'vSAN Max Client Network connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if API calls from VC to a host are failing while the host is in vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostconnectivity.event|vSAN Health Test 'Hosts with connectivity issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks if VC has an active 
connection to all hosts in the cluster.vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostdisconnected.event|vSAN Health Test 'Hosts disconnected from VC' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a network latency check via ping small packet size ping test from all hosts to all other hostsvSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.hostlatencycheck.event|vSAN Health Test 'Network latency check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN API calls from each host can reach to other peer hosts in the clustervSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.interhostconnectivity.event|vSAN Health Test 'Interhost connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExCheck if LACP is working properly.vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.lacpstatus.event|vSAN Health Test 'Hosts with LACP issues' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hostsvSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.largeping.event|vSAN Health Test 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster receive the multicast heartbeat of the vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastdeepdive.event|vSAN Health Test 'Active multicast connectivity check' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have matching IP multicast configuration.vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsettings.event|vSAN Health Test 'All hosts have matching multicast settings' status changed from '{prestatus}' to '{curstatus}'EventExChecks if any of the hosts in the vSAN cluster have IP multicast connectivity issue.vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multicastsuspected.event|vSAN Health Test 'Multicast assessment based on other checks' status changed from '{prestatus}' to '{curstatus}'EventExCheck if any host in remote vSAN client or server cluster has more than one vSAN vmknic configured.vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.multiplevsanvmknic.event|vSAN Health Test 'No hosts in remote vSAN have multiple vSAN vmknics configured' status changed from '{prestatus}' to '{curstatus}'EventExPhysical network adapter speed consistencyvSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.pnicconsistent.event|vSAN Health Test 'Physical network adapter speed consistency' status changed from '{prestatus}' to '{curstatus}'EventExCheck if TSO is enabled for pNIC.vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.network.pnictso.event|vSAN Health Test 'Hosts with pNIC TSO issues' status changed from '{prestatus}' to '{curstatus}'EventExCheck if the vSAN RDMA enabled physical NIC is configured for lossless traffic.vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.rdmaconfig.event|vSAN Health Test 'RDMA Configuration Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all hosts in client cluster have been in a single partition with all hosts in server vSAN cluster.vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.serverpartition.event|vSAN Health Test 'Server cluster partition' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hostsvSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.smallping.event|vSAN Health Test 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a large packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpinglarge.event|vSAN Health Test for vMotion 'MTU check (ping with large packet size)' status changed from '{prestatus}' to '{curstatus}'EventExPerforms a small packet size ping test from all hosts to all other hosts for vMotionvSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vmotionpingsmall.event|vSAN Health Test for vMotion 'Basic (unicast) connectivity check (normal ping)' status changed from '{prestatus}' to '{curstatus}'EventExCheck if all hosts in server cluster have a dedicated vSAN external vmknic configured.vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanexternalvmknic.event|vSAN Health Test 'All hosts have a dedicated vSAN external vmknic configured in server cluster' status changed from '{prestatus}' to '{curstatus}'EventExChecks if all the hosts in the vSAN cluster have a configured vmknic with vSAN traffic enabled.vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.vsanvmknic.event|vSAN Health Test 'All hosts have a vSAN vmknic configured' status changed from '{prestatus}' to '{curstatus}'EventExCheck all remote VMware vCenter network connectivity.vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.network.xvcconnectivity.event|vSAN Health Test 'Remote vCenter network connectivity' status changed from '{prestatus}' to '{curstatus}'EventExvSAN overall health statusvSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.overallsummary.event|vSAN Health Test 'Overall Health Summary' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service data collectionvSAN Health Test 'Checks the statistics collection of the vSAN performance service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.collection.event|vSAN 
Health Test 'Checks statistics collection of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service network diagnostic mode statusvSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.diagmode.event|vSAN Health Test 'Network diagnostic mode' status changed from '{prestatus}' to '{curstatus}'EventExNot all hosts are contributing stats to vSAN Performance ServicevSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.hostsmissing.event|vSAN Health Test 'Checks if all host are contributing performance stats' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats primary electionvSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.masterexist.event|vSAN Health Test 'Checks stats primary of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service statusvSAN Health Test 'Checks status of vSAN Performance Service changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.perfsvcstatus.event|vSAN Health Test 'Checks status of vSAN Performance Service' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service stats DB object conflictsvSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.renameddirs.event|vSAN Health Test 'Checks stats DB object conflicts' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the vSAN performance service statistics database objectvSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.statsdb.event|vSAN Health Test 'Checks the health of the vSAN performance service statistics database object' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Performance Service verbose mode statusvSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.perfsvc.verbosemode.event|vSAN Health Test 'Verbose mode' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN has encountered an integrity issue of the metadata of a component on this disk.vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.componentmetadata.event|vSAN Health Test 'Component metadata health' status changed from '{prestatus}' to '{curstatus}'EventExDisks usage on storage controllervSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.diskusage.event|vSAN Health Test 'Disks usage on storage controller' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on vital memory pools, needed for the correct operation of physical disks.vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.lsomheap.event|vSAN Health Test 'Memory pools (heaps)' status changed from '{prestatus}' to '{curstatus}'EventExChecks if vSAN is running low on the vital memory pool, needed for the operation of physical disks.vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to 
'{curstatus}'vsan.health.test.physicaldisks.lsomslab.event|vSAN Health Test 'Memory pools (slabs)' status changed from '{prestatus}' to '{curstatus}'EventExStorage Vendor Reported Drive HealthvSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.phmhealth.event|vSAN Health Test 'Storage Vendor Reported Drive Health' status changed from '{prestatus}' to '{curstatus}'EventExChecks the free space on physical disks in the vSAN cluster.vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcapacity.event|vSAN Health Test 'Disk capacity' status changed from '{prestatus}' to '{curstatus}'EventExChecks if the number of components on the physical disk reaches the maximum limitationvSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcomplimithealth.event|vSAN Health Test 'Physical disk component limit health' status changed from '{prestatus}' to '{curstatus}'EventExChecks whether vSAN is using the disk with reduced performance.vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskcongestion.event|vSAN Health Test 'Congestion' status changed from '{prestatus}' to '{curstatus}'EventExChecks if there is an issue retrieving the physical disk information from hosts in the vSAN cluster.vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskhostissues.event|vSAN Health Test 'Physical disk health retrieval issues' status changed from '{prestatus}' to '{curstatus}'EventExChecks the health of the physical disks for all hosts in the vSAN cluster.vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.physdiskoverall.event|vSAN Health Test 'Operation health' status changed from '{prestatus}' to '{curstatus}'EventExvSAN max component sizevSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.physicaldisks.smalldiskstest.event|vSAN Health Test 'vSAN max component size' status changed from '{prestatus}' to '{curstatus}'EventExCluster Name is not found in ssd endurance alarmClusters - {clustername} is/are not found in alarm - vSAN Health Alarm for disk endurance check.vsan.health.test.ssdendurance.clusternotfound.event|Clusters - {clustername} is/are not found. Please edit alarm - 'vSAN Health Alarm for disk endurance check' and correct the cluster name.EventExThe stretched cluster contains multiple unicast agents. 
This means multiple unicast agents were set on non-witness hostsvSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithmultipleunicastagents.event|vSAN Health Test 'Unicast agent configuration inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain a valid witness hostvSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithoutonewitnesshost.event|vSAN Health Test 'Witness host not found' status changed from '{prestatus}' to '{curstatus}'EventExThe stretched cluster does not contain two valid fault domainsvSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.clusterwithouttwodatafaultdomains.event|vSAN Health Test 'Unexpected number of fault domains' status changed from '{prestatus}' to '{curstatus}'EventExHost should setup unicast agent so that they are able to communicate with the witness nodevSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostunicastagentunset.event|vSAN Health Test 'Unicast agent not configured' status changed from '{prestatus}' to '{curstatus}'EventExHost with an invalid unicast agentvsan.health.test.stretchedcluster.hostwithinvalidunicastagent.event|vSAN Health Test 'Invalid unicast agent' status changed from '{prestatus}' to '{curstatus}'EventExCluster contains hosts that do not support stretched clustervSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.hostwithnostretchedclustersupport.event|vSAN Health Test 'Unsupported host version' status changed from '{prestatus}' to '{curstatus}'EventExUnexpected number of data hosts in shared witness cluster. 
This means more than 2 data hosts in one shared witness cluster.vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnessclusterdatahostnumexceed.event|vSAN Health Test 'Unexpected number of data hosts in shared witness cluster' status changed from '{prestatus}' to '{curstatus}'EventExPer cluster component limit scaled down for shared witness host because of insufficient memoryvSAN Health Test 'Shared witness per cluster component limit scaled down' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.sharedwitnesscomponentlimitscaleddown.event|vSAN Health Test 'Shared witness per-cluster component limit inconsistent' status changed from '{prestatus}' to '{curstatus}'EventExChecks the network latency between the two fault domains and the witness hostvSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.siteconnectivity.event|vSAN Health Test 'Site latency health' status changed from '{prestatus}' to '{curstatus}'EventExWitness node is managed by vSphere Lifecycle ManagervSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.vlcmwitnessconfig.event|vSAN Health Test 'Witness node is managed by vSphere Lifecycle Manager' status changed from '{prestatus}' to '{curstatus}'EventExThe following witness node resides in one of the data fault domainsvSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessfaultdomaininvalid.event|vSAN Health Test 'Witness host fault domain misconfigured' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster incorporates a witness host inside VMware vCenter clustervSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnessinsidevccluster.event|vSAN Health Test 'Witness host within vCenter cluster' status changed from '{prestatus}' to '{curstatus}'EventExThe following (witness) hosts have invalid preferred fault domainsvSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomaininvalid.event|vSAN Health Test 'Invalid preferred fault domain on witness host' status changed from '{prestatus}' to '{curstatus}'EventExThe preferred fault domain does not exist in the cluster for the following witness hostvSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesspreferredfaultdomainnotexist.event|vSAN Health Test 'Preferred fault domain unset' status changed from '{prestatus}' to '{curstatus}'EventExHardware compatibility issue for witness appliancevsan.health.test.stretchedcluster.witnessupgissue.event|vSAN Health Test 'Hardware compatibility issue for witness appliance' status changed from '{prestatus}' to '{curstatus}'EventExWitness appliance upgrade to vSphere 7.0 or higher with cautionvsan.health.test.stretchedcluster.witnessupgrade.event|vSAN Health Test 'Witness appliance upgrade to vSphere 7.0 or higher with caution' status changed from '{prestatus}' to '{curstatus}'EventExStretched cluster contains witness hosts with no disk claimedvSAN Health Test 'No disk 
claimed on witness host' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.stretchedcluster.witnesswithnodiskmapping.event|vSAN Health Test 'No disk claimed on witness host' status changed from '{prestatus}' to '{curstatus}'EventExVMware Certified vSAN HardwarevSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vsanhardwarecert.event|vSAN Health Test 'VMware Certified vSAN Hardware' status changed from '{prestatus}' to '{curstatus}'EventExvSAN Hosts with new patch availablevSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.patchalert.event|vSAN Health Test 'vSAN Hosts with new patch available' status changed from '{prestatus}' to '{curstatus}'EventExvSAN release catalog up-to-datevSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.releasecataloguptodate.event|vSAN release catalog up-to-date status changed from '{prestatus}' to '{curstatus}'EventExCheck configuration issues for vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumconfig.event|vSAN Health Test for vSAN Build Recommendation Engine 'vSAN Build Recommendation Engine Health' status changed from '{prestatus}' to '{curstatus}'EventExESXi build recommended by vSAN Build Recommendation EnginevSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'vsan.health.test.vum.vumrecommendation.event|vSAN Health Test for vSAN Build Recommendation Engine 'Build recommendation' status changed from '{prestatus}' to '{curstatus}'EventExThis object has the risk of PSOD issue due to improper DOM object flag leakThis object has the risk of PSOD issue due to improper DOM object flag leakvsan.health.test.zdom.leak|Objects {1} have the risk of PSOD issue due to improper DOM object flag leak. Please refer KB https://kb.vmware.com/s/article/89564VirtualMachineFaultToleranceStateFault Tolerance has not been configured for this virtual machinenotConfiguredFault Tolerance is disableddisabledFault Tolerance is enabledenabledFault Tolerant Secondary VM is not runningneedSecondaryFault Tolerance is startingstartingFault Tolerance is runningrunning
12855:20241101:185843.394 End of vmware_service_get_evt_severity() evt_severities:1989
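The block above is the tail of the event-description catalog (the EventEx definitions for the vSAN health tests) that the collector downloads in order to map event type IDs to severities and format strings; evt_severities:1989 is the number of catalog entries it parsed. Zabbix does this in C over raw SOAP; purely as an illustration of the same vSphere API data, the following minimal pyVmomi sketch dumps the EventDescription catalog. The endpoint and credentials are placeholders, not values from this log.

import ssl
from pyVim.connect import SmartConnect, Disconnect

# Placeholder endpoint and credentials -- assumptions, not taken from the log.
si = SmartConnect(host="vcenter.example.local", user="monitoring", pwd="***",
                  sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

# EventManager.description.eventInfo is the catalog the trace above was built from:
# one entry per event type, with its description and (for EventEx entries) a
# fullFormat of the form 'eventTypeId|format string'.
for info in content.eventManager.description.eventInfo:
    print(info.key, info.category, info.description)
    if info.fullFormat:
        print("    ", info.fullFormat)

Disconnect(si)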
12855:20241101:185843.396 In vmware_service_get_hv_ds_dc_dvs_list()
12855:20241101:185843.400 vmware_service_get_hv_ds_dc_dvs_list() SOAP response:
group-d1triggeredAlarmState365.1group-d1alarm-365yellowfalse39701datacenter-3nameNTK-corptriggeredAlarmStategroup-n7triggeredAlarmStategroup-h5triggeredAlarmStatedatastore-4041datastore-4050datastore-4046datastore-2007datastore-2006datastore-2005group-v4triggeredAlarmStategroup-n4029triggeredAlarmStategroup-v11triggeredAlarmStategroup-v4027triggeredAlarmStatedvs-21nameNTK-DSwitchuuid50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbgroup-v4056triggeredAlarmStatehost-4047host-4043host-4038
12855:20241101:185843.400 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_datacenters_list'
12855:20241101:185843.400 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_datacenters_list' found:0 total:0
12855:20241101:185843.400 In vmware_service_get_alarms_data(), func_parent:'vmware_service_get_hv_ds_dc_dvs_list'
12855:20241101:185843.400 In vmware_service_alarm_details_update() alarm:alarm-365
12855:20241101:185843.402 vmware_service_alarm_details_update() SOAP response:
alarm-365info.descriptionThis alarm is fired when vSphere Health detects new issues in your environment. This alarm will be retriggered even if acknowledged when new issues are detected. Go to Monitor -> Health for a detailed description of the issues.info.enabledtrueinfo.nameSkyline Health has detected issues in your vSphere environmentinfo.systemNameSkyline Health has detected issues in your vSphere environment
12855:20241101:185843.403 End of vmware_service_alarm_details_update() index:0
12855:20241101:185843.403 End of vmware_service_get_alarms_data() func_parent:'vmware_service_get_hv_ds_dc_dvs_list' found:1 total:1
12855:20241101:185843.403 End of vmware_service_get_hv_ds_dc_dvs_list():SUCCEED found hv:3 ds:6 dc:1
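At this point the collector has walked the inventory and found 3 hypervisors, 6 datastores, 1 datacenter and one distributed switch, plus a single triggered alarm (alarm-365, the Skyline Health alarm) whose details were fetched separately. A rough pyVmomi equivalent of that enumeration, again with placeholder connection details, might look like this:

import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.local", user="monitoring", pwd="***",
                  sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

# Enumerate the same object types the collector asks for: datacenters, hosts,
# datastores and distributed vSwitches.
view = content.viewManager.CreateContainerView(
    content.rootFolder,
    [vim.Datacenter, vim.HostSystem, vim.Datastore, vim.DistributedVirtualSwitch],
    True)
for obj in view.view:
    print(type(obj).__name__, obj._moId, obj.name)

# Triggered alarms hang off any ManagedEntity; the root folder here carries the
# 'Skyline Health has detected issues...' alarm seen in the trace.
for state in content.rootFolder.triggeredAlarmState or []:
    print(state.overallStatus, state.alarm.info.name, "enabled:", state.alarm.info.enabled)

Disconnect(si)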
12855:20241101:185843.403 In vmware_service_create_datastore() datastore:'datastore-4041'
12855:20241101:185843.405 vmware_service_create_datastore() SOAP response:
datastore-4041infoLocal_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/34100425523270368744177664703687441776642024-10-24T08:57:27.792Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0334252364185616396313666.8267155e10-d4545cb2-5b01-3cecef012e78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682111______8falsetruesummarydatastore-4041Local_ntk-m1-esxi-03ds:///vmfs/volumes/67155e10-d4545cb2-5b01-3cecef012e78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12855:20241101:185843.405 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185843.405 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185843.405 End of vmware_service_create_datastore()
12855:20241101:185843.405 In vmware_service_create_datastore() datastore:'datastore-4050'
12855:20241101:185843.407 vmware_service_create_datastore() SOAP response:
datastore-4050infoLocal_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/3410042552327036874417766468169720922112703687441776642024-11-01T13:06:44.907432Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0134252364185616396313666.8267155cc9-bea5e318-19fd-ac1f6bb14c78t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______8falsetruetruesummarydatastore-4050Local_ntk-m1-esxi-01ds:///vmfs/volumes/67155cc9-bea5e318-19fd-ac1f6bb14c78/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12855:20241101:185843.407 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185843.408 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185843.408 End of vmware_service_create_datastore()
12855:20241101:185843.408 In vmware_service_create_datastore() datastore:'datastore-4046'
12855:20241101:185843.409 vmware_service_create_datastore() SOAP response:
datastore-4046infoLocal_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/34100425523270368744177664703687441776642024-11-01T11:53:36.643999Z7036874417766468169720922112VMFSLocal_ntk-m1-esxi-0234252364185616396313666.8267155ba7-5e9d16d6-0733-3cecef02b6e0t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R682100______8falsetruesummarydatastore-4046Local_ntk-m1-esxi-02ds:///vmfs/volumes/67155ba7-5e9d16d6-0733-3cecef02b6e0/342523641856341004255232truefalseVMFSnormaltriggeredAlarmState
12855:20241101:185843.410 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185843.410 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185843.410 End of vmware_service_create_datastore()
12855:20241101:185843.410 In vmware_service_create_datastore() datastore:'datastore-2007'
12855:20241101:185843.412 vmware_service_create_datastore() SOAP response:
datastore-2007info3PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/5031560478727036874417766468169720922112703687441776642024-11-01T13:06:44.904493Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID53153660247654416396313666.826704dec9-75e6c68a-c19e-9440c9831520naa.60002ac00000000000000054000228a31falsefalsefalsesummarydatastore-20073PAR_GOROH_SSD_NTK_ID531ds:///vmfs/volumes/6704dec9-75e6c68a-c19e-9440c9831520/53660247654450315604787242237661184truetrueVMFSnormaltriggeredAlarmState
12855:20241101:185843.412 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185843.412 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185843.412 End of vmware_service_create_datastore()
12855:20241101:185843.412 In vmware_service_create_datastore() datastore:'datastore-2006'
12855:20241101:185843.414 vmware_service_create_datastore() SOAP response:
datastore-2006info3PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/1592765972487036874417766468169720922112703687441776642024-11-01T13:06:44.898963Z7036874417766468169720922112VMFS3PAR_KARTOHA_SAS_NTK_ID53516079283814416396313666.826703d63f-3516ce66-4bee-9440c9831520naa.60002ac0000000000000042f000219831falsefalsefalsesummarydatastore-20063PAR_KARTOHA_SAS_NTK_ID535ds:///vmfs/volumes/6703d63f-3516ce66-4bee-9440c9831520/160792838144159276597248truetrueVMFSnormaltriggeredAlarmState
12855:20241101:185843.414 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185843.414 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185843.414 End of vmware_service_create_datastore()
12855:20241101:185843.414 In vmware_service_create_datastore() datastore:'datastore-2005'
12855:20241101:185843.416 vmware_service_create_datastore() SOAP response:
datastore-2005info3PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/8543356846087036874417766468169720922112703687441776642024-11-01T18:34:30.288888Z7036874417766468169720922112VMFS3PAR_GOROH_SSD_NTK_ID530_mgmt107347338854416396313666.826703d517-82086a06-cec0-9440c9831520naa.60002ac0000000000000004a000228a31falsefalsefalsesummarydatastore-20053PAR_GOROH_SSD_NTK_ID530_mgmtds:///vmfs/volumes/6703d517-82086a06-cec0-9440c9831520/10734733885448543356846080truetrueVMFSnormaltriggeredAlarmState
12855:20241101:185843.416 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_datastore'
12855:20241101:185843.416 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_datastore' found:0 total:1
12855:20241101:185843.416 End of vmware_service_create_datastore()
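Each vmware_service_create_datastore() call above pulls the datastore's info/summary block (name, URL, VMFS UUID, backing LUN, capacity, free space, accessibility, maintenance mode), which later feeds the datastore items. The same numbers can be read with pyVmomi along these lines; connection details are placeholders, and the used-space percentage is just one common way to derive a utilisation figure from capacity and freeSpace.

import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.local", user="monitoring", pwd="***",
                  sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

view = content.viewManager.CreateContainerView(content.rootFolder, [vim.Datastore], True)
for ds in view.view:
    s = ds.summary
    used_pct = 100.0 * (s.capacity - s.freeSpace) / s.capacity if s.capacity else 0.0
    print(f"{s.name} ({ds._moId}): type={s.type} capacity={s.capacity} "
          f"free={s.freeSpace} used={used_pct:.1f}% accessible={s.accessible} "
          f"maintenanceMode={s.maintenanceMode}")

Disconnect(si)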
12855:20241101:185843.416 In vmware_service_get_clusters_and_resourcepools()
12855:20241101:185843.418 vmware_service_get_clusters_and_resourcepools() SOAP response:
domain-c1002nameNTK-corptriggeredAlarmStateresgroup-1003nameResourcesparentdomain-c1002resourcePoolresgroup-4001resgroup-4026resgroup-4026nameNTKparentresgroup-1003resourcePoolresgroup-4001namemgmtparentresgroup-1003resourcePool
12855:20241101:185843.419 In vmware_service_process_cluster_data()
12855:20241101:185843.419 In vmware_service_get_alarms_data(), func_parent:'vmware_service_process_cluster_data'
12855:20241101:185843.419 End of vmware_service_get_alarms_data() func_parent:'vmware_service_process_cluster_data' found:0 total:1
12855:20241101:185843.419 End of vmware_service_process_cluster_data():SUCCEED cl:1 rp:3
12855:20241101:185843.419 In vmware_service_get_cluster_state() clusterid:'domain-c1002'
12855:20241101:185843.422 vmware_service_get_cluster_state() SOAP response:
domain-c1002datastoredatastore-2005datastore-2006datastore-2007datastore-4041datastore-4046datastore-4050summary.overallStatusgreen
12855:20241101:185843.422 End of vmware_service_get_cluster_state():SUCCEED
12855:20241101:185843.422 End of vmware_service_get_clusters_and_resourcepools():SUCCEED found cl:1 rp:2
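The cluster pass found one cluster (NTK-corp, domain-c1002, overall status green, six attached datastores) and its resource pools. A pyVmomi sketch of the same walk over clusters and their resource pool tree, under the same placeholder assumptions:

import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.local", user="monitoring", pwd="***",
                  sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

view = content.viewManager.CreateContainerView(
    content.rootFolder, [vim.ClusterComputeResource], True)
for cl in view.view:
    print(cl._moId, cl.name, "status:", cl.summary.overallStatus,
          "datastores:", [d._moId for d in cl.datastore])
    # Depth-first walk of the resource pool tree under the cluster
    # (the root pool is always called 'Resources').
    stack = [cl.resourcePool]
    while stack:
        rp = stack.pop()
        print("  rp:", rp._moId, rp.name)
        stack.extend(rp.resourcePool)

Disconnect(si)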
12855:20241101:185843.422 In vmware_service_init_hv() hvid:'host-4047'
12855:20241101:185843.422 In vmware_service_get_hv_data() guesthvid:'host-4047'
12855:20241101:185843.422 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4047false
12855:20241101:185843.436 vmware_service_get_hv_data() SOAP response:
host-4047config.network.dnsConfigfalsentk-esxi-01m1.ntk-corp.ru10.50.242.78m1.ntk-corp.ruconfig.network.ipRouteConfig.defaultGateway10.50.242.1config.network.pnickey-vim.host.PhysicalNic-vmnic0vmnic00000:1c:00.0i40en1000truefalsetrueac:1f:6b:b1:4c:783ac:1f:6b:b1:4c:7800falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic1vmnic10000:1c:00.1i40en1000truefalsetrueac:1f:6b:b1:4c:793ac:1f:6b:b1:4c:7900falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic2vmnic20000:af:00.0icen25000true25000truefalsefalse50:7c:6f:20:55:a8350:7c:6f:20:55:a800falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic3vmnic30000:af:00.1icen25000true25000truefalsefalse50:7c:6f:20:55:a9350:7c:6f:20:55:a900falsefalsefalsefalsefalsetruetrueconfig.storageDevice.scsiTopologykey-vim.host.ScsiTopology.Interface-vmhba0key-vim.host.BlockHba-vmhba0key-vim.host.ScsiTopology.Interface-vmhba1key-vim.host.BlockHba-vmhba1key-vim.host.ScsiTopology.Target-vmhba1:0:00key-vim.host.ScsiTopology.Lun-0100000000533435504e43305236383139353420202020202053414d53554e0key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.ScsiTopology.Interface-vmhba2key-vim.host.FibreChannelHba-vmhba2key-vim.host.ScsiTopology.Target-vmhba2:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:22key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.ScsiTopology.Target-vmhba2:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371key-vim.host.ScsiTopology.Interface-vmhba3key-vim.host.FibreChannelHba-vmhba3key-vim.host.ScsiTopology.Target-vmhba3:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:22key-vim.hos
t.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.ScsiTopology.Target-vmhba3:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243key-vim.host.ScsiTopology.Interface-vmhba64key-vim.host.FibreChannelHba-vmhba64key-vim.host.ScsiTopology.Interface-vmhba65key-vim.host.FibreChannelHba-vmhba65config.virtualNicManagerInfo.netConfigfaultToleranceLoggingtruevmk0faultToleranceLogging.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackmanagementtruevmk0management.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackmanagement.key-vim.host.VirtualNic-vmk0nvmeRdmatruevmk0nvmeRdma.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStacknvmeTcptruevmk0nvmeTcp.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackptpfalsevmk0ptp.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereBackupNFCtruevmk0vSphereBackupNFC.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereProvisioningtruevmk0vSphereProvisioning.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereReplicationtruevmk0vSphereReplication.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvSphereReplicationNFCtruevmk0vSphereReplicationNFC.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 
dbdvportgroup-23017870991381500truedefaultTcpipStackvmotiontruevmk0vmotion.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvmotion.key-vim.host.VirtualNic-vmk0vsantruevmk0vsan.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackvsanWitnesstruevmk0vsanWitness.key-vim.host.VirtualNic-vmk0false10.50.242.11255.255.255.19250:7c:6f:20:55:a850 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23017870991381500truedefaultTcpipStackdatastoredatastore-2005datastore-2006datastore-2007datastore-4050overallStatusgreenparentdomain-c1002runtime.connectionStateconnectedruntime.healthSystemRuntime.hardwareStatusInfoMemory 0.32.2.178Physical element is functioning as expectedGreenMemory 0.32.6.182Physical element is functioning as expectedGreenMemory 0.32.26.218Physical element is functioning as expectedGreenMemory 0.8.39.55Physical element is functioning as expectedGreenMemory 0.8.41.57Physical element is functioning as expectedGreenMemory 0.8.40.56Physical element is functioning as expectedGreenMemory 0.32.24.216Physical element is functioning as expectedGreenMemory 0.32.0.176Physical element is functioning as expectedGreenMemory 0.32.20.212Physical element is functioning as expectedGreenMemory 0.32.22.214Physical element is functioning as expectedGreenMemory 0.32.18.210Physical element is functioning as expectedGreenMemory 0.8.38.54Physical element is functioning as expectedGreenMemory 0.32.8.184Physical element is functioning as expectedGreenMemory 0.32.16.208Physical element is functioning as expectedGreenProc 0.3.1.1Physical element is functioning as expectedGreenProc 0.3.2.2Physical element is functioning as expectedGreenProc 0.3.21.53Physical element is functioning as expectedGreenProc 0.3.20.52Physical element is functioning as expectedGreenruntime.inMaintenanceModefalsesummary.config.namentk-esxi-01.m1.ntk-corp.rusummary.config.product.fullNameVMware ESXi 8.0.3 build-24280767summary.config.product.version8.0.3summary.hardware.cpuMhz2800summary.hardware.cpuModelIntel(R) Xeon(R) Gold 6242 CPU @ 2.80GHzsummary.hardware.memorySize686832898048summary.hardware.modelSuper Serversummary.hardware.numCpuCores32summary.hardware.numCpuThreads64summary.hardware.uuid00000000-0000-0000-0000-ac1f6bb14c78summary.hardware.vendorSupermicrosummary.managementServerIp10.50.242.10summary.quickStats.overallCpuUsage162summary.quickStats.overallMemoryUsage16588summary.quickStats.uptime691250summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo[Device] Add-in Card 16 AOC_NIC TempThe sensor is operating under normal conditionsGreen6300-2degrees CnonetemperatureSystem Chassis 0 Chassis IntruThe sensor is operating under normal conditionsGreen00unspecifiednoneotherSystem Board 46 1.05V PCHThe sensor is operating under normal conditionsGreen107-2VoltsnonevoltageSystem Board 45 PVNN PCHThe sensor is operating under normal conditionsGreen103-2VoltsnonevoltageSystem Board 44 1.8V PCHThe sensor is operating under normal conditionsGreen184-2VoltsnonevoltageSystem Board 43 3.3VSBThe sensor is operating under normal conditionsGreen341-2VoltsnonevoltageSystem Board 42 5VSBThe sensor is operating under normal conditionsGreen516-2VoltsnonevoltageMemory Module 41 VDimmP2DEFThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 40 
VDimmP2ABCThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 39 VDimmP1DEFThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageMemory Module 38 VDimmP1ABCThe sensor is operating under normal conditionsGreen120-2VoltsnonevoltageProcessor 21 Vcpu2The sensor is operating under normal conditionsGreen183-2VoltsnonevoltageProcessor 20 Vcpu1The sensor is operating under normal conditionsGreen186-2VoltsnonevoltageBattery 0 VBATThe sensor is operating under normal conditionsGreen325160unspecifiednonebatterySystem Board 34 3.3VCCThe sensor is operating under normal conditionsGreen340-2VoltsnonevoltageSystem Board 33 5VCCThe sensor is operating under normal conditionsGreen510-2VoltsnonevoltageSystem Board 32 12VThe sensor is operating under normal conditionsGreen1170-2VoltsnonevoltageFan Device 6 FAN6The sensor is operating under normal conditionsGreen690000-2RPMnonefanFan Device 5 FAN5The sensor is operating under normal conditionsGreen680000-2RPMnonefanFan Device 4 FAN4The sensor is operating under normal conditionsGreen680000-2RPMnonefanFan Device 3 FAN3The sensor is operating under normal conditionsGreen650000-2RPMnonefanFan Device 1 FAN1The sensor is operating under normal conditionsGreen660000-2RPMnonefanMemory Device 26 P2-DIMMF1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 24 P2-DIMME1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 22 P2-DIMMD1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureMemory Device 20 P2-DIMMC1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 18 P2-DIMMB1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 16 P2-DIMMA1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 8 P1-DIMME1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 6 P1-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 2 P1-DIMMB1 TempThe sensor is operating under normal conditionsGreen3100-2degrees CnonetemperatureMemory Device 0 P1-DIMMA1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureSystem Board 21 VRMP2DEF TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 20 VRMP2ABC TempThe sensor is operating under normal conditionsGreen4800-2degrees CnonetemperatureSystem Board 19 VRMP1DEF TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 18 VRMP1ABC TempThe sensor is operating under normal conditionsGreen4300-2degrees CnonetemperatureSystem Board 17 VRMCpu2 TempThe sensor is operating under normal conditionsGreen4400-2degrees CnonetemperatureSystem Board 16 VRMCpu1 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 3 Peripheral TempThe sensor is operating under normal conditionsGreen4200-2degrees CnonetemperatureSystem Board 2 System TempThe sensor is operating under normal conditionsGreen2900-2degrees CnonetemperatureSystem Board 1 PCH TempThe sensor is operating under normal conditionsGreen5100-2degrees CnonetemperatureProcessor 2 CPU2 TempThe sensor is operating under normal conditionsGreen5800-2degrees CnonetemperatureProcessor 1 CPU1 TempThe sensor is operating under normal 
conditionsGreen5300-2degrees CnonetemperaturePower Supply 87 PS2 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowerPower Supply 88 PS1 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowertriggeredAlarmStatevmvm-4060
12855:20241101:185843.437 End of vmware_service_get_hv_data():SUCCEED
12855:20241101:185843.438 In vmware_service_get_hv_pnics_data()
12855:20241101:185843.438 End of vmware_service_get_hv_pnics_data() found:4
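vmware_service_get_hv_data() requested a long fixed property list for host-4047 (quick stats, hardware identity, sensor readings, physical NICs, SCSI topology, DNS and gateway configuration, triggered alarms), and vmware_service_get_hv_pnics_data() then extracted the four physical NICs from it. Reading the same properties with pyVmomi could look like the sketch below; the host name comes from the trace, everything else is a placeholder.

import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.local", user="monitoring", pwd="***",
                  sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

hosts = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True).view
host = next(h for h in hosts if h.name == "ntk-esxi-01.m1.ntk-corp.ru")

s = host.summary
print(s.config.product.fullName, "|", s.hardware.vendor, s.hardware.model)
print("cpu used MHz:", s.quickStats.overallCpuUsage,
      "mem used MB:", s.quickStats.overallMemoryUsage,
      "uptime s:", s.quickStats.uptime)

# Physical NICs: linkSpeed is None when the link is down.
for pnic in host.config.network.pnic:
    speed = pnic.linkSpeed.speedMb if pnic.linkSpeed else None
    print(pnic.device, pnic.mac, pnic.driver, "speedMb:", speed)

# IPMI-style sensors; currentReading is scaled by 10**unitModifier,
# e.g. a reading of 6300 with modifier -2 is 63 degrees C.
hs = host.runtime.healthSystemRuntime
for sensor in (hs.systemHealthInfo.numericSensorInfo if hs and hs.systemHealthInfo else []):
    print(sensor.name, sensor.healthState.key,
          sensor.currentReading * 10 ** sensor.unitModifier, sensor.baseUnits)

Disconnect(si)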
12855:20241101:185843.438 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12855:20241101:185843.438 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12855:20241101:185843.439 In vmware_hv_ip_search()
12855:20241101:185843.439 End of vmware_hv_ip_search() ip:10.50.242.11
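vmware_hv_ip_search() picks the IP address the hypervisor is identified by, preferring a vmknic that carries the management service (here vmk0 with 10.50.242.11). The exact selection rules live in the C collector; as an approximation under the same placeholder assumptions, the management vmknic IP can be looked up like this:

import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.local", user="monitoring", pwd="***",
                  sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

hosts = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True).view
host = next(h for h in hosts if h.name == "ntk-esxi-01.m1.ntk-corp.ru")

mgmt_ip = None
for netcfg in host.config.virtualNicManagerInfo.netConfig:
    if netcfg.nicType != "management" or not netcfg.selectedVnic:
        continue
    # selectedVnic holds keys like 'management.key-vim.host.VirtualNic-vmk0',
    # matching the candidateVnic keys within this netConfig entry.
    selected = set(netcfg.selectedVnic)
    for vnic in netcfg.candidateVnic:
        if vnic.key in selected:
            mgmt_ip = vnic.spec.ip.ipAddress
            break
print("management vmknic IP:", mgmt_ip)

Disconnect(si)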
12855:20241101:185843.439 In vmware_hv_get_parent_data() id:'host-4047'
12855:20241101:185843.442 vmware_hv_get_parent_data() SOAP response:
domain-c1002nameNTK-corpdatacenter-3nameNTK-corptriggeredAlarmState
12855:20241101:185843.442 End of vmware_hv_get_parent_data():SUCCEED
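vmware_hv_get_parent_data() resolves which cluster and datacenter the hypervisor belongs to (here cluster NTK-corp / domain-c1002 inside datacenter NTK-corp / datacenter-3). The equivalent walk up the parent chain in pyVmomi, with the same placeholder assumptions as in the earlier sketches:

import ssl
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim

si = SmartConnect(host="vcenter.example.local", user="monitoring", pwd="***",
                  sslContext=ssl._create_unverified_context())
content = si.RetrieveContent()

hosts = content.viewManager.CreateContainerView(content.rootFolder, [vim.HostSystem], True).view
host = next(h for h in hosts if h.name == "ntk-esxi-01.m1.ntk-corp.ru")

cluster = datacenter = None
obj = host.parent  # ClusterComputeResource for a clustered host, then folders above it
while obj is not None:
    if isinstance(obj, vim.ClusterComputeResource) and cluster is None:
        cluster = obj
    if isinstance(obj, vim.Datacenter):
        datacenter = obj
        break
    obj = obj.parent
print("cluster:", cluster.name if cluster else None,
      "datacenter:", datacenter.name if datacenter else None)

Disconnect(si)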
12855:20241101:185843.442 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4047"
12855:20241101:185843.442 In vmware_service_hv_disks_get_info() hvid:'host-4047'
12855:20241101:185843.442 vmware_service_hv_disks_get_info() count of scsiLun:21
12855:20241101:185843.453 vmware_service_hv_disks_get_info() SOAP response:
host-4047
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554e"]: canonicalName: t10.ATA_____SAMSUNG_MZ7LH480HAHQ2D00005______________S45PNC0R681954______  lunType: disk  model: SAMSUNG MZ7LH480  operationalState: ok  queueDepth: 31  revision: 904Q  serialNumber: S45PNC0R681954  vendor: ATA
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020"]: canonicalName: naa.2ff70002ac021983  lunType: disk  model: VV  operationalState: ok  queueDepth: 64  revision: 3315  serialNumber: unavailable  vendor: 3PARdata
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020"]: canonicalName: naa.2ff70002ac0228a3  lunType: disk  model: VV  operationalState: ok  queueDepth: 64  revision: 3315  serialNumber: unavailable  vendor: 3PARdata
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020"]: canonicalName: naa.60002ac0000000000000004a000228a3  lunType: disk  model: VV  operationalState: ok  queueDepth: 64  revision: 3315  serialNumber: unavailable  vendor: 3PARdata
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020"]: canonicalName: naa.60002ac00000000000000054000228a3  lunType: disk  model: VV  operationalState: ok  queueDepth: 64  revision: 3315  serialNumber: unavailable  vendor: 3PARdata
config.storageDevice.scsiLun["key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020"]: canonicalName: naa.60002ac0000000000000042f00021983  lunType: disk  model: VV  operationalState: ok  queueDepth: 64  revision: 3315  serialNumber: unavailable  vendor: 3PARdata
12855:20241101:185843.453 In vmware_service_hv_disks_parse_info()
12855:20241101:185843.454 End of vmware_service_hv_disks_parse_info() created:6
12855:20241101:185843.454 End of vmware_service_hv_disks_get_info():SUCCEED for 6(vsan:0) / 21
12855:20241101:185843.454 In vmware_service_hv_get_multipath_data() hvid:'host-4047'
12855:20241101:185843.462 vmware_service_hv_get_multipath_data() SOAP response:
host-4047config.storageDevice.multipathInfokey-vim.host.MultipathInfo.LogicalUnit-0100000000533435504e43305236383139353420202020202053414d53554e0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.ScsiDisk-0100000000533435504e43305236383139353420202020202053414d53554ekey-vim.host.MultipathInfo.Path-vmhba1:C0:T0:L0vmhba1:C0:T0:L0activeactivetruekey-vim.host.BlockHba-vmhba1key-vim.host.MultipathInfo.LogicalUnit-0100000000533435504e43305236383139353420202020202053414d53554eFIXEDkey-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a3565620202020020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L530vmhba2:C0:T0:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L530vmhba2:C0:T3:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L530vmhba3:C0:T1:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L530vmhba3:C0:T0:L530activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020012020060002ac0000000000000004a000228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a3565620202020020013020060002ac00000000000000054000228a3565620202020key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L531vmhba2:C0:T0:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L531vmhba2:C0:T3:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L531vmhba3:C0:T1:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L531vmhba3:C0:T0:L531activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f00021983565620202020020017020060002ac0000000000000042f00021983565620202020key-vim.host.ScsiDisk-020017020060002ac0000000000000042f00021983565620202020key-vim.host.MultipathInfo.Path-vmhba3:C0:T3:L535vmhba3:C0:T3:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.MultipathInfo.Path-vmhba3:C0:T2:L535vmhba3:C0:T2:L535activeactivetruekey-vim.host.FibreChanne
lHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T2:L535vmhba2:C0:T2:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T1:L535vmhba2:C0:T1:L535activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202002000001002ff70002ac0228a3565620202020key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.MultipathInfo.Path-vmhba2:C0:T0:L256vmhba2:C0:T0:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202025222972777799456353456231250505902243key-vim.host.MultipathInfo.Path-vmhba2:C0:T3:L256vmhba2:C0:T3:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202024502396837420176993456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T1:L256vmhba3:C0:T1:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202023781820897040897633456231250505902243key-vim.host.MultipathInfo.Path-vmhba3:C0:T0:L256vmhba3:C0:T0:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac0228a356562020202023061244956661618273456231250505902243VMW_PSP_RRVMW_SATP_ALUAkey-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202002000001002ff70002ac021983565620202020key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.MultipathInfo.Path-vmhba3:C0:T3:L256vmhba3:C0:T3:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202023781820897040858913456231250505898371key-vim.host.MultipathInfo.Path-vmhba3:C0:T2:L256vmhba3:C0:T2:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba3key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202023061244956661579553456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T2:L256vmhba2:C0:T2:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202025222972777799417633456231250505898371key-vim.host.MultipathInfo.Path-vmhba2:C0:T1:L256vmhba2:C0:T1:L256activeactivetruekey-vim.host.FibreChannelHba-vmhba2key-vim.host.MultipathInfo.LogicalUnit-02000001002ff70002ac02198356562020202024502396837420138273456231250505898371VMW_PSP_RRVMW_SATP_ALUA
12855:20241101:185843.462 End of vmware_service_hv_get_multipath_data():SUCCEED
12855:20241101:185843.462 In vmware_hv_ds_access_update() hv id:host-4047 hv dss:4 dss:6
12855:20241101:185843.464 vmware_hv_ds_access_update() SOAP response:
datastore-2005  host["host-4047"].mountInfo.accessMode: readWrite  host["host-4047"].mountInfo.accessible: true  host["host-4047"].mountInfo.mounted: true
datastore-2006  host["host-4047"].mountInfo.accessMode: readWrite  host["host-4047"].mountInfo.accessible: true  host["host-4047"].mountInfo.mounted: true
datastore-2007  host["host-4047"].mountInfo.accessMode: readWrite  host["host-4047"].mountInfo.accessible: true  host["host-4047"].mountInfo.mounted: true
datastore-4050  host["host-4047"].mountInfo.accessMode: readWrite  host["host-4047"].mountInfo.accessible: true  host["host-4047"].mountInfo.mounted: true
12855:20241101:185843.464 In vmware_hv_ds_access_parse()
12855:20241101:185843.464 In vmware_hv_get_ds_access() for DS:datastore-2005
12855:20241101:185843.464 End of vmware_hv_get_ds_access() mountinfo:15
12855:20241101:185843.464 In vmware_hv_get_ds_access() for DS:datastore-2006
12855:20241101:185843.465 End of vmware_hv_get_ds_access() mountinfo:15
12855:20241101:185843.465 In vmware_hv_get_ds_access() for DS:datastore-2007
12855:20241101:185843.465 End of vmware_hv_get_ds_access() mountinfo:15
12855:20241101:185843.465 In vmware_hv_get_ds_access() for DS:datastore-4050
12855:20241101:185843.465 End of vmware_hv_get_ds_access() mountinfo:15
12855:20241101:185843.465 End of vmware_hv_ds_access_parse() parsed:4
12855:20241101:185843.465 End of vmware_hv_ds_access_update():SUCCEED for 4 / 4
12855:20241101:185843.465 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID530_mgmt"
12855:20241101:185843.465 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_KARTOHA_SAS_NTK_ID535"
12855:20241101:185843.465 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"3PAR_GOROH_SSD_NTK_ID531"
12855:20241101:185843.465 vmware_service_init_hv(): for 1 diskextents check multipath at ds:"Local_ntk-m1-esxi-01"
12855:20241101:185843.465 In vmware_service_create_vm() vmid:'vm-4060'
12855:20241101:185843.465 In vmware_service_get_vm_data() vmid:'vm-4060'
12855:20241101:185843.468 vmware_service_get_vm_data() SOAP response:
vm-4060availableFieldconfig.hardware218192falsefalse200IDE 00201IDE 11300PS2 controller 00600700100PCI controller 00500120001000150004000400SIO controller 00600Keyboard3000700Pointing device; Devicefalseautodetect3001500Video card100040961falsefalseautomatic26214412000Device on the virtual machine PCI bus that provides support for the virtual machine communication interface10017-1079927627falsetrue1000LSI Logic16100302000truenoSharing715000AHCI321002401600016000ISO [3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.iso[3PAR_GOROH_SSD_NTK_ID530_mgmt] ISOs/ubuntu-22.04.5-live-server-amd64.isodatastore-2005truetruefalseok1500002000104,857,600 KB[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdkdatastore-2005persistentfalsefalsefalsefalse6000C29d-45c9-aa9f-3d54-a04187209ee5fa74bccac7959c5d95abe5bffffffffefalsesharingNone100001048576001073741824001000normal-11000normal05-20004000DVSwitch: 50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 db50 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-400628630340996truefalsetrueok1601007assigned00:50:56:b0:80:48true050normal-1trueconfig.instanceUuid50304101-157a-f442-58f4-550f05de33feconfig.uuid42306756-2f64-b85a-a4fe-276cbfa19cb5customValuedatastoredatastore-2005guest.disk/5146047283240655228928/boot20403732481785856000guest.guestFamilylinuxGuestguest.guestFullNameUbuntu Linux (64-bit)guest.guestStaterunningguest.hostNamezabb-ntk-proxyguest.ipAddress10.50.242.76guest.netntk_dmz_vlan_112910.50.242.76fe80::250:56ff:feb0:804800:50:56:b0:80:48true400010.50.242.7628preferredfe80::250:56ff:feb0:804864unknownguest.toolsRunningStatusguestToolsRunningguest.toolsVersion12389layoutEx0[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmxconfig23822382true1[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmsdsnapshotList00true2[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.vmdkdiskDescriptor458458true3[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk-flat.vmdkdiskExtent107374182400107374182400true4[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk.nvramnvram86848684true5[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/zabbix-proxy-ntk-13083273.vswpswap85899345928589934592true6[3PAR_GOROH_SSD_NTK_ID530_mgmt] zabbix-proxy-ntk/vmx-zabbix-proxy-ntk-4e9138c7e268bf86a750769daba1b562730af6a5e74aa2ad704e8731824ba105-1.vswpuwswap8598323285983232true2000232024-11-01T18:26:30.170712Zparentgroup-v11resourcePoolresgroup-4001summary.config.memorySizeMB8192summary.config.namezabbix-proxy-ntksummary.config.numCpu2summary.quickStats.balloonedMemory0summary.quickStats.compressedMemory0summary.quickStats.guestMemoryUsage163summary.quickStats.hostMemoryUsage8222summary.quickStats.overallCpuUsage28summary.quickStats.privateMemory8165summary.quickStats.sharedMemory3summary.quickStats.swappedMemory0summary.quickStats.uptimeSeconds76027summary.runtime.consolidationNeededfalsesummary.runtime.powerStatepoweredOnsummary.storage.committed116050111748summary.storage.uncommitted0summary.storage.unshared107374182858triggeredAlarmStategroup-v11nameDiscovered virtual machineparentgroup-v4group-v4namevmparentdatacenter-3
12855:20241101:185843.468 End of vmware_service_get_vm_data():SUCCEED
12855:20241101:185843.470 In vmware_service_get_vm_folder() folder id:'group-v11'
12855:20241101:185843.470 End of vmware_service_get_vm_folder(): vm folder:Discovered virtual machine
12855:20241101:185843.470 In vmware_vm_get_nic_devices()
12855:20241101:185843.470 End of vmware_vm_get_nic_devices() found:1
12855:20241101:185843.470 In vmware_vm_get_disk_devices()
12855:20241101:185843.470 End of vmware_vm_get_disk_devices() found:1
12855:20241101:185843.470 In vmware_vm_get_file_systems()
12855:20241101:185843.470 End of vmware_vm_get_file_systems() found:2
12855:20241101:185843.470 In vmware_vm_get_custom_attrs()
12855:20241101:185843.470 End of vmware_vm_get_custom_attrs() attributes:0
12855:20241101:185843.470 In vmware_service_get_alarms_data(), func_parent:'vmware_service_create_vm'
12855:20241101:185843.470 End of vmware_service_get_alarms_data() func_parent:'vmware_service_create_vm' found:0 total:1
12855:20241101:185843.470 End of vmware_service_create_vm():SUCCEED
12855:20241101:185843.470 End of vmware_service_init_hv():SUCCEED
12855:20241101:185843.471 In vmware_service_init_hv() hvid:'host-4043'
12855:20241101:185843.471 In vmware_service_get_hv_data() guesthvid:'host-4043'
12855:20241101:185843.471 vmware_service_get_hv_data() SOAP request: propertyCollectorHostSystemvmparentdatastoreconfig.virtualNicManagerInfo.netConfigconfig.network.pnicconfig.network.ipRouteConfig.defaultGatewaysummary.managementServerIpconfig.storageDevice.scsiTopologytriggeredAlarmStatesummary.quickStats.overallCpuUsagesummary.config.product.fullNamesummary.hardware.numCpuCoressummary.hardware.cpuMhzsummary.hardware.cpuModelsummary.hardware.numCpuThreadssummary.hardware.memorySizesummary.hardware.modelsummary.hardware.uuidsummary.hardware.vendorsummary.quickStats.overallMemoryUsagesummary.quickStats.uptimesummary.config.product.versionsummary.config.nameoverallStatusruntime.inMaintenanceModesummary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfoconfig.network.dnsConfigparentruntime.connectionStatehardware.systemInfo.serialNumberruntime.healthSystemRuntime.hardwareStatusInfohost-4043false
12855:20241101:185843.481 vmware_service_get_hv_data() SOAP response:
host-4043config.network.dnsConfigfalsentk-esxi-02m1.ntk-corp.ru10.50.242.78m1.ntk-corp.ruconfig.network.ipRouteConfig.defaultGateway10.50.242.1config.network.pnickey-vim.host.PhysicalNic-vmnic0vmnic00000:1c:00.0i40en1000truefalsetrue3c:ec:ef:02:b6:e033c:ec:ef:02:b6:e000falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic1vmnic10000:1c:00.1i40en1000truefalsetrue3c:ec:ef:02:b6:e133c:ec:ef:02:b6:e100falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic2vmnic20000:af:00.0icen25000true25000truefalsefalse50:7c:6f:3b:d8:c6350:7c:6f:3b:d8:c600falsefalsefalsefalsefalsetruetruekey-vim.host.PhysicalNic-vmnic3vmnic30000:af:00.1icen25000true25000truefalsefalse50:7c:6f:3b:d8:c7350:7c:6f:3b:d8:c700falsefalsefalsefalsefalsetruetrueconfig.storageDevice.scsiTopologykey-vim.host.ScsiTopology.Interface-vmhba0key-vim.host.BlockHba-vmhba0key-vim.host.ScsiTopology.Interface-vmhba1key-vim.host.BlockHba-vmhba1key-vim.host.ScsiTopology.Target-vmhba1:0:00key-vim.host.ScsiTopology.Lun-0100000000533435504e43305236383231303020202020202053414d53554e0key-vim.host.ScsiDisk-0100000000533435504e43305236383231303020202020202053414d53554ekey-vim.host.ScsiTopology.Interface-vmhba2key-vim.host.FibreChannelHba-vmhba2key-vim.host.ScsiTopology.Target-vmhba2:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202025222972777799456353456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202024502396837420176993456231250505902243key-vim.host.ScsiTopology.Target-vmhba2:0:22key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202025222972777799417633456231250505898371key-vim.host.ScsiTopology.Target-vmhba2:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202024502396837420138273456231250505898371key-vim.host.ScsiTopology.Interface-vmhba3key-vim.host.FibreChannelHba-vmhba3key-vim.host.ScsiTopology.Target-vmhba3:0:33key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023061244956661579553456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:22key-vim.hos
t.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023061244956661618273456231250505902243key-vim.host.ScsiTopology.Target-vmhba3:0:11key-vim.host.ScsiTopology.Lun-02000001002ff70002ac021983565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac021983565620202020key-vim.host.ScsiTopology.Lun-020017020060002ac0000000000000042f00021983565620202020535key-vim.host.ScsiDisk-020017020060002ac0000000000000042f0002198356562020202023781820897040858913456231250505898371key-vim.host.ScsiTopology.Target-vmhba3:0:00key-vim.host.ScsiTopology.Lun-02000001002ff70002ac0228a3565620202020256key-vim.host.ScsiDisk-02000001002ff70002ac0228a3565620202020key-vim.host.ScsiTopology.Lun-020012020060002ac0000000000000004a000228a3565620202020530key-vim.host.ScsiDisk-020012020060002ac0000000000000004a000228a3565620202020key-vim.host.ScsiTopology.Lun-020013020060002ac00000000000000054000228a3565620202020531key-vim.host.ScsiDisk-020013020060002ac00000000000000054000228a356562020202023781820897040897633456231250505902243key-vim.host.ScsiTopology.Interface-vmhba64key-vim.host.FibreChannelHba-vmhba64key-vim.host.ScsiTopology.Interface-vmhba65key-vim.host.FibreChannelHba-vmhba65config.virtualNicManagerInfo.netConfigfaultToleranceLoggingtruevmk0faultToleranceLogging.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackmanagementtruevmk0management.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackmanagement.key-vim.host.VirtualNic-vmk0nvmeRdmatruevmk0nvmeRdma.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStacknvmeTcptruevmk0nvmeTcp.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackptpfalsevmk0ptp.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereBackupNFCtruevmk0vSphereBackupNFC.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereProvisioningtruevmk0vSphereProvisioning.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereReplicationtruevmk0vSphereReplication.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvSphereReplicationNFCtruevmk0vSphereReplicationNFC.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 
dbdvportgroup-23117871047851500truedefaultTcpipStackvmotiontruevmk0vmotion.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvmotion.key-vim.host.VirtualNic-vmk0vsantruevmk0vsan.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackvsanWitnesstruevmk0vsanWitness.key-vim.host.VirtualNic-vmk0false10.50.242.12255.255.255.19250:7c:6f:3b:d8:c650 30 fb 1e 6f 97 99 58-0c 4a 47 9e e0 de 08 dbdvportgroup-23117871047851500truedefaultTcpipStackdatastoredatastore-2005datastore-2006datastore-2007datastore-4046overallStatusgreenparentdomain-c1002runtime.connectionStateconnectedruntime.healthSystemRuntime.hardwareStatusInfoMemory 0.32.2.178Physical element is functioning as expectedGreenMemory 0.32.6.182Physical element is functioning as expectedGreenMemory 0.32.26.218Physical element is functioning as expectedGreenMemory 0.8.39.55Physical element is functioning as expectedGreenMemory 0.8.41.57Physical element is functioning as expectedGreenMemory 0.8.40.56Physical element is functioning as expectedGreenMemory 0.32.24.216Physical element is functioning as expectedGreenMemory 0.32.0.176Physical element is functioning as expectedGreenMemory 0.32.20.212Physical element is functioning as expectedGreenMemory 0.32.22.214Physical element is functioning as expectedGreenMemory 0.32.18.210Physical element is functioning as expectedGreenMemory 0.8.38.54Physical element is functioning as expectedGreenMemory 0.32.8.184Physical element is functioning as expectedGreenMemory 0.32.16.208Physical element is functioning as expectedGreenProc 0.3.1.1Physical element is functioning as expectedGreenProc 0.3.2.2Physical element is functioning as expectedGreenProc 0.3.21.53Physical element is functioning as expectedGreenProc 0.3.20.52Physical element is functioning as expectedGreenruntime.inMaintenanceModefalsesummary.config.namentk-esxi-02.m1.ntk-corp.rusummary.config.product.fullNameVMware ESXi 8.0.3 build-24280767summary.config.product.version8.0.3summary.hardware.cpuMhz2800summary.hardware.cpuModelIntel(R) Xeon(R) Gold 6242 CPU @ 2.80GHzsummary.hardware.memorySize686831919104summary.hardware.modelSYS-6019P-WTRsummary.hardware.numCpuCores32summary.hardware.numCpuThreads64summary.hardware.uuid00000000-0000-0000-0000-3cecef02b6e0summary.hardware.vendorSupermicrosummary.managementServerIp10.50.242.10summary.quickStats.overallCpuUsage407summary.quickStats.overallMemoryUsage8918summary.quickStats.uptime691373summary.runtime.healthSystemRuntime.systemHealthInfo.numericSensorInfo[Device] Add-in Card 16 AOC_NIC TempThe sensor is operating under normal conditionsGreen6700-2degrees CnonetemperatureSystem Chassis 0 Chassis IntruThe sensor is operating under normal conditionsGreen00unspecifiednoneotherSystem Board 46 1.05V PCHThe sensor is operating under normal conditionsGreen106-2VoltsnonevoltageSystem Board 45 PVNN PCHThe sensor is operating under normal conditionsGreen102-2VoltsnonevoltageSystem Board 44 1.8V PCHThe sensor is operating under normal conditionsGreen182-2VoltsnonevoltageSystem Board 43 3.3VSBThe sensor is operating under normal conditionsGreen335-2VoltsnonevoltageSystem Board 42 5VSBThe sensor is operating under normal conditionsGreen507-2VoltsnonevoltageMemory Module 41 VDimmP2DEFThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 40 
VDimmP2ABCThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 39 VDimmP1DEFThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageMemory Module 38 VDimmP1ABCThe sensor is operating under normal conditionsGreen119-2VoltsnonevoltageProcessor 21 Vcpu2The sensor is operating under normal conditionsGreen184-2VoltsnonevoltageProcessor 20 Vcpu1The sensor is operating under normal conditionsGreen184-2VoltsnonevoltageBattery 0 VBATThe sensor is operating under normal conditionsGreen325160unspecifiednonebatterySystem Board 34 3.3VCCThe sensor is operating under normal conditionsGreen343-2VoltsnonevoltageSystem Board 33 5VCCThe sensor is operating under normal conditionsGreen507-2VoltsnonevoltageSystem Board 32 12VThe sensor is operating under normal conditionsGreen1164-2VoltsnonevoltageFan Device 6 FAN6The sensor is operating under normal conditionsGreen560000-2RPMnonefanFan Device 5 FAN5The sensor is operating under normal conditionsGreen590000-2RPMnonefanFan Device 3 FAN3The sensor is operating under normal conditionsGreen610000-2RPMnonefanFan Device 2 FAN2The sensor is operating under normal conditionsGreen600000-2RPMnonefanMemory Device 26 P2-DIMMF1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 24 P2-DIMME1 TempThe sensor is operating under normal conditionsGreen3500-2degrees CnonetemperatureMemory Device 22 P2-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 20 P2-DIMMC1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureMemory Device 18 P2-DIMMB1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 16 P2-DIMMA1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 8 P1-DIMME1 TempThe sensor is operating under normal conditionsGreen3200-2degrees CnonetemperatureMemory Device 6 P1-DIMMD1 TempThe sensor is operating under normal conditionsGreen3300-2degrees CnonetemperatureMemory Device 2 P1-DIMMB1 TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureMemory Device 0 P1-DIMMA1 TempThe sensor is operating under normal conditionsGreen3600-2degrees CnonetemperatureSystem Board 21 VRMP2DEF TempThe sensor is operating under normal conditionsGreen3400-2degrees CnonetemperatureSystem Board 20 VRMP2ABC TempThe sensor is operating under normal conditionsGreen4200-2degrees CnonetemperatureSystem Board 19 VRMP1DEF TempThe sensor is operating under normal conditionsGreen4000-2degrees CnonetemperatureSystem Board 18 VRMP1ABC TempThe sensor is operating under normal conditionsGreen4300-2degrees CnonetemperatureSystem Board 17 VRMCpu2 TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 16 VRMCpu1 TempThe sensor is operating under normal conditionsGreen3800-2degrees CnonetemperatureSystem Board 3 Peripheral TempThe sensor is operating under normal conditionsGreen3900-2degrees CnonetemperatureSystem Board 2 System TempThe sensor is operating under normal conditionsGreen2900-2degrees CnonetemperatureSystem Board 1 PCH TempThe sensor is operating under normal conditionsGreen4600-2degrees CnonetemperatureProcessor 2 CPU2 TempThe sensor is operating under normal conditionsGreen5100-2degrees CnonetemperatureProcessor 1 CPU1 TempThe sensor is operating under normal conditionsGreen5000-2degrees CnonetemperaturePower Supply 87 PS2 StatusThe sensor is 
operating under normal conditionsGreen10sensor-discretenonepowerPower Supply 88 PS1 StatusThe sensor is operating under normal conditionsGreen10sensor-discretenonepowertriggeredAlarmStatevmvm-4057
12855:20241101:185843.481 End of vmware_service_get_hv_data():SUCCEED
12855:20241101:185843.482 In vmware_service_get_hv_pnics_data()
12855:20241101:185843.482 End of vmware_service_get_hv_pnics_data() found:4
12855:20241101:185843.482 In vmware_service_get_alarms_data(), func_parent:'vmware_service_init_hv'
12855:20241101:185843.482 End of vmware_service_get_alarms_data() func_parent:'vmware_service_init_hv' found:0 total:1
12855:20241101:185843.483 In vmware_hv_ip_search()
12855:20241101:185843.483 End of vmware_hv_ip_search() ip:10.50.242.12
12855:20241101:185843.483 In vmware_hv_get_parent_data() id:'host-4043'
12855:20241101:185843.485 vmware_hv_get_parent_data() SOAP response:
domain-c1002 name: NTK-corp  datacenter-3 name: NTK-corp  triggeredAlarmState
12855:20241101:185843.485 End of vmware_hv_get_parent_data():SUCCEED
12855:20241101:185843.485 vmware_service_init_hv(): 4 datastores are connected to hypervisor "host-4043"
12855:20241101:185843.485 In vmware_service_hv_disks_get_info() hvid:'host-4043'
12855:20241101:185843.485 vmware_service_hv_disks_get_info() count of scsiLun:21
12861:20241101:185844.328 In vmware_job_get() queue:2
12861:20241101:185844.329 End of vmware_job_get() queue:2 type:none
12857:20241101:185844.329 In vmware_job_get() queue:2
12857:20241101:185844.329 End of vmware_job_get() queue:2 type:none
12859:20241101:185844.329 In vmware_job_get() queue:2
12859:20241101:185844.330 End of vmware_job_get() queue:2 type:none
12857:20241101:185845.330 zbx_setproctitle() title:'vmware collector #2 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001897 sec]'
12857:20241101:185845.330 In vmware_job_get() queue:2
12857:20241101:185845.330 End of vmware_job_get() queue:2 type:none
12859:20241101:185845.330 In vmware_job_get() queue:2
12859:20241101:185845.330 End of vmware_job_get() queue:2 type:none
12861:20241101:185845.330 zbx_setproctitle() title:'vmware collector #4 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.002012 sec]'
12861:20241101:185845.330 In vmware_job_get() queue:2
12861:20241101:185845.330 End of vmware_job_get() queue:2 type:none
12857:20241101:185846.330 In vmware_job_get() queue:2
12857:20241101:185846.330 End of vmware_job_get() queue:2 type:none
12859:20241101:185846.330 In vmware_job_get() queue:2
12859:20241101:185846.330 End of vmware_job_get() queue:2 type:none
12861:20241101:185846.330 In vmware_job_get() queue:2
12861:20241101:185846.330 End of vmware_job_get() queue:2 type:none
12857:20241101:185847.330 In vmware_job_get() queue:2
12857:20241101:185847.330 End of vmware_job_get() queue:2 type:none
12859:20241101:185847.330 zbx_setproctitle() title:'vmware collector #3 [updated 0, removed 0 VMware services, idle 5.000000 sec during 5.001956 sec]'
12859:20241101:185847.330 In vmware_job_get() queue:2
12859:20241101:185847.330 End of vmware_job_get() queue:2 type:none
12861:20241101:185847.330 In vmware_job_get() queue:2
12861:20241101:185847.330 End of vmware_job_get() queue:2 type:none
12857:20241101:185848.330 In vmware_job_get() queue:2
12857:20241101:185848.330 End of vmware_job_get() queue:2 type:none
12859:20241101:185848.330 In vmware_job_get() queue:2
12859:20241101:185848.330 End of vmware_job_get() queue:2 type:none
12861:20241101:185848.330 In vmware_job_get() queue:2
12861:20241101:185848.330 End of vmware_job_get() queue:2 type:none
12837:20241101:185848.654 received configuration data from server at "10.50.242.78", datalen 437
12857:20241101:185849.330 In vmware_job_get() queue:2
12857:20241101:185849.331 End of vmware_job_get() queue:2 type:none
12861:20241101:185849.331 In vmware_job_get() queue:2
12861:20241101:185849.331 End of vmware_job_get() queue:2 type:none
12859:20241101:185849.331 In vmware_job_get() queue:2
12859:20241101:185849.331 End of vmware_job_get() queue:2 type:none