/*
Copyright 2021 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resourcemonitor
import (
	"context"
	"fmt"
	"strconv"
	"strings"
	"time"

	"github.com/jaypipes/ghw"
	topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/klog/v2"
	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"

	"sigs.k8s.io/node-feature-discovery/pkg/utils"
	"sigs.k8s.io/node-feature-discovery/pkg/utils/hostpath"
)
const (
// obtained these values from node e2e tests : https://github.com/kubernetes/kubernetes/blob/82baa26905c94398a0d19e1b1ecf54eb8acb6029/test/e2e_node/util.go#L70
defaultPodResourcesTimeout = 10 * time . Second
)
type nodeResources struct {
2022-10-14 15:28:52 +03:00
perNUMAAllocatable map [ int ] map [ corev1 . ResourceName ] int64
2021-05-13 12:55:33 +02:00
// mapping: resourceName -> resourceID -> nodeID
2021-11-04 10:16:57 +02:00
resourceID2NUMAID map [ string ] map [ string ] int
topo * ghw . TopologyInfo
reservedCPUIDPerNUMA map [ int ] [ ] string
memoryResourcesCapacityPerNUMA utils . NumaMemoryResources
2022-11-02 16:01:25 +02:00
excludeList ExcludeResourceList
2021-05-13 12:55:33 +02:00
}
// resourceData tracks the accounting of a single resource on one NUMA node.
type resourceData struct {
	available   int64 // allocatable minus the amount already assigned to pods
	allocatable int64
	capacity    int64
}
2022-11-02 16:01:25 +02:00
func NewResourcesAggregator ( podResourceClient podresourcesapi . PodResourcesListerClient , excludeList ExcludeResourceList ) ( ResourcesAggregator , error ) {
2021-05-13 12:55:33 +02:00
var err error
topo , err := ghw . Topology ( ghw . WithPathOverrides ( ghw . PathOverrides {
2022-10-06 14:05:11 +03:00
"/sys" : string ( hostpath . SysfsDir ) ,
2021-05-13 12:55:33 +02:00
} ) )
if err != nil {
return nil , err
}
2021-11-04 10:16:57 +02:00
memoryResourcesCapacityPerNUMA , err := getMemoryResourcesCapacity ( )
if err != nil {
return nil , err
}
2021-05-13 12:55:33 +02:00
ctx , cancel := context . WithTimeout ( context . Background ( ) , defaultPodResourcesTimeout )
defer cancel ( )
// Pod Resource API client
resp , err := podResourceClient . GetAllocatableResources ( ctx , & podresourcesapi . AllocatableResourcesRequest { } )
if err != nil {
2021-11-05 18:30:50 -04:00
if strings . Contains ( err . Error ( ) , "API GetAllocatableResources disabled" ) {
2023-05-03 11:32:53 +03:00
klog . ErrorS ( err , "Kubelet's pod resources 'GetAllocatableResources' functionality is disabled. " +
"Ensure feature flag 'KubeletPodResourcesGetAllocatable' is set to true. " +
"You can find more about the feature gates from the following URL - " +
2021-11-05 18:30:50 -04:00
"https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/" )
}
return nil , fmt . Errorf ( "failed to get allocatable resources (ensure that KubeletPodResourcesGetAllocatable feature gate is enabled): %w" , err )
2021-05-13 12:55:33 +02:00
}
2022-11-02 16:01:25 +02:00
return NewResourcesAggregatorFromData ( topo , resp , memoryResourcesCapacityPerNUMA , excludeList ) , nil
2021-05-13 12:55:33 +02:00
}
// NewResourcesAggregatorFromData is used to aggregate resource information based on the received data from underlying hardware and podresource API
2022-11-02 16:01:25 +02:00
func NewResourcesAggregatorFromData ( topo * ghw . TopologyInfo , resp * podresourcesapi . AllocatableResourcesResponse , memoryResourceCapacity utils . NumaMemoryResources , excludeList ExcludeResourceList ) ResourcesAggregator {
2021-05-13 12:55:33 +02:00
allDevs := getContainerDevicesFromAllocatableResources ( resp , topo )
return & nodeResources {
2021-11-04 10:16:57 +02:00
topo : topo ,
resourceID2NUMAID : makeResourceMap ( len ( topo . Nodes ) , allDevs ) ,
perNUMAAllocatable : makeNodeAllocatable ( allDevs , resp . GetMemory ( ) ) ,
reservedCPUIDPerNUMA : makeReservedCPUMap ( topo . Nodes , allDevs ) ,
memoryResourcesCapacityPerNUMA : memoryResourceCapacity ,
2022-11-02 16:01:25 +02:00
excludeList : excludeList ,
2021-05-13 12:55:33 +02:00
}
}
// Aggregate provides the mapping (numa zone name) -> Zone from the given PodResources.
2023-02-07 16:48:01 +01:00
func ( noderesourceData * nodeResources ) Aggregate ( podResData [ ] PodResources ) topologyv1alpha2 . ZoneList {
2022-10-14 15:28:52 +03:00
perNuma := make ( map [ int ] map [ corev1 . ResourceName ] * resourceData )
2021-07-13 23:24:58 +01:00
for nodeID := range noderesourceData . topo . Nodes {
nodeRes , ok := noderesourceData . perNUMAAllocatable [ nodeID ]
if ok {
2022-10-14 15:28:52 +03:00
perNuma [ nodeID ] = make ( map [ corev1 . ResourceName ] * resourceData )
2021-11-04 10:16:57 +02:00
for resName , allocatable := range nodeRes {
2022-11-02 16:01:25 +02:00
if noderesourceData . excludeList . IsExcluded ( resName ) {
continue
}
2021-11-04 10:16:57 +02:00
switch {
case resName == "cpu" :
2021-07-13 23:24:58 +01:00
perNuma [ nodeID ] [ resName ] = & resourceData {
2021-11-04 10:16:57 +02:00
allocatable : allocatable ,
available : allocatable ,
capacity : allocatable + int64 ( len ( noderesourceData . reservedCPUIDPerNUMA [ nodeID ] ) ) ,
}
2022-10-14 15:28:52 +03:00
case resName == corev1 . ResourceMemory , strings . HasPrefix ( string ( resName ) , corev1 . ResourceHugePagesPrefix ) :
2021-11-04 10:16:57 +02:00
var capacity int64
if _ , ok := noderesourceData . memoryResourcesCapacityPerNUMA [ nodeID ] ; ! ok {
capacity = allocatable
} else if _ , ok := noderesourceData . memoryResourcesCapacityPerNUMA [ nodeID ] [ resName ] ; ! ok {
capacity = allocatable
} else {
capacity = noderesourceData . memoryResourcesCapacityPerNUMA [ nodeID ] [ resName ]
2021-07-13 23:24:58 +01:00
}
2021-11-04 10:16:57 +02:00
2021-07-13 23:24:58 +01:00
perNuma [ nodeID ] [ resName ] = & resourceData {
2021-11-04 10:16:57 +02:00
allocatable : allocatable ,
available : allocatable ,
capacity : capacity ,
}
default :
perNuma [ nodeID ] [ resName ] = & resourceData {
allocatable : allocatable ,
available : allocatable ,
capacity : allocatable ,
2021-07-13 23:24:58 +01:00
}
}
}
// NUMA node doesn't have any allocatable resources, but yet it exists in the topology
// thus all its CPUs are reserved
} else {
2022-10-14 15:28:52 +03:00
perNuma [ nodeID ] = make ( map [ corev1 . ResourceName ] * resourceData )
2021-07-13 23:24:58 +01:00
perNuma [ nodeID ] [ "cpu" ] = & resourceData {
allocatable : int64 ( 0 ) ,
available : int64 ( 0 ) ,
capacity : int64 ( len ( noderesourceData . reservedCPUIDPerNUMA [ nodeID ] ) ) ,
2021-05-13 12:55:33 +02:00
}
}
}
for _ , podRes := range podResData {
for _ , contRes := range podRes . Containers {
for _ , res := range contRes . Resources {
2022-10-14 15:28:52 +03:00
if res . Name == corev1 . ResourceMemory || strings . HasPrefix ( string ( res . Name ) , corev1 . ResourceHugePagesPrefix ) {
2021-11-04 10:16:57 +02:00
noderesourceData . updateMemoryAvailable ( perNuma , res )
continue
}
2021-07-13 23:24:58 +01:00
noderesourceData . updateAvailable ( perNuma , res )
2021-05-13 12:55:33 +02:00
}
}
}
2023-02-07 16:48:01 +01:00
zones := make ( topologyv1alpha2 . ZoneList , 0 )
2021-05-13 12:55:33 +02:00
for nodeID , resList := range perNuma {
2023-02-07 16:48:01 +01:00
zone := topologyv1alpha2 . Zone {
2021-05-13 12:55:33 +02:00
Name : makeZoneName ( nodeID ) ,
Type : "Node" ,
2023-02-07 16:48:01 +01:00
Resources : make ( topologyv1alpha2 . ResourceInfoList , 0 ) ,
2021-05-13 12:55:33 +02:00
}
costs , err := makeCostsPerNumaNode ( noderesourceData . topo . Nodes , nodeID )
if err != nil {
2023-05-03 11:32:53 +03:00
klog . ErrorS ( err , "failed to calculate costs for NUMA node" , "nodeID" , nodeID )
2021-05-13 12:55:33 +02:00
} else {
2021-11-04 10:16:57 +02:00
zone . Costs = costs
2021-05-13 12:55:33 +02:00
}
for name , resData := range resList {
allocatableQty := * resource . NewQuantity ( resData . allocatable , resource . DecimalSI )
capacityQty := * resource . NewQuantity ( resData . capacity , resource . DecimalSI )
2021-07-13 23:24:58 +01:00
availableQty := * resource . NewQuantity ( resData . available , resource . DecimalSI )
2023-02-07 16:48:01 +01:00
zone . Resources = append ( zone . Resources , topologyv1alpha2 . ResourceInfo {
2021-05-13 12:55:33 +02:00
Name : name . String ( ) ,
2021-07-13 23:24:58 +01:00
Available : availableQty ,
Allocatable : allocatableQty ,
Capacity : capacityQty ,
2021-05-13 12:55:33 +02:00
} )
}
zones = append ( zones , zone )
}
return zones
}
// getContainerDevicesFromAllocatableResources normalize all compute resources to ContainerDevices.
// This is helpful because cpuIDs are not represented as ContainerDevices, but with a different format;
// Having a consistent representation of all the resources as ContainerDevices makes it simpler for
func getContainerDevicesFromAllocatableResources ( availRes * podresourcesapi . AllocatableResourcesResponse , topo * ghw . TopologyInfo ) [ ] * podresourcesapi . ContainerDevices {
var contDevs [ ] * podresourcesapi . ContainerDevices
contDevs = append ( contDevs , availRes . GetDevices ( ) ... )
cpuIDToNodeIDMap := MakeLogicalCoreIDToNodeIDMap ( topo )
cpusPerNuma := make ( map [ int ] [ ] string )
for _ , cpuID := range availRes . GetCpuIds ( ) {
nodeID , ok := cpuIDToNodeIDMap [ int ( cpuID ) ]
if ! ok {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "failed to find the NUMA node for CPU" , "cpuID" , cpuID )
2021-05-13 12:55:33 +02:00
continue
}
cpuIDList := cpusPerNuma [ nodeID ]
cpuIDList = append ( cpuIDList , fmt . Sprintf ( "%d" , cpuID ) )
cpusPerNuma [ nodeID ] = cpuIDList
}
for nodeID , cpuList := range cpusPerNuma {
contDevs = append ( contDevs , & podresourcesapi . ContainerDevices {
2022-10-14 15:28:52 +03:00
ResourceName : string ( corev1 . ResourceCPU ) ,
2021-05-13 12:55:33 +02:00
DeviceIds : cpuList ,
Topology : & podresourcesapi . TopologyInfo {
Nodes : [ ] * podresourcesapi . NUMANode {
{ ID : int64 ( nodeID ) } ,
} ,
} ,
} )
}
return contDevs
}
2021-07-13 23:24:58 +01:00
// updateAvailable computes the actually available resources.
// This function assumes the available resources are initialized to be equal to the allocatable.
2022-10-14 15:28:52 +03:00
func ( noderesourceData * nodeResources ) updateAvailable ( numaData map [ int ] map [ corev1 . ResourceName ] * resourceData , ri ResourceInfo ) {
2021-05-13 12:55:33 +02:00
for _ , resID := range ri . Data {
resName := string ( ri . Name )
resMap , ok := noderesourceData . resourceID2NUMAID [ resName ]
if ! ok {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "unknown resource" , "resourceName" , ri . Name )
2021-05-13 12:55:33 +02:00
continue
}
nodeID , ok := resMap [ resID ]
if ! ok {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "unknown resource" , "resourceName" , resName , "resourceID" , resID )
2021-05-13 12:55:33 +02:00
continue
}
2022-03-15 11:13:04 +02:00
if _ , ok := numaData [ nodeID ] ; ! ok {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "unknown NUMA node id" , "numaID" , nodeID )
2022-03-15 11:13:04 +02:00
continue
}
2021-07-13 23:24:58 +01:00
numaData [ nodeID ] [ ri . Name ] . available --
2021-05-13 12:55:33 +02:00
}
}
// makeZoneName returns the canonical name of a NUMA zone from its ID.
func makeZoneName(nodeID int) string {
	return "node-" + strconv.Itoa(nodeID)
}
2021-07-13 23:24:58 +01:00
// makeNodeAllocatable computes the node allocatable as mapping (NUMA node ID) -> Resource -> Allocatable (amount, int).
// The computation is done assuming all the resources to represent the allocatable for are represented on a slice
2021-05-13 12:55:33 +02:00
// of ContainerDevices. No special treatment is done for CPU IDs. See getContainerDevicesFromAllocatableResources.
2022-10-14 15:28:52 +03:00
func makeNodeAllocatable ( devices [ ] * podresourcesapi . ContainerDevices , memoryBlocks [ ] * podresourcesapi . ContainerMemory ) map [ int ] map [ corev1 . ResourceName ] int64 {
perNUMAAllocatable := make ( map [ int ] map [ corev1 . ResourceName ] int64 )
2021-05-13 12:55:33 +02:00
// initialize with the capacities
for _ , device := range devices {
resourceName := device . GetResourceName ( )
for _ , node := range device . GetTopology ( ) . GetNodes ( ) {
nodeID := int ( node . GetID ( ) )
2021-07-13 23:24:58 +01:00
nodeRes , ok := perNUMAAllocatable [ nodeID ]
2021-05-13 12:55:33 +02:00
if ! ok {
2022-10-14 15:28:52 +03:00
nodeRes = make ( map [ corev1 . ResourceName ] int64 )
2021-05-13 12:55:33 +02:00
}
2022-10-14 15:28:52 +03:00
nodeRes [ corev1 . ResourceName ( resourceName ) ] += int64 ( len ( device . GetDeviceIds ( ) ) )
2021-07-13 23:24:58 +01:00
perNUMAAllocatable [ nodeID ] = nodeRes
2021-05-13 12:55:33 +02:00
}
}
2021-11-04 10:16:57 +02:00
for _ , block := range memoryBlocks {
2022-10-14 15:28:52 +03:00
memoryType := corev1 . ResourceName ( block . GetMemoryType ( ) )
2021-11-04 10:16:57 +02:00
blockTopology := block . GetTopology ( )
if blockTopology == nil {
continue
}
for _ , node := range blockTopology . GetNodes ( ) {
nodeID := int ( node . GetID ( ) )
if _ , ok := perNUMAAllocatable [ nodeID ] ; ! ok {
2022-10-14 15:28:52 +03:00
perNUMAAllocatable [ nodeID ] = make ( map [ corev1 . ResourceName ] int64 )
2021-11-04 10:16:57 +02:00
}
if _ , ok := perNUMAAllocatable [ nodeID ] [ memoryType ] ; ! ok {
perNUMAAllocatable [ nodeID ] [ memoryType ] = 0
}
// I do not like the idea to cast from uint64 to int64, but until the memory size does not go over
// 8589934592Gi, it should be ok
perNUMAAllocatable [ nodeID ] [ memoryType ] += int64 ( block . GetSize_ ( ) )
}
}
2021-07-13 23:24:58 +01:00
return perNUMAAllocatable
2021-05-13 12:55:33 +02:00
}
// MakeLogicalCoreIDToNodeIDMap returns the mapping logical CPU ID -> NUMA node ID
// derived from the given host topology.
func MakeLogicalCoreIDToNodeIDMap(topo *ghw.TopologyInfo) map[int]int {
	cpuToNode := make(map[int]int)
	for _, numaNode := range topo.Nodes {
		for _, core := range numaNode.Cores {
			for _, logicalCPU := range core.LogicalProcessors {
				cpuToNode[logicalCPU] = numaNode.ID
			}
		}
	}
	return cpuToNode
}
// makeResourceMap creates the mapping (resource name) -> (device ID) -> (NUMA node ID) from the given slice of ContainerDevices.
// this is useful to quickly learn the NUMA ID of a given (resource, device).
// NOTE(review): numaNodes is unused in this body; kept to preserve the signature for existing callers.
func makeResourceMap(numaNodes int, devices []*podresourcesapi.ContainerDevices) map[string]map[string]int {
	resourceMap := make(map[string]map[string]int)
	for _, device := range devices {
		resName := device.GetResourceName()
		if _, ok := resourceMap[resName]; !ok {
			resourceMap[resName] = make(map[string]int)
		}
		for _, node := range device.GetTopology().GetNodes() {
			numaID := int(node.GetID())
			for _, devID := range device.GetDeviceIds() {
				resourceMap[resName][devID] = numaID
			}
		}
	}
	return resourceMap
}
// makeCostsPerNumaNode builds the cost map to reach all the known NUMA zones (mapping (numa zone) -> cost) starting from the given NUMA zone.
2023-02-07 16:48:01 +01:00
func makeCostsPerNumaNode ( nodes [ ] * ghw . TopologyNode , nodeIDSrc int ) ( [ ] topologyv1alpha2 . CostInfo , error ) {
2021-05-13 12:55:33 +02:00
nodeSrc := findNodeByID ( nodes , nodeIDSrc )
if nodeSrc == nil {
return nil , fmt . Errorf ( "unknown node: %d" , nodeIDSrc )
}
2023-02-07 16:48:01 +01:00
nodeCosts := make ( [ ] topologyv1alpha2 . CostInfo , 0 )
2021-05-13 12:55:33 +02:00
for nodeIDDst , dist := range nodeSrc . Distances {
// TODO: this assumes there are no holes (= no offline node) in the distance vector
2023-02-07 16:48:01 +01:00
nodeCosts = append ( nodeCosts , topologyv1alpha2 . CostInfo {
2021-05-13 12:55:33 +02:00
Name : makeZoneName ( nodeIDDst ) ,
2021-07-13 23:24:58 +01:00
Value : int64 ( dist ) ,
2021-05-13 12:55:33 +02:00
} )
}
return nodeCosts , nil
}
// findNodeByID returns the topology node with the given ID, or nil when no
// node matches.
func findNodeByID(nodes []*ghw.TopologyNode, nodeID int) *ghw.TopologyNode {
	for _, candidate := range nodes {
		if candidate.ID == nodeID {
			return candidate
		}
	}
	return nil
}
2021-07-13 23:24:58 +01:00
func makeReservedCPUMap ( nodes [ ] * ghw . TopologyNode , devices [ ] * podresourcesapi . ContainerDevices ) map [ int ] [ ] string {
reservedCPUsPerNuma := make ( map [ int ] [ ] string )
cpus := getCPUs ( devices )
for _ , node := range nodes {
nodeID := node . ID
for _ , core := range node . Cores {
for _ , cpu := range core . LogicalProcessors {
cpuID := fmt . Sprintf ( "%d" , cpu )
_ , ok := cpus [ cpuID ]
if ! ok {
cpuIDList , ok := reservedCPUsPerNuma [ nodeID ]
if ! ok {
cpuIDList = make ( [ ] string , 0 )
}
cpuIDList = append ( cpuIDList , cpuID )
reservedCPUsPerNuma [ nodeID ] = cpuIDList
}
}
}
}
return reservedCPUsPerNuma
}
// getCPUs returns the mapping CPU device ID -> NUMA node ID for every "cpu"
// entry in the given ContainerDevices slice.
func getCPUs(devices []*podresourcesapi.ContainerDevices) map[string]int {
	cpuMap := make(map[string]int)
	for _, device := range devices {
		if device.GetResourceName() != "cpu" {
			continue
		}
		// Topology access stays inside the inner loop on purpose: it is never
		// evaluated when DeviceIds is empty, matching the original behavior.
		for _, devID := range device.DeviceIds {
			cpuMap[devID] = int(device.Topology.Nodes[0].ID)
		}
	}
	return cpuMap
}
2021-11-04 10:16:57 +02:00
// updateMemoryAvailable computes the actual amount of the available memory.
// This function assumes the available resources are initialized to be equal to the capacity.
2022-10-14 15:28:52 +03:00
func ( noderesourceData * nodeResources ) updateMemoryAvailable ( numaData map [ int ] map [ corev1 . ResourceName ] * resourceData , ri ResourceInfo ) {
2021-11-04 10:16:57 +02:00
if len ( ri . NumaNodeIds ) == 0 {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "no NUMA nodes information is available" , "resourceName" , ri . Name )
2021-11-04 10:16:57 +02:00
return
}
if len ( ri . Data ) != 1 {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "no size information is available" , "resourceName" , ri . Name )
2021-11-04 10:16:57 +02:00
return
}
requestedSize , err := strconv . ParseInt ( ri . Data [ 0 ] , 10 , 64 )
if err != nil {
2023-05-03 11:32:53 +03:00
klog . ErrorS ( err , "failed to parse resource requested size" )
2021-11-04 10:16:57 +02:00
return
}
for _ , numaNodeID := range ri . NumaNodeIds {
if requestedSize == 0 {
return
}
if _ , ok := numaData [ numaNodeID ] ; ! ok {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "failed to find NUMA node ID under the node topology" , "numaID" , numaNodeID )
2021-11-04 10:16:57 +02:00
continue
}
if _ , ok := numaData [ numaNodeID ] [ ri . Name ] ; ! ok {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "failed to find resource under the node topology" , "resourceName" , ri . Name )
2021-11-04 10:16:57 +02:00
return
}
if numaData [ numaNodeID ] [ ri . Name ] . available == 0 {
2023-05-03 11:32:53 +03:00
klog . V ( 4 ) . InfoS ( "no available memory" , "numaID" , numaNodeID , "resourceName" , ri . Name )
2021-11-04 10:16:57 +02:00
continue
}
// For the container pinned only to one NUMA node the calculation is pretty straight forward, the code will
// just reduce the specified NUMA node free size
// For the container pinned to multiple NUMA nodes, the code will reduce the free size of NUMA nodes
// in ascending order. For example, for a container pinned to NUMA node 0 and NUMA node 1,
// it will first reduce the memory of the NUMA node 0 to zero, and after the remaining
// amount of memory from the NUMA node 1.
// This behavior is tightly coupled with the Kubernetes memory manager logic.
if requestedSize >= numaData [ numaNodeID ] [ ri . Name ] . available {
requestedSize -= numaData [ numaNodeID ] [ ri . Name ] . available
numaData [ numaNodeID ] [ ri . Name ] . available = 0
} else {
numaData [ numaNodeID ] [ ri . Name ] . available -= requestedSize
requestedSize = 0
}
}
if requestedSize > 0 {
2023-05-03 11:32:53 +03:00
klog . InfoS ( "requested size was not fully satisfied by NUMA nodes" , "resourceName" , ri . Name )
2021-11-04 10:16:57 +02:00
}
}
func getMemoryResourcesCapacity ( ) ( utils . NumaMemoryResources , error ) {
memoryResources , err := utils . GetNumaMemoryResources ( )
if err != nil {
return nil , err
}
capacity := make ( utils . NumaMemoryResources )
for numaID , resources := range memoryResources {
if _ , ok := capacity [ numaID ] ; ! ok {
2022-10-14 15:28:52 +03:00
capacity [ numaID ] = map [ corev1 . ResourceName ] int64 { }
2021-11-04 10:16:57 +02:00
}
for resourceName , value := range resources {
if _ , ok := capacity [ numaID ] [ resourceName ] ; ! ok {
capacity [ numaID ] [ resourceName ] = 0
}
capacity [ numaID ] [ resourceName ] += value
}
}
return capacity , nil
}