Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git (synced 2025-03-14 20:56:42 +00:00)
resourcemonitor: aggregate and provide the memory and hugepages information

The Kubernetes pod resources API now exposes the memory and hugepages information for guaranteed pods. We can use this information to update the NodeResourceTopology resource with memory and hugepages data.

Signed-off-by: Artyom Lukianov <alukiano@redhat.com>
This commit is contained in:
parent a93b660f7c
commit 45062754fd

5 changed files with 504 additions and 163 deletions
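The change builds on the kubelet pod resources API types that appear throughout the diff below (AllocatableResourcesResponse, ContainerMemory, TopologyInfo). As a minimal illustrative sketch (not part of this commit; it assumes an already-connected PodResourcesListerClient), this is how a consumer can read the per-NUMA memory and hugepages blocks that the API now exposes:

package main

import (
	"context"
	"fmt"
	"time"

	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)

// printAllocatableMemory lists the memory blocks (plain memory and hugepages)
// reported by the kubelet, together with the NUMA nodes they belong to.
func printAllocatableMemory(cli podresourcesapi.PodResourcesListerClient) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	resp, err := cli.GetAllocatableResources(ctx, &podresourcesapi.AllocatableResourcesRequest{})
	if err != nil {
		return fmt.Errorf("can't receive response: %w", err)
	}

	for _, block := range resp.GetMemory() {
		topo := block.GetTopology()
		if topo == nil {
			continue
		}
		for _, node := range topo.GetNodes() {
			fmt.Printf("NUMA node %d: %s = %d bytes\n", node.GetID(), block.GetMemoryType(), block.GetSize_())
		}
	}
	return nil
}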
@@ -19,6 +19,8 @@ package resourcemonitor
 import (
 	"context"
 	"fmt"
+	"strconv"
+	"strings"
 	"time"

 	"github.com/jaypipes/ghw"
@@ -27,6 +29,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	"k8s.io/klog/v2"
 	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
+
+	"sigs.k8s.io/node-feature-discovery/pkg/utils"
 	"sigs.k8s.io/node-feature-discovery/source"
 )
@@ -38,9 +42,10 @@ const (
 type nodeResources struct {
 	perNUMAAllocatable map[int]map[v1.ResourceName]int64
 	// mapping: resourceName -> resourceID -> nodeID
-	resourceID2NUMAID    map[string]map[string]int
-	topo                 *ghw.TopologyInfo
-	reservedCPUIDPerNUMA map[int][]string
+	resourceID2NUMAID              map[string]map[string]int
+	topo                           *ghw.TopologyInfo
+	reservedCPUIDPerNUMA           map[int][]string
+	memoryResourcesCapacityPerNUMA utils.NumaMemoryResources
 }

 type resourceData struct {
@@ -59,6 +64,11 @@ func NewResourcesAggregator(podResourceClient podresourcesapi.PodResourcesListerClient) (ResourcesAggregator, error) {
 		return nil, err
 	}

+	memoryResourcesCapacityPerNUMA, err := getMemoryResourcesCapacity()
+	if err != nil {
+		return nil, err
+	}
+
 	ctx, cancel := context.WithTimeout(context.Background(), defaultPodResourcesTimeout)
 	defer cancel()

@@ -68,17 +78,18 @@ func NewResourcesAggregator(podResourceClient podresourcesapi.PodResourcesListerClient) (ResourcesAggregator, error) {
 		return nil, fmt.Errorf("can't receive response: %v.Get(_) = _, %w", podResourceClient, err)
 	}

-	return NewResourcesAggregatorFromData(topo, resp), nil
+	return NewResourcesAggregatorFromData(topo, resp, memoryResourcesCapacityPerNUMA), nil
 }

 // NewResourcesAggregatorFromData is used to aggregate resource information based on the received data from underlying hardware and podresource API
-func NewResourcesAggregatorFromData(topo *ghw.TopologyInfo, resp *podresourcesapi.AllocatableResourcesResponse) ResourcesAggregator {
+func NewResourcesAggregatorFromData(topo *ghw.TopologyInfo, resp *podresourcesapi.AllocatableResourcesResponse, memoryResourceCapacity utils.NumaMemoryResources) ResourcesAggregator {
 	allDevs := getContainerDevicesFromAllocatableResources(resp, topo)
 	return &nodeResources{
-		topo:                 topo,
-		resourceID2NUMAID:    makeResourceMap(len(topo.Nodes), allDevs),
-		perNUMAAllocatable:   makeNodeAllocatable(allDevs),
-		reservedCPUIDPerNUMA: makeReservedCPUMap(topo.Nodes, allDevs),
+		topo:                           topo,
+		resourceID2NUMAID:              makeResourceMap(len(topo.Nodes), allDevs),
+		perNUMAAllocatable:             makeNodeAllocatable(allDevs, resp.GetMemory()),
+		reservedCPUIDPerNUMA:           makeReservedCPUMap(topo.Nodes, allDevs),
+		memoryResourcesCapacityPerNUMA: memoryResourceCapacity,
 	}
 }
@@ -89,18 +100,34 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topologyv1alpha1.ZoneList {
 		nodeRes, ok := noderesourceData.perNUMAAllocatable[nodeID]
 		if ok {
 			perNuma[nodeID] = make(map[v1.ResourceName]*resourceData)
-			for resName, resCap := range nodeRes {
-				if resName == "cpu" {
+			for resName, allocatable := range nodeRes {
+				switch {
+				case resName == "cpu":
 					perNuma[nodeID][resName] = &resourceData{
-						allocatable: resCap,
-						available:   resCap,
-						capacity:    resCap + int64(len(noderesourceData.reservedCPUIDPerNUMA[nodeID])),
+						allocatable: allocatable,
+						available:   allocatable,
+						capacity:    allocatable + int64(len(noderesourceData.reservedCPUIDPerNUMA[nodeID])),
 					}
-				} else {
+				case resName == v1.ResourceMemory, strings.HasPrefix(string(resName), v1.ResourceHugePagesPrefix):
+					var capacity int64
+					if _, ok := noderesourceData.memoryResourcesCapacityPerNUMA[nodeID]; !ok {
+						capacity = allocatable
+					} else if _, ok := noderesourceData.memoryResourcesCapacityPerNUMA[nodeID][resName]; !ok {
+						capacity = allocatable
+					} else {
+						capacity = noderesourceData.memoryResourcesCapacityPerNUMA[nodeID][resName]
+					}
+
 					perNuma[nodeID][resName] = &resourceData{
-						allocatable: resCap,
-						available:   resCap,
-						capacity:    resCap,
+						allocatable: allocatable,
+						available:   allocatable,
+						capacity:    capacity,
+					}
+				default:
+					perNuma[nodeID][resName] = &resourceData{
+						allocatable: allocatable,
+						available:   allocatable,
+						capacity:    allocatable,
 					}
 				}
 			}

@@ -119,6 +146,11 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topologyv1alpha1.ZoneList {
 	for _, podRes := range podResData {
 		for _, contRes := range podRes.Containers {
 			for _, res := range contRes.Resources {
+				if res.Name == v1.ResourceMemory || strings.HasPrefix(string(res.Name), v1.ResourceHugePagesPrefix) {
+					noderesourceData.updateMemoryAvailable(perNuma, res)
+					continue
+				}
+
 				noderesourceData.updateAvailable(perNuma, res)
 			}
 		}

@@ -136,7 +168,7 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topologyv1alpha1.ZoneList {
 		if err != nil {
 			klog.Infof("cannot find costs for NUMA node %d: %v", nodeID, err)
 		} else {
-			zone.Costs = topologyv1alpha1.CostList(costs)
+			zone.Costs = costs
 		}

 		for name, resData := range resList {

@@ -219,7 +251,7 @@ func makeZoneName(nodeID int) string {
 // makeNodeAllocatable computes the node allocatable as mapping (NUMA node ID) -> Resource -> Allocatable (amount, int).
 // The computation is done assuming all the resources to represent the allocatable for are represented on a slice
 // of ContainerDevices. No special treatment is done for CPU IDs. See getContainerDevicesFromAllocatableResources.
-func makeNodeAllocatable(devices []*podresourcesapi.ContainerDevices) map[int]map[v1.ResourceName]int64 {
+func makeNodeAllocatable(devices []*podresourcesapi.ContainerDevices, memoryBlocks []*podresourcesapi.ContainerMemory) map[int]map[v1.ResourceName]int64 {
 	perNUMAAllocatable := make(map[int]map[v1.ResourceName]int64)
 	// initialize with the capacities
 	for _, device := range devices {
@@ -234,6 +266,31 @@ func makeNodeAllocatable(devices []*podresourcesapi.ContainerDevices) map[int]map[v1.ResourceName]int64 {
 			perNUMAAllocatable[nodeID] = nodeRes
 		}
 	}

+	for _, block := range memoryBlocks {
+		memoryType := v1.ResourceName(block.GetMemoryType())
+
+		blockTopology := block.GetTopology()
+		if blockTopology == nil {
+			continue
+		}
+
+		for _, node := range blockTopology.GetNodes() {
+			nodeID := int(node.GetID())
+			if _, ok := perNUMAAllocatable[nodeID]; !ok {
+				perNUMAAllocatable[nodeID] = make(map[v1.ResourceName]int64)
+			}
+
+			if _, ok := perNUMAAllocatable[nodeID][memoryType]; !ok {
+				perNUMAAllocatable[nodeID][memoryType] = 0
+			}
+
+			// Casting from uint64 to int64 is not ideal, but it is safe as long as
+			// the memory size does not exceed 8589934592Gi.
+			perNUMAAllocatable[nodeID][memoryType] += int64(block.GetSize_())
+		}
+	}
+
 	return perNUMAAllocatable
 }
@@ -330,3 +387,86 @@ func getCPUs(devices []*podresourcesapi.ContainerDevices) map[string]int {
 	}
 	return cpuMap
 }
+
+// updateMemoryAvailable computes the actual amount of the available memory.
+// This function assumes the available resources are initialized to be equal to the capacity.
+func (noderesourceData *nodeResources) updateMemoryAvailable(numaData map[int]map[v1.ResourceName]*resourceData, ri ResourceInfo) {
+	if len(ri.NumaNodeIds) == 0 {
+		klog.Warningf("no NUMA nodes information is available for device %q", ri.Name)
+		return
+	}
+
+	if len(ri.Data) != 1 {
+		klog.Warningf("no size information is available for the device %q", ri.Name)
+		return
+	}
+
+	requestedSize, err := strconv.ParseInt(ri.Data[0], 10, 64)
+	if err != nil {
+		klog.Errorf("failed to parse resource requested size: %v", err)
+		return
+	}
+
+	for _, numaNodeID := range ri.NumaNodeIds {
+		if requestedSize == 0 {
+			return
+		}
+
+		if _, ok := numaData[numaNodeID]; !ok {
+			klog.Warningf("failed to find NUMA node ID %d under the node topology", numaNodeID)
+			continue
+		}
+
+		if _, ok := numaData[numaNodeID][ri.Name]; !ok {
+			klog.Warningf("failed to find resource %q under the node topology", ri.Name)
+			return
+		}
+
+		if numaData[numaNodeID][ri.Name].available == 0 {
+			klog.V(4).Infof("no available memory on the node %d for the resource %q", numaNodeID, ri.Name)
+			continue
+		}
+
+		// For a container pinned to a single NUMA node the calculation is straightforward:
+		// the code just reduces the free size of that NUMA node.
+		// For a container pinned to multiple NUMA nodes, the code reduces the free size of the
+		// NUMA nodes in ascending order. For example, for a container pinned to NUMA node 0 and
+		// NUMA node 1, it first reduces the memory of NUMA node 0 to zero, and then takes the
+		// remaining amount from NUMA node 1.
+		// This behavior is tightly coupled with the Kubernetes memory manager logic.
+		if requestedSize >= numaData[numaNodeID][ri.Name].available {
+			requestedSize -= numaData[numaNodeID][ri.Name].available
+			numaData[numaNodeID][ri.Name].available = 0
+		} else {
+			numaData[numaNodeID][ri.Name].available -= requestedSize
+			requestedSize = 0
+		}
+	}
+
+	if requestedSize > 0 {
+		klog.Warningf("the resource %q requested size was not fully satisfied by NUMA nodes", ri.Name)
+	}
+}
+
+func getMemoryResourcesCapacity() (utils.NumaMemoryResources, error) {
+	memoryResources, err := utils.GetNumaMemoryResources()
+	if err != nil {
+		return nil, err
+	}
+
+	capacity := make(utils.NumaMemoryResources)
+	for numaID, resources := range memoryResources {
+		if _, ok := capacity[numaID]; !ok {
+			capacity[numaID] = map[v1.ResourceName]int64{}
+		}
+
+		for resourceName, value := range resources {
+			if _, ok := capacity[numaID][resourceName]; !ok {
+				capacity[numaID][resourceName] = 0
+			}
+			capacity[numaID][resourceName] += value
+		}
+	}
+
+	return capacity, nil
+}
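The draining order described in the comment above can be shown with concrete numbers. The following self-contained sketch (illustrative only; it mirrors the loop in updateMemoryAvailable rather than calling it) shows a 1536-byte request pinned to NUMA nodes 0 and 1 draining node 0 completely before taking the remainder from node 1:

package main

import "fmt"

func main() {
	// available bytes per NUMA node, indexed by node ID
	available := map[int]int64{0: 1024, 1: 1024}
	requested := int64(1536)

	// Drain the pinned NUMA nodes in ascending order, as the memory manager does.
	for _, nodeID := range []int{0, 1} {
		if requested == 0 {
			break
		}
		if requested >= available[nodeID] {
			requested -= available[nodeID]
			available[nodeID] = 0
		} else {
			available[nodeID] -= requested
			requested = 0
		}
	}

	fmt.Println(available) // map[0:0 1:512]: node 0 drained, 512 bytes left on node 1
}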
@@ -22,14 +22,17 @@ import (
 	"sort"
 	"testing"

-	"github.com/google/go-cmp/cmp"
 	"github.com/jaypipes/ghw"
+
+	cmp "github.com/google/go-cmp/cmp"
 	. "github.com/smartystreets/goconvey/convey"
+
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	v1 "k8s.io/kubelet/pkg/apis/podresources/v1"

 	topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+	v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
+
+	"sigs.k8s.io/node-feature-discovery/pkg/utils"
 )

 func TestResourcesAggregator(t *testing.T) {
@@ -45,78 +48,78 @@ func TestResourcesAggregator(t *testing.T) {
 	Convey("When I aggregate the node resources fake data and no pod allocation", t, func() {
 		availRes := &v1.AllocatableResourcesResponse{
 			Devices: []*v1.ContainerDevices{
-				&v1.ContainerDevices{
+				{
 					ResourceName: "fake.io/net",
 					DeviceIds:    []string{"netAAA-0"},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							&v1.NUMANode{
+							{
 								ID: 0,
 							},
 						},
 					},
 				},
-				&v1.ContainerDevices{
+				{
 					ResourceName: "fake.io/net",
 					DeviceIds:    []string{"netAAA-1"},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							&v1.NUMANode{
+							{
 								ID: 0,
 							},
 						},
 					},
 				},
-				&v1.ContainerDevices{
+				{
 					ResourceName: "fake.io/net",
 					DeviceIds:    []string{"netAAA-2"},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							&v1.NUMANode{
+							{
 								ID: 0,
 							},
 						},
 					},
 				},
-				&v1.ContainerDevices{
+				{
 					ResourceName: "fake.io/net",
 					DeviceIds:    []string{"netAAA-3"},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							&v1.NUMANode{
+							{
 								ID: 0,
 							},
 						},
 					},
 				},
-				&v1.ContainerDevices{
+				{
 					ResourceName: "fake.io/net",
 					DeviceIds:    []string{"netBBB-0"},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							&v1.NUMANode{
+							{
 								ID: 1,
 							},
 						},
 					},
 				},
-				&v1.ContainerDevices{
+				{
 					ResourceName: "fake.io/net",
 					DeviceIds:    []string{"netBBB-1"},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							&v1.NUMANode{
+							{
 								ID: 1,
 							},
 						},
 					},
 				},
-				&v1.ContainerDevices{
+				{
 					ResourceName: "fake.io/gpu",
 					DeviceIds:    []string{"gpuAAA"},
 					Topology: &v1.TopologyInfo{
 						Nodes: []*v1.NUMANode{
-							&v1.NUMANode{
+							{
 								ID: 1,
 							},
 						},
@@ -129,9 +132,53 @@ func TestResourcesAggregator(t *testing.T) {
 				2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
 				12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
 			},
+			Memory: []*v1.ContainerMemory{
+				{
+					MemoryType: "memory",
+					Size_:      1024,
+					Topology: &v1.TopologyInfo{
+						Nodes: []*v1.NUMANode{
+							{
+								ID: 0,
+							},
+						},
+					},
+				},
+				{
+					MemoryType: "memory",
+					Size_:      1024,
+					Topology: &v1.TopologyInfo{
+						Nodes: []*v1.NUMANode{
+							{
+								ID: 1,
+							},
+						},
+					},
+				},
+				{
+					MemoryType: "hugepages-2Mi",
+					Size_:      1024,
+					Topology: &v1.TopologyInfo{
+						Nodes: []*v1.NUMANode{
+							{
+								ID: 1,
+							},
+						},
+					},
+				},
+			},
 		}

-		resAggr = NewResourcesAggregatorFromData(&fakeTopo, availRes)
+		memoryResourcesCapacity := utils.NumaMemoryResources{
+			0: map[corev1.ResourceName]int64{
+				corev1.ResourceMemory: 2048,
+			},
+			1: map[corev1.ResourceName]int64{
+				corev1.ResourceMemory:                2048,
+				corev1.ResourceName("hugepages-2Mi"): 2048,
+			},
+		}
+		resAggr = NewResourcesAggregatorFromData(&fakeTopo, availRes, memoryResourcesCapacity)

 		Convey("When aggregating resources", func() {
 			expected := topologyv1alpha1.ZoneList{
@@ -151,15 +198,21 @@ func TestResourcesAggregator(t *testing.T) {
 				Resources: topologyv1alpha1.ResourceInfoList{
 					topologyv1alpha1.ResourceInfo{
 						Name:        "cpu",
-						Available:   resource.MustParse("11"),
-						Allocatable: resource.MustParse("11"),
-						Capacity:    resource.MustParse("12"),
+						Available:   *resource.NewQuantity(11, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(12, resource.DecimalSI),
 					},
 					topologyv1alpha1.ResourceInfo{
 						Name:        "fake.io/net",
-						Available:   resource.MustParse("4"),
-						Allocatable: resource.MustParse("4"),
-						Capacity:    resource.MustParse("4"),
+						Available:   *resource.NewQuantity(4, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(4, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(4, resource.DecimalSI),
+					},
+					topologyv1alpha1.ResourceInfo{
+						Name:        "memory",
+						Available:   *resource.NewQuantity(1024, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(2048, resource.DecimalSI),
 					},
 				},
 			},

@@ -179,21 +232,33 @@ func TestResourcesAggregator(t *testing.T) {
 				Resources: topologyv1alpha1.ResourceInfoList{
 					topologyv1alpha1.ResourceInfo{
 						Name:        "cpu",
-						Available:   resource.MustParse("11"),
-						Allocatable: resource.MustParse("11"),
-						Capacity:    resource.MustParse("12"),
+						Available:   *resource.NewQuantity(11, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(12, resource.DecimalSI),
 					},
 					topologyv1alpha1.ResourceInfo{
 						Name:        "fake.io/gpu",
-						Available:   resource.MustParse("1"),
-						Allocatable: resource.MustParse("1"),
-						Capacity:    resource.MustParse("1"),
+						Available:   *resource.NewQuantity(1, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(1, resource.DecimalSI),
 					},
 					topologyv1alpha1.ResourceInfo{
 						Name:        "fake.io/net",
-						Available:   resource.MustParse("4"),
-						Allocatable: resource.MustParse("4"),
-						Capacity:    resource.MustParse("4"),
+						Available:   *resource.NewQuantity(2, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(2, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(2, resource.DecimalSI),
+					},
+					topologyv1alpha1.ResourceInfo{
+						Name:        "hugepages-2Mi",
+						Available:   *resource.NewQuantity(1024, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(2048, resource.DecimalSI),
+					},
+					topologyv1alpha1.ResourceInfo{
+						Name:        "memory",
+						Available:   *resource.NewQuantity(1024, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(2048, resource.DecimalSI),
 					},
 				},
 			},
@@ -213,44 +278,45 @@ func TestResourcesAggregator(t *testing.T) {
 				return resource.Resources[x].Name < resource.Resources[y].Name
 			})
 		}
-		log.Printf("result=%v", res)
-		log.Printf("expected=%v", expected)
+
+		log.Printf("result=%+v", res)
+		log.Printf("expected=%+v", expected)
+		log.Printf("diff=%s", cmp.Diff(res, expected))
-		So(cmp.Equal(res, expected), ShouldBeFalse)
+		So(cmp.Equal(res, expected), ShouldBeTrue)
 	})
 })

 Convey("When I aggregate the node resources fake data and some pod allocation", t, func() {
 	availRes := &v1.AllocatableResourcesResponse{
 		Devices: []*v1.ContainerDevices{
-			&v1.ContainerDevices{
+			{
 				ResourceName: "fake.io/net",
 				DeviceIds:    []string{"netAAA"},
 				Topology: &v1.TopologyInfo{
 					Nodes: []*v1.NUMANode{
-						&v1.NUMANode{
+						{
 							ID: 0,
 						},
 					},
 				},
 			},
-			&v1.ContainerDevices{
+			{
 				ResourceName: "fake.io/net",
 				DeviceIds:    []string{"netBBB"},
 				Topology: &v1.TopologyInfo{
 					Nodes: []*v1.NUMANode{
-						&v1.NUMANode{
+						{
 							ID: 1,
 						},
 					},
 				},
 			},
-			&v1.ContainerDevices{
+			{
 				ResourceName: "fake.io/gpu",
 				DeviceIds:    []string{"gpuAAA"},
 				Topology: &v1.TopologyInfo{
 					Nodes: []*v1.NUMANode{
-						&v1.NUMANode{
+						{
 							ID: 1,
 						},
 					},
|
@ -263,27 +329,82 @@ func TestResourcesAggregator(t *testing.T) {
|
|||
1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
|
||||
12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23,
|
||||
},
|
||||
Memory: []*v1.ContainerMemory{
|
||||
{
|
||||
MemoryType: "memory",
|
||||
Size_: 1024,
|
||||
Topology: &v1.TopologyInfo{
|
||||
Nodes: []*v1.NUMANode{
|
||||
{
|
||||
ID: 0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MemoryType: "memory",
|
||||
Size_: 1024,
|
||||
Topology: &v1.TopologyInfo{
|
||||
Nodes: []*v1.NUMANode{
|
||||
{
|
||||
ID: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
MemoryType: "hugepages-2Mi",
|
||||
Size_: 1024,
|
||||
Topology: &v1.TopologyInfo{
|
||||
Nodes: []*v1.NUMANode{
|
||||
{
|
||||
ID: 1,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
resAggr = NewResourcesAggregatorFromData(&fakeTopo, availRes)
|
||||
memoryResourcesCapacity := utils.NumaMemoryResources{
|
||||
0: map[corev1.ResourceName]int64{
|
||||
corev1.ResourceMemory: 2048,
|
||||
},
|
||||
1: map[corev1.ResourceName]int64{
|
||||
corev1.ResourceMemory: 2048,
|
||||
corev1.ResourceName("hugepages-2Mi"): 2048,
|
||||
},
|
||||
}
|
||||
|
||||
resAggr = NewResourcesAggregatorFromData(&fakeTopo, availRes, memoryResourcesCapacity)
|
||||
|
||||
Convey("When aggregating resources", func() {
|
||||
podRes := []PodResources{
|
||||
PodResources{
|
||||
{
|
||||
Name: "test-pod-0",
|
||||
Namespace: "default",
|
||||
Containers: []ContainerResources{
|
||||
ContainerResources{
|
||||
{
|
||||
Name: "test-cnt-0",
|
||||
Resources: []ResourceInfo{
|
||||
ResourceInfo{
|
||||
{
|
||||
Name: "cpu",
|
||||
Data: []string{"5", "7"},
|
||||
},
|
||||
ResourceInfo{
|
||||
{
|
||||
Name: "fake.io/net",
|
||||
Data: []string{"netBBB"},
|
||||
},
|
||||
{
|
||||
Name: "memory",
|
||||
Data: []string{"512"},
|
||||
NumaNodeIds: []int{1},
|
||||
},
|
||||
{
|
||||
Name: "hugepages-2Mi",
|
||||
Data: []string{"512"},
|
||||
NumaNodeIds: []int{1},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
|
@@ -307,15 +428,21 @@ func TestResourcesAggregator(t *testing.T) {
 				Resources: topologyv1alpha1.ResourceInfoList{
 					topologyv1alpha1.ResourceInfo{
 						Name:        "cpu",
-						Available:   resource.MustParse("11"),
-						Allocatable: resource.MustParse("11"),
-						Capacity:    resource.MustParse("12"),
+						Available:   *resource.NewQuantity(11, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(12, resource.DecimalSI),
 					},
 					topologyv1alpha1.ResourceInfo{
 						Name:        "fake.io/net",
-						Available:   resource.MustParse("1"),
-						Allocatable: resource.MustParse("1"),
-						Capacity:    resource.MustParse("1"),
+						Available:   *resource.NewQuantity(1, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(1, resource.DecimalSI),
+					},
+					topologyv1alpha1.ResourceInfo{
+						Name:        "memory",
+						Available:   *resource.NewQuantity(1024, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(2048, resource.DecimalSI),
 					},
 				},
 			},

@@ -336,20 +463,32 @@ func TestResourcesAggregator(t *testing.T) {
 					topologyv1alpha1.ResourceInfo{
 						Name:        "cpu",
-						Available:   resource.MustParse("10"),
-						Allocatable: resource.MustParse("12"),
-						Capacity:    resource.MustParse("12"),
+						Available:   *resource.NewQuantity(10, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(12, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(12, resource.DecimalSI),
 					},
 					topologyv1alpha1.ResourceInfo{
 						Name:        "fake.io/gpu",
-						Available:   resource.MustParse("1"),
-						Allocatable: resource.MustParse("1"),
-						Capacity:    resource.MustParse("1"),
+						Available:   *resource.NewQuantity(1, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(1, resource.DecimalSI),
 					},
 					topologyv1alpha1.ResourceInfo{
 						Name:        "fake.io/net",
-						Available:   resource.MustParse("0"),
-						Allocatable: resource.MustParse("1"),
-						Capacity:    resource.MustParse("1"),
+						Available:   *resource.NewQuantity(0, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(1, resource.DecimalSI),
 					},
+					topologyv1alpha1.ResourceInfo{
+						Name:        "hugepages-2Mi",
+						Available:   *resource.NewQuantity(512, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(2048, resource.DecimalSI),
+					},
+					topologyv1alpha1.ResourceInfo{
+						Name:        "memory",
+						Available:   *resource.NewQuantity(512, resource.DecimalSI),
+						Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
+						Capacity:    *resource.NewQuantity(2048, resource.DecimalSI),
+					},
 				},
 			},
@@ -369,8 +508,8 @@ func TestResourcesAggregator(t *testing.T) {
 				return resource.Resources[x].Name < resource.Resources[y].Name
 			})
 		}
-		log.Printf("result=%v", res)
-		log.Printf("expected=%v", expected)
+		log.Printf("result=%+v", res)
+		log.Printf("expected=%+v", expected)
+		log.Printf("diff=%s", cmp.Diff(res, expected))
 		So(cmp.Equal(res, expected), ShouldBeTrue)
 	})

@@ -379,7 +518,7 @@ func TestResourcesAggregator(t *testing.T) {
 }

 // ghwc topology -f json
-var testTopology string = `{
+var testTopology = `{
 	"nodes": [
 		{
 			"id": 0,
@@ -162,9 +162,24 @@ func (resMon *PodResourcesScanner) Scan() ([]PodResources, error) {
 			}

 			for _, device := range container.GetDevices() {
+				numaNodesIDs := getNumaNodeIds(device.GetTopology())
 				contRes.Resources = append(contRes.Resources, ResourceInfo{
-					Name: v1.ResourceName(device.ResourceName),
-					Data: device.DeviceIds,
+					Name:        v1.ResourceName(device.ResourceName),
+					Data:        device.DeviceIds,
+					NumaNodeIds: numaNodesIDs,
 				})
 			}

+			for _, block := range container.GetMemory() {
+				if block.GetSize_() == 0 {
+					continue
+				}
+
+				topology := getNumaNodeIds(block.GetTopology())
+				contRes.Resources = append(contRes.Resources, ResourceInfo{
+					Name:        v1.ResourceName(block.MemoryType),
+					Data:        []string{fmt.Sprintf("%d", block.GetSize_())},
+					NumaNodeIds: topology,
+				})
+			}
@@ -194,3 +209,18 @@ func hasDevice(podResource *podresourcesapi.PodResources) bool {
 	klog.Infof("pod:%s doesn't have devices", podResource.GetName())
 	return false
 }
+
+func getNumaNodeIds(topologyInfo *podresourcesapi.TopologyInfo) []int {
+	if topologyInfo == nil {
+		return nil
+	}
+
+	var topology []int
+	for _, node := range topologyInfo.Nodes {
+		if node != nil {
+			topology = append(topology, int(node.ID))
+		}
+	}
+
+	return topology
+}
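To illustrate what the new scanner path produces, here is a small self-contained sketch (not part of the commit; numaNodeIds mirrors the unexported getNumaNodeIds helper added above) showing how a ContainerMemory block's topology is flattened into the NumaNodeIds slice of the reported resource:

package main

import (
	"fmt"

	podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
)

// numaNodeIds mirrors the unexported getNumaNodeIds helper from this commit.
func numaNodeIds(topologyInfo *podresourcesapi.TopologyInfo) []int {
	if topologyInfo == nil {
		return nil
	}
	var ids []int
	for _, node := range topologyInfo.Nodes {
		if node != nil {
			ids = append(ids, int(node.ID))
		}
	}
	return ids
}

func main() {
	block := &podresourcesapi.ContainerMemory{
		MemoryType: "hugepages-2Mi",
		Size_:      512,
		Topology: &podresourcesapi.TopologyInfo{
			Nodes: []*podresourcesapi.NUMANode{{ID: 1}},
		},
	}
	fmt.Println(numaNodeIds(block.GetTopology())) // [1]
}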
@@ -25,16 +25,14 @@ import (
 	. "github.com/smartystreets/goconvey/convey"
 	"github.com/stretchr/testify/mock"

+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	k8sclient "k8s.io/client-go/kubernetes"
 	v1 "k8s.io/kubelet/pkg/apis/podresources/v1"

 	"sigs.k8s.io/node-feature-discovery/pkg/apihelper"
 	"sigs.k8s.io/node-feature-discovery/pkg/podres"
-
-	corev1 "k8s.io/api/core/v1"
-
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	k8sclient "k8s.io/client-go/kubernetes"
 )

 func TestPodScanner(t *testing.T) {
@@ -79,24 +77,44 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 									Topology: &v1.TopologyInfo{
 										Nodes: []*v1.NUMANode{
-											&v1.NUMANode{ID: 0},
+											{ID: 0},
 										},
 									},
 								},
 							},
 							CpuIds: []int64{0, 1},
+							Memory: []*v1.ContainerMemory{
+								{
+									MemoryType: "hugepages-2Mi",
+									Size_:      512,
+									Topology: &v1.TopologyInfo{
+										Nodes: []*v1.NUMANode{
+											{ID: 1},
+										},
+									},
+								},
+								{
+									MemoryType: "memory",
+									Size_:      512,
+									Topology: &v1.TopologyInfo{
+										Nodes: []*v1.NUMANode{
+											{ID: 0},
+										},
+									},
+								},
+							},
 						},
 					},
 				},
@@ -120,12 +138,14 @@ func TestPodScanner(t *testing.T) {
 					Requests: corev1.ResourceList{
 						corev1.ResourceCPU:                      *resource.NewQuantity(2, resource.DecimalSI),
 						corev1.ResourceName("fake.io/resource"): *resource.NewQuantity(1, resource.DecimalSI),
-						corev1.ResourceMemory:                   *resource.NewQuantity(100, resource.DecimalSI),
+						corev1.ResourceMemory:                   *resource.NewQuantity(512, resource.DecimalSI),
+						"hugepages-2Mi":                         *resource.NewQuantity(512, resource.DecimalSI),
 					},
 					Limits: corev1.ResourceList{
 						corev1.ResourceCPU:                      *resource.NewQuantity(2, resource.DecimalSI),
 						corev1.ResourceName("fake.io/resource"): *resource.NewQuantity(1, resource.DecimalSI),
-						corev1.ResourceMemory:                   *resource.NewQuantity(100, resource.DecimalSI),
+						corev1.ResourceMemory:                   *resource.NewQuantity(512, resource.DecimalSI),
+						"hugepages-2Mi":                         *resource.NewQuantity(512, resource.DecimalSI),
 					},
 				},
 			},
@@ -143,20 +163,31 @@ func TestPodScanner(t *testing.T) {
 			So(len(res), ShouldBeGreaterThan, 0)

 			expected := []PodResources{
-				PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []ContainerResources{
-						ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Resources: []ResourceInfo{
-								ResourceInfo{
+								{
 									Name: "cpu",
 									Data: []string{"0", "1"},
 								},
-								ResourceInfo{
-									Name: "fake.io/resource",
-									Data: []string{"devA"},
+								{
+									Name:        "fake.io/resource",
+									Data:        []string{"devA"},
+									NumaNodeIds: []int{0},
+								},
+								{
+									Name:        "hugepages-2Mi",
+									Data:        []string{"512"},
+									NumaNodeIds: []int{1},
+								},
+								{
+									Name:        "memory",
+									Data:        []string{"512"},
+									NumaNodeIds: []int{0},
+								},
 							},
 						},
@@ -177,14 +208,14 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response without topology", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 								},

@@ -238,18 +269,18 @@ func TestPodScanner(t *testing.T) {
 			So(len(res), ShouldBeGreaterThan, 0)

 			expected := []PodResources{
-				PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []ContainerResources{
-						ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Resources: []ResourceInfo{
-								ResourceInfo{
+								{
 									Name: "cpu",
 									Data: []string{"0", "1"},
 								},
-								ResourceInfo{
+								{
 									Name: "fake.io/resource",
 									Data: []string{"devA"},
 								},

@@ -266,11 +297,11 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response without devices", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name:   "test-cnt-0",
 							CpuIds: []int64{0, 1},
 						},
@@ -317,14 +348,14 @@ func TestPodScanner(t *testing.T) {
 			So(len(res), ShouldBeGreaterThan, 0)

 			expected := []PodResources{
-				PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []ContainerResources{
-						ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Resources: []ResourceInfo{
-								ResourceInfo{
+								{
 									Name: "cpu",
 									Data: []string{"0", "1"},
 								},

@@ -341,14 +372,14 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response without cpus", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 								},
@@ -399,14 +430,14 @@ func TestPodScanner(t *testing.T) {
 			So(len(res), ShouldBeGreaterThan, 0)

 			expected := []PodResources{
-				PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []ContainerResources{
-						ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Resources: []ResourceInfo{
-								ResourceInfo{
+								{
 									Name: "fake.io/resource",
 									Data: []string{"devA"},
 								},

@@ -423,14 +454,14 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response for (non-guaranteed) pods with devices without cpus", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 								},

@@ -478,14 +509,14 @@ func TestPodScanner(t *testing.T) {
 		})

 		expected := []PodResources{
-			PodResources{
+			{
 				Name:      "test-pod-0",
 				Namespace: "default",
 				Containers: []ContainerResources{
-					ContainerResources{
+					{
 						Name: "test-cnt-0",
 						Resources: []ResourceInfo{
-							ResourceInfo{
+							{
 								Name: "fake.io/resource",
 								Data: []string{"devA"},
 							},
@@ -501,15 +532,15 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response for (non-guaranteed) pods with devices with cpus", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name:   "test-cnt-0",
 							CpuIds: []int64{0, 1},
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 								},

@@ -562,14 +593,14 @@ func TestPodScanner(t *testing.T) {
 		})

 		expected := []PodResources{
-			PodResources{
+			{
 				Name:      "test-pod-0",
 				Namespace: "default",
 				Containers: []ContainerResources{
-					ContainerResources{
+					{
 						Name: "test-cnt-0",
 						Resources: []ResourceInfo{
-							ResourceInfo{
+							{
 								Name: "fake.io/resource",
 								Data: []string{"devA"},
 							},
@@ -620,19 +651,19 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 									Topology: &v1.TopologyInfo{
 										Nodes: []*v1.NUMANode{
-											&v1.NUMANode{ID: 0},
+											{ID: 0},
 										},
 									},
 								},

@@ -688,14 +719,14 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response when pod is in the monitoring namespace", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "pod-res-test",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 								},

@@ -750,18 +781,18 @@ func TestPodScanner(t *testing.T) {
 			So(len(res), ShouldBeGreaterThan, 0)

 			expected := []PodResources{
-				PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "pod-res-test",
 					Containers: []ContainerResources{
-						ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Resources: []ResourceInfo{
-								ResourceInfo{
+								{
 									Name: "cpu",
 									Data: []string{"0", "1"},
 								},
-								ResourceInfo{
+								{
 									Name: "fake.io/resource",
 									Data: []string{"devA"},
 								},
@@ -778,11 +809,11 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid empty response for a pod not in the monitoring namespace without devices", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name:   "test-cnt-0",
 							CpuIds: []int64{0, 1},
 						},

@@ -835,14 +866,14 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get an empty valid response for a pod without cpus when pod is not in the monitoring namespace", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "default",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 								},
@@ -899,14 +930,14 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response for (non-guaranteed) pods with devices without cpus", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "pod-res-test",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name: "test-cnt-0",
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 								},

@@ -954,14 +985,14 @@ func TestPodScanner(t *testing.T) {
 		})

 		expected := []PodResources{
-			PodResources{
+			{
 				Name:      "test-pod-0",
 				Namespace: "pod-res-test",
 				Containers: []ContainerResources{
-					ContainerResources{
+					{
 						Name: "test-cnt-0",
 						Resources: []ResourceInfo{
-							ResourceInfo{
+							{
 								Name: "fake.io/resource",
 								Data: []string{"devA"},
 							},

@@ -977,15 +1008,15 @@ func TestPodScanner(t *testing.T) {
 	Convey("When I successfully get valid response for (non-guaranteed) pods with devices with cpus", func() {
 		resp := &v1.ListPodResourcesResponse{
 			PodResources: []*v1.PodResources{
-				&v1.PodResources{
+				{
 					Name:      "test-pod-0",
 					Namespace: "pod-res-test",
 					Containers: []*v1.ContainerResources{
-						&v1.ContainerResources{
+						{
 							Name:   "test-cnt-0",
 							CpuIds: []int64{0, 1},
 							Devices: []*v1.ContainerDevices{
-								&v1.ContainerDevices{
+								{
 									ResourceName: "fake.io/resource",
 									DeviceIds:    []string{"devA"},
 								},
@@ -1038,14 +1069,14 @@ func TestPodScanner(t *testing.T) {
 		})

 		expected := []PodResources{
-			PodResources{
+			{
 				Name:      "test-pod-0",
 				Namespace: "pod-res-test",
 				Containers: []ContainerResources{
-					ContainerResources{
+					{
 						Name: "test-cnt-0",
 						Resources: []ResourceInfo{
-							ResourceInfo{
+							{
 								Name: "fake.io/resource",
 								Data: []string{"devA"},
 							},
@@ -34,8 +34,9 @@ type Args struct {

 // ResourceInfo stores information of resources and their corresponding IDs obtained from PodResource API
 type ResourceInfo struct {
-	Name corev1.ResourceName
-	Data []string
+	Name        corev1.ResourceName
+	Data        []string
+	NumaNodeIds []int
 }

 // ContainerResources contains information about the node resources assigned to a container