
Merge pull request #1053 from jlojosnegros/nrt-api-update-v1alpha2

topology-updater: nrt-api Update to v1alpha2
commit 1084ce8b1d by Kubernetes Prow Robot, 2023-02-09 05:54:24 -08:00, committed by GitHub
14 changed files with 390 additions and 131 deletions
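Note: the recurring change across these files is the switch from the v1alpha1 to the v1alpha2 API group of noderesourcetopology-api, for both the Go types and the generated typed client. Below is a minimal sketch of the updated call pattern, assuming the clientset layout of noderesourcetopology-api v0.1.0; the helper name and package are illustrative and not part of the commit.

package example

import (
	"context"
	"fmt"

	"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
	topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	restclient "k8s.io/client-go/rest"
)

// listNRTs lists NodeResourceTopology objects through the v1alpha2 typed
// client, mirroring the TopologyV1alpha1() -> TopologyV1alpha2() call-site
// change made throughout this commit.
func listNRTs(cfg *restclient.Config) ([]v1alpha2.NodeResourceTopology, error) {
	cli, err := topologyclientset.NewForConfig(cfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create topology client: %w", err)
	}
	nrts, err := cli.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
	if err != nil {
		return nil, fmt.Errorf("cannot list NRTs: %w", err)
	}
	return nrts.Items, nil
}

The NodeResourceTopology reads and writes in the updater, the garbage collector, and the e2e helpers below all follow this same pattern.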


@ -3,7 +3,7 @@ kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes/enhancements/pull/1870
controller-gen.kubebuilder.io/version: v0.7.0
controller-gen.kubebuilder.io/version: v0.11.2
creationTimestamp: null
name: noderesourcetopologies.topology.node.k8s.io
spec:
@ -134,10 +134,137 @@ spec:
- zones
type: object
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
storage: false
- name: v1alpha2
schema:
openAPIV3Schema:
description: NodeResourceTopology describes node resources and their topology.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
attributes:
description: AttributeList contains an array of AttributeInfo objects.
items:
description: AttributeInfo contains one attribute of a Zone.
properties:
name:
type: string
value:
type: string
required:
- name
- value
type: object
type: array
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
topologyPolicies:
description: 'DEPRECATED (to be removed in v1beta1): use top level attributes
if needed'
items:
type: string
type: array
zones:
description: ZoneList contains an array of Zone objects.
items:
description: Zone represents a resource topology zone, e.g. socket,
node, die or core.
properties:
attributes:
description: AttributeList contains an array of AttributeInfo objects.
items:
description: AttributeInfo contains one attribute of a Zone.
properties:
name:
type: string
value:
type: string
required:
- name
- value
type: object
type: array
costs:
description: CostList contains an array of CostInfo objects.
items:
description: CostInfo describes the cost (or distance) between
two Zones.
properties:
name:
type: string
value:
format: int64
type: integer
required:
- name
- value
type: object
type: array
name:
type: string
parent:
type: string
resources:
description: ResourceInfoList contains an array of ResourceInfo
objects.
items:
description: ResourceInfo contains information about one resource
type.
properties:
allocatable:
anyOf:
- type: integer
- type: string
description: Allocatable quantity of the resource, corresponding
to allocatable in node status, i.e. total amount of this
resource available to be used by pods.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
available:
anyOf:
- type: integer
- type: string
description: Available is the amount of this resource currently
available for new (to be scheduled) pods, i.e. Allocatable
minus the resources reserved by currently running pods.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
capacity:
anyOf:
- type: integer
- type: string
description: Capacity of the resource, corresponding to capacity
in node status, i.e. total amount of this resource that
the node has.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
name:
description: Name of the resource.
type: string
required:
- allocatable
- available
- capacity
- name
type: object
type: array
type:
type: string
required:
- name
- type
type: object
type: array
required:
- zones
type: object
served: true
storage: true
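For reference, an object matching the v1alpha2 schema served above can be constructed with the updated Go types. This is an illustrative sketch only: the node, zone, attribute, and quantity values are made up, not taken from the commit; field names follow the schema above.

package example

import (
	topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleNRT builds a NodeResourceTopology shaped like the v1alpha2 schema:
// a top-level attributes list plus per-zone costs and resources.
func exampleNRT() topologyv1alpha2.NodeResourceTopology {
	return topologyv1alpha2.NodeResourceTopology{
		ObjectMeta: metav1.ObjectMeta{Name: "worker-0"}, // hypothetical node name
		Attributes: topologyv1alpha2.AttributeList{
			{Name: "topologyManagerPolicy", Value: "single-numa-node"}, // illustrative attribute
		},
		Zones: topologyv1alpha2.ZoneList{
			{
				Name: "node-0",
				Type: "Node",
				Costs: topologyv1alpha2.CostList{
					{Name: "node-0", Value: 10},
					{Name: "node-1", Value: 20},
				},
				Resources: topologyv1alpha2.ResourceInfoList{
					{
						Name:        "cpu",
						Capacity:    *resource.NewQuantity(12, resource.DecimalSI),
						Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
						Available:   *resource.NewQuantity(11, resource.DecimalSI),
					},
				},
			},
		},
	}
}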


@ -4,7 +4,7 @@ kind: CustomResourceDefinition
metadata:
annotations:
api-approved.kubernetes.io: https://github.com/kubernetes/enhancements/pull/1870
controller-gen.kubebuilder.io/version: v0.7.0
controller-gen.kubebuilder.io/version: v0.11.2
creationTimestamp: null
name: noderesourcetopologies.topology.node.k8s.io
spec:
@ -135,6 +135,139 @@ spec:
- zones
type: object
served: true
storage: false
- name: v1alpha2
schema:
openAPIV3Schema:
description: NodeResourceTopology describes node resources and their topology.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
attributes:
description: AttributeList contains an array of AttributeInfo objects.
items:
description: AttributeInfo contains one attribute of a Zone.
properties:
name:
type: string
value:
type: string
required:
- name
- value
type: object
type: array
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
topologyPolicies:
description: 'DEPRECATED (to be removed in v1beta1): use top level attributes
if needed'
items:
type: string
type: array
zones:
description: ZoneList contains an array of Zone objects.
items:
description: Zone represents a resource topology zone, e.g. socket,
node, die or core.
properties:
attributes:
description: AttributeList contains an array of AttributeInfo objects.
items:
description: AttributeInfo contains one attribute of a Zone.
properties:
name:
type: string
value:
type: string
required:
- name
- value
type: object
type: array
costs:
description: CostList contains an array of CostInfo objects.
items:
description: CostInfo describes the cost (or distance) between
two Zones.
properties:
name:
type: string
value:
format: int64
type: integer
required:
- name
- value
type: object
type: array
name:
type: string
parent:
type: string
resources:
description: ResourceInfoList contains an array of ResourceInfo
objects.
items:
description: ResourceInfo contains information about one resource
type.
properties:
allocatable:
anyOf:
- type: integer
- type: string
description: Allocatable quantity of the resource, corresponding
to allocatable in node status, i.e. total amount of this
resource available to be used by pods.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
available:
anyOf:
- type: integer
- type: string
description: Available is the amount of this resource currently
available for new (to be scheduled) pods, i.e. Allocatable
minus the resources reserved by currently running pods.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
capacity:
anyOf:
- type: integer
- type: string
description: Capacity of the resource, corresponding to capacity
in node status, i.e. total amount of this resource that
the node has.
pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
x-kubernetes-int-or-string: true
name:
description: Name of the resource.
type: string
required:
- allocatable
- available
- capacity
- name
type: object
type: array
type:
type: string
required:
- name
- type
type: object
type: array
required:
- zones
type: object
served: true
storage: true
status:
acceptedNames:

go.mod

@ -9,7 +9,7 @@ require (
github.com/golang/protobuf v1.5.2
github.com/google/go-cmp v0.5.9
github.com/jaypipes/ghw v0.8.1-0.20210827132705-c7224150a17e
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.0.13
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.0
github.com/klauspost/cpuid/v2 v2.2.3
github.com/onsi/ginkgo/v2 v2.4.0
github.com/onsi/gomega v1.23.0

go.sum

@ -420,8 +420,8 @@ github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.0.13 h1:Y1RjPskyGMkVtNL8lq75bEdjqgq8gi+JJ1oWaz/mIJE=
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.0.13/go.mod h1:AkACMQGiTgCt0lQw3m7TTU8PLH9lYKNK5e9DqFf5VuM=
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.0 h1:2uCRJbv+A+fmaUaO0wLZ8oYd6cLE1dRzBQcFNxggH3s=
github.com/k8stopologyawareschedwg/noderesourcetopology-api v0.1.0/go.mod h1:AkACMQGiTgCt0lQw3m7TTU8PLH9lYKNK5e9DqFf5VuM=
github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=


@ -83,7 +83,7 @@ func newTopologyGC(config *restclient.Config, stop chan struct{}, gcPeriod time.
}
func (n *topologyGC) deleteNRT(nodeName string) {
if err := n.topoClient.TopologyV1alpha1().NodeResourceTopologies().Delete(context.TODO(), nodeName, metav1.DeleteOptions{}); err != nil {
if err := n.topoClient.TopologyV1alpha2().NodeResourceTopologies().Delete(context.TODO(), nodeName, metav1.DeleteOptions{}); err != nil {
if errors.IsNotFound(err) {
klog.V(2).Infof("NodeResourceTopology for node %s not found, omitting deletion", nodeName)
return
@ -125,7 +125,7 @@ func (n *topologyGC) runGC() {
nodes.Insert(key)
}
nrts, err := n.topoClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
nrts, err := n.topoClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Warningf("cannot list NRTs %s", err.Error())
return


@ -21,9 +21,8 @@ import (
"testing"
"time"
nrtapi "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
faketopologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned/fake"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
faketopologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned/fake"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
@ -36,7 +35,7 @@ func TestNRTGC(t *testing.T) {
Convey("When theres is old NRT ", t, func() {
k8sClient := fakek8sclientset.NewSimpleClientset()
fakeClient := faketopologyv1alpha1.NewSimpleClientset(&nrtapi.NodeResourceTopology{
fakeClient := faketopologyv1alpha2.NewSimpleClientset(&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
@ -55,7 +54,7 @@ func TestNRTGC(t *testing.T) {
err := gc.run()
So(err, ShouldBeNil)
nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
So(nrts.Items, ShouldHaveLength, 0)
@ -68,12 +67,12 @@ func TestNRTGC(t *testing.T) {
},
})
fakeClient := faketopologyv1alpha1.NewSimpleClientset(&nrtapi.NodeResourceTopology{
fakeClient := faketopologyv1alpha2.NewSimpleClientset(&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
&nrtapi.NodeResourceTopology{
&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
},
@ -94,7 +93,7 @@ func TestNRTGC(t *testing.T) {
err := gc.run()
So(err, ShouldBeNil)
nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
So(nrts.Items, ShouldHaveLength, 1)
So(nrts.Items[0].GetName(), ShouldEqual, "node1")
@ -114,13 +113,13 @@ func TestNRTGC(t *testing.T) {
},
)
fakeClient := faketopologyv1alpha1.NewSimpleClientset(
&nrtapi.NodeResourceTopology{
fakeClient := faketopologyv1alpha2.NewSimpleClientset(
&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
&nrtapi.NodeResourceTopology{
&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
},
@ -140,7 +139,7 @@ func TestNRTGC(t *testing.T) {
err := gc.run()
So(err, ShouldBeNil)
nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
So(nrts.Items, ShouldHaveLength, 2)
@ -150,7 +149,7 @@ func TestNRTGC(t *testing.T) {
// simple sleep with retry loop to make sure indexer will pick up event and trigger deleteNode Function
deleted := false
for i := 0; i < 5; i++ {
nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
if len(nrts.Items) == 1 {
@ -175,13 +174,13 @@ func TestNRTGC(t *testing.T) {
},
)
fakeClient := faketopologyv1alpha1.NewSimpleClientset(
&nrtapi.NodeResourceTopology{
fakeClient := faketopologyv1alpha2.NewSimpleClientset(
&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
&nrtapi.NodeResourceTopology{
&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
},
@ -201,12 +200,12 @@ func TestNRTGC(t *testing.T) {
err := gc.run()
So(err, ShouldBeNil)
nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
So(nrts.Items, ShouldHaveLength, 2)
nrt := v1alpha1.NodeResourceTopology{
nrt := v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "not-existing",
},
@ -214,12 +213,12 @@ func TestNRTGC(t *testing.T) {
go gc.periodicGC(time.Second)
_, err = fakeClient.TopologyV1alpha1().NodeResourceTopologies().Create(context.TODO(), &nrt, metav1.CreateOptions{})
_, err = fakeClient.TopologyV1alpha2().NodeResourceTopologies().Create(context.TODO(), &nrt, metav1.CreateOptions{})
So(err, ShouldBeNil)
// simple sleep with retry loop to make sure GC was triggered
deleted := false
for i := 0; i < 5; i++ {
nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
if len(nrts.Items) == 2 {


@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/klog/v2"
v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
"golang.org/x/net/context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -123,7 +123,7 @@ func (w *nfdTopologyUpdater) Run() error {
// So we are intentionally do this once during the process lifecycle.
// TODO: Obtain node resources dynamically from the podresource API
// zonesChannel := make(chan v1alpha1.ZoneList)
var zones v1alpha1.ZoneList
var zones v1alpha2.ZoneList
excludeList := resourcemonitor.NewExcludeResourceList(w.config.ExcludeList, w.nodeInfo.nodeName)
resAggr, err := resourcemonitor.NewResourcesAggregator(podResClient, excludeList)
@ -172,15 +172,15 @@ func (w *nfdTopologyUpdater) Stop() {
}
}
func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha1.ZoneList) error {
func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha2.ZoneList) error {
cli, err := w.apihelper.GetTopologyClient()
if err != nil {
return err
}
nrt, err := cli.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), w.nodeInfo.nodeName, metav1.GetOptions{})
nrt, err := cli.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), w.nodeInfo.nodeName, metav1.GetOptions{})
if errors.IsNotFound(err) {
nrtNew := v1alpha1.NodeResourceTopology{
nrtNew := v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: w.nodeInfo.nodeName,
},
@ -188,7 +188,7 @@ func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha1.ZoneLi
TopologyPolicies: []string{w.nodeInfo.tmPolicy},
}
_, err := cli.TopologyV1alpha1().NodeResourceTopologies().Create(context.TODO(), &nrtNew, metav1.CreateOptions{})
_, err := cli.TopologyV1alpha2().NodeResourceTopologies().Create(context.TODO(), &nrtNew, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create NodeResourceTopology: %w", err)
}
@ -200,7 +200,7 @@ func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha1.ZoneLi
nrtMutated := nrt.DeepCopy()
nrtMutated.Zones = zoneInfo
nrtUpdated, err := cli.TopologyV1alpha1().NodeResourceTopologies().Update(context.TODO(), nrtMutated, metav1.UpdateOptions{})
nrtUpdated, err := cli.TopologyV1alpha2().NodeResourceTopologies().Update(context.TODO(), nrtMutated, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update NodeResourceTopology: %w", err)
}


@ -24,7 +24,7 @@ import (
"time"
"github.com/jaypipes/ghw"
topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
@ -103,7 +103,7 @@ func NewResourcesAggregatorFromData(topo *ghw.TopologyInfo, resp *podresourcesap
}
// Aggregate provides the mapping (numa zone name) -> Zone from the given PodResources.
func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topologyv1alpha1.ZoneList {
func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topologyv1alpha2.ZoneList {
perNuma := make(map[int]map[corev1.ResourceName]*resourceData)
for nodeID := range noderesourceData.topo.Nodes {
nodeRes, ok := noderesourceData.perNUMAAllocatable[nodeID]
@ -168,12 +168,12 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topo
}
}
zones := make(topologyv1alpha1.ZoneList, 0)
zones := make(topologyv1alpha2.ZoneList, 0)
for nodeID, resList := range perNuma {
zone := topologyv1alpha1.Zone{
zone := topologyv1alpha2.Zone{
Name: makeZoneName(nodeID),
Type: "Node",
Resources: make(topologyv1alpha1.ResourceInfoList, 0),
Resources: make(topologyv1alpha2.ResourceInfoList, 0),
}
costs, err := makeCostsPerNumaNode(noderesourceData.topo.Nodes, nodeID)
@ -187,7 +187,7 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topo
allocatableQty := *resource.NewQuantity(resData.allocatable, resource.DecimalSI)
capacityQty := *resource.NewQuantity(resData.capacity, resource.DecimalSI)
availableQty := *resource.NewQuantity(resData.available, resource.DecimalSI)
zone.Resources = append(zone.Resources, topologyv1alpha1.ResourceInfo{
zone.Resources = append(zone.Resources, topologyv1alpha2.ResourceInfo{
Name: name.String(),
Available: availableQty,
Allocatable: allocatableQty,
@ -345,15 +345,15 @@ func makeResourceMap(numaNodes int, devices []*podresourcesapi.ContainerDevices)
}
// makeCostsPerNumaNode builds the cost map to reach all the known NUMA zones (mapping (numa zone) -> cost) starting from the given NUMA zone.
func makeCostsPerNumaNode(nodes []*ghw.TopologyNode, nodeIDSrc int) ([]topologyv1alpha1.CostInfo, error) {
func makeCostsPerNumaNode(nodes []*ghw.TopologyNode, nodeIDSrc int) ([]topologyv1alpha2.CostInfo, error) {
nodeSrc := findNodeByID(nodes, nodeIDSrc)
if nodeSrc == nil {
return nil, fmt.Errorf("unknown node: %d", nodeIDSrc)
}
nodeCosts := make([]topologyv1alpha1.CostInfo, 0)
nodeCosts := make([]topologyv1alpha2.CostInfo, 0)
for nodeIDDst, dist := range nodeSrc.Distances {
// TODO: this assumes there are no holes (= no offline node) in the distance vector
nodeCosts = append(nodeCosts, topologyv1alpha1.CostInfo{
nodeCosts = append(nodeCosts, topologyv1alpha2.CostInfo{
Name: makeZoneName(nodeIDDst),
Value: int64(dist),
})


@ -30,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
"sigs.k8s.io/node-feature-discovery/pkg/utils"
)
@ -181,34 +181,34 @@ func TestResourcesAggregator(t *testing.T) {
resAggr = NewResourcesAggregatorFromData(&fakeTopo, availRes, memoryResourcesCapacity, NewExcludeResourceList(map[string][]string{}, ""))
Convey("When aggregating resources", func() {
expected := topologyv1alpha1.ZoneList{
topologyv1alpha1.Zone{
expected := topologyv1alpha2.ZoneList{
topologyv1alpha2.Zone{
Name: "node-0",
Type: "Node",
Costs: topologyv1alpha1.CostList{
topologyv1alpha1.CostInfo{
Costs: topologyv1alpha2.CostList{
topologyv1alpha2.CostInfo{
Name: "node-0",
Value: 10,
},
topologyv1alpha1.CostInfo{
topologyv1alpha2.CostInfo{
Name: "node-1",
Value: 20,
},
},
Resources: topologyv1alpha1.ResourceInfoList{
topologyv1alpha1.ResourceInfo{
Resources: topologyv1alpha2.ResourceInfoList{
topologyv1alpha2.ResourceInfo{
Name: "cpu",
Available: *resource.NewQuantity(11, resource.DecimalSI),
Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
Capacity: *resource.NewQuantity(12, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "fake.io/net",
Available: *resource.NewQuantity(4, resource.DecimalSI),
Allocatable: *resource.NewQuantity(4, resource.DecimalSI),
Capacity: *resource.NewQuantity(4, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "memory",
Available: *resource.NewQuantity(1024, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
@ -216,45 +216,45 @@ func TestResourcesAggregator(t *testing.T) {
},
},
},
topologyv1alpha1.Zone{
topologyv1alpha2.Zone{
Name: "node-1",
Type: "Node",
Costs: topologyv1alpha1.CostList{
topologyv1alpha1.CostInfo{
Costs: topologyv1alpha2.CostList{
topologyv1alpha2.CostInfo{
Name: "node-0",
Value: 20,
},
topologyv1alpha1.CostInfo{
topologyv1alpha2.CostInfo{
Name: "node-1",
Value: 10,
},
},
Resources: topologyv1alpha1.ResourceInfoList{
topologyv1alpha1.ResourceInfo{
Resources: topologyv1alpha2.ResourceInfoList{
topologyv1alpha2.ResourceInfo{
Name: "cpu",
Available: *resource.NewQuantity(11, resource.DecimalSI),
Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
Capacity: *resource.NewQuantity(12, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "fake.io/gpu",
Available: *resource.NewQuantity(1, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
Capacity: *resource.NewQuantity(1, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "fake.io/net",
Available: *resource.NewQuantity(2, resource.DecimalSI),
Allocatable: *resource.NewQuantity(2, resource.DecimalSI),
Capacity: *resource.NewQuantity(2, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "hugepages-2Mi",
Available: *resource.NewQuantity(1024, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
Capacity: *resource.NewQuantity(2048, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "memory",
Available: *resource.NewQuantity(1024, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
@ -411,34 +411,34 @@ func TestResourcesAggregator(t *testing.T) {
},
}
expected := topologyv1alpha1.ZoneList{
topologyv1alpha1.Zone{
expected := topologyv1alpha2.ZoneList{
topologyv1alpha2.Zone{
Name: "node-0",
Type: "Node",
Costs: topologyv1alpha1.CostList{
topologyv1alpha1.CostInfo{
Costs: topologyv1alpha2.CostList{
topologyv1alpha2.CostInfo{
Name: "node-0",
Value: 10,
},
topologyv1alpha1.CostInfo{
topologyv1alpha2.CostInfo{
Name: "node-1",
Value: 20,
},
},
Resources: topologyv1alpha1.ResourceInfoList{
topologyv1alpha1.ResourceInfo{
Resources: topologyv1alpha2.ResourceInfoList{
topologyv1alpha2.ResourceInfo{
Name: "cpu",
Available: *resource.NewQuantity(11, resource.DecimalSI),
Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
Capacity: *resource.NewQuantity(12, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "fake.io/net",
Available: *resource.NewQuantity(1, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
Capacity: *resource.NewQuantity(1, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "memory",
Available: *resource.NewQuantity(1024, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
@ -446,45 +446,45 @@ func TestResourcesAggregator(t *testing.T) {
},
},
},
topologyv1alpha1.Zone{
topologyv1alpha2.Zone{
Name: "node-1",
Type: "Node",
Costs: topologyv1alpha1.CostList{
topologyv1alpha1.CostInfo{
Costs: topologyv1alpha2.CostList{
topologyv1alpha2.CostInfo{
Name: "node-0",
Value: 20,
},
topologyv1alpha1.CostInfo{
topologyv1alpha2.CostInfo{
Name: "node-1",
Value: 10,
},
},
Resources: topologyv1alpha1.ResourceInfoList{
topologyv1alpha1.ResourceInfo{
Resources: topologyv1alpha2.ResourceInfoList{
topologyv1alpha2.ResourceInfo{
Name: "cpu",
Available: resource.MustParse("10"),
Allocatable: *resource.NewQuantity(12, resource.DecimalSI),
Capacity: *resource.NewQuantity(12, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "fake.io/gpu",
Available: *resource.NewQuantity(1, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
Capacity: *resource.NewQuantity(1, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "fake.io/net",
Available: *resource.NewQuantity(0, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
Capacity: *resource.NewQuantity(1, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "hugepages-2Mi",
Available: *resource.NewQuantity(512, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
Capacity: *resource.NewQuantity(2048, resource.DecimalSI),
},
topologyv1alpha1.ResourceInfo{
topologyv1alpha2.ResourceInfo{
Name: "memory",
Available: *resource.NewQuantity(512, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),


@ -21,7 +21,7 @@ import (
corev1 "k8s.io/api/core/v1"
topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
)
// Args stores commandline arguments used for resource monitoring
@ -60,5 +60,5 @@ type ResourcesScanner interface {
// ResourcesAggregator aggregates resource information based on the received data from underlying hardware and podresource API
type ResourcesAggregator interface {
Aggregate(podResData []PodResources) topologyv1alpha1.ZoneList
Aggregate(podResData []PodResources) topologyv1alpha2.ZoneList
}


@ -17,49 +17,49 @@ limitations under the License.
package topologypolicy
import (
v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/apis/config"
)
// DetectTopologyPolicy returns string type which present
// both Topology manager policy and scope
func DetectTopologyPolicy(policy string, scope string) v1alpha1.TopologyManagerPolicy {
func DetectTopologyPolicy(policy string, scope string) v1alpha2.TopologyManagerPolicy {
switch scope {
case config.PodTopologyManagerScope:
return detectPolicyPodScope(policy)
case config.ContainerTopologyManagerScope:
return detectPolicyContainerScope(policy)
default:
return v1alpha1.None
return v1alpha2.None
}
}
func detectPolicyPodScope(policy string) v1alpha1.TopologyManagerPolicy {
func detectPolicyPodScope(policy string) v1alpha2.TopologyManagerPolicy {
switch policy {
case config.SingleNumaNodeTopologyManagerPolicy:
return v1alpha1.SingleNUMANodePodLevel
return v1alpha2.SingleNUMANodePodLevel
case config.RestrictedTopologyManagerPolicy:
return v1alpha1.RestrictedPodLevel
return v1alpha2.RestrictedPodLevel
case config.BestEffortTopologyManagerPolicy:
return v1alpha1.BestEffortPodLevel
return v1alpha2.BestEffortPodLevel
case config.NoneTopologyManagerPolicy:
return v1alpha1.None
return v1alpha2.None
default:
return v1alpha1.None
return v1alpha2.None
}
}
func detectPolicyContainerScope(policy string) v1alpha1.TopologyManagerPolicy {
func detectPolicyContainerScope(policy string) v1alpha2.TopologyManagerPolicy {
switch policy {
case config.SingleNumaNodeTopologyManagerPolicy:
return v1alpha1.SingleNUMANodeContainerLevel
return v1alpha2.SingleNUMANodeContainerLevel
case config.RestrictedTopologyManagerPolicy:
return v1alpha1.RestrictedContainerLevel
return v1alpha2.RestrictedContainerLevel
case config.BestEffortTopologyManagerPolicy:
return v1alpha1.BestEffortContainerLevel
return v1alpha2.BestEffortContainerLevel
case config.NoneTopologyManagerPolicy:
return v1alpha1.None
return v1alpha2.None
default:
return v1alpha1.None
return v1alpha2.None
}
}
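A short usage sketch of the converted helper, assuming an illustrative caller package: DetectTopologyPolicy maps the kubelet topology-manager policy and scope strings to the corresponding v1alpha2 constant, which the updater stores in the NodeResourceTopology TopologyPolicies field.

package example

import (
	"fmt"

	"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
	"sigs.k8s.io/node-feature-discovery/pkg/topologypolicy"
)

// examplePolicy shows how a caller obtains the v1alpha2 policy constant from
// the kubelet topology manager configuration values.
func examplePolicy() {
	p := topologypolicy.DetectTopologyPolicy("single-numa-node", "pod")
	if p == v1alpha2.SingleNUMANodePodLevel {
		fmt.Println("kubelet runs the single-numa-node policy with pod scope")
	}
}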


@ -19,74 +19,74 @@ package topologypolicy
import (
"testing"
v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
)
func TestDetectTopologyPolicy(t *testing.T) {
testCases := []struct {
scope string
policy string
expected v1alpha1.TopologyManagerPolicy
expected v1alpha2.TopologyManagerPolicy
}{
{
policy: "best-effort",
scope: "pod",
expected: v1alpha1.BestEffortPodLevel,
expected: v1alpha2.BestEffortPodLevel,
},
{
policy: "best-effort",
scope: "container",
expected: v1alpha1.BestEffortContainerLevel,
expected: v1alpha2.BestEffortContainerLevel,
},
{
policy: "restricted",
scope: "container",
expected: v1alpha1.RestrictedContainerLevel,
expected: v1alpha2.RestrictedContainerLevel,
},
{
policy: "restricted",
scope: "pod",
expected: v1alpha1.RestrictedPodLevel,
expected: v1alpha2.RestrictedPodLevel,
},
{
policy: "single-numa-node",
scope: "pod",
expected: v1alpha1.SingleNUMANodePodLevel,
expected: v1alpha2.SingleNUMANodePodLevel,
},
{
policy: "single-numa-node",
scope: "container",
expected: v1alpha1.SingleNUMANodeContainerLevel,
expected: v1alpha2.SingleNUMANodeContainerLevel,
},
{
policy: "none",
scope: "container",
expected: v1alpha1.None,
expected: v1alpha2.None,
},
{
policy: "none",
scope: "pod",
expected: v1alpha1.None,
expected: v1alpha2.None,
},
{
policy: "non-existent",
scope: "pod",
expected: v1alpha1.None,
expected: v1alpha2.None,
},
{
policy: "non-existent",
scope: "container",
expected: v1alpha1.None,
expected: v1alpha2.None,
},
{
policy: "single-numa-node",
scope: "non-existent",
expected: v1alpha1.None,
expected: v1alpha2.None,
},
{
policy: "single-numa-node",
scope: "non-existent",
expected: v1alpha1.None,
expected: v1alpha2.None,
},
}


@ -27,7 +27,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
appsv1 "k8s.io/api/apps/v1"
@ -237,9 +237,9 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
defer testpod.DeleteAsync(f, podMap)
By("checking the changes in the updated topology")
var finalNodeTopo *v1alpha1.NodeResourceTopology
var finalNodeTopo *v1alpha2.NodeResourceTopology
Eventually(func() bool {
finalNodeTopo, err = topologyClient.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false


@ -25,7 +25,7 @@ import (
"strings"
"time"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
"github.com/onsi/gomega"
"sigs.k8s.io/node-feature-discovery/pkg/topologypolicy"
@ -106,11 +106,11 @@ func CreateNodeResourceTopologies(extClient extclient.Interface) (*apiextensions
}
// GetNodeTopology returns the NodeResourceTopology data for the node identified by `nodeName`.
func GetNodeTopology(topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha1.NodeResourceTopology {
var nodeTopology *v1alpha1.NodeResourceTopology
func GetNodeTopology(topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha2.NodeResourceTopology {
var nodeTopology *v1alpha2.NodeResourceTopology
var err error
gomega.EventuallyWithOffset(1, func() bool {
nodeTopology, err = topologyClient.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), nodeName, metav1.GetOptions{})
nodeTopology, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false
@ -121,7 +121,7 @@ func GetNodeTopology(topologyClient *topologyclientset.Clientset, nodeName strin
}
// AllocatableResourceListFromNodeResourceTopology extract the map zone:allocatableResources from the given NodeResourceTopology instance.
func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha1.NodeResourceTopology) map[string]corev1.ResourceList {
func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha2.NodeResourceTopology) map[string]corev1.ResourceList {
allocRes := make(map[string]corev1.ResourceList)
for _, zone := range nodeTopo.Zones {
if zone.Type != "Node" {
@ -186,7 +186,7 @@ func CompareResourceList(expected, got corev1.ResourceList) (string, int, bool)
// IsValidNodeTopology checks the provided NodeResourceTopology object if it is well-formad, internally consistent and
// consistent with the given kubelet config object. Returns true if the NodeResourceTopology object is consistent and well
// formet, false otherwise; if return false, logs the failure reason.
func IsValidNodeTopology(nodeTopology *v1alpha1.NodeResourceTopology, kubeletConfig *kubeletconfig.KubeletConfiguration) bool {
func IsValidNodeTopology(nodeTopology *v1alpha2.NodeResourceTopology, kubeletConfig *kubeletconfig.KubeletConfiguration) bool {
if nodeTopology == nil || len(nodeTopology.TopologyPolicies) == 0 {
framework.Logf("failed to get topology policy from the node topology resource")
return false
@ -224,7 +224,7 @@ func IsValidNodeTopology(nodeTopology *v1alpha1.NodeResourceTopology, kubeletCon
return foundNodes > 0
}
func isValidCostList(zoneName string, costs v1alpha1.CostList) bool {
func isValidCostList(zoneName string, costs v1alpha2.CostList) bool {
if len(costs) == 0 {
framework.Logf("failed to get topology costs for zone %q from the node topology resource", zoneName)
return false
@ -239,7 +239,7 @@ func isValidCostList(zoneName string, costs v1alpha1.CostList) bool {
return true
}
func isValidResourceList(zoneName string, resources v1alpha1.ResourceInfoList) bool {
func isValidResourceList(zoneName string, resources v1alpha2.ResourceInfoList) bool {
if len(resources) == 0 {
framework.Logf("failed to get topology resources for zone %q from the node topology resource", zoneName)
return false