Repository: https://github.com/kubernetes-sigs/node-feature-discovery.git

nrt-api: move from v1alpha1 to v1alpha2

commit 2967f3307a
parent d1d1eda0d2
Author: Jose Luis Ojosnegros Manchón
Date:   2023-02-07 16:48:01 +01:00

10 changed files with 118 additions and 119 deletions
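
The change is mechanical but touches every consumer of the API: each typed-client call moves from the TopologyV1alpha1() group accessor to TopologyV1alpha2(), and each type reference moves from the v1alpha1 package to v1alpha2. A minimal sketch of the post-migration call shape (the helper ensureNRT and its arguments are hypothetical, not code from this commit):

package example

import (
	"context"
	"fmt"

	"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
	topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// ensureNRT is a hypothetical helper showing the pattern applied throughout
// this commit: the group accessor is now TopologyV1alpha2() and the object
// type comes from the v1alpha2 package. Before the commit, the same call
// went through TopologyV1alpha1() with a v1alpha1 type.
func ensureNRT(cli topologyclientset.Interface, nodeName string) error {
	nrt := &v1alpha2.NodeResourceTopology{
		ObjectMeta: metav1.ObjectMeta{Name: nodeName},
	}
	if _, err := cli.TopologyV1alpha2().NodeResourceTopologies().Create(context.TODO(), nrt, metav1.CreateOptions{}); err != nil {
		return fmt.Errorf("failed to create NodeResourceTopology: %w", err)
	}
	return nil
}

As the hunks below show, every call site follows this same substitution.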


@@ -83,7 +83,7 @@ func newTopologyGC(config *restclient.Config, stop chan struct{}, gcPeriod time.
}
func (n *topologyGC) deleteNRT(nodeName string) {
-if err := n.topoClient.TopologyV1alpha1().NodeResourceTopologies().Delete(context.TODO(), nodeName, metav1.DeleteOptions{}); err != nil {
+if err := n.topoClient.TopologyV1alpha2().NodeResourceTopologies().Delete(context.TODO(), nodeName, metav1.DeleteOptions{}); err != nil {
if errors.IsNotFound(err) {
klog.V(2).Infof("NodeResourceTopology for node %s not found, omitting deletion", nodeName)
return
@@ -125,7 +125,7 @@ func (n *topologyGC) runGC() {
nodes.Insert(key)
}
-nrts, err := n.topoClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
+nrts, err := n.topoClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
if err != nil {
klog.Warningf("cannot list NRTs %s", err.Error())
return


@@ -21,9 +21,8 @@ import (
"testing"
"time"
-nrtapi "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
-v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
-faketopologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned/fake"
+"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
+faketopologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned/fake"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/informers"
@@ -36,7 +35,7 @@ func TestNRTGC(t *testing.T) {
Convey("When there is an old NRT", t, func() {
k8sClient := fakek8sclientset.NewSimpleClientset()
-fakeClient := faketopologyv1alpha1.NewSimpleClientset(&nrtapi.NodeResourceTopology{
+fakeClient := faketopologyv1alpha2.NewSimpleClientset(&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
@@ -55,7 +54,7 @@ func TestNRTGC(t *testing.T) {
err := gc.run()
So(err, ShouldBeNil)
-nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
+nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
So(nrts.Items, ShouldHaveLength, 0)
@@ -68,12 +67,12 @@ func TestNRTGC(t *testing.T) {
},
})
-fakeClient := faketopologyv1alpha1.NewSimpleClientset(&nrtapi.NodeResourceTopology{
+fakeClient := faketopologyv1alpha2.NewSimpleClientset(&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
-&nrtapi.NodeResourceTopology{
+&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
},
@@ -94,7 +93,7 @@ func TestNRTGC(t *testing.T) {
err := gc.run()
So(err, ShouldBeNil)
-nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
+nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
So(nrts.Items, ShouldHaveLength, 1)
So(nrts.Items[0].GetName(), ShouldEqual, "node1")
@@ -114,13 +113,13 @@ func TestNRTGC(t *testing.T) {
},
)
-fakeClient := faketopologyv1alpha1.NewSimpleClientset(
-&nrtapi.NodeResourceTopology{
+fakeClient := faketopologyv1alpha2.NewSimpleClientset(
+&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
-&nrtapi.NodeResourceTopology{
+&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
},
@@ -140,7 +139,7 @@ func TestNRTGC(t *testing.T) {
err := gc.run()
So(err, ShouldBeNil)
-nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
+nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
So(nrts.Items, ShouldHaveLength, 2)
@@ -150,7 +149,7 @@ func TestNRTGC(t *testing.T) {
// simple sleep with retry loop to make sure the indexer picks up the event and triggers the deleteNode function
deleted := false
for i := 0; i < 5; i++ {
-nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
+nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
if len(nrts.Items) == 1 {
@@ -175,13 +174,13 @@ func TestNRTGC(t *testing.T) {
},
)
-fakeClient := faketopologyv1alpha1.NewSimpleClientset(
-&nrtapi.NodeResourceTopology{
+fakeClient := faketopologyv1alpha2.NewSimpleClientset(
+&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node1",
},
},
-&nrtapi.NodeResourceTopology{
+&v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "node2",
},
@@ -201,12 +200,12 @@ func TestNRTGC(t *testing.T) {
err := gc.run()
So(err, ShouldBeNil)
-nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
+nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
So(nrts.Items, ShouldHaveLength, 2)
-nrt := v1alpha1.NodeResourceTopology{
+nrt := v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: "not-existing",
},
@@ -214,12 +213,12 @@ func TestNRTGC(t *testing.T) {
go gc.periodicGC(time.Second)
-_, err = fakeClient.TopologyV1alpha1().NodeResourceTopologies().Create(context.TODO(), &nrt, metav1.CreateOptions{})
+_, err = fakeClient.TopologyV1alpha2().NodeResourceTopologies().Create(context.TODO(), &nrt, metav1.CreateOptions{})
So(err, ShouldBeNil)
// simple sleep with retry loop to make sure GC was triggered
deleted := false
for i := 0; i < 5; i++ {
-nrts, err := fakeClient.TopologyV1alpha1().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
+nrts, err := fakeClient.TopologyV1alpha2().NodeResourceTopologies().List(context.TODO(), metav1.ListOptions{})
So(err, ShouldBeNil)
if len(nrts.Items) == 2 {


@@ -25,7 +25,7 @@ import (
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/klog/v2"
-v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
"golang.org/x/net/context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -123,7 +123,7 @@ func (w *nfdTopologyUpdater) Run() error {
// So we intentionally do this once during the process lifecycle.
// TODO: Obtain node resources dynamically from the podresource API
// zonesChannel := make(chan v1alpha1.ZoneList)
-var zones v1alpha1.ZoneList
+var zones v1alpha2.ZoneList
excludeList := resourcemonitor.NewExcludeResourceList(w.config.ExcludeList, w.nodeInfo.nodeName)
resAggr, err := resourcemonitor.NewResourcesAggregator(podResClient, excludeList)
@@ -172,15 +172,15 @@ func (w *nfdTopologyUpdater) Stop() {
}
}
-func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha1.ZoneList) error {
+func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha2.ZoneList) error {
cli, err := w.apihelper.GetTopologyClient()
if err != nil {
return err
}
-nrt, err := cli.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), w.nodeInfo.nodeName, metav1.GetOptions{})
+nrt, err := cli.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), w.nodeInfo.nodeName, metav1.GetOptions{})
if errors.IsNotFound(err) {
-nrtNew := v1alpha1.NodeResourceTopology{
+nrtNew := v1alpha2.NodeResourceTopology{
ObjectMeta: metav1.ObjectMeta{
Name: w.nodeInfo.nodeName,
},
@@ -188,7 +188,7 @@ func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha1.ZoneLi
TopologyPolicies: []string{w.nodeInfo.tmPolicy},
}
-_, err := cli.TopologyV1alpha1().NodeResourceTopologies().Create(context.TODO(), &nrtNew, metav1.CreateOptions{})
+_, err := cli.TopologyV1alpha2().NodeResourceTopologies().Create(context.TODO(), &nrtNew, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("failed to create NodeResourceTopology: %w", err)
}
@@ -200,7 +200,7 @@ func (w *nfdTopologyUpdater) updateNodeResourceTopology(zoneInfo v1alpha1.ZoneLi
nrtMutated := nrt.DeepCopy()
nrtMutated.Zones = zoneInfo
-nrtUpdated, err := cli.TopologyV1alpha1().NodeResourceTopologies().Update(context.TODO(), nrtMutated, metav1.UpdateOptions{})
+nrtUpdated, err := cli.TopologyV1alpha2().NodeResourceTopologies().Update(context.TODO(), nrtMutated, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update NodeResourceTopology: %w", err)
}


@@ -24,7 +24,7 @@ import (
"time"
"github.com/jaypipes/ghw"
-topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/klog/v2"
@@ -103,7 +103,7 @@ func NewResourcesAggregatorFromData(topo *ghw.TopologyInfo, resp *podresourcesap
}
// Aggregate provides the mapping (numa zone name) -> Zone from the given PodResources.
-func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topologyv1alpha1.ZoneList {
+func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topologyv1alpha2.ZoneList {
perNuma := make(map[int]map[corev1.ResourceName]*resourceData)
for nodeID := range noderesourceData.topo.Nodes {
nodeRes, ok := noderesourceData.perNUMAAllocatable[nodeID]
@@ -168,12 +168,12 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topo
}
}
-zones := make(topologyv1alpha1.ZoneList, 0)
+zones := make(topologyv1alpha2.ZoneList, 0)
for nodeID, resList := range perNuma {
-zone := topologyv1alpha1.Zone{
+zone := topologyv1alpha2.Zone{
Name: makeZoneName(nodeID),
Type: "Node",
-Resources: make(topologyv1alpha1.ResourceInfoList, 0),
+Resources: make(topologyv1alpha2.ResourceInfoList, 0),
}
costs, err := makeCostsPerNumaNode(noderesourceData.topo.Nodes, nodeID)
@@ -187,7 +187,7 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topo
allocatableQty := *resource.NewQuantity(resData.allocatable, resource.DecimalSI)
capacityQty := *resource.NewQuantity(resData.capacity, resource.DecimalSI)
availableQty := *resource.NewQuantity(resData.available, resource.DecimalSI)
-zone.Resources = append(zone.Resources, topologyv1alpha1.ResourceInfo{
+zone.Resources = append(zone.Resources, topologyv1alpha2.ResourceInfo{
Name: name.String(),
Available: availableQty,
Allocatable: allocatableQty,
@@ -345,15 +345,15 @@ func makeResourceMap(numaNodes int, devices []*podresourcesapi.ContainerDevices)
}
// makeCostsPerNumaNode builds the cost map to reach all the known NUMA zones (mapping (numa zone) -> cost) starting from the given NUMA zone.
-func makeCostsPerNumaNode(nodes []*ghw.TopologyNode, nodeIDSrc int) ([]topologyv1alpha1.CostInfo, error) {
+func makeCostsPerNumaNode(nodes []*ghw.TopologyNode, nodeIDSrc int) ([]topologyv1alpha2.CostInfo, error) {
nodeSrc := findNodeByID(nodes, nodeIDSrc)
if nodeSrc == nil {
return nil, fmt.Errorf("unknown node: %d", nodeIDSrc)
}
-nodeCosts := make([]topologyv1alpha1.CostInfo, 0)
+nodeCosts := make([]topologyv1alpha2.CostInfo, 0)
for nodeIDDst, dist := range nodeSrc.Distances {
// TODO: this assumes there are no holes (= no offline node) in the distance vector
-nodeCosts = append(nodeCosts, topologyv1alpha1.CostInfo{
+nodeCosts = append(nodeCosts, topologyv1alpha2.CostInfo{
Name: makeZoneName(nodeIDDst),
Value: int64(dist),
})


@@ -30,7 +30,7 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
v1 "k8s.io/kubelet/pkg/apis/podresources/v1"
-topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
"sigs.k8s.io/node-feature-discovery/pkg/utils"
)
@@ -181,34 +181,34 @@ func TestResourcesAggregator(t *testing.T) {
resAggr = NewResourcesAggregatorFromData(&fakeTopo, availRes, memoryResourcesCapacity, NewExcludeResourceList(map[string][]string{}, ""))
Convey("When aggregating resources", func() {
-expected := topologyv1alpha1.ZoneList{
-topologyv1alpha1.Zone{
+expected := topologyv1alpha2.ZoneList{
+topologyv1alpha2.Zone{
Name: "node-0",
Type: "Node",
-Costs: topologyv1alpha1.CostList{
-topologyv1alpha1.CostInfo{
+Costs: topologyv1alpha2.CostList{
+topologyv1alpha2.CostInfo{
Name: "node-0",
Value: 10,
},
-topologyv1alpha1.CostInfo{
+topologyv1alpha2.CostInfo{
Name: "node-1",
Value: 20,
},
},
-Resources: topologyv1alpha1.ResourceInfoList{
-topologyv1alpha1.ResourceInfo{
+Resources: topologyv1alpha2.ResourceInfoList{
+topologyv1alpha2.ResourceInfo{
Name: "cpu",
Available: *resource.NewQuantity(11, resource.DecimalSI),
Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
Capacity: *resource.NewQuantity(12, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "fake.io/net",
Available: *resource.NewQuantity(4, resource.DecimalSI),
Allocatable: *resource.NewQuantity(4, resource.DecimalSI),
Capacity: *resource.NewQuantity(4, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "memory",
Available: *resource.NewQuantity(1024, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
@@ -216,45 +216,45 @@ func TestResourcesAggregator(t *testing.T) {
},
},
},
-topologyv1alpha1.Zone{
+topologyv1alpha2.Zone{
Name: "node-1",
Type: "Node",
-Costs: topologyv1alpha1.CostList{
-topologyv1alpha1.CostInfo{
+Costs: topologyv1alpha2.CostList{
+topologyv1alpha2.CostInfo{
Name: "node-0",
Value: 20,
},
-topologyv1alpha1.CostInfo{
+topologyv1alpha2.CostInfo{
Name: "node-1",
Value: 10,
},
},
-Resources: topologyv1alpha1.ResourceInfoList{
-topologyv1alpha1.ResourceInfo{
+Resources: topologyv1alpha2.ResourceInfoList{
+topologyv1alpha2.ResourceInfo{
Name: "cpu",
Available: *resource.NewQuantity(11, resource.DecimalSI),
Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
Capacity: *resource.NewQuantity(12, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "fake.io/gpu",
Available: *resource.NewQuantity(1, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
Capacity: *resource.NewQuantity(1, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "fake.io/net",
Available: *resource.NewQuantity(2, resource.DecimalSI),
Allocatable: *resource.NewQuantity(2, resource.DecimalSI),
Capacity: *resource.NewQuantity(2, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "hugepages-2Mi",
Available: *resource.NewQuantity(1024, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
Capacity: *resource.NewQuantity(2048, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "memory",
Available: *resource.NewQuantity(1024, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
@@ -411,34 +411,34 @@ func TestResourcesAggregator(t *testing.T) {
},
}
-expected := topologyv1alpha1.ZoneList{
-topologyv1alpha1.Zone{
+expected := topologyv1alpha2.ZoneList{
+topologyv1alpha2.Zone{
Name: "node-0",
Type: "Node",
-Costs: topologyv1alpha1.CostList{
-topologyv1alpha1.CostInfo{
+Costs: topologyv1alpha2.CostList{
+topologyv1alpha2.CostInfo{
Name: "node-0",
Value: 10,
},
-topologyv1alpha1.CostInfo{
+topologyv1alpha2.CostInfo{
Name: "node-1",
Value: 20,
},
},
-Resources: topologyv1alpha1.ResourceInfoList{
-topologyv1alpha1.ResourceInfo{
+Resources: topologyv1alpha2.ResourceInfoList{
+topologyv1alpha2.ResourceInfo{
Name: "cpu",
Available: *resource.NewQuantity(11, resource.DecimalSI),
Allocatable: *resource.NewQuantity(11, resource.DecimalSI),
Capacity: *resource.NewQuantity(12, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "fake.io/net",
Available: *resource.NewQuantity(1, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
Capacity: *resource.NewQuantity(1, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "memory",
Available: *resource.NewQuantity(1024, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
@@ -446,45 +446,45 @@ func TestResourcesAggregator(t *testing.T) {
},
},
},
-topologyv1alpha1.Zone{
+topologyv1alpha2.Zone{
Name: "node-1",
Type: "Node",
-Costs: topologyv1alpha1.CostList{
-topologyv1alpha1.CostInfo{
+Costs: topologyv1alpha2.CostList{
+topologyv1alpha2.CostInfo{
Name: "node-0",
Value: 20,
},
-topologyv1alpha1.CostInfo{
+topologyv1alpha2.CostInfo{
Name: "node-1",
Value: 10,
},
},
-Resources: topologyv1alpha1.ResourceInfoList{
-topologyv1alpha1.ResourceInfo{
+Resources: topologyv1alpha2.ResourceInfoList{
+topologyv1alpha2.ResourceInfo{
Name: "cpu",
Available: resource.MustParse("10"),
Allocatable: *resource.NewQuantity(12, resource.DecimalSI),
Capacity: *resource.NewQuantity(12, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "fake.io/gpu",
Available: *resource.NewQuantity(1, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
Capacity: *resource.NewQuantity(1, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "fake.io/net",
Available: *resource.NewQuantity(0, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1, resource.DecimalSI),
Capacity: *resource.NewQuantity(1, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "hugepages-2Mi",
Available: *resource.NewQuantity(512, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),
Capacity: *resource.NewQuantity(2048, resource.DecimalSI),
},
-topologyv1alpha1.ResourceInfo{
+topologyv1alpha2.ResourceInfo{
Name: "memory",
Available: *resource.NewQuantity(512, resource.DecimalSI),
Allocatable: *resource.NewQuantity(1024, resource.DecimalSI),


@@ -21,7 +21,7 @@ import (
corev1 "k8s.io/api/core/v1"
-topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+topologyv1alpha2 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
)
// Args stores commandline arguments used for resource monitoring
@@ -60,5 +60,5 @@ type ResourcesScanner interface {
// ResourcesAggregator aggregates resource information based on the received data from underlying hardware and podresource API
type ResourcesAggregator interface {
-Aggregate(podResData []PodResources) topologyv1alpha1.ZoneList
+Aggregate(podResData []PodResources) topologyv1alpha2.ZoneList
}


@@ -17,49 +17,49 @@ limitations under the License.
package topologypolicy
import (
-v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/apis/config"
)
// DetectTopologyPolicy returns a string type that represents
// both the topology manager policy and scope
-func DetectTopologyPolicy(policy string, scope string) v1alpha1.TopologyManagerPolicy {
+func DetectTopologyPolicy(policy string, scope string) v1alpha2.TopologyManagerPolicy {
switch scope {
case config.PodTopologyManagerScope:
return detectPolicyPodScope(policy)
case config.ContainerTopologyManagerScope:
return detectPolicyContainerScope(policy)
default:
-return v1alpha1.None
+return v1alpha2.None
}
}
-func detectPolicyPodScope(policy string) v1alpha1.TopologyManagerPolicy {
+func detectPolicyPodScope(policy string) v1alpha2.TopologyManagerPolicy {
switch policy {
case config.SingleNumaNodeTopologyManagerPolicy:
-return v1alpha1.SingleNUMANodePodLevel
+return v1alpha2.SingleNUMANodePodLevel
case config.RestrictedTopologyManagerPolicy:
-return v1alpha1.RestrictedPodLevel
+return v1alpha2.RestrictedPodLevel
case config.BestEffortTopologyManagerPolicy:
-return v1alpha1.BestEffortPodLevel
+return v1alpha2.BestEffortPodLevel
case config.NoneTopologyManagerPolicy:
-return v1alpha1.None
+return v1alpha2.None
default:
-return v1alpha1.None
+return v1alpha2.None
}
}
-func detectPolicyContainerScope(policy string) v1alpha1.TopologyManagerPolicy {
+func detectPolicyContainerScope(policy string) v1alpha2.TopologyManagerPolicy {
switch policy {
case config.SingleNumaNodeTopologyManagerPolicy:
-return v1alpha1.SingleNUMANodeContainerLevel
+return v1alpha2.SingleNUMANodeContainerLevel
case config.RestrictedTopologyManagerPolicy:
-return v1alpha1.RestrictedContainerLevel
+return v1alpha2.RestrictedContainerLevel
case config.BestEffortTopologyManagerPolicy:
-return v1alpha1.BestEffortContainerLevel
+return v1alpha2.BestEffortContainerLevel
case config.NoneTopologyManagerPolicy:
-return v1alpha1.None
+return v1alpha2.None
default:
-return v1alpha1.None
+return v1alpha2.None
}
}


@@ -19,74 +19,74 @@ package topologypolicy
import (
"testing"
-v1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
)
func TestDetectTopologyPolicy(t *testing.T) {
testCases := []struct {
scope string
policy string
-expected v1alpha1.TopologyManagerPolicy
+expected v1alpha2.TopologyManagerPolicy
}{
{
policy: "best-effort",
scope: "pod",
-expected: v1alpha1.BestEffortPodLevel,
+expected: v1alpha2.BestEffortPodLevel,
},
{
policy: "best-effort",
scope: "container",
-expected: v1alpha1.BestEffortContainerLevel,
+expected: v1alpha2.BestEffortContainerLevel,
},
{
policy: "restricted",
scope: "container",
-expected: v1alpha1.RestrictedContainerLevel,
+expected: v1alpha2.RestrictedContainerLevel,
},
{
policy: "restricted",
scope: "pod",
-expected: v1alpha1.RestrictedPodLevel,
+expected: v1alpha2.RestrictedPodLevel,
},
{
policy: "single-numa-node",
scope: "pod",
-expected: v1alpha1.SingleNUMANodePodLevel,
+expected: v1alpha2.SingleNUMANodePodLevel,
},
{
policy: "single-numa-node",
scope: "container",
-expected: v1alpha1.SingleNUMANodeContainerLevel,
+expected: v1alpha2.SingleNUMANodeContainerLevel,
},
{
policy: "none",
scope: "container",
-expected: v1alpha1.None,
+expected: v1alpha2.None,
},
{
policy: "none",
scope: "pod",
-expected: v1alpha1.None,
+expected: v1alpha2.None,
},
{
policy: "non-existent",
scope: "pod",
-expected: v1alpha1.None,
+expected: v1alpha2.None,
},
{
policy: "non-existent",
scope: "container",
-expected: v1alpha1.None,
+expected: v1alpha2.None,
},
{
policy: "single-numa-node",
scope: "non-existent",
-expected: v1alpha1.None,
+expected: v1alpha2.None,
},
{
policy: "single-numa-node",
scope: "non-existent",
-expected: v1alpha1.None,
+expected: v1alpha2.None,
},
}


@@ -27,7 +27,7 @@ import (
. "github.com/onsi/ginkgo/v2"
. "github.com/onsi/gomega"
-"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
appsv1 "k8s.io/api/apps/v1"
@@ -237,9 +237,9 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
defer testpod.DeleteAsync(f, podMap)
By("checking the changes in the updated topology")
-var finalNodeTopo *v1alpha1.NodeResourceTopology
+var finalNodeTopo *v1alpha2.NodeResourceTopology
Eventually(func() bool {
-finalNodeTopo, err = topologyClient.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
+finalNodeTopo, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false


@@ -25,7 +25,7 @@ import (
"strings"
"time"
-"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
+"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha2"
topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
"github.com/onsi/gomega"
"sigs.k8s.io/node-feature-discovery/pkg/topologypolicy"
@@ -106,11 +106,11 @@ func CreateNodeResourceTopologies(extClient extclient.Interface) (*apiextensions
}
// GetNodeTopology returns the NodeResourceTopology data for the node identified by `nodeName`.
-func GetNodeTopology(topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha1.NodeResourceTopology {
-var nodeTopology *v1alpha1.NodeResourceTopology
+func GetNodeTopology(topologyClient *topologyclientset.Clientset, nodeName string) *v1alpha2.NodeResourceTopology {
+var nodeTopology *v1alpha2.NodeResourceTopology
var err error
gomega.EventuallyWithOffset(1, func() bool {
-nodeTopology, err = topologyClient.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), nodeName, metav1.GetOptions{})
+nodeTopology, err = topologyClient.TopologyV1alpha2().NodeResourceTopologies().Get(context.TODO(), nodeName, metav1.GetOptions{})
if err != nil {
framework.Logf("failed to get the node topology resource: %v", err)
return false
@@ -121,7 +121,7 @@ func GetNodeTopology(topologyClient *topologyclientset.Clientset, nodeName strin
}
// AllocatableResourceListFromNodeResourceTopology extracts the map zone:allocatableResources from the given NodeResourceTopology instance.
-func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha1.NodeResourceTopology) map[string]corev1.ResourceList {
+func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha2.NodeResourceTopology) map[string]corev1.ResourceList {
allocRes := make(map[string]corev1.ResourceList)
for _, zone := range nodeTopo.Zones {
if zone.Type != "Node" {
@@ -186,7 +186,7 @@ func CompareResourceList(expected, got corev1.ResourceList) (string, int, bool)
// IsValidNodeTopology checks whether the provided NodeResourceTopology object is well-formed, internally consistent and
// consistent with the given kubelet config object. Returns true if the NodeResourceTopology object is consistent and well
// formed, false otherwise; if it returns false, it logs the failure reason.
-func IsValidNodeTopology(nodeTopology *v1alpha1.NodeResourceTopology, kubeletConfig *kubeletconfig.KubeletConfiguration) bool {
+func IsValidNodeTopology(nodeTopology *v1alpha2.NodeResourceTopology, kubeletConfig *kubeletconfig.KubeletConfiguration) bool {
if nodeTopology == nil || len(nodeTopology.TopologyPolicies) == 0 {
framework.Logf("failed to get topology policy from the node topology resource")
return false
@@ -224,7 +224,7 @@ func IsValidNodeTopology(nodeTopology *v1alpha1.NodeResourceTopology, kubeletCon
return foundNodes > 0
}
-func isValidCostList(zoneName string, costs v1alpha1.CostList) bool {
+func isValidCostList(zoneName string, costs v1alpha2.CostList) bool {
if len(costs) == 0 {
framework.Logf("failed to get topology costs for zone %q from the node topology resource", zoneName)
return false
@@ -239,7 +239,7 @@ func isValidCostList(zoneName string, costs v1alpha1.CostList) bool {
return true
}
-func isValidResourceList(zoneName string, resources v1alpha1.ResourceInfoList) bool {
+func isValidResourceList(zoneName string, resources v1alpha2.ResourceInfoList) bool {
if len(resources) == 0 {
framework.Logf("failed to get topology resources for zone %q from the node topology resource", zoneName)
return false