Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git (synced 2024-12-15 17:50:49 +00:00)
Standardize "k8s.io/api/core/v1" package short name
Signed-off-by: Feruzjon Muyassarov <feruzjon.muyassarov@intel.com>
Parent: 3b4f55b543
Commit: 71434a1392
16 changed files with 206 additions and 206 deletions
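
All of the changes below are the same mechanical rename: every import of "k8s.io/api/core/v1" that used the alias api or v1 now uses corev1, and every use site is updated to match. As a minimal sketch of the resulting convention (this is illustrative code, not part of the commit; newExampleNode and the node name are hypothetical, and it assumes k8s.io/api is available as a module dependency):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1" // standardized short name; previously aliased as "api" or "v1"
)

// newExampleNode is a hypothetical helper used only to illustrate how core/v1
// types read once the import alias is unified to corev1.
func newExampleNode(name string) *corev1.Node {
	n := &corev1.Node{}
	n.Name = name
	n.Labels = map[string]string{}
	n.Status.Capacity = corev1.ResourceList{}
	return n
}

func main() {
	fmt.Println(newExampleNode("example-node").Name)
}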
@@ -20,7 +20,7 @@ package apihelper

 import (
 topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
-api "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 k8sclient "k8s.io/client-go/kubernetes"
 )

@@ -30,13 +30,13 @@ type APIHelpers interface {
 GetClient() (*k8sclient.Clientset, error)

 // GetNode returns the Kubernetes node on which this container is running.
-GetNode(*k8sclient.Clientset, string) (*api.Node, error)
+GetNode(*k8sclient.Clientset, string) (*corev1.Node, error)

 // GetNodes returns all the nodes in the cluster
-GetNodes(*k8sclient.Clientset) (*api.NodeList, error)
+GetNodes(*k8sclient.Clientset) (*corev1.NodeList, error)

 // UpdateNode updates the node via the API server using a client.
-UpdateNode(*k8sclient.Clientset, *api.Node) error
+UpdateNode(*k8sclient.Clientset, *corev1.Node) error

 // PatchNode updates the node object via the API server using a client.
 PatchNode(*k8sclient.Clientset, string, []JsonPatch) error
@@ -48,5 +48,5 @@ type APIHelpers interface {
 GetTopologyClient() (*topologyclientset.Clientset, error)

 // GetPod returns the Kubernetes pod in a namepace with a name.
-GetPod(*k8sclient.Clientset, string, string) (*api.Pod, error)
+GetPod(*k8sclient.Clientset, string, string) (*corev1.Pod, error)
 }

@@ -21,7 +21,7 @@ import (
 "encoding/json"

 topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
-api "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
 k8sclient "k8s.io/client-go/kubernetes"
@@ -52,7 +52,7 @@ func (h K8sHelpers) GetTopologyClient() (*topologyclientset.Clientset, error) {
 }

 // GetNode retrieves one node object.
-func (h K8sHelpers) GetNode(cli *k8sclient.Clientset, nodeName string) (*api.Node, error) {
+func (h K8sHelpers) GetNode(cli *k8sclient.Clientset, nodeName string) (*corev1.Node, error) {
 // Get the node object using node name
 node, err := cli.CoreV1().Nodes().Get(context.TODO(), nodeName, meta_v1.GetOptions{})
 if err != nil {
@@ -63,12 +63,12 @@ func (h K8sHelpers) GetNode(cli *k8sclient.Clientset, nodeName string) (*api.Nod
 }

 // GetNodes retrieves all the node objects.
-func (h K8sHelpers) GetNodes(cli *k8sclient.Clientset) (*api.NodeList, error) {
+func (h K8sHelpers) GetNodes(cli *k8sclient.Clientset) (*corev1.NodeList, error) {
 return cli.CoreV1().Nodes().List(context.TODO(), meta_v1.ListOptions{})
 }

 // UpdateNode sends updated node object to the apiserver
-func (h K8sHelpers) UpdateNode(c *k8sclient.Clientset, n *api.Node) error {
+func (h K8sHelpers) UpdateNode(c *k8sclient.Clientset, n *corev1.Node) error {
 // Send the updated node to the apiserver.
 _, err := c.CoreV1().Nodes().Update(context.TODO(), n, meta_v1.UpdateOptions{})
 if err != nil {
@@ -101,7 +101,7 @@ func (h K8sHelpers) PatchNodeStatus(c *k8sclient.Clientset, nodeName string, pat

 }

-func (h K8sHelpers) GetPod(cli *k8sclient.Clientset, namespace string, podName string) (*api.Pod, error) {
+func (h K8sHelpers) GetPod(cli *k8sclient.Clientset, namespace string, podName string) (*corev1.Pod, error) {
 // Get the node object using pod name
 pod, err := cli.CoreV1().Pods(namespace).Get(context.TODO(), podName, meta_v1.GetOptions{})
 if err != nil {

@@ -6,7 +6,7 @@ import (
 mock "github.com/stretchr/testify/mock"
 kubernetes "k8s.io/client-go/kubernetes"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"

 versioned "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"
 )
@@ -40,15 +40,15 @@ func (_m *MockAPIHelpers) GetClient() (*kubernetes.Clientset, error) {
 }

 // GetNode provides a mock function with given fields: _a0, _a1
-func (_m *MockAPIHelpers) GetNode(_a0 *kubernetes.Clientset, _a1 string) (*v1.Node, error) {
+func (_m *MockAPIHelpers) GetNode(_a0 *kubernetes.Clientset, _a1 string) (*corev1.Node, error) {
 ret := _m.Called(_a0, _a1)

-var r0 *v1.Node
-if rf, ok := ret.Get(0).(func(*kubernetes.Clientset, string) *v1.Node); ok {
+var r0 *corev1.Node
+if rf, ok := ret.Get(0).(func(*kubernetes.Clientset, string) *corev1.Node); ok {
 r0 = rf(_a0, _a1)
 } else {
 if ret.Get(0) != nil {
-r0 = ret.Get(0).(*v1.Node)
+r0 = ret.Get(0).(*corev1.Node)
 }
 }

@@ -63,15 +63,15 @@ func (_m *MockAPIHelpers) GetNode(_a0 *kubernetes.Clientset, _a1 string) (*v1.No
 }

 // GetNodes provides a mock function with given fields: _a0
-func (_m *MockAPIHelpers) GetNodes(_a0 *kubernetes.Clientset) (*v1.NodeList, error) {
+func (_m *MockAPIHelpers) GetNodes(_a0 *kubernetes.Clientset) (*corev1.NodeList, error) {
 ret := _m.Called(_a0)

-var r0 *v1.NodeList
-if rf, ok := ret.Get(0).(func(*kubernetes.Clientset) *v1.NodeList); ok {
+var r0 *corev1.NodeList
+if rf, ok := ret.Get(0).(func(*kubernetes.Clientset) *corev1.NodeList); ok {
 r0 = rf(_a0)
 } else {
 if ret.Get(0) != nil {
-r0 = ret.Get(0).(*v1.NodeList)
+r0 = ret.Get(0).(*corev1.NodeList)
 }
 }

@@ -86,15 +86,15 @@ func (_m *MockAPIHelpers) GetNodes(_a0 *kubernetes.Clientset) (*v1.NodeList, err
 }

 // GetPod provides a mock function with given fields: _a0, _a1, _a2
-func (_m *MockAPIHelpers) GetPod(_a0 *kubernetes.Clientset, _a1 string, _a2 string) (*v1.Pod, error) {
+func (_m *MockAPIHelpers) GetPod(_a0 *kubernetes.Clientset, _a1 string, _a2 string) (*corev1.Pod, error) {
 ret := _m.Called(_a0, _a1, _a2)

-var r0 *v1.Pod
-if rf, ok := ret.Get(0).(func(*kubernetes.Clientset, string, string) *v1.Pod); ok {
+var r0 *corev1.Pod
+if rf, ok := ret.Get(0).(func(*kubernetes.Clientset, string, string) *corev1.Pod); ok {
 r0 = rf(_a0, _a1, _a2)
 } else {
 if ret.Get(0) != nil {
-r0 = ret.Get(0).(*v1.Pod)
+r0 = ret.Get(0).(*corev1.Pod)
 }
 }

@@ -160,11 +160,11 @@ func (_m *MockAPIHelpers) PatchNodeStatus(_a0 *kubernetes.Clientset, _a1 string,
 }

 // UpdateNode provides a mock function with given fields: _a0, _a1
-func (_m *MockAPIHelpers) UpdateNode(_a0 *kubernetes.Clientset, _a1 *v1.Node) error {
+func (_m *MockAPIHelpers) UpdateNode(_a0 *kubernetes.Clientset, _a1 *corev1.Node) error {
 ret := _m.Called(_a0, _a1)

 var r0 error
-if rf, ok := ret.Get(0).(func(*kubernetes.Clientset, *v1.Node) error); ok {
+if rf, ok := ret.Get(0).(func(*kubernetes.Clientset, *corev1.Node) error); ok {
 r0 = rf(_a0, _a1)
 } else {
 r0 = ret.Error(0)

@@ -27,7 +27,7 @@ import (
 "github.com/stretchr/testify/mock"
 "github.com/vektra/errors"
 "golang.org/x/net/context"
-api "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 k8sclient "k8s.io/client-go/kubernetes"
@@ -43,12 +43,12 @@ const (
 mockNodeName = "mock-node"
 )

-func newMockNode() *api.Node {
-n := api.Node{}
+func newMockNode() *corev1.Node {
+n := corev1.Node{}
 n.Name = mockNodeName
 n.Labels = map[string]string{}
 n.Annotations = map[string]string{}
-n.Status.Capacity = api.ResourceList{}
+n.Status.Capacity = corev1.ResourceList{}
 return &n
 }

@@ -238,7 +238,7 @@ func TestAddingExtResources(t *testing.T) {

 Convey("When the resource already exists", func() {
 mockNode := newMockNode()
-mockNode.Status.Capacity[api.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-1")] = *resource.NewQuantity(1, resource.BinarySI)
+mockNode.Status.Capacity[corev1.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-1")] = *resource.NewQuantity(1, resource.BinarySI)
 mockResourceLabels := ExtendedResources{nfdv1alpha1.FeatureLabelNs + "/feature-1": "1"}
 patches := mockMaster.createExtendedResourcePatches(mockNode, mockResourceLabels)
 So(len(patches), ShouldEqual, 0)
@@ -246,7 +246,7 @@ func TestAddingExtResources(t *testing.T) {

 Convey("When the resource already exists but its capacity has changed", func() {
 mockNode := newMockNode()
-mockNode.Status.Capacity[api.ResourceName("feature-1")] = *resource.NewQuantity(2, resource.BinarySI)
+mockNode.Status.Capacity[corev1.ResourceName("feature-1")] = *resource.NewQuantity(2, resource.BinarySI)
 mockResourceLabels := ExtendedResources{"feature-1": "1"}
 expectedPatches := []apihelper.JsonPatch{
 apihelper.NewJsonPatch("replace", "/status/capacity", "feature-1", "1"),
@@ -265,8 +265,8 @@ func TestRemovingExtResources(t *testing.T) {
 mockNode := newMockNode()
 mockResourceLabels := ExtendedResources{nfdv1alpha1.FeatureLabelNs + "/feature-1": "1", nfdv1alpha1.FeatureLabelNs + "/feature-2": "2"}
 mockNode.Annotations[nfdv1alpha1.AnnotationNs+"/extended-resources"] = "feature-1,feature-2"
-mockNode.Status.Capacity[api.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-1")] = *resource.NewQuantity(1, resource.BinarySI)
-mockNode.Status.Capacity[api.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-2")] = *resource.NewQuantity(2, resource.BinarySI)
+mockNode.Status.Capacity[corev1.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-1")] = *resource.NewQuantity(1, resource.BinarySI)
+mockNode.Status.Capacity[corev1.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-2")] = *resource.NewQuantity(2, resource.BinarySI)
 patches := mockMaster.createExtendedResourcePatches(mockNode, mockResourceLabels)
 So(len(patches), ShouldEqual, 0)
 })
@@ -274,15 +274,15 @@ func TestRemovingExtResources(t *testing.T) {
 mockNode := newMockNode()
 mockResourceLabels := ExtendedResources{nfdv1alpha1.FeatureLabelNs + "/feature-4": "", nfdv1alpha1.FeatureLabelNs + "/feature-2": "2"}
 mockNode.Annotations[nfdv1alpha1.AnnotationNs+"/extended-resources"] = "feature-4,feature-2"
-mockNode.Status.Capacity[api.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-4")] = *resource.NewQuantity(4, resource.BinarySI)
-mockNode.Status.Capacity[api.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-2")] = *resource.NewQuantity(2, resource.BinarySI)
+mockNode.Status.Capacity[corev1.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-4")] = *resource.NewQuantity(4, resource.BinarySI)
+mockNode.Status.Capacity[corev1.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-2")] = *resource.NewQuantity(2, resource.BinarySI)
 patches := mockMaster.createExtendedResourcePatches(mockNode, mockResourceLabels)
 So(len(patches), ShouldBeGreaterThan, 0)
 })
 Convey("When the extended resource is no longer wanted", func() {
 mockNode := newMockNode()
-mockNode.Status.Capacity[api.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-1")] = *resource.NewQuantity(1, resource.BinarySI)
-mockNode.Status.Capacity[api.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-2")] = *resource.NewQuantity(2, resource.BinarySI)
+mockNode.Status.Capacity[corev1.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-1")] = *resource.NewQuantity(1, resource.BinarySI)
+mockNode.Status.Capacity[corev1.ResourceName(nfdv1alpha1.FeatureLabelNs+"/feature-2")] = *resource.NewQuantity(2, resource.BinarySI)
 mockResourceLabels := ExtendedResources{nfdv1alpha1.FeatureLabelNs + "/feature-2": "2"}
 mockNode.Annotations[nfdv1alpha1.AnnotationNs+"/extended-resources"] = "feature-1,feature-2"
 patches := mockMaster.createExtendedResourcePatches(mockNode, mockResourceLabels)
@@ -475,7 +475,7 @@ func TestCreatePatches(t *testing.T) {

 func TestRemoveLabelsWithPrefix(t *testing.T) {
 Convey("When removing labels", t, func() {
-n := &api.Node{
+n := &corev1.Node{
 ObjectMeta: meta_v1.ObjectMeta{
 Labels: map[string]string{
 "single-label": "123",
@@ -531,7 +531,7 @@ func sortJsonPatches(p []apihelper.JsonPatch) []apihelper.JsonPatch {
 }

 // Remove any labels having the given prefix
-func removeLabelsWithPrefix(n *api.Node, search string) []apihelper.JsonPatch {
+func removeLabelsWithPrefix(n *corev1.Node, search string) []apihelper.JsonPatch {
 var p []apihelper.JsonPatch

 for k := range n.Labels {

@@ -36,7 +36,7 @@ import (
 "google.golang.org/grpc/health"
 "google.golang.org/grpc/health/grpc_health_v1"
 "google.golang.org/grpc/peer"
-api "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/errors"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -628,7 +628,7 @@ func createPatches(removeKeys []string, oldItems map[string]string, newItems map

 // createExtendedResourcePatches returns a slice of operations to perform on
 // the node status
-func (m *nfdMaster) createExtendedResourcePatches(n *api.Node, extendedResources ExtendedResources) []apihelper.JsonPatch {
+func (m *nfdMaster) createExtendedResourcePatches(n *corev1.Node, extendedResources ExtendedResources) []apihelper.JsonPatch {
 patches := []apihelper.JsonPatch{}

 // Form a list of namespaced resource names managed by us
@@ -636,7 +636,7 @@ func (m *nfdMaster) createExtendedResourcePatches(n *api.Node, extendedResources

 // figure out which resources to remove
 for _, resource := range oldResources {
-if _, ok := n.Status.Capacity[api.ResourceName(resource)]; ok {
+if _, ok := n.Status.Capacity[corev1.ResourceName(resource)]; ok {
 // check if the ext resource is still needed
 if _, extResNeeded := extendedResources[resource]; !extResNeeded {
 patches = append(patches, apihelper.NewJsonPatch("remove", "/status/capacity", resource, ""))
@@ -648,7 +648,7 @@ func (m *nfdMaster) createExtendedResourcePatches(n *api.Node, extendedResources
 // figure out which resources to replace and which to add
 for resource, value := range extendedResources {
 // check if the extended resource already exists with the same capacity in the node
-if quantity, ok := n.Status.Capacity[api.ResourceName(resource)]; ok {
+if quantity, ok := n.Status.Capacity[corev1.ResourceName(resource)]; ok {
 val, _ := quantity.AsInt64()
 if strconv.FormatInt(val, 10) != value {
 patches = append(patches, apihelper.NewJsonPatch("replace", "/status/capacity", resource, value))

@@ -25,7 +25,7 @@ import (

 "github.com/jaypipes/ghw"
 topologyv1alpha1 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/klog/v2"
 podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
@@ -40,7 +40,7 @@ const (
 )

 type nodeResources struct {
-perNUMAAllocatable map[int]map[v1.ResourceName]int64
+perNUMAAllocatable map[int]map[corev1.ResourceName]int64
 // mapping: resourceName -> resourceID -> nodeID
 resourceID2NUMAID map[string]map[string]int
 topo *ghw.TopologyInfo
@@ -102,11 +102,11 @@ func NewResourcesAggregatorFromData(topo *ghw.TopologyInfo, resp *podresourcesap

 // Aggregate provides the mapping (numa zone name) -> Zone from the given PodResources.
 func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topologyv1alpha1.ZoneList {
-perNuma := make(map[int]map[v1.ResourceName]*resourceData)
+perNuma := make(map[int]map[corev1.ResourceName]*resourceData)
 for nodeID := range noderesourceData.topo.Nodes {
 nodeRes, ok := noderesourceData.perNUMAAllocatable[nodeID]
 if ok {
-perNuma[nodeID] = make(map[v1.ResourceName]*resourceData)
+perNuma[nodeID] = make(map[corev1.ResourceName]*resourceData)
 for resName, allocatable := range nodeRes {
 switch {
 case resName == "cpu":
@@ -115,7 +115,7 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topo
 available: allocatable,
 capacity: allocatable + int64(len(noderesourceData.reservedCPUIDPerNUMA[nodeID])),
 }
-case resName == v1.ResourceMemory, strings.HasPrefix(string(resName), v1.ResourceHugePagesPrefix):
+case resName == corev1.ResourceMemory, strings.HasPrefix(string(resName), corev1.ResourceHugePagesPrefix):
 var capacity int64
 if _, ok := noderesourceData.memoryResourcesCapacityPerNUMA[nodeID]; !ok {
 capacity = allocatable
@@ -141,7 +141,7 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topo
 // NUMA node doesn't have any allocatable resources, but yet it exists in the topology
 // thus all its CPUs are reserved
 } else {
-perNuma[nodeID] = make(map[v1.ResourceName]*resourceData)
+perNuma[nodeID] = make(map[corev1.ResourceName]*resourceData)
 perNuma[nodeID]["cpu"] = &resourceData{
 allocatable: int64(0),
 available: int64(0),
@@ -153,7 +153,7 @@ func (noderesourceData *nodeResources) Aggregate(podResData []PodResources) topo
 for _, podRes := range podResData {
 for _, contRes := range podRes.Containers {
 for _, res := range contRes.Resources {
-if res.Name == v1.ResourceMemory || strings.HasPrefix(string(res.Name), v1.ResourceHugePagesPrefix) {
+if res.Name == corev1.ResourceMemory || strings.HasPrefix(string(res.Name), corev1.ResourceHugePagesPrefix) {
 noderesourceData.updateMemoryAvailable(perNuma, res)
 continue
 }
@@ -218,7 +218,7 @@ func getContainerDevicesFromAllocatableResources(availRes *podresourcesapi.Alloc

 for nodeID, cpuList := range cpusPerNuma {
 contDevs = append(contDevs, &podresourcesapi.ContainerDevices{
-ResourceName: string(v1.ResourceCPU),
+ResourceName: string(corev1.ResourceCPU),
 DeviceIds: cpuList,
 Topology: &podresourcesapi.TopologyInfo{
 Nodes: []*podresourcesapi.NUMANode{
@@ -233,7 +233,7 @@ func getContainerDevicesFromAllocatableResources(availRes *podresourcesapi.Alloc

 // updateAvailable computes the actually available resources.
 // This function assumes the available resources are initialized to be equal to the allocatable.
-func (noderesourceData *nodeResources) updateAvailable(numaData map[int]map[v1.ResourceName]*resourceData, ri ResourceInfo) {
+func (noderesourceData *nodeResources) updateAvailable(numaData map[int]map[corev1.ResourceName]*resourceData, ri ResourceInfo) {
 for _, resID := range ri.Data {
 resName := string(ri.Name)
 resMap, ok := noderesourceData.resourceID2NUMAID[resName]
@@ -263,8 +263,8 @@ func makeZoneName(nodeID int) string {
 // makeNodeAllocatable computes the node allocatable as mapping (NUMA node ID) -> Resource -> Allocatable (amount, int).
 // The computation is done assuming all the resources to represent the allocatable for are represented on a slice
 // of ContainerDevices. No special treatment is done for CPU IDs. See getContainerDevicesFromAllocatableResources.
-func makeNodeAllocatable(devices []*podresourcesapi.ContainerDevices, memoryBlocks []*podresourcesapi.ContainerMemory) map[int]map[v1.ResourceName]int64 {
-perNUMAAllocatable := make(map[int]map[v1.ResourceName]int64)
+func makeNodeAllocatable(devices []*podresourcesapi.ContainerDevices, memoryBlocks []*podresourcesapi.ContainerMemory) map[int]map[corev1.ResourceName]int64 {
+perNUMAAllocatable := make(map[int]map[corev1.ResourceName]int64)
 // initialize with the capacities
 for _, device := range devices {
 resourceName := device.GetResourceName()
@@ -272,15 +272,15 @@ func makeNodeAllocatable(devices []*podresourcesapi.ContainerDevices, memoryBloc
 nodeID := int(node.GetID())
 nodeRes, ok := perNUMAAllocatable[nodeID]
 if !ok {
-nodeRes = make(map[v1.ResourceName]int64)
+nodeRes = make(map[corev1.ResourceName]int64)
 }
-nodeRes[v1.ResourceName(resourceName)] += int64(len(device.GetDeviceIds()))
+nodeRes[corev1.ResourceName(resourceName)] += int64(len(device.GetDeviceIds()))
 perNUMAAllocatable[nodeID] = nodeRes
 }
 }

 for _, block := range memoryBlocks {
-memoryType := v1.ResourceName(block.GetMemoryType())
+memoryType := corev1.ResourceName(block.GetMemoryType())

 blockTopology := block.GetTopology()
 if blockTopology == nil {
@@ -290,7 +290,7 @@ func makeNodeAllocatable(devices []*podresourcesapi.ContainerDevices, memoryBloc
 for _, node := range blockTopology.GetNodes() {
 nodeID := int(node.GetID())
 if _, ok := perNUMAAllocatable[nodeID]; !ok {
-perNUMAAllocatable[nodeID] = make(map[v1.ResourceName]int64)
+perNUMAAllocatable[nodeID] = make(map[corev1.ResourceName]int64)
 }

 if _, ok := perNUMAAllocatable[nodeID][memoryType]; !ok {
@@ -402,7 +402,7 @@ func getCPUs(devices []*podresourcesapi.ContainerDevices) map[string]int {

 // updateMemoryAvailable computes the actual amount of the available memory.
 // This function assumes the available resources are initialized to be equal to the capacity.
-func (noderesourceData *nodeResources) updateMemoryAvailable(numaData map[int]map[v1.ResourceName]*resourceData, ri ResourceInfo) {
+func (noderesourceData *nodeResources) updateMemoryAvailable(numaData map[int]map[corev1.ResourceName]*resourceData, ri ResourceInfo) {
 if len(ri.NumaNodeIds) == 0 {
 klog.Warningf("no NUMA nodes information is available for device %q", ri.Name)
 return
@@ -469,7 +469,7 @@ func getMemoryResourcesCapacity() (utils.NumaMemoryResources, error) {
 capacity := make(utils.NumaMemoryResources)
 for numaID, resources := range memoryResources {
 if _, ok := capacity[numaID]; !ok {
-capacity[numaID] = map[v1.ResourceName]int64{}
+capacity[numaID] = map[corev1.ResourceName]int64{}
 }

 for resourceName, value := range resources {

@@ -21,7 +21,7 @@ import (
 "fmt"
 "strconv"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/klog/v2"
 podresourcesapi "k8s.io/kubelet/pkg/apis/podresources/v1"
@@ -75,13 +75,13 @@ func (resMon *PodResourcesScanner) isWatchable(podNamespace string, podName stri
 // hasExclusiveCPUs returns true if a guaranteed pod is allocated exclusive CPUs else returns false.
 // In isWatchable() function we check for the pod QoS and proceed if it is guaranteed (i.e. request == limit)
 // and hence we only check for request in the function below.
-func hasExclusiveCPUs(pod *v1.Pod) bool {
+func hasExclusiveCPUs(pod *corev1.Pod) bool {
 var totalCPU int64
 var cpuQuantity resource.Quantity
 for _, container := range pod.Spec.InitContainers {

 var ok bool
-if cpuQuantity, ok = container.Resources.Requests[v1.ResourceCPU]; !ok {
+if cpuQuantity, ok = container.Resources.Requests[corev1.ResourceCPU]; !ok {
 continue
 }
 totalCPU += cpuQuantity.Value()
@@ -92,7 +92,7 @@ func hasExclusiveCPUs(pod *v1.Pod) bool {
 }
 for _, container := range pod.Spec.Containers {
 var ok bool
-if cpuQuantity, ok = container.Resources.Requests[v1.ResourceCPU]; !ok {
+if cpuQuantity, ok = container.Resources.Requests[corev1.ResourceCPU]; !ok {
 continue
 }
 totalCPU += cpuQuantity.Value()
@@ -107,8 +107,8 @@ func hasExclusiveCPUs(pod *v1.Pod) bool {
 }

 // hasIntegralCPUs returns true if a container in pod is requesting integral CPUs else returns false
-func hasIntegralCPUs(pod *v1.Pod, container *v1.Container) bool {
-cpuQuantity := container.Resources.Requests[v1.ResourceCPU]
+func hasIntegralCPUs(pod *corev1.Pod, container *corev1.Container) bool {
+cpuQuantity := container.Resources.Requests[corev1.ResourceCPU]
 return cpuQuantity.Value()*1000 == cpuQuantity.MilliValue()
 }

@@ -155,7 +155,7 @@ func (resMon *PodResourcesScanner) Scan() ([]PodResources, error) {
 }
 contRes.Resources = []ResourceInfo{
 {
-Name: v1.ResourceCPU,
+Name: corev1.ResourceCPU,
 Data: resCPUs,
 },
 }
@@ -165,7 +165,7 @@ func (resMon *PodResourcesScanner) Scan() ([]PodResources, error) {
 for _, device := range container.GetDevices() {
 numaNodesIDs := getNumaNodeIds(device.GetTopology())
 contRes.Resources = append(contRes.Resources, ResourceInfo{
-Name: v1.ResourceName(device.ResourceName),
+Name: corev1.ResourceName(device.ResourceName),
 Data: device.DeviceIds,
 NumaNodeIds: numaNodesIDs,
 })
@@ -178,7 +178,7 @@ func (resMon *PodResourcesScanner) Scan() ([]PodResources, error) {

 topology := getNumaNodeIds(block.GetTopology())
 contRes.Resources = append(contRes.Resources, ResourceInfo{
-Name: v1.ResourceName(block.MemoryType),
+Name: corev1.ResourceName(block.MemoryType),
 Data: []string{fmt.Sprintf("%d", block.GetSize_())},
 NumaNodeIds: topology,
 })

@@ -23,7 +23,7 @@ import (
 "strconv"
 "strings"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 "k8s.io/klog/v2"
 resourcehelper "k8s.io/kubernetes/pkg/apis/core/helper"
@@ -40,7 +40,7 @@ var (
 type NumaMemoryResources map[int]MemoryResourceInfo

 // MemoryResourceInfo holds information of memory resources per resource type.
-type MemoryResourceInfo map[v1.ResourceName]int64
+type MemoryResourceInfo map[corev1.ResourceName]int64

 // GetNumaMemoryResources returns total amount of memory and hugepages under NUMA nodes
 func GetNumaMemoryResources() (NumaMemoryResources, error) {
@@ -64,7 +64,7 @@ func GetNumaMemoryResources() (NumaMemoryResources, error) {
 if err != nil {
 return nil, err
 }
-info[v1.ResourceMemory] = nodeTotalMemory
+info[corev1.ResourceMemory] = nodeTotalMemory

 // Get hugepages
 hugepageBytes, err := getHugepagesBytes(filepath.Join(sysBusNodeBasepath, numaNode, "hugepages"))
@@ -112,7 +112,7 @@ func getHugepagesBytes(path string) (MemoryResourceInfo, error) {
 }

 size, _ := q.AsInt64()
-name := v1.ResourceName(resourcehelper.HugePageResourceName(q))
+name := corev1.ResourceName(resourcehelper.HugePageResourceName(q))
 hugepagesBytes[name] = nr * size
 }

@@ -22,7 +22,7 @@ import (
 "path/filepath"
 "testing"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 )

 const (
@@ -109,11 +109,11 @@ func TestGetMemoryResourceCounters(t *testing.T) {
 }

 if memoryCounters[0]["memory"] != 32718644*1024 {
-t.Errorf("found unexpected amount of memory under the NUMA node 0: %d", memoryCounters[0][v1.ResourceMemory])
+t.Errorf("found unexpected amount of memory under the NUMA node 0: %d", memoryCounters[0][corev1.ResourceMemory])
 }

 if memoryCounters[1]["memory"] != 32718644*1024 {
-t.Errorf("found unexpected amount of memory under the NUMA node 1: %d", memoryCounters[0][v1.ResourceMemory])
+t.Errorf("found unexpected amount of memory under the NUMA node 1: %d", memoryCounters[0][corev1.ResourceMemory])
 }
 }

@@ -28,7 +28,7 @@ import (
 . "github.com/onsi/ginkgo/v2"
 . "github.com/onsi/gomega"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 extclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -58,7 +58,7 @@ func cleanupNode(cs clientset.Interface) {

 for _, n := range nodeList.Items {
 var err error
-var node *v1.Node
+var node *corev1.Node
 for retry := 0; retry < 5; retry++ {
 node, err = cs.CoreV1().Nodes().Get(context.TODO(), n.Name, metav1.GetOptions{})
 Expect(err).NotTo(HaveOccurred())
@@ -102,7 +102,7 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
 f := framework.NewDefaultFramework("node-feature-discovery")

 Context("when deploying a single nfd-master pod", func() {
-var masterPod *v1.Pod
+var masterPod *corev1.Pod

 BeforeEach(func() {
 err := testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)
@@ -313,7 +313,7 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
 - nodename:
 - ` + targetNodeName

-cm1 := &v1.ConfigMap{
+cm1 := &corev1.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "custom-config-extra-" + string(uuid.NewUUID()),
 },
@@ -334,7 +334,7 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
 - nodename:
 - "thisNameShouldNeverMatch"`

-cm2 := &v1.ConfigMap{
+cm2 := &corev1.ConfigMap{
 ObjectMeta: metav1.ObjectMeta{
 Name: "custom-config-extra-" + string(uuid.NewUUID()),
 },
@@ -350,21 +350,21 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
 volumeName1 := "custom-configs-extra1"
 volumeName2 := "custom-configs-extra2"
 workerDS.Spec.Template.Spec.Volumes = append(workerDS.Spec.Template.Spec.Volumes,
-v1.Volume{
+corev1.Volume{
 Name: volumeName1,
-VolumeSource: v1.VolumeSource{
-ConfigMap: &v1.ConfigMapVolumeSource{
-LocalObjectReference: v1.LocalObjectReference{
+VolumeSource: corev1.VolumeSource{
+ConfigMap: &corev1.ConfigMapVolumeSource{
+LocalObjectReference: corev1.LocalObjectReference{
 Name: cm1.Name,
 },
 },
 },
 },
-v1.Volume{
+corev1.Volume{
 Name: volumeName2,
-VolumeSource: v1.VolumeSource{
-ConfigMap: &v1.ConfigMapVolumeSource{
-LocalObjectReference: v1.LocalObjectReference{
+VolumeSource: corev1.VolumeSource{
+ConfigMap: &corev1.ConfigMapVolumeSource{
+LocalObjectReference: corev1.LocalObjectReference{
 Name: cm2.Name,
 },
 },
@@ -372,12 +372,12 @@ var _ = SIGDescribe("Node Feature Discovery", func() {
 },
 )
 workerDS.Spec.Template.Spec.Containers[0].VolumeMounts = append(workerDS.Spec.Template.Spec.Containers[0].VolumeMounts,
-v1.VolumeMount{
+corev1.VolumeMount{
 Name: volumeName1,
 ReadOnly: true,
 MountPath: filepath.Join(custom.Directory, "cm1"),
 },
-v1.VolumeMount{
+corev1.VolumeMount{
 Name: volumeName2,
 ReadOnly: true,
 MountPath: filepath.Join(custom.Directory, "cm2"),

@@ -27,7 +27,7 @@ import (
 "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
 topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 extclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -45,8 +45,8 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 extClient *extclient.Clientset
 topologyClient *topologyclientset.Clientset
 crd *apiextensionsv1.CustomResourceDefinition
-topologyUpdaterNode *v1.Node
-workerNodes []v1.Node
+topologyUpdaterNode *corev1.Node
+workerNodes []corev1.Node
 kubeletConfig *kubeletconfig.KubeletConfiguration
 )

@@ -124,7 +124,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 By("creating a pod consuming resources from the shared, non-exclusive CPU pool (best-effort QoS)")
 sleeperPod := testutils.BestEffortSleeperPod()

-podMap := make(map[string]*v1.Pod)
+podMap := make(map[string]*corev1.Pod)
 pod := f.PodClient().CreateSync(sleeperPod)
 podMap[pod.Name] = pod
 defer testutils.DeletePodsAsync(f, podMap)
@@ -164,7 +164,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
 sleeperPod := testutils.GuaranteedSleeperPod("500m")

-podMap := make(map[string]*v1.Pod)
+podMap := make(map[string]*corev1.Pod)
 pod := f.PodClient().CreateSync(sleeperPod)
 podMap[pod.Name] = pod
 defer testutils.DeletePodsAsync(f, podMap)
@@ -210,7 +210,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 By("creating a pod consuming exclusive CPUs")
 sleeperPod := testutils.GuaranteedSleeperPod("1000m")

-podMap := make(map[string]*v1.Pod)
+podMap := make(map[string]*corev1.Pod)
 pod := f.PodClient().CreateSync(sleeperPod)
 podMap[pod.Name] = pod
 defer testutils.DeletePodsAsync(f, podMap)
@@ -253,7 +253,7 @@ var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
 })

 // lessAllocatableResources specialize CompareAllocatableResources for this specific e2e use case.
-func lessAllocatableResources(expected, got map[string]v1.ResourceList) (string, string, bool) {
+func lessAllocatableResources(expected, got map[string]corev1.ResourceList) (string, string, bool) {
 zoneName, resName, cmp, ok := testutils.CompareAllocatableResources(expected, got)
 if !ok {
 framework.Logf("-> cmp failed (not ok)")

@@ -20,7 +20,7 @@ import (
 "context"
 "fmt"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -40,12 +40,12 @@ const (
 )

 // GetWorkerNodes returns all nodes labeled as worker
-func GetWorkerNodes(f *framework.Framework) ([]v1.Node, error) {
+func GetWorkerNodes(f *framework.Framework) ([]corev1.Node, error) {
 return GetNodesByRole(f, RoleWorker)
 }

 // GetByRole returns all nodes with the specified role
-func GetNodesByRole(f *framework.Framework, role string) ([]v1.Node, error) {
+func GetNodesByRole(f *framework.Framework, role string) ([]corev1.Node, error) {
 selector, err := labels.Parse(fmt.Sprintf("%s/%s=", LabelRole, role))
 if err != nil {
 return nil, err
@@ -54,7 +54,7 @@ func GetNodesByRole(f *framework.Framework, role string) ([]v1.Node, error) {
 }

 // GetBySelector returns all nodes with the specified selector
-func GetNodesBySelector(f *framework.Framework, selector labels.Selector) ([]v1.Node, error) {
+func GetNodesBySelector(f *framework.Framework, selector labels.Selector) ([]corev1.Node, error) {
 nodes, err := f.ClientSet.CoreV1().Nodes().List(context.TODO(), metav1.ListOptions{LabelSelector: selector.String()})
 if err != nil {
 return nil, err
@@ -63,13 +63,13 @@ func GetNodesBySelector(f *framework.Framework, selector labels.Selector) ([]v1.
 }

 // FilterNodesWithEnoughCores returns all nodes with at least the amount of given CPU allocatable
-func FilterNodesWithEnoughCores(nodes []v1.Node, cpuAmount string) ([]v1.Node, error) {
+func FilterNodesWithEnoughCores(nodes []corev1.Node, cpuAmount string) ([]corev1.Node, error) {
 requestCpu := resource.MustParse(cpuAmount)
 framework.Logf("checking request %v on %d nodes", requestCpu, len(nodes))

-resNodes := []v1.Node{}
+resNodes := []corev1.Node{}
 for _, node := range nodes {
-availCpu, ok := node.Status.Allocatable[v1.ResourceCPU]
+availCpu, ok := node.Status.Allocatable[corev1.ResourceCPU]
 if !ok || availCpu.IsZero() {
 return nil, fmt.Errorf("node %q has no allocatable CPU", node.Name)
 }

@@ -30,7 +30,7 @@ import (
 "github.com/onsi/gomega"
 "sigs.k8s.io/node-feature-discovery/pkg/topologypolicy"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 extclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 "k8s.io/apimachinery/pkg/api/errors"
@@ -109,15 +109,15 @@ func GetNodeTopology(topologyClient *topologyclientset.Clientset, nodeName strin
 }

 // AllocatableResourceListFromNodeResourceTopology extract the map zone:allocatableResources from the given NodeResourceTopology instance.
-func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha1.NodeResourceTopology) map[string]v1.ResourceList {
-allocRes := make(map[string]v1.ResourceList)
+func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha1.NodeResourceTopology) map[string]corev1.ResourceList {
+allocRes := make(map[string]corev1.ResourceList)
 for _, zone := range nodeTopo.Zones {
 if zone.Type != "Node" {
 continue
 }
-resList := make(v1.ResourceList)
+resList := make(corev1.ResourceList)
 for _, res := range zone.Resources {
-resList[v1.ResourceName(res.Name)] = res.Allocatable.DeepCopy()
+resList[corev1.ResourceName(res.Name)] = res.Allocatable.DeepCopy()
 }
 if len(resList) == 0 {
 continue
@@ -131,7 +131,7 @@ func AllocatableResourceListFromNodeResourceTopology(nodeTopo *v1alpha1.NodeReso
 // and informs the caller if the maps are equal. Here `equal` means the same zoneNames with the same resources, where the resources are equal if they have
 // the same resources with the same quantities. Returns the name of the different zone, the name of the different resources within the zone,
 // the comparison result (same semantic as strings.Compare) and a boolean that reports if the resourceLists are consistent. See `CompareResourceList`.
-func CompareAllocatableResources(expected, got map[string]v1.ResourceList) (string, string, int, bool) {
+func CompareAllocatableResources(expected, got map[string]corev1.ResourceList) (string, string, int, bool) {
 if len(got) != len(expected) {
 framework.Logf("-> expected=%v (len=%d) got=%v (len=%d)", expected, len(expected), got, len(got))
 return "", "", 0, false
@@ -153,7 +153,7 @@ func CompareAllocatableResources(expected, got map[string]v1.ResourceList) (stri
 // the comparison result (same semantic as strings.Compare) and a boolean that reports if the resourceLists are consistent.
 // The ResourceLists are consistent only if the represent the same resource set (all the resources listed in one are
 // also present in the another; no ResourceList is a superset nor a subset of the other)
-func CompareResourceList(expected, got v1.ResourceList) (string, int, bool) {
+func CompareResourceList(expected, got corev1.ResourceList) (string, int, bool) {
 if len(got) != len(expected) {
 framework.Logf("-> expected=%v (len=%d) got=%v (len=%d)", expected, len(expected), got, len(got))
 return "", 0, false

@@ -25,7 +25,7 @@ import (
 "github.com/onsi/ginkgo/v2"

 appsv1 "k8s.io/api/apps/v1"
-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 "k8s.io/apimachinery/pkg/api/resource"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/labels"
@@ -44,23 +44,23 @@ const (
 )

 // GuarenteedSleeperPod makes a Guaranteed QoS class Pod object which long enough forever but requires `cpuLimit` exclusive CPUs.
-func GuaranteedSleeperPod(cpuLimit string) *v1.Pod {
-return &v1.Pod{
+func GuaranteedSleeperPod(cpuLimit string) *corev1.Pod {
+return &corev1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "sleeper-gu-pod",
 },
-Spec: v1.PodSpec{
-RestartPolicy: v1.RestartPolicyNever,
-Containers: []v1.Container{
+Spec: corev1.PodSpec{
+RestartPolicy: corev1.RestartPolicyNever,
+Containers: []corev1.Container{
 {
 Name: "sleeper-gu-cnt",
 Image: PauseImage,
-Resources: v1.ResourceRequirements{
-Limits: v1.ResourceList{
+Resources: corev1.ResourceRequirements{
+Limits: corev1.ResourceList{
 // we use 1 core because that's the minimal meaningful quantity
-v1.ResourceName(v1.ResourceCPU): resource.MustParse(cpuLimit),
+corev1.ResourceName(corev1.ResourceCPU): resource.MustParse(cpuLimit),
 // any random reasonable amount is fine
-v1.ResourceName(v1.ResourceMemory): resource.MustParse("100Mi"),
+corev1.ResourceName(corev1.ResourceMemory): resource.MustParse("100Mi"),
 },
 },
 },
@@ -70,14 +70,14 @@ func GuaranteedSleeperPod(cpuLimit string) *v1.Pod {
 }

 // BestEffortSleeperPod makes a Best Effort QoS class Pod object which sleeps long enough
-func BestEffortSleeperPod() *v1.Pod {
-return &v1.Pod{
+func BestEffortSleeperPod() *corev1.Pod {
+return &corev1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "sleeper-be-pod",
 },
-Spec: v1.PodSpec{
-RestartPolicy: v1.RestartPolicyNever,
-Containers: []v1.Container{
+Spec: corev1.PodSpec{
+RestartPolicy: corev1.RestartPolicyNever,
+Containers: []corev1.Container{
 {
 Name: "sleeper-be-cnt",
 Image: PauseImage,
@@ -88,7 +88,7 @@ func BestEffortSleeperPod() *v1.Pod {
 }

 // DeletePodsAsync concurrently deletes all the pods in the given name:pod_object mapping. Returns when the longer operation ends.
-func DeletePodsAsync(f *framework.Framework, podMap map[string]*v1.Pod) {
+func DeletePodsAsync(f *framework.Framework, podMap map[string]*corev1.Pod) {
 var wg sync.WaitGroup
 for _, pod := range podMap {
 wg.Add(1)
@@ -112,24 +112,24 @@ func DeletePodSyncByName(f *framework.Framework, podName string) {
 }

 // NFDMasterPod provide NFD master pod definition
-func NFDMasterPod(image string, onMasterNode bool) *v1.Pod {
-p := &v1.Pod{
+func NFDMasterPod(image string, onMasterNode bool) *corev1.Pod {
+p := &corev1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 GenerateName: "nfd-master-",
 Labels: map[string]string{"name": "nfd-master-e2e"},
 },
-Spec: v1.PodSpec{
-Containers: []v1.Container{
+Spec: corev1.PodSpec{
+Containers: []corev1.Container{
 {
 Name: "node-feature-discovery",
 Image: image,
 ImagePullPolicy: pullPolicy(),
 Command: []string{"nfd-master"},
-Env: []v1.EnvVar{
+Env: []corev1.EnvVar{
 {
 Name: "NODE_NAME",
-ValueFrom: &v1.EnvVarSource{
-FieldRef: &v1.ObjectFieldSelector{
+ValueFrom: &corev1.EnvVarSource{
+FieldRef: &corev1.ObjectFieldSelector{
 FieldPath: "spec.nodeName",
 },
 },
@@ -138,17 +138,17 @@ func NFDMasterPod(image string, onMasterNode bool) *v1.Pod {
 },
 },
 ServiceAccountName: "nfd-master-e2e",
-RestartPolicy: v1.RestartPolicyNever,
+RestartPolicy: corev1.RestartPolicyNever,
 },
 }
 if onMasterNode {
 p.Spec.NodeSelector = map[string]string{"node-role.kubernetes.io/master": ""}
-p.Spec.Tolerations = []v1.Toleration{
+p.Spec.Tolerations = []corev1.Toleration{
 {
 Key: "node-role.kubernetes.io/master",
-Operator: v1.TolerationOpEqual,
+Operator: corev1.TolerationOpEqual,
 Value: "",
-Effect: v1.TaintEffectNoSchedule,
+Effect: corev1.TaintEffectNoSchedule,
 },
 }
 }
@@ -156,15 +156,15 @@ func NFDMasterPod(image string, onMasterNode bool) *v1.Pod {
 }

 // NFDWorkerPod provides NFD worker pod definition
-func NFDWorkerPod(image string, extraArgs []string) *v1.Pod {
-p := &v1.Pod{
+func NFDWorkerPod(image string, extraArgs []string) *corev1.Pod {
+p := &corev1.Pod{
 ObjectMeta: metav1.ObjectMeta{
 Name: "nfd-worker-" + string(uuid.NewUUID()),
 },
 Spec: *nfdWorkerPodSpec(image, extraArgs),
 }

-p.Spec.RestartPolicy = v1.RestartPolicyNever
+p.Spec.RestartPolicy = corev1.RestartPolicyNever

 return p
 }
@@ -182,7 +182,7 @@ func NFDTopologyUpdaterDaemonSet(kc KubeletConfig, image string, extraArgs []str
 }

 // newDaemonSet provide the new daemon set
-func newDaemonSet(name string, podSpec *v1.PodSpec) *appsv1.DaemonSet {
+func newDaemonSet(name string, podSpec *corev1.PodSpec) *appsv1.DaemonSet {
 return &appsv1.DaemonSet{
 ObjectMeta: metav1.ObjectMeta{
 Name: name + "-" + string(uuid.NewUUID()),
@@ -191,7 +191,7 @@ func newDaemonSet(name string, podSpec *v1.PodSpec) *appsv1.DaemonSet {
 Selector: &metav1.LabelSelector{
 MatchLabels: map[string]string{"name": name},
 },
-Template: v1.PodTemplateSpec{
+Template: corev1.PodTemplateSpec{
 ObjectMeta: metav1.ObjectMeta{
 Labels: map[string]string{"name": name},
 },
@@ -202,26 +202,26 @@ func newDaemonSet(name string, podSpec *v1.PodSpec) *appsv1.DaemonSet {
 }
 }

-func nfdWorkerPodSpec(image string, extraArgs []string) *v1.PodSpec {
-return &v1.PodSpec{
-Containers: []v1.Container{
+func nfdWorkerPodSpec(image string, extraArgs []string) *corev1.PodSpec {
+return &corev1.PodSpec{
+Containers: []corev1.Container{
 {
 Name: "node-feature-discovery",
 Image: image,
 ImagePullPolicy: pullPolicy(),
 Command: []string{"nfd-worker"},
 Args: append([]string{"-server=nfd-master-e2e:8080"}, extraArgs...),
-Env: []v1.EnvVar{
+Env: []corev1.EnvVar{
 {
 Name: "NODE_NAME",
-ValueFrom: &v1.EnvVarSource{
-FieldRef: &v1.ObjectFieldSelector{
+ValueFrom: &corev1.EnvVarSource{
+FieldRef: &corev1.ObjectFieldSelector{
 FieldPath: "spec.nodeName",
 },
 },
 },
 },
-VolumeMounts: []v1.VolumeMount{
+VolumeMounts: []corev1.VolumeMount{
 {
 Name: "host-boot",
 MountPath: "/host-boot",
@@ -251,50 +251,50 @@ func nfdWorkerPodSpec(image string, extraArgs []string) *v1.PodSpec {
 },
 },
 ServiceAccountName: "nfd-master-e2e",
-DNSPolicy: v1.DNSClusterFirstWithHostNet,
-Volumes: []v1.Volume{
+DNSPolicy: corev1.DNSClusterFirstWithHostNet,
+Volumes: []corev1.Volume{
 {
 Name: "host-boot",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
 Path: "/boot",
-Type: newHostPathType(v1.HostPathDirectory),
+Type: newHostPathType(corev1.HostPathDirectory),
 },
 },
 },
 {
 Name: "host-os-release",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
 Path: "/etc/os-release",
-Type: newHostPathType(v1.HostPathFile),
+Type: newHostPathType(corev1.HostPathFile),
 },
 },
 },
 {
 Name: "host-sys",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
 Path: "/sys",
-Type: newHostPathType(v1.HostPathDirectory),
+Type: newHostPathType(corev1.HostPathDirectory),
 },
 },
 },
 {
 Name: "host-usr-lib",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
 Path: "/usr/lib",
-Type: newHostPathType(v1.HostPathDirectory),
+Type: newHostPathType(corev1.HostPathDirectory),
 },
 },
 },
 {
 Name: "host-usr-src",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
 Path: "/usr/src",
-Type: newHostPathType(v1.HostPathDirectory),
+Type: newHostPathType(corev1.HostPathDirectory),
 },
 },
 },
@@ -302,9 +302,9 @@ func nfdWorkerPodSpec(image string, extraArgs []string) *v1.PodSpec {
 }
 }

-func nfdTopologyUpdaterPodSpec(kc KubeletConfig, image string, extraArgs []string) *v1.PodSpec {
-return &v1.PodSpec{
-Containers: []v1.Container{
+func nfdTopologyUpdaterPodSpec(kc KubeletConfig, image string, extraArgs []string) *corev1.PodSpec {
+return &corev1.PodSpec{
+Containers: []corev1.Container{
 {
 Name: "node-topology-updater",
 Image: image,
@@ -317,25 +317,25 @@ func nfdTopologyUpdaterPodSpec(kc KubeletConfig, image string, extraArgs []strin
 "--watch-namespace=rte",
 "--server=nfd-master-e2e:8080",
 }, extraArgs...),
-Env: []v1.EnvVar{
+Env: []corev1.EnvVar{
 {
 Name: "NODE_NAME",
-ValueFrom: &v1.EnvVarSource{
-FieldRef: &v1.ObjectFieldSelector{
+ValueFrom: &corev1.EnvVarSource{
+FieldRef: &corev1.ObjectFieldSelector{
 FieldPath: "spec.nodeName",
 },
 },
 },
 },
-SecurityContext: &v1.SecurityContext{
-Capabilities: &v1.Capabilities{
-Drop: []v1.Capability{"ALL"},
+SecurityContext: &corev1.SecurityContext{
+Capabilities: &corev1.Capabilities{
+Drop: []corev1.Capability{"ALL"},
 },
 RunAsUser: pointer.Int64Ptr(0),
 ReadOnlyRootFilesystem: pointer.BoolPtr(true),
 AllowPrivilegeEscalation: pointer.BoolPtr(false),
 },
-VolumeMounts: []v1.VolumeMount{
+VolumeMounts: []corev1.VolumeMount{
 {
 Name: "kubelet-podresources-conf",
 MountPath: "/podresources/config.yaml",
@@ -352,32 +352,32 @@ func nfdTopologyUpdaterPodSpec(kc KubeletConfig, image string, extraArgs []strin
 },
 },
 ServiceAccountName: "nfd-topology-updater-e2e",
-DNSPolicy: v1.DNSClusterFirstWithHostNet,
-Volumes: []v1.Volume{
+DNSPolicy: corev1.DNSClusterFirstWithHostNet,
+Volumes: []corev1.Volume{
 {
 Name: "kubelet-podresources-conf",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
 Path: kc.ConfigPath,
-Type: newHostPathType(v1.HostPathFile),
+Type: newHostPathType(corev1.HostPathFile),
 },
 },
 },
 {
 Name: "kubelet-podresources-sock",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
 Path: kc.PodResourcesSocketPath,
-Type: newHostPathType(v1.HostPathSocket),
+Type: newHostPathType(corev1.HostPathSocket),
 },
 },
 },
 {
 Name: "host-sys",
-VolumeSource: v1.VolumeSource{
-HostPath: &v1.HostPathVolumeSource{
+VolumeSource: corev1.VolumeSource{
+HostPath: &corev1.HostPathVolumeSource{
 Path: "/sys",
-Type: newHostPathType(v1.HostPathDirectory),
+Type: newHostPathType(corev1.HostPathDirectory),
 },
 },
 },
@@ -385,8 +385,8 @@ func nfdTopologyUpdaterPodSpec(kc KubeletConfig, image string, extraArgs []strin
 }
 }

-func newHostPathType(typ v1.HostPathType) *v1.HostPathType {
-hostPathType := new(v1.HostPathType)
+func newHostPathType(typ corev1.HostPathType) *corev1.HostPathType {
+hostPathType := new(corev1.HostPathType)
 *hostPathType = typ
 return hostPathType
 }
@@ -412,9 +412,9 @@ func WaitForPodsReady(c clientset.Interface, ns, name string, minReadySeconds in
 })
 }

-func pullPolicy() v1.PullPolicy {
+func pullPolicy() corev1.PullPolicy {
 if *pullIfNotPresent {
-return v1.PullIfNotPresent
+return corev1.PullIfNotPresent
 }
-return v1.PullAlways
+return corev1.PullAlways
 }

@@ -20,7 +20,7 @@ import (
 "context"
 "flag"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 rbacv1 "k8s.io/api/rbac/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
@@ -95,8 +95,8 @@ func DeconfigureRBAC(cs clientset.Interface, ns string) error {
 }

 // Configure service account required by NFD Master
-func createServiceAccountMaster(cs clientset.Interface, ns string) (*v1.ServiceAccount, error) {
-sa := &v1.ServiceAccount{
+func createServiceAccountMaster(cs clientset.Interface, ns string) (*corev1.ServiceAccount, error) {
+sa := &corev1.ServiceAccount{
 ObjectMeta: metav1.ObjectMeta{
 Name: "nfd-master-e2e",
 Namespace: ns,
@@ -106,8 +106,8 @@ func createServiceAccountMaster(cs clientset.Interface, ns string) (*v1.ServiceA
 }

 // Configure service account required by NFD MTopology Updater
-func createServiceAccountTopologyUpdater(cs clientset.Interface, ns string) (*v1.ServiceAccount, error) {
-sa := &v1.ServiceAccount{
+func createServiceAccountTopologyUpdater(cs clientset.Interface, ns string) (*corev1.ServiceAccount, error) {
+sa := &corev1.ServiceAccount{
 ObjectMeta: metav1.ObjectMeta{
 Name: "nfd-topology-updater-e2e",
 Namespace: ns,

@@ -19,26 +19,26 @@ package utils
 import (
 "context"

-v1 "k8s.io/api/core/v1"
+corev1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 clientset "k8s.io/client-go/kubernetes"
 )

 // CreateService creates nfd-master Service
-func CreateService(cs clientset.Interface, ns string) (*v1.Service, error) {
-svc := &v1.Service{
+func CreateService(cs clientset.Interface, ns string) (*corev1.Service, error) {
+svc := &corev1.Service{
 ObjectMeta: metav1.ObjectMeta{
 Name: "nfd-master-e2e",
 },
-Spec: v1.ServiceSpec{
+Spec: corev1.ServiceSpec{
 Selector: map[string]string{"name": "nfd-master-e2e"},
-Ports: []v1.ServicePort{
+Ports: []corev1.ServicePort{
 {
-Protocol: v1.ProtocolTCP,
+Protocol: corev1.ProtocolTCP,
 Port: 8080,
 },
 },
-Type: v1.ServiceTypeClusterIP,
+Type: corev1.ServiceTypeClusterIP,
 },
 }
 return cs.CoreV1().Services(ns).Create(context.TODO(), svc, metav1.CreateOptions{})