Mirror of https://github.com/kubernetes-sigs/node-feature-discovery.git
Synced 2024-12-14 11:57:51 +00:00
fix topology-updater cpu report
This commit is contained in:
parent 86d2809b77
commit a694d916df

1 changed file with 24 additions and 21 deletions
@@ -63,6 +63,11 @@ func (resMon *PodResourcesScanner) isWatchable(podNamespace string, podName stri
 		return false, false, err
 	}
 
+	// Check Pod is guaranteed QOS class and has exclusive CPUs or devices
+	if pod.Status.QOSClass != corev1.PodQOSGuaranteed {
+		return false, false, nil
+	}
+
 	isIntegralGuaranteed := hasExclusiveCPUs(pod)
 
 	if resMon.namespace == "*" && (isIntegralGuaranteed || hasDevice) {
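The early exit added in this hunk relies on the kubelet-assigned QoS class: only pods in the Guaranteed class (every container's requests equal its limits for CPU and memory) can be granted exclusive CPUs by the static CPU manager policy. Below is a minimal sketch, not part of the commit, of a container resource spec that yields that class; only standard k8s.io/api and k8s.io/apimachinery types are used.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Requests == limits for both CPU and memory, with an integral CPU count:
	// the kubelet classifies such a pod as PodQOSGuaranteed, which is the
	// precondition the new check in isWatchable() tests for.
	res := corev1.ResourceRequirements{
		Requests: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("2"),
			corev1.ResourceMemory: resource.MustParse("1Gi"),
		},
		Limits: corev1.ResourceList{
			corev1.ResourceCPU:    resource.MustParse("2"),
			corev1.ResourceMemory: resource.MustParse("1Gi"),
		},
	}
	fmt.Println(res.Requests.Cpu(), res.Limits.Cpu()) // 2 2
}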
@@ -85,9 +90,9 @@ func hasExclusiveCPUs(pod *corev1.Pod) bool {
 			continue
 		}
 		totalCPU += cpuQuantity.Value()
-		isInitContainerGuaranteed := hasIntegralCPUs(pod, &container)
-		if !isInitContainerGuaranteed {
-			return false
+		isInitContainerGuaranteed := hasIntegralCPUs(&container)
+		if isInitContainerGuaranteed {
+			return true
 		}
 	}
 	for _, container := range pod.Spec.Containers {
@@ -96,9 +101,9 @@ func hasExclusiveCPUs(pod *corev1.Pod) bool {
 			continue
 		}
 		totalCPU += cpuQuantity.Value()
-		isAppContainerGuaranteed := hasIntegralCPUs(pod, &container)
-		if !isAppContainerGuaranteed {
-			return false
+		isAppContainerGuaranteed := hasIntegralCPUs(&container)
+		if isAppContainerGuaranteed {
+			return true
 		}
 	}
 
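Taken together, the two hunks above invert the per-container logic in hasExclusiveCPUs: the old code returned false as soon as one container with a CPU request asked for a non-integral amount, while the new code returns true as soon as any container asks for an integral amount. The following is an illustration-only sketch of that any-vs-all shift; the helpers anyIntegral, allIntegral and integral are hypothetical and not code from the commit.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// integral reports whether a CPU quantity is a whole number of cores,
// matching the comparison hasIntegralCPUs uses.
func integral(q resource.Quantity) bool {
	return q.Value()*1000 == q.MilliValue()
}

// anyIntegral mirrors the new behaviour: true if any request is integral.
func anyIntegral(reqs []resource.Quantity) bool {
	for _, q := range reqs {
		if integral(q) {
			return true
		}
	}
	return false
}

// allIntegral mirrors the old behaviour: false if any request is non-integral.
func allIntegral(reqs []resource.Quantity) bool {
	for _, q := range reqs {
		if !integral(q) {
			return false
		}
	}
	return true
}

func main() {
	mixed := []resource.Quantity{resource.MustParse("500m"), resource.MustParse("2")}
	fmt.Println(anyIntegral(mixed), allIntegral(mixed)) // true false
}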
@@ -107,7 +112,7 @@ func hasExclusiveCPUs(pod *corev1.Pod) bool {
 }
 
 // hasIntegralCPUs returns true if a container in pod is requesting integral CPUs else returns false
-func hasIntegralCPUs(pod *corev1.Pod, container *corev1.Container) bool {
+func hasIntegralCPUs(container *corev1.Container) bool {
 	cpuQuantity := container.Resources.Requests[corev1.ResourceCPU]
 	return cpuQuantity.Value()*1000 == cpuQuantity.MilliValue()
 }
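For reference, the integral-CPU test kept in this hunk compares the rounded-up core count against the millicpu value; the two agree only for whole-CPU requests. A standalone sketch, not from the commit, using only the k8s.io/apimachinery resource package (the isIntegral helper is hypothetical):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// isIntegral reproduces the comparison used by hasIntegralCPUs:
// Value() rounds up to whole CPUs, MilliValue() keeps millicpu precision,
// so they agree only when the request is a whole number of CPUs.
func isIntegral(q resource.Quantity) bool {
	return q.Value()*1000 == q.MilliValue()
}

func main() {
	fmt.Println(isIntegral(resource.MustParse("2")))     // true  (2000 == 2000)
	fmt.Println(isIntegral(resource.MustParse("500m")))  // false (1000 != 500)
	fmt.Println(isIntegral(resource.MustParse("1500m"))) // false (2000 != 1500)
}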
@@ -147,7 +152,7 @@ func (resMon *PodResourcesScanner) Scan() (ScanResponse, error) {
 	for _, podResource := range respPodResources {
 		klog.InfoS("scanning pod", "podName", podResource.GetName())
 		hasDevice := hasDevice(podResource)
-		isWatchable, isIntegralGuaranteed, err := resMon.isWatchable(podResource.GetNamespace(), podResource.GetName(), hasDevice)
+		isWatchable, _, err := resMon.isWatchable(podResource.GetNamespace(), podResource.GetName(), hasDevice)
 		if err != nil {
 			return ScanResponse{}, fmt.Errorf("checking if pod in a namespace is watchable, namespace:%v, pod name %v: %w", podResource.GetNamespace(), podResource.GetName(), err)
 		}
@@ -165,7 +170,6 @@ func (resMon *PodResourcesScanner) Scan() (ScanResponse, error) {
 				Name: container.Name,
 			}
 
-			if isIntegralGuaranteed {
 			cpuIDs := container.GetCpuIds()
 			if len(cpuIDs) > 0 {
 				var resCPUs []string
@@ -179,7 +183,6 @@ func (resMon *PodResourcesScanner) Scan() (ScanResponse, error) {
 					},
 				}
 			}
-			}
 
 			for _, device := range container.GetDevices() {
 				numaNodesIDs := getNumaNodeIds(device.GetTopology())