/*
Copyright 2020-2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/api/resource"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"

	"github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/apis/topology/v1alpha1"
	topologyclientset "github.com/k8stopologyawareschedwg/noderesourcetopology-api/pkg/generated/clientset/versioned"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	extclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/test/e2e/framework"
	"k8s.io/kubernetes/test/e2e/framework/kubelet"
	admissionapi "k8s.io/pod-security-admission/api"

	testutils "sigs.k8s.io/node-feature-discovery/test/e2e/utils"
	testds "sigs.k8s.io/node-feature-discovery/test/e2e/utils/daemonset"
	testpod "sigs.k8s.io/node-feature-discovery/test/e2e/utils/pod"
)

var _ = SIGDescribe("Node Feature Discovery topology updater", func() {
	var (
		extClient                *extclient.Clientset
		topologyClient           *topologyclientset.Clientset
		topologyUpdaterNode      *corev1.Node
		topologyUpdaterDaemonSet *appsv1.DaemonSet
		workerNodes              []corev1.Node
		kubeletConfig            *kubeletconfig.KubeletConfiguration
	)

	f := framework.NewDefaultFramework("node-topology-updater")
	f.NamespacePodSecurityEnforceLevel = admissionapi.LevelPrivileged

	JustBeforeEach(func() {
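		// Common setup: build the API clients (once), create the NodeResourceTopology CRD
		// and the RBAC objects, deploy the nfd-topology-updater daemonset prepared by the
		// per-context BeforeEach, and record the node one of its pods was scheduled on.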
		var err error

		if extClient == nil {
			extClient, err = extclient.NewForConfig(f.ClientConfig())
			Expect(err).NotTo(HaveOccurred())
		}

		if topologyClient == nil {
			topologyClient, err = topologyclientset.NewForConfig(f.ClientConfig())
			Expect(err).NotTo(HaveOccurred())
		}

		By("Creating the node resource topologies CRD")
		Expect(testutils.CreateNodeResourceTopologies(extClient)).ToNot(BeNil())

		By("Configuring RBAC")
		Expect(testutils.ConfigureRBAC(f.ClientSet, f.Namespace.Name)).NotTo(HaveOccurred())

		By("Creating nfd-topology-updater daemonset")
		topologyUpdaterDaemonSet, err = f.ClientSet.AppsV1().DaemonSets(f.Namespace.Name).Create(context.TODO(), topologyUpdaterDaemonSet, metav1.CreateOptions{})
		Expect(err).NotTo(HaveOccurred())

		By("Waiting for daemonset pods to be ready")
		Expect(testpod.WaitForReady(f.ClientSet, f.Namespace.Name, topologyUpdaterDaemonSet.Spec.Template.Labels["name"], 5)).NotTo(HaveOccurred())

		label := labels.SelectorFromSet(map[string]string{"name": topologyUpdaterDaemonSet.Spec.Template.Labels["name"]})
		pods, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(context.TODO(), metav1.ListOptions{LabelSelector: label.String()})
		Expect(err).NotTo(HaveOccurred())
		Expect(pods.Items).ToNot(BeEmpty())

		topologyUpdaterNode, err = f.ClientSet.CoreV1().Nodes().Get(context.TODO(), pods.Items[0].Spec.NodeName, metav1.GetOptions{})
		Expect(err).NotTo(HaveOccurred())

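		// Fetch the current kubelet configuration of that node; it is used later to
		// validate the NodeResourceTopology data published for it.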
		kubeletConfig, err = kubelet.GetCurrentKubeletConfig(topologyUpdaterNode.Name, "", true)
		Expect(err).NotTo(HaveOccurred())

		workerNodes, err = testutils.GetWorkerNodes(f)
		Expect(err).NotTo(HaveOccurred())
	})

	// TODO: replace with regular AfterEach once we have https://github.com/kubernetes/kubernetes/pull/111998 in
	f.AddAfterEach("Node Feature Discovery topology updater CRD and RBAC removal", func(f *framework.Framework, failed bool) {
		err := testutils.DeconfigureRBAC(f.ClientSet, f.Namespace.Name)
		if err != nil {
			framework.Logf("failed to delete RBAC resources: %v", err)
		}
	})

	Context("with topology-updater daemonset running", func() {
		BeforeEach(func() {
			cfg, err := testutils.GetConfig()
			Expect(err).ToNot(HaveOccurred())

			kcfg := cfg.GetKubeletConfig()
			By(fmt.Sprintf("Using config (%#v)", kcfg))

			podSpecOpts := []testpod.SpecOption{testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag))}
			topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
		})

		It("should fill the node resource topologies CR with the data", func() {
			nodeTopology := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
			isValid := testutils.IsValidNodeTopology(nodeTopology, kubeletConfig)
			Expect(isValid).To(BeTrue(), "received invalid topology: %v", nodeTopology)
		})

		It("should not account for any cpus if a container doesn't request exclusive cpus (best effort QoS)", func() {
			By("getting the initial topology information")
			initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
			By("creating a pod consuming resources from the shared, non-exclusive CPU pool (best-effort QoS)")
			sleeperPod := testpod.BestEffortSleeper()

			podMap := make(map[string]*corev1.Pod)
			pod := f.PodClient().CreateSync(sleeperPod)
			podMap[pod.Name] = pod
			defer testpod.DeleteAsync(f, podMap)

			cooldown := 30 * time.Second
			By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
			// we expect no update to the object, hence the resource version must NOT change, so we can only sleep
			time.Sleep(cooldown)
			By("checking the changes in the updated topology - expecting none")
			finalNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)

			initialAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(initialNodeTopo)
			finalAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(finalNodeTopo)
			if len(initialAllocRes) == 0 || len(finalAllocRes) == 0 {
				Fail(fmt.Sprintf("failed to find allocatable resources from node topology initial=%v final=%v", initialAllocRes, finalAllocRes))
			}
			zoneName, resName, cmp, ok := testutils.CompareAllocatableResources(initialAllocRes, finalAllocRes)
			framework.Logf("zone=%q resource=%q cmp=%v ok=%v", zoneName, resName, cmp, ok)
			if !ok {
				Fail(fmt.Sprintf("failed to compare allocatable resources from node topology initial=%v final=%v", initialAllocRes, finalAllocRes))
			}

			// This is actually a workaround.
			// Depending on the (random, by design) order in which ginkgo runs the tests, a test which exclusively allocates CPUs may have run before this one.
			// We cannot (nor should we) care about what runs before this test, but we know that this may happen.
			// The proper solution is to wait for ALL the containers requesting exclusive resources to be gone before ending the related test.
			// To date, we don't yet have a clean way to wait for these pods (actually containers) to be completely gone
			// (hence, releasing the exclusively allocated CPUs) before ending the test, so this test can run with some leftovers hanging around,
			// which makes the accounting harder. And this is what we handle here.
			isGreaterEqual := (cmp >= 0)
			Expect(isGreaterEqual).To(BeTrue(), fmt.Sprintf("final allocatable resources not restored - cmp=%d initial=%v final=%v", cmp, initialAllocRes, finalAllocRes))
		})

		It("should not account for any cpus if a container doesn't request exclusive cpus (guaranteed QoS, nonintegral cpu request)", func() {
			By("getting the initial topology information")
			initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
			By("creating a pod consuming resources from the shared, non-exclusive CPU pool (guaranteed QoS, nonintegral request)")
			sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
				corev1.ResourceList{
					corev1.ResourceCPU: resource.MustParse("500m"),
					// any random reasonable amount is fine
					corev1.ResourceMemory: resource.MustParse("100Mi"),
				}))

			podMap := make(map[string]*corev1.Pod)
			pod := f.PodClient().CreateSync(sleeperPod)
			podMap[pod.Name] = pod
			defer testpod.DeleteAsync(f, podMap)

			cooldown := 30 * time.Second
			By(fmt.Sprintf("getting the updated topology - sleeping for %v", cooldown))
			// we expect no update to the object, hence the resource version must NOT change, so we can only sleep
			time.Sleep(cooldown)
			By("checking the changes in the updated topology - expecting none")
			finalNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)

			initialAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(initialNodeTopo)
			finalAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(finalNodeTopo)
			if len(initialAllocRes) == 0 || len(finalAllocRes) == 0 {
				Fail(fmt.Sprintf("failed to find allocatable resources from node topology initial=%v final=%v", initialAllocRes, finalAllocRes))
			}
			zoneName, resName, cmp, ok := testutils.CompareAllocatableResources(initialAllocRes, finalAllocRes)
			framework.Logf("zone=%q resource=%q cmp=%v ok=%v", zoneName, resName, cmp, ok)
			if !ok {
				Fail(fmt.Sprintf("failed to compare allocatable resources from node topology initial=%v final=%v", initialAllocRes, finalAllocRes))
			}

			// This is actually a workaround.
			// Depending on the (random, by design) order in which ginkgo runs the tests, a test which exclusively allocates CPUs may have run before this one.
			// We cannot (nor should we) care about what runs before this test, but we know that this may happen.
			// The proper solution is to wait for ALL the containers requesting exclusive resources to be gone before ending the related test.
			// To date, we don't yet have a clean way to wait for these pods (actually containers) to be completely gone
			// (hence, releasing the exclusively allocated CPUs) before ending the test, so this test can run with some leftovers hanging around,
			// which makes the accounting harder. And this is what we handle here.
			isGreaterEqual := (cmp >= 0)
			Expect(isGreaterEqual).To(BeTrue(), fmt.Sprintf("final allocatable resources not restored - cmp=%d initial=%v final=%v", cmp, initialAllocRes, finalAllocRes))
		})

		It("should account for containers requesting exclusive cpus", func() {
			nodes, err := testutils.FilterNodesWithEnoughCores(workerNodes, "1000m")
			Expect(err).NotTo(HaveOccurred())
			if len(nodes) < 1 {
				Skip("not enough allocatable cores for this test")
			}

			By("getting the initial topology information")
			initialNodeTopo := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
			By("creating a pod consuming exclusive CPUs")
			sleeperPod := testpod.GuaranteedSleeper(testpod.WithLimits(
				corev1.ResourceList{
					corev1.ResourceCPU: resource.MustParse("1000m"),
					// any random reasonable amount is fine
					corev1.ResourceMemory: resource.MustParse("100Mi"),
				}))
			// in case there is more than a single node in the cluster
			// we need to set the node name, so we'll have certainty about
			// which node we need to examine
			sleeperPod.Spec.NodeName = topologyUpdaterNode.Name

			podMap := make(map[string]*corev1.Pod)
			pod := f.PodClient().CreateSync(sleeperPod)
			podMap[pod.Name] = pod
			defer testpod.DeleteAsync(f, podMap)

			By("checking the changes in the updated topology")
			var finalNodeTopo *v1alpha1.NodeResourceTopology
			Eventually(func() bool {
				finalNodeTopo, err = topologyClient.TopologyV1alpha1().NodeResourceTopologies().Get(context.TODO(), topologyUpdaterNode.Name, metav1.GetOptions{})
				if err != nil {
					framework.Logf("failed to get the node topology resource: %v", err)
					return false
				}
				if finalNodeTopo.ObjectMeta.ResourceVersion == initialNodeTopo.ObjectMeta.ResourceVersion {
					framework.Logf("node topology resource %s was not updated", topologyUpdaterNode.Name)
				}

				initialAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(initialNodeTopo)
				finalAllocRes := testutils.AllocatableResourceListFromNodeResourceTopology(finalNodeTopo)
				if len(initialAllocRes) == 0 || len(finalAllocRes) == 0 {
					Fail(fmt.Sprintf("failed to find allocatable resources from node topology initial=%v final=%v", initialAllocRes, finalAllocRes))
				}

				zoneName, resName, isLess := lessAllocatableResources(initialAllocRes, finalAllocRes)
				framework.Logf("zone=%q resource=%q isLess=%v", zoneName, resName, isLess)
				if !isLess {
					framework.Logf("final allocatable resources not decreased - initial=%v final=%v", initialAllocRes, finalAllocRes)
				}
				return true
			}, time.Minute, 5*time.Second).Should(BeTrue(), "didn't get updated node topology info")
		})
	})

	When("topology-updater configured to exclude memory", func() {
		BeforeEach(func() {
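			// The excludeList in the nfd-topology-updater configuration maps a node name
			// ('*' matching all nodes) to the resources that must not be reported; here
			// memory is excluded everywhere.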
			cm := testutils.NewConfigMap("nfd-topology-updater-conf", "nfd-topology-updater.conf", `
excludeList:
  '*': [memory]
`)
			cm, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(context.TODO(), cm, metav1.CreateOptions{})
			Expect(err).ToNot(HaveOccurred())

			cfg, err := testutils.GetConfig()
			Expect(err).ToNot(HaveOccurred())

			kcfg := cfg.GetKubeletConfig()
			By(fmt.Sprintf("Using config (%#v)", kcfg))

			podSpecOpts := []testpod.SpecOption{
				testpod.SpecWithContainerImage(fmt.Sprintf("%s:%s", *dockerRepo, *dockerTag)),
				testpod.SpecWithConfigMap(cm.Name, "/etc/kubernetes/node-feature-discovery"),
			}
			topologyUpdaterDaemonSet = testds.NFDTopologyUpdater(kcfg, podSpecOpts...)
		})

		It("noderesourcetopology should not advertise the memory resource", func() {
			Eventually(func() bool {
				memoryFound := false
				nodeTopology := testutils.GetNodeTopology(topologyClient, topologyUpdaterNode.Name)
				for _, zone := range nodeTopology.Zones {
					for _, res := range zone.Resources {
						if res.Name == string(corev1.ResourceMemory) {
							memoryFound = true
							framework.Logf("resource:%s was found for nodeTopology:%s on zone:%s while it should not", corev1.ResourceMemory, nodeTopology.Name, zone.Name)
							break
						}
					}
				}
				return memoryFound
			}, 1*time.Minute, 10*time.Second).Should(BeFalse())
		})
	})
})

// lessAllocatableResources specializes CompareAllocatableResources for this specific e2e use case.
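// It returns the zone and resource name for which the allocatable quantity decreased
// between expected and got, or empty strings and false when no decrease is detected or
// the comparison itself fails.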
func lessAllocatableResources(expected, got map[string]corev1.ResourceList) (string, string, bool) {
	zoneName, resName, cmp, ok := testutils.CompareAllocatableResources(expected, got)
	if !ok {
		framework.Logf("-> cmp failed (not ok)")
		return "", "", false
	}
	if cmp < 0 {
		return zoneName, resName, true
	}
	framework.Logf("-> cmp failed (value=%d)", cmp)
	return "", "", false
}