Enhance scheduler cache unit tests to cover OIR in pod spec
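Opaque integer resources (OIR) are accounted by the scheduler cache under
fully namespaced resource keys. This change extends the makeBasePod test
helper with an OIR argument of the form "name:quantity" and adds cases for
a single OIR pod, for aggregation across two OIR pods, and for an invalid
OIR key that the cache must ignore.

A minimal sketch of the new helper in use (values taken from this diff):

    pod := makeBasePod(t, "node", "test", "100m", "500", "oir-foo:3",
        []v1.ContainerPort{{HostPort: 80}})
    // "oir-foo" is namespaced via v1helper.OpaqueIntResourceName, so the
    // container requests 3 units of
    // pod.alpha.kubernetes.io/opaque-int-resource-oir-foo alongside
    // 100m of CPU and 500 bytes of memory.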
Signed-off-by: vikaschoudhary16 <[email protected]>
vikaschoudhary16 committed Jul 25, 2017
1 parent 8d1b090 commit df4f4d3
Showing 2 changed files with 90 additions and 19 deletions.
1 change: 1 addition & 0 deletions plugin/pkg/scheduler/schedulercache/BUILD
@@ -35,6 +35,7 @@ go_test(
library = ":go_default_library",
tags = ["automanaged"],
deps = [
"//pkg/api/v1/helper:go_default_library",
"//plugin/pkg/scheduler/algorithm/priorities/util:go_default_library",
"//plugin/pkg/scheduler/util:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
108 changes: 89 additions & 19 deletions plugin/pkg/scheduler/schedulercache/cache_test.go
@@ -19,13 +19,15 @@ package schedulercache
import (
"fmt"
"reflect"
"strings"
"testing"
"time"

"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
v1helper "k8s.io/kubernetes/pkg/api/v1/helper"
priorityutil "k8s.io/kubernetes/plugin/pkg/scheduler/algorithm/priorities/util"
schedutil "k8s.io/kubernetes/plugin/pkg/scheduler/util"
)
@@ -45,10 +47,13 @@ func deepEqualWithoutGeneration(t *testing.T, testcase int, actual, expected *No
func TestAssumePodScheduled(t *testing.T) {
nodeName := "node"
testPods := []*v1.Pod{
makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
makeBasePod(nodeName, "test-nonzero", "", "", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}),
makeBasePod(t, nodeName, "test-nonzero", "", "", "", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test", "100m", "500", "oir-foo:3", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "oir-foo:5", []v1.ContainerPort{{HostPort: 8080}}),
makeBasePod(t, nodeName, "test", "100m", "500", "random-invalid-oir-key:100", []v1.ContainerPort{{}}),
}

tests := []struct {
@@ -100,7 +105,55 @@ func TestAssumePodScheduled(t *testing.T) {
pods: []*v1.Pod{testPods[3]},
usedPorts: map[int]bool{80: true},
},
}}
}, {
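// A single OIR pod: the request is accounted under the fully namespaced
// opaque-int-resource key.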
pods: []*v1.Pod{testPods[4]},
wNodeInfo: &NodeInfo{
requestedResource: &Resource{
MilliCPU: 100,
Memory: 500,
OpaqueIntResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 3},
},
nonzeroRequest: &Resource{
MilliCPU: 100,
Memory: 500,
},
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[4]},
usedPorts: map[int]bool{80: true},
},
}, {
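// Two OIR pods on one node: requests aggregate (100m+200m CPU,
// 500+1Ki = 1524 bytes of memory, 3+5 = 8 units of oir-foo).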
pods: []*v1.Pod{testPods[4], testPods[5]},
wNodeInfo: &NodeInfo{
requestedResource: &Resource{
MilliCPU: 300,
Memory: 1524,
OpaqueIntResources: map[v1.ResourceName]int64{"pod.alpha.kubernetes.io/opaque-int-resource-oir-foo": 8},
},
nonzeroRequest: &Resource{
MilliCPU: 300,
Memory: 1524,
},
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[4], testPods[5]},
usedPorts: map[int]bool{80: true, 8080: true},
},
}, {
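// A pod with an invalid (un-namespaced) OIR key: its quantity must not
// appear in the cache's accounted resources.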
pods: []*v1.Pod{testPods[6]},
wNodeInfo: &NodeInfo{
requestedResource: &Resource{
MilliCPU: 100,
Memory: 500,
},
nonzeroRequest: &Resource{
MilliCPU: 100,
Memory: 500,
},
allocatableResource: &Resource{},
pods: []*v1.Pod{testPods[6]},
usedPorts: map[int]bool{},
},
},
}

for i, tt := range tests {
cache := newSchedulerCache(time.Second, time.Second, nil)
@@ -140,8 +193,8 @@ func assumeAndFinishBinding(cache *schedulerCache, pod *v1.Pod, assumedTime time
func TestExpirePod(t *testing.T) {
nodeName := "node"
testPods := []*v1.Pod{
makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}),
}
now := time.Now()
ttl := 10 * time.Second
@@ -200,8 +253,8 @@ func TestAddPodWillConfirm(t *testing.T) {
ttl := 10 * time.Second

testPods := []*v1.Pod{
makeBasePod(nodeName, "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(nodeName, "test-2", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
makeBasePod(t, nodeName, "test-1", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test-2", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}),
}
tests := []struct {
podsToAssume []*v1.Pod
@@ -249,7 +302,7 @@ func TestAddPodWillConfirm(t *testing.T) {
func TestAddPodAfterExpiration(t *testing.T) {
nodeName := "node"
ttl := 10 * time.Second
basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}})
tests := []struct {
pod *v1.Pod

@@ -297,8 +350,8 @@ func TestUpdatePod(t *testing.T) {
nodeName := "node"
ttl := 10 * time.Second
testPods := []*v1.Pod{
makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(nodeName, "test", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}),
}
tests := []struct {
podsToAssume []*v1.Pod
@@ -363,8 +416,8 @@ func TestExpireAddUpdatePod(t *testing.T) {
nodeName := "node"
ttl := 10 * time.Second
testPods := []*v1.Pod{
makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(nodeName, "test", "200m", "1Ki", []v1.ContainerPort{{HostPort: 8080}}),
makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}}),
makeBasePod(t, nodeName, "test", "200m", "1Ki", "", []v1.ContainerPort{{HostPort: 8080}}),
}
tests := []struct {
podsToAssume []*v1.Pod
@@ -436,7 +489,7 @@ func TestExpireAddUpdatePod(t *testing.T) {
// TestRemovePod tests after added pod is removed, its information should also be subtracted.
func TestRemovePod(t *testing.T) {
nodeName := "node"
basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}})
tests := []struct {
pod *v1.Pod
wNodeInfo *NodeInfo
@@ -478,7 +531,7 @@ func TestRemovePod(t *testing.T) {

func TestForgetPod(t *testing.T) {
nodeName := "node"
basePod := makeBasePod(nodeName, "test", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
basePod := makeBasePod(t, nodeName, "test", "100m", "500", "", []v1.ContainerPort{{HostPort: 80}})
tests := []struct {
pods []*v1.Pod
}{{
@@ -751,13 +804,30 @@ func benchmarkExpire(b *testing.B, podNum int) {
}
}

func makeBasePod(nodeName, objName, cpu, mem string, ports []v1.ContainerPort) *v1.Pod {
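// testingMode abstracts over *testing.T and *testing.B so that makeBasePod
// can be shared by tests and benchmarks.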
type testingMode interface {
Fatalf(format string, args ...interface{})
}

func makeBasePod(t testingMode, nodeName, objName, cpu, mem, oir string, ports []v1.ContainerPort) *v1.Pod {
req := v1.ResourceList{}
if cpu != "" {
req = v1.ResourceList{
v1.ResourceCPU: resource.MustParse(cpu),
v1.ResourceMemory: resource.MustParse(mem),
}
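// oir has the form "name:quantity" (e.g. "oir-foo:3"); the sentinel
// "random-invalid-oir-key" is deliberately left un-namespaced to exercise
// the invalid-OIR path.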
if oir != "" {
if len(strings.Split(oir, ":")) != 2 {
t.Fatalf("Invalid OIR string")
}
var name v1.ResourceName
if strings.Split(oir, ":")[0] != "random-invalid-oir-key" {
name = v1helper.OpaqueIntResourceName(strings.Split(oir, ":")[0])
} else {
name = v1.ResourceName(strings.Split(oir, ":")[0])
}
quantity := resource.MustParse(strings.Split(oir, ":")[1])
req[name] = quantity
}
}
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
@@ -782,7 +852,7 @@ func setupCacheOf1kNodes30kPods(b *testing.B) Cache {
nodeName := fmt.Sprintf("node-%d", i)
for j := 0; j < 30; j++ {
objName := fmt.Sprintf("%s-pod-%d", nodeName, j)
pod := makeBasePod(nodeName, objName, "0", "0", nil)
pod := makeBasePod(b, nodeName, objName, "0", "0", "", nil)

if err := cache.AddPod(pod); err != nil {
b.Fatalf("AddPod failed: %v", err)
@@ -797,7 +867,7 @@ func setupCacheWithAssumedPods(b *testing.B, podNum int, assumedTime time.Time)
for i := 0; i < podNum; i++ {
nodeName := fmt.Sprintf("node-%d", i/10)
objName := fmt.Sprintf("%s-pod-%d", nodeName, i%10)
pod := makeBasePod(nodeName, objName, "0", "0", nil)
pod := makeBasePod(b, nodeName, objName, "0", "0", "", nil)

err := assumeAndFinishBinding(cache, pod, assumedTime)
if err != nil {
