Merge pull request kubernetes#47179 from ddysher/local-isolation-fix
Automatic merge from submit-queue (batch tested with PRs 47883, 47179, 46966, 47982, 47945)

Fix local isolation for pod requesting only overlay or scratch

**What this PR does / why we need it**:

Fix the overlay/scratch resource predicate for pods whose only resource request is overlay or scratch storage.

For example, the following pod can pass the predicate even if the node's overlay storage is only 512Gi:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod
spec:
  containers:
  - name: nginx
    image: nginx
    resources:
      requests:
        storage.kubernetes.io/overlay: 1024Gi
```

Similarly, the following pod will also pass the predicate:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: pod
spec:
  containers:
  - name: nginx
    image: nginx
    volumeMounts:
    - name: data
      mountPath: /data
  volumes:
  - name: data
    emptyDir:
      sizeLimit: 1024Gi
```
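Both examples slip through because of the early-return guard in `PodFitsResources` (see the `predicates.go` diff below): when the CPU, memory, and GPU requests are all zero, the predicate returns early without ever comparing storage against the node. The sketch below is a minimal, self-contained illustration of the old versus fixed guard; the `resource` type and function names here are illustrative stand-ins, not the scheduler's real `schedulercache.Resource` API.

```go
package main

import "fmt"

// Minimal stand-in for the scheduler's per-pod resource request
// (illustrative field subset only, not the real schedulercache.Resource).
type resource struct {
	MilliCPU       int64
	Memory         int64
	NvidiaGPU      int64
	StorageOverlay int64
	StorageScratch int64
}

// skipsChecks models the old early-return guard: storage fields were not
// consulted, so a storage-only request looked like an "empty" request.
func skipsChecks(r resource) bool {
	return r.MilliCPU == 0 && r.Memory == 0 && r.NvidiaGPU == 0
}

// skipsChecksFixed models the guard after this PR: overlay and scratch
// requests keep the pod in the full per-resource comparison.
func skipsChecksFixed(r resource) bool {
	return r.MilliCPU == 0 && r.Memory == 0 && r.NvidiaGPU == 0 &&
		r.StorageOverlay == 0 && r.StorageScratch == 0
}

func main() {
	overlayOnly := resource{StorageOverlay: 1024} // e.g. a 1024Gi overlay request
	fmt.Println(skipsChecks(overlayOnly))      // true  -> predicate passed unchecked
	fmt.Println(skipsChecksFixed(overlayOnly)) // false -> node capacity is now verified
}
```

The emptyDir `sizeLimit` case is the scratch-storage analogue of the overlay case: it also produced an all-zero CPU/memory/GPU request and therefore skipped the check.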

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes kubernetes#47798

**Special notes for your reviewer**:

**Release note**:

```release-note
```

@jingxu97 @vishh @dashpole
Kubernetes Submit Queue authored Jun 23, 2017
2 parents 80a5842 + 3cecb07 commit 171f48a
Showing 2 changed files with 26 additions and 7 deletions.
2 changes: 1 addition & 1 deletion plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -584,7 +584,7 @@ func PodFitsResources(pod *v1.Pod, meta interface{}, nodeInfo *schedulercache.No
// We couldn't parse metadata - fallback to computing it.
podRequest = GetResourceRequest(pod)
}
if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && len(podRequest.OpaqueIntResources) == 0 {
if podRequest.MilliCPU == 0 && podRequest.Memory == 0 && podRequest.NvidiaGPU == 0 && podRequest.StorageOverlay == 0 && podRequest.StorageScratch == 0 && len(podRequest.OpaqueIntResources) == 0 {
return len(predicateFails) == 0, predicateFails, nil
}

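With the guard fixed, a storage-only pod now falls through to the per-resource comparison further down in `PodFitsResources` (not shown in this diff). The sketch below only illustrates the arithmetic the new test cases expect (requested 18 plus already-used 5 exceeds allocatable 20, so scratch is insufficient); the type and method names are illustrative, not the scheduler's real types.

```go
package main

import "fmt"

// Illustrative quantities for the scratch-storage comparison; the values
// mirror the new test case below (request 18, node already using 5, capacity 20).
type scratchCheck struct {
	requested int64 // scratch + overlay requested by the pod
	used      int64 // scratch already requested by pods on the node
	capacity  int64 // node's allocatable scratch storage
}

// insufficient reports whether the request would exceed the node's allocatable
// scratch storage, i.e. the condition that yields
// NewInsufficientResourceError(v1.ResourceStorageScratch, requested, used, capacity).
func (c scratchCheck) insufficient() bool {
	return c.capacity < c.requested+c.used
}

func main() {
	c := scratchCheck{requested: 18, used: 5, capacity: 20}
	fmt.Println(c.insufficient()) // true -> predicate fails, matching the test's expectation
}
```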
31 changes: 25 additions & 6 deletions plugin/pkg/scheduler/algorithm/predicates/predicates_test.go
@@ -435,7 +435,7 @@ func TestPodFitsResources(t *testing.T) {
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 10, Memory: 10, StorageOverlay: 20})),
fits: false,
test: "due to init container scratch disk",
test: "due to container scratch disk",
reasons: []algorithm.PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceCPU, 1, 10, 10),
NewInsufficientResourceError(v1.ResourceStorageScratch, 1, 20, 20),
@@ -453,7 +453,17 @@ func TestPodFitsResources(t *testing.T) {
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
fits: false,
test: "request exceeds allocatable",
test: "request exceeds allocatable overlay storage resource",
reasons: []algorithm.PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
},
},
{
pod: newResourcePod(schedulercache.Resource{StorageOverlay: 18}),
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
fits: false,
test: "request exceeds allocatable overlay storage resource",
reasons: []algorithm.PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceStorageScratch, 18, 5, 20),
},
@@ -471,17 +481,26 @@ func TestPodFitsResources(t *testing.T) {
},
},
{
pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 10}),
emptyDirLimit: 15,
storageMedium: v1.StorageMediumMemory,
pod: newResourcePod(schedulercache.Resource{}),
emptyDirLimit: 25,
storageMedium: v1.StorageMediumDefault,
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
fits: true,
fits: false,
test: "storage scratchrequest exceeds allocatable",
reasons: []algorithm.PredicateFailureReason{
NewInsufficientResourceError(v1.ResourceStorageScratch, 25, 5, 20),
},
},
{
pod: newResourcePod(schedulercache.Resource{MilliCPU: 1, Memory: 1, StorageOverlay: 10}),
emptyDirLimit: 15,
storageMedium: v1.StorageMediumMemory,
nodeInfo: schedulercache.NewNodeInfo(
newResourcePod(schedulercache.Resource{MilliCPU: 2, Memory: 2, StorageOverlay: 5})),
fits: true,
test: "pod fit with memory medium",
},
}

for _, test := range storagePodsTests {
