
Commit

feat: add typos to GitHub action (openkruise#1185)
Signed-off-by: Zhizhen He <[email protected]>
hezhizhen authored Feb 21, 2023
1 parent 7d78754 commit 3d3f51d
Showing 28 changed files with 89 additions and 70 deletions.
10 changes: 10 additions & 0 deletions .github/workflows/ci.yaml
@@ -21,6 +21,16 @@ env:
AWS_USR: ${{ secrets.AWS_USR }}

jobs:
+  typos-check:
+    name: Spell Check with Typos
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout Actions Repository
+        uses: actions/checkout@v3
+      - name: Check spelling with custom config file
+        uses: crate-ci/[email protected]
+        with:
+          config: ./typos.toml

golangci-lint:
runs-on: ubuntu-18.04
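The new job points crate-ci/typos at a typos.toml in the repository root. That config file is not shown in this view; as a rough sketch built from the tool's documented config keys (the excluded paths and allowed words below are illustrative assumptions, not the project's actual settings), it might look like:

# typos.toml -- illustrative sketch, not the file from this commit
[files]
# skip vendored and generated content that tends to trip spell checkers
extend-exclude = ["vendor/", "*.svg"]

[default.extend-words]
# spell project-specific terms as themselves so they are not flagged
kruise = "kruise"

The same check can be run locally with the typos CLI, for example typos --config typos.toml, matching the version pinned in the workflow.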
8 changes: 4 additions & 4 deletions Dockerfile
@@ -1,6 +1,6 @@
# Build the manager and daemon binaries
ARG BASE_IMAGE=alpine
-ARG BASE_IMAGE_VERION=3.17
+ARG BASE_IMAGE_VERSION=3.17
FROM golang:1.18-alpine3.17 as builder

WORKDIR /workspace
@@ -20,11 +20,11 @@ RUN CGO_ENABLED=0 GO111MODULE=on go build -mod=vendor -a -o manager main.go \
&& CGO_ENABLED=0 GO111MODULE=on go build -mod=vendor -a -o daemon ./cmd/daemon/main.go

ARG BASE_IMAGE
-ARG BASE_IMAGE_VERION
-FROM ${BASE_IMAGE}:${BASE_IMAGE_VERION}
+ARG BASE_IMAGE_VERSION
+FROM ${BASE_IMAGE}:${BASE_IMAGE_VERSION}

RUN apk add --no-cache ca-certificates=~20220614-r4 bash=~5.2.15-r0 expat=~2.5.0-r0 \
-&& rm -rf /var/cache/apk/*
+&& rm -rf /var/cache/apk/*

WORKDIR /
COPY --from=builder /workspace/manager .
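Note that the build-arg name itself changed here: a build script still passing the old BASE_IMAGE_VERION name would have its value ignored and the image would fall back to the default 3.17. A hypothetical invocation against the corrected Dockerfile looks like this (the image tag is a placeholder, not a project convention):

# hypothetical local build with the renamed build args
docker build \
  --build-arg BASE_IMAGE=alpine \
  --build-arg BASE_IMAGE_VERSION=3.17 \
  -t kruise-manager:dev -f Dockerfile .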
8 changes: 4 additions & 4 deletions Dockerfile_multiarch
@@ -1,6 +1,6 @@
# Build the manager and daemon binaries
ARG BASE_IMAGE=alpine
-ARG BASE_IMAGE_VERION=3.17
+ARG BASE_IMAGE_VERSION=3.17
FROM --platform=$BUILDPLATFORM golang:1.18-alpine3.17 as builder

WORKDIR /workspace
@@ -23,11 +23,11 @@ RUN GOOS=${TARGETOS} GOARCH=${TARGETARCH} CGO_ENABLED=0 GO111MODULE=on go build


ARG BASE_IMAGE
-ARG BASE_IMAGE_VERION
-FROM ${BASE_IMAGE}:${BASE_IMAGE_VERION}
+ARG BASE_IMAGE_VERSION
+FROM ${BASE_IMAGE}:${BASE_IMAGE_VERSION}

RUN apk add --no-cache ca-certificates=~20220614-r4 bash=~5.2.15-r0 expat=~2.5.0-r0 \
-&& rm -rf /var/cache/apk/*
+&& rm -rf /var/cache/apk/*

WORKDIR /
COPY --from=builder /workspace/manager .
4 changes: 2 additions & 2 deletions apis/apps/v1alpha1/nodeimage_types.go
@@ -144,7 +144,7 @@ type ImageTagStatus struct {
// Represents the image pulling task phase.
Phase ImagePullPhase `json:"phase"`

-// Represents the pulling progress of this tag, which is beetween 0-100. There is no guarantee
+// Represents the pulling progress of this tag, which is between 0-100. There is no guarantee
// of monotonic consistency, and it may be a rollback due to retry during pulling.
Progress int32 `json:"progress,omitempty"`

@@ -168,7 +168,7 @@ type ImageTagStatus struct {
// +optional
ImageID string `json:"imageID,omitempty"`

-// Represents the summary informations of this node
+// Represents the summary information of this node
// +optional
Message string `json:"message,omitempty"`
}
4 changes: 2 additions & 2 deletions config/crd/bases/apps.kruise.io_nodeimages.yaml
@@ -272,15 +272,15 @@ spec:
description: Represents the ID of this image.
type: string
message:
-description: Represents the summary informations of this
+description: Represents the summary information of this
node
type: string
phase:
description: Represents the image pulling task phase.
type: string
progress:
description: Represents the pulling progress of this tag,
-which is beetween 0-100. There is no guarantee of monotonic
+which is between 0-100. There is no guarantee of monotonic
consistency, and it may be a rollback due to retry during
pulling.
format: int32
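The description strings in this CRD are generated from the Go type comments fixed above, which is why the two files change together. In a kubebuilder-style project the YAML is regenerated rather than edited by hand, roughly along these lines (target names are assumed, not taken from this repository's Makefile):

# regenerate deepcopy code and CRD manifests after editing API type comments (assumed targets)
make generate manifests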
File renamed without changes.
2 changes: 1 addition & 1 deletion docs/proposals/20210316-containerrecreaterequest.md
@@ -39,7 +39,7 @@ We hope to provide a way to let users recreate(restart) one or more containers i
It is useful for some user scenarios, such as:

1. Restart the application in app container.
-2. Restart a sidecar contaienr along with app container.
+2. Restart a sidecar container along with app container.

## Proposal

2 changes: 1 addition & 1 deletion docs/proposals/20210614-podunavailablebudget.md
@@ -127,7 +127,7 @@ This program customizes the PodUnavailableBudget (later referred to as PUB) CRD
- When UnavailableAllowed<=0, the request is not allowed, indicates that the modification of the pod will be rejected
- Pub Controller dynamically calculates Pub.Status according to selected Pods
- DesiredAvailable indicates that minimum desired number of available pods(according to MaxUnavailable or MinAvailable)
-- CurrentAvailable indicates that current number of ready pods(condition.Ready=ture)
+- CurrentAvailable indicates that current number of ready pods(condition.Ready=true)
- UnavailableAllowed = CurrentAvailable - DesiredAvailable

### Pub selector cannot overlap
4 changes: 2 additions & 2 deletions docs/proposals/20210719-resourcedistribution.md
@@ -214,7 +214,7 @@ type ResourceDistributionStatus struct {

1. Create and Distribute
- Parse and analyze the resource and the target namespaces.
-- Create the resource based on `Resouce` field.
+- Create the resource based on `Resource` field.
- Replicate and distribute the resource, and set their `OwnerReference` as the `ResourceDistribution`.

2. Update and Synchronize
@@ -232,4 +232,4 @@ type ResourceDistributionStatus struct {
- Benefiting from `OwnerReference`, the replicas will be cleaned when the `ResourceDistribution` is deleted.

## Implementation History
-- [ ] 07/28/2021: Proposal submission
+- [ ] 07/28/2021: Proposal submission
2 changes: 1 addition & 1 deletion pkg/control/pubcontrol/pub_control_test.go
@@ -45,7 +45,7 @@ func TestIsPodUnavailableChanged(t *testing.T) {
expect: false,
},
{
-name: "add unvailable label",
+name: "add unavailable label",
getOldPod: func() *corev1.Pod {
demo := podDemo.DeepCopy()
return demo
10 changes: 6 additions & 4 deletions pkg/control/sidecarcontrol/util_hotupgrade.go
@@ -113,7 +113,8 @@ func findContainerToHotUpgrade(sidecarContainer *appsv1alpha1.SidecarContainer,
// First, empty hot sidecar container will be upgraded with the latest sidecarSet specification
if c1.Image == sidecarContainer.UpgradeStrategy.HotUpgradeEmptyImage {
return c1.Name, c2.Name
-} else if c2.Image == sidecarContainer.UpgradeStrategy.HotUpgradeEmptyImage {
+}
+if c2.Image == sidecarContainer.UpgradeStrategy.HotUpgradeEmptyImage {
return c2.Name, c1.Name
}

@@ -123,11 +124,12 @@ func findContainerToHotUpgrade(sidecarContainer *appsv1alpha1.SidecarContainer,
klog.V(3).Infof("pod(%s/%s) container(%s) ready(%v) container(%s) ready(%v)", pod.Namespace, pod.Name, c1.Name, c1Ready, c2.Name, c2Ready)
if c1Ready && !c2Ready {
return c2.Name, c1.Name
-} else if !c1Ready && c2Ready {
+}
+if !c1Ready && c2Ready {
return c1.Name, c2.Name
}

// Third, the older sidecar container will be upgraded
-workContianer, olderContainer := GetPodHotUpgradeContainers(sidecarContainer.Name, pod)
-return olderContainer, workContianer
+workContainer, olderContainer := GetPodHotUpgradeContainers(sidecarContainer.Name, pod)
+return olderContainer, workContainer
}
12 changes: 6 additions & 6 deletions pkg/control/sidecarcontrol/util_test.go
@@ -326,13 +326,13 @@ func TestGetPodSidecarSetRevision(t *testing.T) {

for _, cs := range cases {
t.Run(cs.name, func(t *testing.T) {
-revison := GetPodSidecarSetRevision("test-sidecarset", cs.getPod())
-if cs.exceptRevision != revison {
-t.Fatalf("except sidecar container test-sidecarset revison %s, but get %s", cs.exceptRevision, revison)
+revision := GetPodSidecarSetRevision("test-sidecarset", cs.getPod())
+if cs.exceptRevision != revision {
+t.Fatalf("except sidecar container test-sidecarset revision %s, but get %s", cs.exceptRevision, revision)
}
-withoutRevison := GetPodSidecarSetWithoutImageRevision("test-sidecarset", cs.getPod())
-if cs.exceptWithoutImageRevision != withoutRevison {
-t.Fatalf("except sidecar container test-sidecarset WithoutImageRevision %s, but get %s", cs.exceptWithoutImageRevision, withoutRevison)
+withoutRevision := GetPodSidecarSetWithoutImageRevision("test-sidecarset", cs.getPod())
+if cs.exceptWithoutImageRevision != withoutRevision {
+t.Fatalf("except sidecar container test-sidecarset WithoutImageRevision %s, but get %s", cs.exceptWithoutImageRevision, withoutRevision)
}
})
}
@@ -212,7 +212,7 @@ func (r *ReconcileAdvancedCronJob) reconcileBroadcastJob(ctx context.Context, re
getNextSchedule := func(cronJob *appsv1alpha1.AdvancedCronJob, now time.Time) (lastMissed time.Time, next time.Time, err error) {
sched, err := cron.ParseStandard(formatSchedule(cronJob))
if err != nil {
return time.Time{}, time.Time{}, fmt.Errorf("Unparseable schedule %q: %v", cronJob.Spec.Schedule, err)
return time.Time{}, time.Time{}, fmt.Errorf("unparsable schedule %q: %v", cronJob.Spec.Schedule, err)
}

// for optimization purposes, cheat a bit and start from our last observed run time
@@ -212,7 +212,7 @@ func (r *ReconcileAdvancedCronJob) reconcileJob(ctx context.Context, req ctrl.Re
getNextSchedule := func(cronJob *appsv1alpha1.AdvancedCronJob, now time.Time) (lastMissed time.Time, next time.Time, err error) {
sched, err := cron.ParseStandard(formatSchedule(cronJob))
if err != nil {
return time.Time{}, time.Time{}, fmt.Errorf("Unparseable schedule %q: %v", cronJob.Spec.Schedule, err)
return time.Time{}, time.Time{}, fmt.Errorf("unparsable schedule %q: %v", cronJob.Spec.Schedule, err)
}

// for optimization purposes, cheat a bit and start from our last observed run time
2 changes: 1 addition & 1 deletion pkg/controller/broadcastjob/broadcastjob_controller.go
@@ -111,7 +111,7 @@ func add(mgr manager.Manager, r reconcile.Reconciler) error {
return err
}

-// Wathc for changes to Pod
+// Watch for changes to Pod
err = c.Watch(&source.Kind{Type: &corev1.Pod{}}, &podEventHandler{
enqueueHandler: handler.EnqueueRequestForOwner{
IsController: true,
2 changes: 1 addition & 1 deletion pkg/controller/cloneset/sync/api.go
@@ -29,7 +29,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client"
)

-// Interface for managing pods scaleing and updating.
+// Interface for managing pods scaling and updating.
type Interface interface {
Scale(
currentCS, updateCS *appsv1alpha1.CloneSet,
2 changes: 1 addition & 1 deletion pkg/controller/daemonset/daemonset_controller.go
@@ -663,7 +663,7 @@ func (dsc *ReconcileDaemonSet) manage(ds *appsv1alpha1.DaemonSet, nodeList []*co
}

// syncNodes deletes given pods and creates new daemon set pods on the given nodes
-// returns slice with erros if any
+// returns slice with errors if any
func (dsc *ReconcileDaemonSet) syncNodes(ds *appsv1alpha1.DaemonSet, podsToDelete, nodesNeedingDaemonPods []string, hash string) error {
if ds.Spec.Lifecycle != nil && ds.Spec.Lifecycle.PreDelete != nil {
var err error
2 changes: 1 addition & 1 deletion pkg/controller/daemonset/daemonset_update_test.go
@@ -88,7 +88,7 @@ func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
markPodsReady(podControl.podStore)

-// surge is thhe controlling amount
+// surge is the controlling amount
maxSurge := 2
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
2 changes: 1 addition & 1 deletion pkg/controller/sidecarset/sidecarset_strategy.go
@@ -43,7 +43,7 @@ func (p *spreadingStrategy) GetNextUpgradePods(control sidecarcontrol.SidecarCon

// If selector is not nil, check whether the pods is selected to upgrade
isSelected := func(pod *corev1.Pod) bool {
-//when selector is nil, always return ture
+//when selector is nil, always return true
if strategy.Selector == nil {
return true
}
8 changes: 4 additions & 4 deletions pkg/controller/statefulset/stateful_set_control_test.go
@@ -3044,7 +3044,7 @@ func assertMonotonicInvariants(set *appsv1beta1.StatefulSet, om *fakeObjectManag

for _, claim := range getPersistentVolumeClaims(set, pods[ord]) {
claim, _ := om.claimsLister.PersistentVolumeClaims(set.Namespace).Get(claim.Name)
-if err := checkClaimInvarients(set, pods[ord], claim, ord); err != nil {
+if err := checkClaimInvariants(set, pods[ord], claim, ord); err != nil {
return err
}
}
@@ -3076,7 +3076,7 @@ func assertBurstInvariants(set *appsv1beta1.StatefulSet, om *fakeObjectManager)
if err != nil {
return err
}
-if err := checkClaimInvarients(set, pods[ord], claim, ord); err != nil {
+if err := checkClaimInvariants(set, pods[ord], claim, ord); err != nil {
return err
}
}
@@ -3111,7 +3111,7 @@ func assertUpdateInvariants(set *appsv1beta1.StatefulSet, om *fakeObjectManager)
if err != nil {
return err
}
-if err := checkClaimInvarients(set, pods[ord], claim, ord); err != nil {
+if err := checkClaimInvariants(set, pods[ord], claim, ord); err != nil {
return err
}
}
@@ -3138,7 +3138,7 @@ func assertUpdateInvariants(set *appsv1beta1.StatefulSet, om *fakeObjectManager)
return nil
}

-func checkClaimInvarients(set *appsv1beta1.StatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, ordinal int) error {
+func checkClaimInvariants(set *appsv1beta1.StatefulSet, pod *v1.Pod, claim *v1.PersistentVolumeClaim, ordinal int) error {
policy := appsv1beta1.StatefulSetPersistentVolumeClaimRetentionPolicy{
WhenScaled: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
WhenDeleted: appsv1beta1.RetainPersistentVolumeClaimRetentionPolicyType,
6 changes: 3 additions & 3 deletions pkg/controller/statefulset/stateful_set_utils_test.go
@@ -120,7 +120,7 @@ func TestIsMemberOf(t *testing.T) {
set2.Name = "foo2"
pod := newStatefulSetPod(set, 1)
if !isMemberOf(set, pod) {
t.Error("isMemberOf retruned false negative")
t.Error("isMemberOf returned false negative")
}
if isMemberOf(set2, pod) {
t.Error("isMemberOf returned false positive")
@@ -153,7 +153,7 @@ func TestStorageMatches(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 1)
if !storageMatches(set, pod) {
t.Error("Newly created Pod has a invalid stroage")
t.Error("Newly created Pod has a invalid storage")
}
pod.Spec.Volumes = nil
if storageMatches(set, pod) {
@@ -207,7 +207,7 @@ func TestUpdateStorage(t *testing.T) {
set := newStatefulSet(3)
pod := newStatefulSetPod(set, 1)
if !storageMatches(set, pod) {
t.Error("Newly created Pod has a invalid stroage")
t.Error("Newly created Pod has a invalid storage")
}
pod.Spec.Volumes = nil
if storageMatches(set, pod) {
@@ -120,7 +120,7 @@ func TestAstsReconcile(t *testing.T) {
expectedAstsCount(g, instance, 1)
}

-func TestTemplateTypeSwtich(t *testing.T) {
+func TestTemplateTypeSwitch(t *testing.T) {
g, requests, cancel, mgrStopped := setUp(t)
defer func() {
clean(g, c)
@@ -29,7 +29,7 @@ func TestCloneSetAll(t *testing.T) {

cases := []TestCaseFunc{
testCsReconcile,
-testTemplateTypeSwtichToCS,
+testTemplateTypeSwitchToCS,
}

for _, f := range cases {
@@ -132,7 +132,7 @@ func testCsReconcile(t *testing.T, g *gomega.GomegaWithT, namespace string, requ
expectedCsCount(g, instance, 1)
}

-func testTemplateTypeSwtichToCS(t *testing.T, g *gomega.GomegaWithT, namespace string, requests chan reconcile.Request) {
+func testTemplateTypeSwitchToCS(t *testing.T, g *gomega.GomegaWithT, namespace string, requests chan reconcile.Request) {
caseName := "test-template-type-switch-to-cs"
instance := &appsv1alpha1.UnitedDeployment{
ObjectMeta: metav1.ObjectMeta{
2 changes: 1 addition & 1 deletion pkg/daemon/criruntime/imageruntime/containerd.go
@@ -294,7 +294,7 @@ func (d *containerdImageClient) resolverGenerator(authInfo *daemonutil.AuthInfo)

// createRepoDigestRecord creates digest type record in containerd.
//
-// NOTE: We don't use CRI-API to pull image but we juse CRI-API to retrieve
+// NOTE: We don't use CRI-API to pull image but we use CRI-API to retrieve
// image list. For the repo:tag image, the containerd will receive image create
// event and then update local cache with the mapping between image ID and
// image name. But there is no mapping between image ID and image digest. We
8 changes: 4 additions & 4 deletions test/e2e/apps/statefulset.go
@@ -496,7 +496,7 @@ var _ = SIGDescribe("StatefulSet", func() {
}
}
gomega.Expect(ss.Status.CurrentRevision).To(gomega.Equal(updateRevision),
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revison %s on update completion",
fmt.Sprintf("StatefulSet %s/%s current revision %s does not equal update revision %s on update completion",
ss.Namespace,
ss.Name,
ss.Status.CurrentRevision,
@@ -1736,15 +1736,15 @@ func verifyStatefulSetPVCsExist(c clientset.Interface, ss *appsv1beta1.StatefulS
}

// verifyStatefulSetPVCsExistWithOwnerRefs works as verifyStatefulSetPVCsExist, but also waits for the ownerRefs to match.
-func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, kc kruiseclientset.Interface, ss *appsv1beta1.StatefulSet, claimIndicies []int, wantSetRef, wantPodRef bool) error {
+func verifyStatefulSetPVCsExistWithOwnerRefs(c clientset.Interface, kc kruiseclientset.Interface, ss *appsv1beta1.StatefulSet, claimIndices []int, wantSetRef, wantPodRef bool) error {
indexSet := map[int]struct{}{}
-for _, id := range claimIndicies {
+for _, id := range claimIndices {
indexSet[id] = struct{}{}
}
set, _ := kc.AppsV1beta1().StatefulSets(ss.Namespace).Get(context.TODO(), ss.Name, metav1.GetOptions{})
setUID := set.GetUID()
if setUID == "" {
framework.Failf("Statefulset %s mising UID", ss.Name)
framework.Failf("Statefulset %s missing UID", ss.Name)
}
return wait.PollImmediate(framework.StatefulSetPoll, framework.StatefulSetTimeout, func() (bool, error) {
pvcList, err := c.CoreV1().PersistentVolumeClaims(ss.Namespace).List(context.TODO(), metav1.ListOptions{LabelSelector: klabels.Everything().String()})
