From 1f707e35708112ac19446570268f0b916578119a Mon Sep 17 00:00:00 2001
From: David Eads
Date: Fri, 5 Apr 2019 13:41:00 -0400
Subject: [PATCH] switch to non-deprecated client

---
 .../oauth/expirationvalidator_test.go | 4 +-
 .../oauth/tokenauthenticator_test.go | 10 ++---
 .../apiserver/registry/deploylog/rest_test.go | 8 ++--
 .../apiserver/registry/deploylog/wait_test.go | 4 +-
 .../apiserver/registry/instantiate/rest.go | 2 +-
 .../registry/instantiate/rest_test.go | 2 +-
 pkg/apps/apiserver/registry/rollback/rest.go | 2 +-
 .../apiserver/registry/rollback/rest_test.go | 4 +-
 .../deploymentconfig_controller_test.go | 4 +-
 pkg/apps/strategy/recreate/recreate_test.go | 18 ++++-----
 pkg/apps/strategy/rolling/rolling_test.go | 16 ++++----
 pkg/apps/strategy/support/lifecycle_test.go | 6 +--
 .../admission/restrictusers/restrictusers.go | 2 +-
 .../restrictusers/subjectchecker_test.go | 2 +-
 pkg/build/apiserver/apiserver.go | 6 +--
 .../registry/buildconfig/webhook_test.go | 16 ++++----
 .../buildconfiginstantiate/rest_test.go | 2 +-
 .../apiserver/registry/buildlog/rest_test.go | 16 ++++----
 pkg/build/client/clients.go | 16 ++++----
 .../controller/build/build_controller_test.go | 6 +--
 pkg/build/controller/common/util_test.go | 6 +--
 pkg/build/generator/generator_test.go | 10 ++---
 pkg/build/generator/test/mocks.go | 2 +-
 .../controller/image.go | 10 ++---
 .../controller/quota.go | 8 ++--
 .../controller/unidling.go | 2 +-
 .../patch_authenticator.go | 2 +-
 pkg/image/apiserver/apiserver.go | 2 +-
 .../registry/imagesecret/rest_test.go | 2 +-
 .../environmentresolvercache_test.go | 2 +-
 .../signature/signature_import_controller.go | 2 +-
 .../trigger/image_trigger_controller_test.go | 4 +-
 .../deploymentconfigs_test.go | 2 +-
 pkg/monitor/event.go | 4 +-
 pkg/network/common/common.go | 2 +-
 pkg/network/master/egressip.go | 2 +-
 pkg/network/master/master.go | 12 +++---
 pkg/network/master/subnet_allocator.go | 2 +-
 pkg/network/master/subnets.go | 22 +++++------
 pkg/network/master/vnids.go | 10 ++---
 pkg/network/node/egress_network_policy.go | 2 +-
 pkg/network/node/multitenant.go | 2 +-
 pkg/network/node/networkpolicy.go | 2 +-
 pkg/network/node/node.go | 4 +-
 pkg/network/node/pod.go | 5 ++-
 pkg/network/node/subnets.go | 2 +-
 pkg/network/node/vnids.go | 4 +-
 pkg/network/proxy/proxy.go | 6 +--
 .../oauth/registry/registry_test.go | 4 +-
 pkg/oauthserver/server/grant/grant_test.go | 8 ++--
 pkg/oc/cli/admin/policy/modify_roles_test.go | 10 ++---
 pkg/oc/cli/admin/prune/images/images.go | 12 +++---
 pkg/oc/cli/admin/release/extract.go | 2 +-
 pkg/oc/cli/admin/release/info.go | 2 +-
 pkg/oc/cli/admin/upgrade/upgrade.go | 8 ++--
 pkg/oc/cli/cancelbuild/cancelbuild_test.go | 2 +-
 pkg/oc/cli/importimage/importimage_test.go | 2 +-
 pkg/oc/cli/logs/logs_test.go | 2 +-
 pkg/oc/cli/registry/info/info.go | 2 +-
 pkg/oc/cli/registry/login/login.go | 6 +--
 pkg/oc/cli/rollout/retry.go | 6 +--
 pkg/oc/cli/rsync/util.go | 2 +-
 pkg/oc/cli/set/volume_test.go | 2 +-
 pkg/oc/cli/tag/tag_test.go | 10 ++---
 pkg/oc/lib/describe/deployments.go | 12 +++---
 pkg/oc/lib/describe/deployments_test.go | 2 +-
 pkg/oc/lib/newapp/app/templatelookup_test.go | 2 +-
 pkg/oc/util/clientcmd/resolve_test.go | 2 +-
 pkg/pod/envresolve/env.go | 2 +-
 .../registry/project/proxy/proxy_test.go | 10 ++---
 .../projectrequest/delegated/delegated.go | 2 +-
 .../project_finalizer_controller.go | 2 +-
 .../clusterresourcequota/accessor_test.go | 4 +-
 .../reconciliation_controller_test.go | 2 +-
 .../image/imagestreamtag_evaluator_test.go | 2 +-
 pkg/route/controller/ingress/ingress_test.go | 4 +-
 .../ingressip/service_ingressip_controller.go | 2 +-
 .../controllers/create_dockercfg_secrets.go | 14 +++----
 .../controllers/deleted_dockercfg_secrets.go | 6 +--
 .../controllers/deleted_token_secrets.go | 4 +-
 .../controllers/docker_registry_service.go | 2 +-
 .../oauthclient/oauthclientregistry_test.go | 8 ++--
 pkg/template/controller/metrics_test.go | 4 +-
 .../servicebroker/bind.go | 6 +--
 .../servicebroker/deprovision.go | 4 +-
 .../servicebroker/lastoperation.go | 4 +-
 .../servicebroker/provision.go | 22 +++++------
 .../servicebroker/servicebroker.go | 2 +-
 .../servicebroker/unbind.go | 4 +-
 .../controller/unidling_controller_test.go | 24 ++++++------
 pkg/user/apiserver/apiserver.go | 2 +-
 test/extended/builds/build_pruning.go | 22 +++++------
 test/extended/builds/cluster_config.go | 4 +-
 .../builds/completiondeadlineseconds.go | 4 +-
 test/extended/builds/contextdir.go | 8 ++--
 test/extended/builds/controller_compat.go | 12 +++---
 test/extended/builds/dockerfile.go | 12 +++---
 test/extended/builds/failure_status.go | 38 +++++++++----------
 test/extended/builds/gitauth.go | 4 +-
 test/extended/builds/hooks.go | 8 ++--
 test/extended/builds/image_source.go | 20 +++++-----
 test/extended/builds/imagechangetriggers.go | 2 +-
 test/extended/builds/multistage.go | 4 +-
 test/extended/builds/new_app.go | 2 +-
 test/extended/builds/nosrc.go | 2 +-
 test/extended/builds/optimized.go | 4 +-
 test/extended/builds/pipeline_jenkins_e2e.go | 32 ++++++++--------
 test/extended/builds/pipeline_origin_bld.go | 8 ++--
 test/extended/builds/revision.go | 2 +-
 test/extended/builds/run_policy.go | 16 ++++----
 test/extended/builds/s2i_quota.go | 6 +--
 test/extended/builds/s2i_root.go | 12 +++---
 test/extended/builds/service.go | 2 +-
 test/extended/builds/start.go | 24 ++++++------
 test/extended/cli/rsync.go | 2 +-
 test/extended/cluster/audit.go | 8 ++--
 test/extended/cluster/cl.go | 6 +--
 .../image_ecosystem/mongodb_ephemeral.go | 2 +-
 test/extended/image_ecosystem/s2i_perl.go | 8 ++--
 test/extended/image_ecosystem/s2i_php.go | 6 +--
 test/extended/image_ecosystem/s2i_python.go | 10 ++---
 test/extended/image_ecosystem/s2i_ruby.go | 10 ++---
 test/extended/image_ecosystem/sample_repos.go | 2 +-
 test/extended/imageapis/quota_admission.go | 6 +--
 test/extended/images/append.go | 2 +-
 test/extended/images/extract.go | 2 +-
 test/extended/images/hardprune.go | 2 +-
 test/extended/images/helper.go | 6 +--
 test/extended/images/layers.go | 6 +--
 test/extended/images/mirror.go | 4 +-
 test/extended/images/prune.go | 2 +-
 test/extended/machines/machines.go | 2 +-
 test/extended/machines/workers.go | 2 +-
 test/extended/networking/util.go | 10 ++---
 test/extended/operators/cluster.go | 2 +-
 test/extended/operators/operators.go | 8 ++--
 test/extended/prometheus/prometheus.go | 10 ++---
 test/extended/prometheus/prometheus_builds.go | 2 +-
 test/extended/router/config_manager.go | 2 +-
 test/extended/router/headers.go | 6 +--
 test/extended/router/metrics.go | 26 ++++++-------
 test/extended/router/reencrypt.go | 6 +--
 test/extended/router/router.go | 2 +-
 test/extended/router/scoped.go | 2 +-
 test/extended/router/stress.go | 12 +++---
 test/extended/router/unprivileged.go | 2 +-
 test/extended/router/weighted.go | 6 +--
 test/extended/templates/helpers.go | 2 +-
 .../templates/templateinstance_readiness.go | 4 +-
 .../templates/templateservicebroker_bind.go | 2 +-
 test/extended/util/docker.go | 2 +-
 test/extended/util/framework.go | 2 +-
 test/extended/util/jenkins/ref.go | 8 ++--
 test/extended/util/nfs.go | 2 +-
 test/extended/util/statefulsets.go | 2 +-
 test/extended/util/url/url.go | 2 +-
 test/integration/aggregator_test.go | 4 +-
 .../authorization_rbac_proxy_test.go | 2 +-
 test/integration/bootstrap_policy_test.go | 6 +--
 test/integration/buildpod_admission_test.go | 6 +--
 test/integration/clusterquota_test.go | 8 ++--
 test/integration/deploy_defaults_test.go | 2 +-
 test/integration/deploy_scale_test.go | 4 +-
 test/integration/deploy_trigger_test.go | 10 ++---
 test/integration/groups_test.go | 4 +-
 .../imagechange_buildtrigger_test.go | 4 +-
 test/integration/ingressip_test.go | 14 +++----
 test/integration/master_routes_test.go | 4 +-
 test/integration/oauth_disabled_test.go | 2 +-
 .../oauth_serviceaccount_client_test.go | 8 ++--
 test/integration/ownerrefs_test.go | 2 +-
 test/integration/pod_node_constraints_test.go | 2 +-
 test/integration/project_request_test.go | 8 ++--
 test/integration/scopes_test.go | 8 ++--
 test/integration/webhook_test.go | 12 +++---
 test/util/client.go | 2 +-
 test/util/server/server.go | 4 +-
 177 files changed, 545 insertions(+), 544 deletions(-)

diff --git a/pkg/apiserver/authentication/oauth/expirationvalidator_test.go b/pkg/apiserver/authentication/oauth/expirationvalidator_test.go
index 3642da288608..cf5720906f5a 100644
--- a/pkg/apiserver/authentication/oauth/expirationvalidator_test.go
+++ b/pkg/apiserver/authentication/oauth/expirationvalidator_test.go
@@ -30,7 +30,7 @@ func TestAuthenticateTokenExpired(t *testing.T) {
 	)
 	fakeUserClient := userfake.NewSimpleClientset(&userapi.User{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "bar"}})
 
-	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.Oauth().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, NewExpirationValidator())
+	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, NewExpirationValidator())
 
 	for _, tokenName := range []string{"token1", "token2"} {
 		userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), tokenName)
@@ -57,7 +57,7 @@
 	)
 	fakeUserClient := userfake.NewSimpleClientset(&userapi.User{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "bar"}})
 
-	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.Oauth().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, NewExpirationValidator(), NewUIDValidator())
+	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, NewExpirationValidator(), NewUIDValidator())
 
 	userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), "token")
 	if !found {
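The change throughout this patch is mechanical: the generated clientsets' unversioned group accessors (Oauth(), Core(), Apps(), Build(), Image(), Network(), User(), Quota(), Extensions(), Batch(), ...) are deprecated aliases for one concrete API version, and every call site moves to the explicit versioned accessor (OauthV1(), CoreV1(), AppsV1(), ...). A minimal sketch of the preferred spelling against the core fake clientset; the demoPod fixture and main wrapper are illustrative only, and the context-free Get signature matches the client-go vintage this patch targets:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

func main() {
	// Seed the fake clientset with one object, as the tests in this patch do.
	demoPod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo", Namespace: "ns"}}
	client := fake.NewSimpleClientset(demoPod)

	// Deprecated spelling: client.Core().Pods("ns").
	// Preferred spelling pins the API version explicitly:
	pod, err := client.CoreV1().Pods("ns").Get("demo", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(pod.Name)
}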
diff --git a/pkg/apiserver/authentication/oauth/tokenauthenticator_test.go b/pkg/apiserver/authentication/oauth/tokenauthenticator_test.go
index 5c87039b38a9..f24c89618857 100644
--- a/pkg/apiserver/authentication/oauth/tokenauthenticator_test.go
+++ b/pkg/apiserver/authentication/oauth/tokenauthenticator_test.go
@@ -31,7 +31,7 @@ func TestAuthenticateTokenInvalidUID(t *testing.T) {
 	)
 	fakeUserClient := userfake.NewSimpleClientset(&userapi.User{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "bar2"}})
 
-	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.Oauth().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, NewUIDValidator())
+	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{}, NewUIDValidator())
 
 	userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), "token")
 	if found {
@@ -48,7 +48,7 @@
 func TestAuthenticateTokenNotFoundSuppressed(t *testing.T) {
 	fakeOAuthClient := oauthfake.NewSimpleClientset()
 	fakeUserClient := userfake.NewSimpleClientset()
-	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.Oauth().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{})
+	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{})
 
 	userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), "token")
 	if found {
@@ -68,7 +68,7 @@ func TestAuthenticateTokenOtherGetErrorSuppressed(t *testing.T) {
 		return true, nil, errors.New("get error")
 	})
 	fakeUserClient := userfake.NewSimpleClientset()
-	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.Oauth().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{})
+	tokenAuthenticator := NewTokenAuthenticator(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeUserClient.UserV1().Users(), NoopGroupMapper{})
 
 	userInfo, found, err := tokenAuthenticator.AuthenticateToken(context.TODO(), "token")
 	if found {
@@ -137,8 +137,8 @@ func TestAuthenticateTokenTimeout(t *testing.T) {
 	}
 	fakeOAuthClient := oauthfake.NewSimpleClientset(&testToken, &quickToken, &slowToken, &emergToken, &testClient, &quickClient, &slowClient)
 	fakeUserClient := userfake.NewSimpleClientset(&userapi.User{ObjectMeta: metav1.ObjectMeta{Name: "foo", UID: "bar"}})
-	accessTokenGetter := fakeOAuthClient.Oauth().OAuthAccessTokens()
-	oauthClients := fakeOAuthClient.Oauth().OAuthClients()
+	accessTokenGetter := fakeOAuthClient.OauthV1().OAuthAccessTokens()
+	oauthClients := fakeOAuthClient.OauthV1().OAuthClients()
 	lister := &fakeOAuthClientLister{
 		clients: oauthClients,
 	}
diff --git a/pkg/apps/apiserver/registry/deploylog/rest_test.go b/pkg/apps/apiserver/registry/deploylog/rest_test.go
index b5766c9f27ff..be90f518823b 100644
--- a/pkg/apps/apiserver/registry/deploylog/rest_test.go
+++ b/pkg/apps/apiserver/registry/deploylog/rest_test.go
@@ -97,7 +97,7 @@ func mockREST(version, desired int64, status appsv1.DeploymentStatus) *REST {
 	// Used for testing validation errors prior to getting replication controllers.
 	if desired > version {
 		return &REST{
-			dcClient: fakeDn.Apps(),
+			dcClient: fakeDn.AppsV1(),
 			timeout:  defaultTimeout,
 		}
 	}
@@ -151,9 +151,9 @@
 	}
 
 	return &REST{
-		dcClient:  fakeDn.Apps(),
-		rcClient:  fakeRn.Core(),
-		podClient: fakePn.Core(),
+		dcClient:  fakeDn.AppsV1(),
+		rcClient:  fakeRn.CoreV1(),
+		podClient: fakePn.CoreV1(),
 		timeout:   defaultTimeout,
 	}
 }
diff --git a/pkg/apps/apiserver/registry/deploylog/wait_test.go b/pkg/apps/apiserver/registry/deploylog/wait_test.go
index 9a212973da34..a84289f1ac88 100644
--- a/pkg/apps/apiserver/registry/deploylog/wait_test.go
+++ b/pkg/apps/apiserver/registry/deploylog/wait_test.go
@@ -27,7 +27,7 @@ func TestWaitForRunningDeploymentSuccess(t *testing.T) {
 
 	go func() {
 		defer close(stopChan)
-		rc, ok, err := WaitForRunningDeployment(kubeclient.Core(), fakeController, 10*time.Second)
+		rc, ok, err := WaitForRunningDeployment(kubeclient.CoreV1(), fakeController, 10*time.Second)
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
@@ -67,7 +67,7 @@ func TestWaitForRunningDeploymentRestartWatch(t *testing.T) {
 	stopChan := make(chan struct{})
 	go func() {
 		defer close(stopChan)
-		rc, ok, err := WaitForRunningDeployment(kubeclient.Core(), fakeController, 10*time.Second)
+		rc, ok, err := WaitForRunningDeployment(kubeclient.CoreV1(), fakeController, 10*time.Second)
 		if err != nil {
 			t.Fatalf("unexpected error: %v", err)
 		}
diff --git a/pkg/apps/apiserver/registry/instantiate/rest.go b/pkg/apps/apiserver/registry/instantiate/rest.go
index 842e4420e60e..299f56679269 100644
--- a/pkg/apps/apiserver/registry/instantiate/rest.go
+++ b/pkg/apps/apiserver/registry/instantiate/rest.go
@@ -35,7 +35,7 @@ import (
 // NewREST provides new REST storage for the apps API group.
 func NewREST(store registry.Store, imagesclient imageclientinternal.Interface, kc kubernetes.Interface, admission admission.Interface) *REST {
 	store.UpdateStrategy = Strategy
-	return &REST{store: &store, is: imagesclient.Image(), rn: kc.Core(), admit: admission}
+	return &REST{store: &store, is: imagesclient.Image(), rn: kc.CoreV1(), admit: admission}
 }
 
 // REST implements the Creater interface.
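Plumbing kc.CoreV1() through constructors, as NewREST does above, also narrows what the receiving type depends on: a field can be declared as the generated getter interface instead of the whole clientset, so real and fake clients interchange freely. A sketch under that assumption; countRCs and the fake-backed main are illustrative, again with the era-appropriate context-free List signature:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
)

// countRCs needs only the ReplicationControllers getter, which both the real
// clientset's CoreV1() and the fake clientset's CoreV1() satisfy.
func countRCs(rn corev1client.ReplicationControllersGetter, ns string) (int, error) {
	list, err := rn.ReplicationControllers(ns).List(metav1.ListOptions{})
	if err != nil {
		return 0, err
	}
	return len(list.Items), nil
}

func main() {
	client := fake.NewSimpleClientset()
	n, _ := countRCs(client.CoreV1(), "default")
	fmt.Println(n)
}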
diff --git a/pkg/apps/apiserver/registry/instantiate/rest_test.go b/pkg/apps/apiserver/registry/instantiate/rest_test.go
index fe5bab39e6a1..5aa48c3d7db9 100644
--- a/pkg/apps/apiserver/registry/instantiate/rest_test.go
+++ b/pkg/apps/apiserver/registry/instantiate/rest_test.go
@@ -713,7 +713,7 @@ func TestCanTrigger(t *testing.T) {
 
 		test.config = appstest.RoundTripConfig(t, test.config)
 
-		got, gotCauses, err := canTrigger(test.config, client.Core(), test.force)
+		got, gotCauses, err := canTrigger(test.config, client.CoreV1(), test.force)
 		if err != nil && !test.expectedErr {
 			t.Errorf("unexpected error: %v", err)
 			continue
diff --git a/pkg/apps/apiserver/registry/rollback/rest.go b/pkg/apps/apiserver/registry/rollback/rest.go
index d421bab845d6..510c6419e6d2 100644
--- a/pkg/apps/apiserver/registry/rollback/rest.go
+++ b/pkg/apps/apiserver/registry/rollback/rest.go
@@ -36,7 +36,7 @@ func NewREST(appsclient appsclient.Interface, kc kubernetes.Interface) *REST {
 	return &REST{
 		generator: NewRollbackGenerator(),
 		dn:        appsclient.AppsV1(),
-		rn:        kc.Core(),
+		rn:        kc.CoreV1(),
 	}
 }
 
diff --git a/pkg/apps/apiserver/registry/rollback/rest_test.go b/pkg/apps/apiserver/registry/rollback/rest_test.go
index 7f31875ba4a9..a151a73e8447 100644
--- a/pkg/apps/apiserver/registry/rollback/rest_test.go
+++ b/pkg/apps/apiserver/registry/rollback/rest_test.go
@@ -129,8 +129,8 @@ func TestCreateGeneratorError(t *testing.T) {
 
 	rest := REST{
 		generator: &terribleGenerator{},
-		dn:        oc.Apps(),
-		rn:        kc.Core(),
+		dn:        oc.AppsV1(),
+		rn:        kc.CoreV1(),
 	}
 
 	_, err := rest.Create(apirequest.NewDefaultContext(), &appsapi.DeploymentConfigRollback{
diff --git a/pkg/apps/controller/deploymentconfig/deploymentconfig_controller_test.go b/pkg/apps/controller/deploymentconfig/deploymentconfig_controller_test.go
index aed99666ea34..c00136abb85a 100644
--- a/pkg/apps/controller/deploymentconfig/deploymentconfig_controller_test.go
+++ b/pkg/apps/controller/deploymentconfig/deploymentconfig_controller_test.go
@@ -371,10 +371,10 @@ func TestHandleScenarios(t *testing.T) {
 		informer: cache.NewSharedIndexInformer(
 			&cache.ListWatch{
 				ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-					return oc.Apps().DeploymentConfigs(metav1.NamespaceAll).List(options)
+					return oc.AppsV1().DeploymentConfigs(metav1.NamespaceAll).List(options)
 				},
 				WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-					return oc.Apps().DeploymentConfigs(metav1.NamespaceAll).Watch(options)
+					return oc.AppsV1().DeploymentConfigs(metav1.NamespaceAll).Watch(options)
 				},
 			},
 			&appsv1.DeploymentConfig{},
diff --git a/pkg/apps/strategy/recreate/recreate_test.go b/pkg/apps/strategy/recreate/recreate_test.go
index 96984533a398..80e5e3fc8ecb 100644
--- a/pkg/apps/strategy/recreate/recreate_test.go
+++ b/pkg/apps/strategy/recreate/recreate_test.go
@@ -75,7 +75,7 @@ type fakeControllerClient struct {
 }
 
 func (c *fakeControllerClient) ReplicationControllers(ns string) kcoreclient.ReplicationControllerInterface {
-	return c.fakeClient.Core().ReplicationControllers(ns)
+	return c.fakeClient.CoreV1().ReplicationControllers(ns)
 }
 
 func (c *fakeControllerClient) scaledOnce() bool {
@@ -132,7 +132,7 @@ func TestRecreate_initialDeployment(t *testing.T) {
 		out:               &bytes.Buffer{},
 		errOut:            &bytes.Buffer{},
 		getUpdateAcceptor: getUpdateAcceptor,
-		eventClient:       fake.NewSimpleClientset().Core(),
+		eventClient:       fake.NewSimpleClientset().CoreV1(),
 	}
 
 	config := appstest.OkDeploymentConfig(1)
@@ -165,7 +165,7 @@ func TestRecreate_deploymentPreHookSuccess(t *testing.T) {
 		out:               &bytes.Buffer{},
 		errOut:            &bytes.Buffer{},
 		getUpdateAcceptor: getUpdateAcceptor,
-		eventClient:       fake.NewSimpleClientset().Core(),
+		eventClient:       fake.NewSimpleClientset().CoreV1(),
 		rcClient:          controllerClient,
 		scaleClient:       controllerClient.fakeScaleClient(),
 		hookExecutor: &hookExecutorImpl{
@@ -196,7 +196,7 @@ func TestRecreate_deploymentPreHookFail(t *testing.T) {
 		out:               &bytes.Buffer{},
 		errOut:            &bytes.Buffer{},
 		getUpdateAcceptor: getUpdateAcceptor,
-		eventClient:       fake.NewSimpleClientset().Core(),
+		eventClient:       fake.NewSimpleClientset().CoreV1(),
 		rcClient:          controllerClient,
 		scaleClient:       controllerClient.fakeScaleClient(),
 		hookExecutor: &hookExecutorImpl{
@@ -228,7 +228,7 @@ func TestRecreate_deploymentMidHookSuccess(t *testing.T) {
 		errOut:            &bytes.Buffer{},
 		rcClient:          controllerClient,
 		scaleClient:       controllerClient.fakeScaleClient(),
-		eventClient:       fake.NewSimpleClientset().Core(),
+		eventClient:       fake.NewSimpleClientset().CoreV1(),
 		getUpdateAcceptor: getUpdateAcceptor,
 		hookExecutor: &hookExecutorImpl{
 			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
@@ -260,7 +260,7 @@ func TestRecreate_deploymentPostHookSuccess(t *testing.T) {
 		errOut:            &bytes.Buffer{},
 		rcClient:          controllerClient,
 		scaleClient:       controllerClient.fakeScaleClient(),
-		eventClient:       fake.NewSimpleClientset().Core(),
+		eventClient:       fake.NewSimpleClientset().CoreV1(),
 		getUpdateAcceptor: getUpdateAcceptor,
 		hookExecutor: &hookExecutorImpl{
 			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
@@ -292,7 +292,7 @@ func TestRecreate_deploymentPostHookFail(t *testing.T) {
 		errOut:            &bytes.Buffer{},
 		rcClient:          controllerClient,
 		scaleClient:       controllerClient.fakeScaleClient(),
-		eventClient:       fake.NewSimpleClientset().Core(),
+		eventClient:       fake.NewSimpleClientset().CoreV1(),
 		getUpdateAcceptor: getUpdateAcceptor,
 		hookExecutor: &hookExecutorImpl{
 			executeFunc: func(hook *appsv1.LifecycleHook, deployment *corev1.ReplicationController, suffix, label string) error {
@@ -361,7 +361,7 @@ func TestRecreate_acceptorSuccessWithColdCaches(t *testing.T) {
 	strategy := &RecreateDeploymentStrategy{
 		out:         &bytes.Buffer{},
 		errOut:      &bytes.Buffer{},
-		eventClient: fake.NewSimpleClientset().Core(),
+		eventClient: fake.NewSimpleClientset().CoreV1(),
 	}
 
 	acceptorCalled := false
@@ -406,7 +406,7 @@ func TestRecreate_acceptorFail(t *testing.T) {
 	strategy := &RecreateDeploymentStrategy{
 		out:         &bytes.Buffer{},
 		errOut:      &bytes.Buffer{},
-		eventClient: fake.NewSimpleClientset().Core(),
+		eventClient: fake.NewSimpleClientset().CoreV1(),
 	}
 
 	acceptor := &testAcceptor{
diff --git a/pkg/apps/strategy/rolling/rolling_test.go b/pkg/apps/strategy/rolling/rolling_test.go
index 2b9e340d4b36..bca4125eaf6c 100644
--- a/pkg/apps/strategy/rolling/rolling_test.go
+++ b/pkg/apps/strategy/rolling/rolling_test.go
@@ -23,8 +23,8 @@ func TestRolling_deployInitial(t *testing.T) {
 	initialStrategyInvoked := false
 
 	strategy := &RollingDeploymentStrategy{
-		rcClient:    fake.NewSimpleClientset().Core(),
-		eventClient: fake.NewSimpleClientset().Core(),
+		rcClient:    fake.NewSimpleClientset().CoreV1(),
+		eventClient: fake.NewSimpleClientset().CoreV1(),
 		initialStrategy: &testStrategy{
 			deployFn: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
 				initialStrategyInvoked = true
@@ -80,8 +80,8 @@ func TestRolling_deployRolling(t *testing.T) {
 	var rollingConfig *RollingUpdaterConfig
 
 	strategy := &RollingDeploymentStrategy{
-		rcClient:    client.Core(),
-		eventClient: fake.NewSimpleClientset().Core(),
+		rcClient:    client.CoreV1(),
+		eventClient: fake.NewSimpleClientset().CoreV1(),
 		initialStrategy: &testStrategy{
 			deployFn: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
 				t.Fatalf("unexpected call to initial strategy")
@@ -169,8 +169,8 @@ func TestRolling_deployRollingHooks(t *testing.T) {
 	})
 
 	strategy := &RollingDeploymentStrategy{
-		rcClient:    client.Core(),
-		eventClient: fake.NewSimpleClientset().Core(),
+		rcClient:    client.CoreV1(),
+		eventClient: fake.NewSimpleClientset().CoreV1(),
 		initialStrategy: &testStrategy{
 			deployFn: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
 				t.Fatalf("unexpected call to initial strategy")
@@ -230,8 +230,8 @@ func TestRolling_deployInitialHooks(t *testing.T) {
 	var hookError error
 
 	strategy := &RollingDeploymentStrategy{
-		rcClient:    fake.NewSimpleClientset().Core(),
-		eventClient: fake.NewSimpleClientset().Core(),
+		rcClient:    fake.NewSimpleClientset().CoreV1(),
+		eventClient: fake.NewSimpleClientset().CoreV1(),
 		initialStrategy: &testStrategy{
 			deployFn: func(from *corev1.ReplicationController, to *corev1.ReplicationController, desiredReplicas int, updateAcceptor strat.UpdateAcceptor) error {
diff --git a/pkg/apps/strategy/support/lifecycle_test.go b/pkg/apps/strategy/support/lifecycle_test.go
index 3ce6d8d13765..318e4f29a286 100644
--- a/pkg/apps/strategy/support/lifecycle_test.go
+++ b/pkg/apps/strategy/support/lifecycle_test.go
@@ -66,7 +66,7 @@ func TestHookExecutor_executeExecNewCreatePodFailure(t *testing.T) {
 		return true, nil, errors.New("could not create the pod")
 	})
 	executor := &hookExecutor{
-		pods: client.Core(),
+		pods: client.CoreV1(),
 	}
 
 	if err := executor.executeExecNewPod(hook, deployment, "hook", "test"); err == nil {
@@ -111,7 +111,7 @@ func TestHookExecutor_executeExecNewPodSucceeded(t *testing.T) {
 	}()
 
 	executor := &hookExecutor{
-		pods: client.Core(),
+		pods: client.CoreV1(),
 		out:  podLogs,
 		getPodLogs: func(*corev1.Pod) (io.ReadCloser, error) {
 			return ioutil.NopCloser(strings.NewReader("test")), nil
@@ -175,7 +175,7 @@ func TestHookExecutor_executeExecNewPodFailed(t *testing.T) {
 	}()
 
 	executor := &hookExecutor{
-		pods: client.Core(),
+		pods: client.CoreV1(),
 		out:  ioutil.Discard,
 		getPodLogs: func(*corev1.Pod) (io.ReadCloser, error) {
 			return ioutil.NopCloser(strings.NewReader("test")), nil
diff --git a/pkg/authorization/apiserver/admission/restrictusers/restrictusers.go b/pkg/authorization/apiserver/admission/restrictusers/restrictusers.go
index b83ddfd9ec36..1e8c34c5f1a8 100644
--- a/pkg/authorization/apiserver/admission/restrictusers/restrictusers.go
+++ b/pkg/authorization/apiserver/admission/restrictusers/restrictusers.go
@@ -181,7 +181,7 @@ func (q *restrictUsersAdmission) Validate(a admission.Attributes) (err error) {
 	}
 
 	roleBindingRestrictionContext, err := newRoleBindingRestrictionContext(ns,
-		q.kubeClient, q.userClient.User(), q.groupCache)
+		q.kubeClient, q.userClient.UserV1(), q.groupCache)
 	if err != nil {
 		return admission.NewForbidden(a, err)
 	}
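The TestHandleScenarios hunk above rebuilds a shared informer directly on top of the versioned client. The same construction, shown self-contained against CoreV1 pods rather than DeploymentConfigs (the pod type and zero resync period are illustrative choices, not taken from this patch):

package main

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/client-go/tools/cache"
)

func main() {
	client := fake.NewSimpleClientset()

	// List and watch through the versioned accessor; the informer machinery
	// is indifferent to which clientset backs these closures.
	informer := cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
				return client.CoreV1().Pods(metav1.NamespaceAll).List(options)
			},
			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
				return client.CoreV1().Pods(metav1.NamespaceAll).Watch(options)
			},
		},
		&corev1.Pod{},
		0, // no resync
		cache.Indexers{},
	)
	_ = informer
}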
diff --git a/pkg/authorization/apiserver/admission/restrictusers/subjectchecker_test.go b/pkg/authorization/apiserver/admission/restrictusers/subjectchecker_test.go
index 82eb32fd859e..bc5aa2072b9c 100644
--- a/pkg/authorization/apiserver/admission/restrictusers/subjectchecker_test.go
+++ b/pkg/authorization/apiserver/admission/restrictusers/subjectchecker_test.go
@@ -329,7 +329,7 @@ func TestSubjectCheckers(t *testing.T) {
 	}
 
 	ctx, err := newRoleBindingRestrictionContext("namespace",
-		kclient, fakeUserClient.User(), groupCache)
+		kclient, fakeUserClient.UserV1(), groupCache)
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
diff --git a/pkg/build/apiserver/apiserver.go b/pkg/build/apiserver/apiserver.go
index 9b856720d2e4..aa2867e1d3dd 100644
--- a/pkg/build/apiserver/apiserver.go
+++ b/pkg/build/apiserver/apiserver.go
@@ -130,9 +130,9 @@ func (c *completedConfig) newV1RESTStorage() (map[string]rest.Storage, error) {
 		Client: buildgenerator.Client{
 			Builds:       buildClient.BuildV1(),
 			BuildConfigs: buildClient.BuildV1(),
-			ImageStreams:      imageClient.Image(),
-			ImageStreamImages: imageClient.Image(),
-			ImageStreamTags:   imageClient.Image(),
+			ImageStreams:      imageClient.ImageV1(),
+			ImageStreamImages: imageClient.ImageV1(),
+			ImageStreamTags:   imageClient.ImageV1(),
 		},
 		ServiceAccounts: kubeClient.CoreV1(),
 		Secrets:         kubeClient.CoreV1(),
diff --git a/pkg/build/apiserver/registry/buildconfig/webhook_test.go b/pkg/build/apiserver/registry/buildconfig/webhook_test.go
index bfdd01bd4e1d..2eb73ea10c4e 100644
--- a/pkg/build/apiserver/registry/buildconfig/webhook_test.go
+++ b/pkg/build/apiserver/registry/buildconfig/webhook_test.go
@@ -85,7 +85,7 @@ func newStorage() (*WebHook, *buildConfigInstantiator, *buildfake.Clientset) {
 		"errhook": &plugin{Err: webhook.ErrHookNotEnabled},
 		"err":     &plugin{Err: fmt.Errorf("test error")},
 	}
-	hook := newWebHookREST(fakeBuildClient.Build(), nil, bci, buildv1.SchemeGroupVersion, plugins)
+	hook := newWebHookREST(fakeBuildClient.BuildV1(), nil, bci, buildv1.SchemeGroupVersion, plugins)
 
 	return hook, bci, fakeBuildClient
 }
@@ -340,7 +340,7 @@ var mockBuildStrategy = buildv1.BuildStrategy{
 
 func TestParseUrlError(t *testing.T) {
 	responder := &fakeResponder{}
-	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).Build(), nil, &okBuildConfigInstantiator{},
+	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).BuildV1(), nil, &okBuildConfigInstantiator{},
 		buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"github": github.New(), "gitlab": gitlab.New(), "bitbucket": bitbucket.New()}).
 		Connect(apirequest.WithNamespace(apirequest.NewDefaultContext(), testBuildConfig.Namespace), "build100", &kapi.PodProxyOptions{Path: ""}, responder)
 
@@ -359,7 +359,7 @@ func TestParseUrlError(t *testing.T) {
 
 func TestParseUrlOK(t *testing.T) {
 	responder := &fakeResponder{}
-	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).Build(), nil, &okBuildConfigInstantiator{}, buildv1.SchemeGroupVersion,
+	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).BuildV1(), nil, &okBuildConfigInstantiator{}, buildv1.SchemeGroupVersion,
 		map[string]webhook.Plugin{"pathplugin": &pathPlugin{}}).
 		Connect(apirequest.WithNamespace(apirequest.NewDefaultContext(), testBuildConfig.Namespace), "build100", &kapi.PodProxyOptions{Path: "secret101/pathplugin"}, responder)
 	server := httptest.NewServer(handler)
@@ -377,7 +377,7 @@ func TestParseUrlOK(t *testing.T) {
 func TestParseUrlLong(t *testing.T) {
 	plugin := &pathPlugin{}
 	responder := &fakeResponder{}
-	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).Build(), nil, &okBuildConfigInstantiator{},
+	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).BuildV1(), nil, &okBuildConfigInstantiator{},
 		buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"pathplugin": plugin}).
 		Connect(apirequest.WithNamespace(apirequest.NewDefaultContext(), testBuildConfig.Namespace), "build100", &kapi.PodProxyOptions{Path: "secret101/pathplugin/some/more/args"}, responder)
 	server := httptest.NewServer(handler)
@@ -395,7 +395,7 @@ func TestParseUrlLong(t *testing.T) {
 
 func TestInvokeWebhookMissingPlugin(t *testing.T) {
 	responder := &fakeResponder{}
-	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).Build(), nil, &okBuildConfigInstantiator{}, buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"pathplugin": &pathPlugin{}}).
+	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).BuildV1(), nil, &okBuildConfigInstantiator{}, buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"pathplugin": &pathPlugin{}}).
 		Connect(apirequest.WithNamespace(apirequest.NewDefaultContext(), testBuildConfig.Namespace), "build100", &kapi.PodProxyOptions{Path: "secret101/missingplugin"}, responder)
 	server := httptest.NewServer(handler)
 	defer server.Close()
@@ -412,7 +412,7 @@ func TestInvokeWebhookMissingPlugin(t *testing.T) {
 
 func TestInvokeWebhookErrorBuildConfigInstantiate(t *testing.T) {
 	responder := &fakeResponder{}
-	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).Build(), nil, &errorBuildConfigInstantiator{}, buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"pathplugin": &pathPlugin{}}).
+	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).BuildV1(), nil, &errorBuildConfigInstantiator{}, buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"pathplugin": &pathPlugin{}}).
 		Connect(apirequest.WithNamespace(apirequest.NewDefaultContext(), testBuildConfig.Namespace), "build100", &kapi.PodProxyOptions{Path: "secret101/pathplugin"}, responder)
 	server := httptest.NewServer(handler)
 	defer server.Close()
@@ -429,7 +429,7 @@ func TestInvokeWebhookErrorBuildConfigInstantiate(t *testing.T) {
 
 func TestInvokeWebhookErrorGetConfig(t *testing.T) {
 	responder := &fakeResponder{}
-	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).Build(), nil, &okBuildConfigInstantiator{}, buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"pathplugin": &pathPlugin{}}).
+	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).BuildV1(), nil, &okBuildConfigInstantiator{}, buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"pathplugin": &pathPlugin{}}).
 		Connect(apirequest.WithNamespace(apirequest.NewDefaultContext(), testBuildConfig.Namespace), "badbuild100", &kapi.PodProxyOptions{Path: "secret101/pathplugin"}, responder)
 	server := httptest.NewServer(handler)
 	defer server.Close()
@@ -448,7 +448,7 @@ func TestInvokeWebhookErrorGetConfig(t *testing.T) {
 
 func TestInvokeWebhookErrorCreateBuild(t *testing.T) {
 	responder := &fakeResponder{}
-	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).Build(), nil, &okBuildConfigInstantiator{}, buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"errPlugin": &errPlugin{}}).
+	handler, _ := newWebHookREST(buildfake.NewSimpleClientset(testBuildConfig).BuildV1(), nil, &okBuildConfigInstantiator{}, buildv1.SchemeGroupVersion, map[string]webhook.Plugin{"errPlugin": &errPlugin{}}).
 		Connect(apirequest.WithNamespace(apirequest.NewDefaultContext(), testBuildConfig.Namespace), "build100", &kapi.PodProxyOptions{Path: "secret101/errPlugin"}, responder)
 	server := httptest.NewServer(handler)
 	defer server.Close()
diff --git a/pkg/build/apiserver/registry/buildconfiginstantiate/rest_test.go b/pkg/build/apiserver/registry/buildconfiginstantiate/rest_test.go
index 095fad7e7e74..5bd7580ca2f1 100644
--- a/pkg/build/apiserver/registry/buildconfiginstantiate/rest_test.go
+++ b/pkg/build/apiserver/registry/buildconfiginstantiate/rest_test.go
@@ -26,7 +26,7 @@ func TestCreateInstantiate(t *testing.T) {
 		fakeSecrets = append(fakeSecrets, s)
 	}
 	rest := InstantiateREST{&generator.BuildGenerator{
-		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).CoreV1(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: generator.TestingClient{
 			GetBuildConfigFunc: func(ctx context.Context, name string, options *metav1.GetOptions) (*buildv1.BuildConfig, error) {
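Tests earlier in this patch (for example TestAuthenticateTokenOtherGetErrorSuppressed) stub the fake clientset with reactors before swapping in the versioned accessor. A small self-contained version of that pattern; the secrets GET failure here is an invented stand-in for the tests' injected errors:

package main

import (
	"errors"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes/fake"
	clientgotesting "k8s.io/client-go/testing"
)

func main() {
	client := fake.NewSimpleClientset()

	// Intercept every GET on secrets and fail it.
	client.PrependReactor("get", "secrets", func(action clientgotesting.Action) (bool, runtime.Object, error) {
		return true, nil, errors.New("get error")
	})

	_, err := client.CoreV1().Secrets("ns").Get("creds", metav1.GetOptions{})
	fmt.Println(err) // get error
}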
diff --git a/pkg/build/apiserver/registry/buildlog/rest_test.go b/pkg/build/apiserver/registry/buildlog/rest_test.go
index 92089fc96aa9..c23c3ea2d86d 100644
--- a/pkg/build/apiserver/registry/buildlog/rest_test.go
+++ b/pkg/build/apiserver/registry/buildlog/rest_test.go
@@ -138,8 +138,8 @@ func TestWaitForBuild(t *testing.T) {
 		return true, fakeWatcher, nil
 	})
 	storage := REST{
-		BuildClient: buildClient.Build(),
-		PodClient:   newPodClient().Core(),
+		BuildClient: buildClient.BuildV1(),
+		PodClient:   newPodClient().CoreV1(),
 		Timeout:     defaultTimeout,
 	}
 	getSimplePodLogsFn := func(podNamespace, podName string, logOpts *kapi.PodLogOptions) (runtime.Object, error) {
@@ -168,8 +168,8 @@ func TestWaitForBuildTimeout(t *testing.T) {
 	buildClient := buildfakeclient.NewSimpleClientset(build)
 	ctx := apirequest.NewDefaultContext()
 	storage := REST{
-		BuildClient: buildClient.Build(),
-		PodClient:   newPodClient().Core(),
+		BuildClient: buildClient.BuildV1(),
+		PodClient:   newPodClient().CoreV1(),
 		Timeout:     100 * time.Millisecond,
 	}
@@ -184,8 +184,8 @@ func resourceLocationHelper(buildPhase buildv1.BuildPhase, podPhase string, ctx
 	buildClient := buildfakeclient.NewSimpleClientset(expectedBuild)
 
 	storage := &REST{
-		BuildClient: buildClient.Build(),
-		PodClient:   newPodClient().Core(),
+		BuildClient: buildClient.BuildV1(),
+		PodClient:   newPodClient().CoreV1(),
 		Timeout:     defaultTimeout,
 	}
 	actualPodNamespace := ""
@@ -254,8 +254,8 @@ func TestPreviousBuildLogs(t *testing.T) {
 	buildClient := buildfakeclient.NewSimpleClientset(first, second, third)
 
 	storage := &REST{
-		BuildClient: buildClient.Build(),
-		PodClient:   anotherNewPodClient().Core(),
+		BuildClient: buildClient.BuildV1(),
+		PodClient:   anotherNewPodClient().CoreV1(),
 		Timeout:     defaultTimeout,
 	}
 	actualPodNamespace := ""
diff --git a/pkg/build/client/clients.go b/pkg/build/client/clients.go
index eb770d602daf..be2c41ac90c0 100644
--- a/pkg/build/client/clients.go
+++ b/pkg/build/client/clients.go
@@ -33,12 +33,12 @@ func NewClientBuildConfigClient(client buildclient.Interface) *ClientBuildConfig
 
 // Get returns a BuildConfig using the OpenShift client.
 func (c ClientBuildConfigClient) Get(namespace, name string, options metav1.GetOptions) (*buildv1.BuildConfig, error) {
-	return c.Client.Build().BuildConfigs(namespace).Get(name, options)
+	return c.Client.BuildV1().BuildConfigs(namespace).Get(name, options)
 }
 
 // Update updates a BuildConfig using the OpenShift client.
 func (c ClientBuildConfigClient) Update(buildConfig *buildv1.BuildConfig) error {
-	_, err := c.Client.Build().BuildConfigs(buildConfig.Namespace).Update(buildConfig)
+	_, err := c.Client.BuildV1().BuildConfigs(buildConfig.Namespace).Update(buildConfig)
 	return err
 }
 
@@ -74,23 +74,23 @@ func NewClientBuildClient(client buildclient.Interface) *ClientBuildClient {
 
 // Update updates buildclient using the OpenShift client.
 func (c ClientBuildClient) Update(namespace string, build *buildv1.Build) error {
-	_, e := c.Client.Build().Builds(namespace).Update(build)
+	_, e := c.Client.BuildV1().Builds(namespace).Update(build)
 	return e
 }
 
 // Patch patches buildclient using the OpenShift client.
 func (c ClientBuildClient) Patch(namespace, name string, patch []byte) (*buildv1.Build, error) {
-	return c.Client.Build().Builds(namespace).Patch(name, types.StrategicMergePatchType, patch)
+	return c.Client.BuildV1().Builds(namespace).Patch(name, types.StrategicMergePatchType, patch)
 }
 
 // List lists the buildclient using the OpenShift client.
 func (c ClientBuildClient) List(namespace string, opts metav1.ListOptions) (*buildv1.BuildList, error) {
-	return c.Client.Build().Builds(namespace).List(opts)
+	return c.Client.BuildV1().Builds(namespace).List(opts)
 }
 
 // DeleteBuild deletes a build from OpenShift.
 func (c ClientBuildClient) DeleteBuild(build *buildv1.Build) error {
-	return c.Client.Build().Builds(build.Namespace).Delete(build.Name, &metav1.DeleteOptions{})
+	return c.Client.BuildV1().Builds(build.Namespace).Delete(build.Name, &metav1.DeleteOptions{})
 }
 
 // ClientBuildLister implements the build lister interface over a client
@@ -204,7 +204,7 @@ func NewClientBuildClonerClient(client buildclient.Interface) *ClientBuildCloner
 
 // Clone generates new build for given build name
 func (c ClientBuildClonerClient) Clone(namespace string, request *buildv1.BuildRequest) (*buildv1.Build, error) {
-	return c.Client.Build().Builds(namespace).Clone(request.Name, request)
+	return c.Client.BuildV1().Builds(namespace).Clone(request.Name, request)
 }
 
 // BuildConfigInstantiator provides methods for instantiating buildclient from build configs
@@ -224,7 +224,7 @@ func NewClientBuildConfigInstantiatorClient(client buildclient.Interface) *Clien
 
 // Instantiate generates new build for given buildConfig
 func (c ClientBuildConfigInstantiatorClient) Instantiate(namespace string, request *buildv1.BuildRequest) (*buildv1.Build, error) {
-	return c.Client.Build().BuildConfigs(namespace).Instantiate(request.Name, request)
+	return c.Client.BuildV1().BuildConfigs(namespace).Instantiate(request.Name, request)
 }
 
 // TODO: Why we need this, seems like an copy of the client above
diff --git a/pkg/build/controller/build/build_controller_test.go b/pkg/build/controller/build/build_controller_test.go
index 27aecbcf469a..93b07832b2f6 100644
--- a/pkg/build/controller/build/build_controller_test.go
+++ b/pkg/build/controller/build/build_controller_test.go
@@ -495,13 +495,13 @@ func TestCreateBuildPod(t *testing.T) {
 	expected.setMessage("")
 	validateUpdate(t, "create build pod", expected, update)
 	// Make sure that a pod was created
-	pod, err := kubeClient.Core().Pods("namespace").Get(podName, metav1.GetOptions{})
+	pod, err := kubeClient.CoreV1().Pods("namespace").Get(podName, metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
 	// Make sure that a configMap was created, with an ownerRef
 	configMapName := buildapihelpers.GetBuildCAConfigMapName(build)
-	configMap, err := kubeClient.Core().ConfigMaps("namespace").Get(configMapName, metav1.GetOptions{})
+	configMap, err := kubeClient.CoreV1().ConfigMaps("namespace").Get(configMapName, metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
@@ -882,7 +882,7 @@ func TestCancelBuild(t *testing.T) {
 	pod := &corev1.Pod{}
 	pod.Name = "canceltest-build"
 	pod.Namespace = "testns"
-	client := fake.NewSimpleClientset(pod).Core()
+	client := fake.NewSimpleClientset(pod).CoreV1()
 	bc := BuildController{
 		podClient: client,
 	}
diff --git a/pkg/build/controller/common/util_test.go b/pkg/build/controller/common/util_test.go
index 1cc7a5f65de6..3b93b1591390 100644
--- a/pkg/build/controller/common/util_test.go
+++ b/pkg/build/controller/common/util_test.go
@@ -99,13 +99,13 @@ func TestHandleBuildPruning(t *testing.T) {
 	buildClient := buildfake.NewSimpleClientset(objects...)
 
-	build, err := buildClient.Build().Builds("namespace").Get("myapp-0", metav1.GetOptions{})
+	build, err := buildClient.BuildV1().Builds("namespace").Get("myapp-0", metav1.GetOptions{})
 	if err != nil {
 		t.Errorf("%v", err)
 	}
 
-	buildLister := buildclient.NewClientBuildLister(buildClient.Build())
-	buildConfigGetter := buildclient.NewClientBuildConfigLister(buildClient.Build())
+	buildLister := buildclient.NewClientBuildLister(buildClient.BuildV1())
+	buildConfigGetter := buildclient.NewClientBuildConfigLister(buildClient.BuildV1())
 	buildDeleter := buildclient.NewClientBuildClient(buildClient)
 	bcName := buildutil.ConfigNameForBuild(build)
diff --git a/pkg/build/generator/generator_test.go b/pkg/build/generator/generator_test.go
index 4356ddb7d0bb..26a680617f4c 100644
--- a/pkg/build/generator/generator_test.go
+++ b/pkg/build/generator/generator_test.go
@@ -240,7 +240,7 @@ func TestInstantiateGenerateBuildError(t *testing.T) {
 		fakeSecrets = append(fakeSecrets, s)
 	}
 	generator := BuildGenerator{
-		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).CoreV1(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: TestingClient{
 			GetBuildConfigFunc: func(ctx context.Context, name string, options *metav1.GetOptions) (*buildv1.BuildConfig, error) {
@@ -1027,7 +1027,7 @@ func TestGenerateBuildWithImageTagForSourceStrategyImageRepository(t *testing.T)
 	}
 	is := mocks.MockImageStream("", originalImage, map[string]string{tagName: newTag})
 	generator := BuildGenerator{
-		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).CoreV1(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: TestingClient{
 			GetImageStreamFunc: func(ctx context.Context, name string, options *metav1.GetOptions) (*imagev1.ImageStream, error) {
@@ -1098,7 +1098,7 @@ func TestGenerateBuildWithImageTagForDockerStrategyImageRepository(t *testing.T)
 	}
 	is := mocks.MockImageStream("", originalImage, map[string]string{tagName: newTag})
 	generator := BuildGenerator{
-		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).CoreV1(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: TestingClient{
 			GetImageStreamFunc: func(ctx context.Context, name string, options *metav1.GetOptions) (*imagev1.ImageStream, error) {
@@ -1168,7 +1168,7 @@ func TestGenerateBuildWithImageTagForCustomStrategyImageRepository(t *testing.T)
 	}
 	is := mocks.MockImageStream("", originalImage, map[string]string{tagName: newTag})
 	generator := BuildGenerator{
-		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).CoreV1(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: TestingClient{
 			GetImageStreamFunc: func(ctx context.Context, name string, options *metav1.GetOptions) (*imagev1.ImageStream, error) {
@@ -1805,7 +1805,7 @@ func mockBuildGenerator(buildConfigFunc func(ctx context.Context, name string, o
 	}
 	b := buildv1.Build{}
 	return &BuildGenerator{
-		Secrets:         fake.NewSimpleClientset(fakeSecrets...).Core(),
+		Secrets:         fake.NewSimpleClientset(fakeSecrets...).CoreV1(),
 		ServiceAccounts: mocks.MockBuilderServiceAccount(mocks.MockBuilderSecrets()),
 		Client: TestingClient{
 			GetBuildConfigFunc: getBuildConfigFunc(buildConfigFunc),
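The clients.go hunks above show the wrapper style this migration preserves: the struct keeps the whole typed Interface, while every method body goes through the versioned accessor. A compact analogue over ConfigMaps (ConfigMapClient is a hypothetical type for illustration, not one from this patch):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// ConfigMapClient wraps the full clientset but always calls through CoreV1().
type ConfigMapClient struct{ Client kubernetes.Interface }

func (c ConfigMapClient) Get(namespace, name string) (*corev1.ConfigMap, error) {
	return c.Client.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
}

func (c ConfigMapClient) Update(cm *corev1.ConfigMap) error {
	_, err := c.Client.CoreV1().ConfigMaps(cm.Namespace).Update(cm)
	return err
}

func main() {
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "cfg", Namespace: "ns"}}
	c := ConfigMapClient{Client: fake.NewSimpleClientset(cm)}
	got, _ := c.Get("ns", "cfg")
	fmt.Println(got.Name)
}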
diff --git a/pkg/build/generator/test/mocks.go b/pkg/build/generator/test/mocks.go
index 264fc47bdeb6..3b8826c40f80 100644
--- a/pkg/build/generator/test/mocks.go
+++ b/pkg/build/generator/test/mocks.go
@@ -67,7 +67,7 @@ func MockBuilderServiceAccount(secrets []*corev1.Secret) corev1client.ServiceAcc
 		},
 		Secrets: secretRefs,
 	})
-	return fake.NewSimpleClientset(fakeObjects...).Core()
+	return fake.NewSimpleClientset(fakeObjects...).CoreV1()
 }
 
 func MockBuildConfig(source buildv1.BuildSource, strategy buildv1.BuildStrategy, output buildv1.BuildOutput) *buildv1.BuildConfig {
diff --git a/pkg/cmd/openshift-controller-manager/controller/image.go b/pkg/cmd/openshift-controller-manager/controller/image.go
index 978feed1fc17..4c75ea5a160d 100644
--- a/pkg/cmd/openshift-controller-manager/controller/image.go
+++ b/pkg/cmd/openshift-controller-manager/controller/image.go
@@ -58,7 +58,7 @@ func RunImageTriggerController(ctx *ControllerContext) (bool, error) {
 		Informer:  ctx.BuildInformers.Build().V1().BuildConfigs().Informer(),
 		Store:     ctx.BuildInformers.Build().V1().BuildConfigs().Informer().GetIndexer(),
 		TriggerFn: triggerbuildconfigs.NewBuildConfigTriggerIndexer,
-		Reactor:   triggerbuildconfigs.NewBuildConfigReactor(bcInstantiator, kclient.Core().RESTClient()),
+		Reactor:   triggerbuildconfigs.NewBuildConfigReactor(bcInstantiator, kclient.CoreV1().RESTClient()),
 	})
 	sources = append(sources, imagetriggercontroller.TriggerSource{
 		Resource: schema.GroupResource{Group: "extensions", Resource: "deployments"},
@@ -105,10 +105,10 @@ type podSpecUpdater struct
 func (u podSpecUpdater) Update(obj runtime.Object) error {
 	switch t := obj.(type) {
 	case *kextensionsv1beta1.DaemonSet:
-		_, err := u.kclient.Extensions().DaemonSets(t.Namespace).Update(t)
+		_, err := u.kclient.ExtensionsV1beta1().DaemonSets(t.Namespace).Update(t)
 		return err
 	case *kextensionsv1beta1.Deployment:
-		_, err := u.kclient.Extensions().Deployments(t.Namespace).Update(t)
+		_, err := u.kclient.ExtensionsV1beta1().Deployments(t.Namespace).Update(t)
 		return err
 	case *kappsv1beta1.Deployment:
 		_, err := u.kclient.AppsV1beta1().Deployments(t.Namespace).Update(t)
 		return err
@@ -123,7 +123,7 @@ func (u podSpecUpdater) Update(obj runtime.Object) error {
 		_, err := u.kclient.AppsV1beta2().StatefulSets(t.Namespace).Update(t)
 		return err
 	case *kbatchv1.Job:
-		_, err := u.kclient.Batch().Jobs(t.Namespace).Update(t)
+		_, err := u.kclient.BatchV1().Jobs(t.Namespace).Update(t)
 		return err
 	case *kbatchv1beta1.CronJob:
 		_, err := u.kclient.BatchV1beta1().CronJobs(t.Namespace).Update(t)
@@ -132,7 +132,7 @@ func (u podSpecUpdater) Update(obj runtime.Object) error {
 		_, err := u.kclient.BatchV2alpha1().CronJobs(t.Namespace).Update(t)
 		return err
 	case *kapiv1.Pod:
-		_, err := u.kclient.Core().Pods(t.Namespace).Update(t)
+		_, err := u.kclient.CoreV1().Pods(t.Namespace).Update(t)
 		return err
 	default:
 		return fmt.Errorf("unrecognized object - no trigger update possible for %T", obj)
diff --git a/pkg/cmd/openshift-controller-manager/controller/quota.go b/pkg/cmd/openshift-controller-manager/controller/quota.go
index fa2fdcdc2ada..923e4701dafc 100644
--- a/pkg/cmd/openshift-controller-manager/controller/quota.go
+++ b/pkg/cmd/openshift-controller-manager/controller/quota.go
@@ -25,11 +25,11 @@ func RunResourceQuotaManager(ctx *ControllerContext) (bool, error) {
 	imageEvaluators := image.NewReplenishmentEvaluators(
 		listerFuncForResource,
 		ctx.ImageInformers.Image().V1().ImageStreams(),
-		ctx.ClientBuilder.OpenshiftImageClientOrDie(saName).Image())
+		ctx.ClientBuilder.OpenshiftImageClientOrDie(saName).ImageV1())
 	resourceQuotaRegistry := generic.NewRegistry(imageEvaluators)
 
 	resourceQuotaControllerOptions := &kresourcequota.ResourceQuotaControllerOptions{
-		QuotaClient:           ctx.ClientBuilder.ClientOrDie(saName).Core(),
+		QuotaClient:           ctx.ClientBuilder.ClientOrDie(saName).CoreV1(),
 		ResourceQuotaInformer: ctx.KubernetesInformers.Core().V1().ResourceQuotas(),
 		ResyncPeriod:          controller.StaticResyncPeriodFunc(resourceQuotaSyncPeriod),
 		Registry:              resourceQuotaRegistry,
@@ -66,7 +66,7 @@ func RunClusterQuotaReconciliationController(ctx *ControllerContext) (bool, erro
 	imageEvaluators := image.NewReplenishmentEvaluators(
 		listerFuncForResource,
 		ctx.ImageInformers.Image().V1().ImageStreams(),
-		ctx.ClientBuilder.OpenshiftImageClientOrDie(saName).Image())
+		ctx.ClientBuilder.OpenshiftImageClientOrDie(saName).ImageV1())
 	for i := range imageEvaluators {
 		resourceQuotaRegistry.Add(imageEvaluators[i])
 	}
@@ -74,7 +74,7 @@ func RunClusterQuotaReconciliationController(ctx *ControllerContext) (bool, erro
 	options := clusterquotareconciliation.ClusterQuotaReconcilationControllerOptions{
 		ClusterQuotaInformer: ctx.QuotaInformers.Quota().V1().ClusterResourceQuotas(),
 		ClusterQuotaMapper:   clusterQuotaMappingController.GetClusterQuotaMapper(),
-		ClusterQuotaClient:   ctx.ClientBuilder.OpenshiftQuotaClientOrDie(saName).Quota().ClusterResourceQuotas(),
+		ClusterQuotaClient:   ctx.ClientBuilder.OpenshiftQuotaClientOrDie(saName).QuotaV1().ClusterResourceQuotas(),
 
 		Registry:     resourceQuotaRegistry,
 		ResyncPeriod: defaultResyncPeriod,
diff --git a/pkg/cmd/openshift-controller-manager/controller/unidling.go b/pkg/cmd/openshift-controller-manager/controller/unidling.go
index f5b9d519e1ab..dc49abcddfd3 100644
--- a/pkg/cmd/openshift-controller-manager/controller/unidling.go
+++ b/pkg/cmd/openshift-controller-manager/controller/unidling.go
@@ -29,7 +29,7 @@ func RunUnidlingController(ctx *ControllerContext) (bool, error) {
 		return false, err
 	}
 
-	coreClient := ctx.ClientBuilder.ClientOrDie(bootstrappolicy.InfraUnidlingControllerServiceAccountName).Core()
+	coreClient := ctx.ClientBuilder.ClientOrDie(bootstrappolicy.InfraUnidlingControllerServiceAccountName).CoreV1()
 	controller := unidlingcontroller.NewUnidlingController(
 		scaleClient,
 		ctx.RestMapper,
diff --git a/pkg/cmd/openshift-kube-apiserver/openshiftkubeapiserver/patch_authenticator.go b/pkg/cmd/openshift-kube-apiserver/openshiftkubeapiserver/patch_authenticator.go
index 4f1e5a539b62..548373f16d20 100644
--- a/pkg/cmd/openshift-kube-apiserver/openshiftkubeapiserver/patch_authenticator.go
+++ b/pkg/cmd/openshift-kube-apiserver/openshiftkubeapiserver/patch_authenticator.go
@@ -74,7 +74,7 @@ func NewAuthenticator(
 		oauthClient.OAuthAccessTokens(),
 		oauthClientLister,
 		serviceAccountTokenGetter,
-		userClient.User().Users(),
+		userClient.UserV1().Users(),
 		servingInfo.ClientCA,
 		usercache.NewGroupCache(groupInformer),
 		bootstrap.NewBootstrapUserDataGetter(kubeExternalClient.CoreV1(), kubeExternalClient.CoreV1()),
diff --git a/pkg/image/apiserver/apiserver.go b/pkg/image/apiserver/apiserver.go
index ceea6c70bd42..afd0e0aa89e0 100644
--- a/pkg/image/apiserver/apiserver.go
+++ b/pkg/image/apiserver/apiserver.go
@@ -225,7 +225,7 @@ func (c *completedConfig) newV1RESTStorage() (map[string]rest.Storage, error) {
 		whitelister = whitelist.WhitelistAllRegistries()
 	}
 
-	imageLayerIndex := imagestreametcd.NewImageLayerIndex(imageV1Client.Image().Images())
+	imageLayerIndex := imagestreametcd.NewImageLayerIndex(imageV1Client.ImageV1().Images())
 	c.ExtraConfig.startFns = append(c.ExtraConfig.startFns, imageLayerIndex.Run)
 
 	imageRegistry := image.NewRegistry(imageStorage)
diff --git a/pkg/image/apiserver/registry/imagesecret/rest_test.go b/pkg/image/apiserver/registry/imagesecret/rest_test.go
index 0c57a5d15607..73f5b3ee079d 100644
--- a/pkg/image/apiserver/registry/imagesecret/rest_test.go
+++ b/pkg/image/apiserver/registry/imagesecret/rest_test.go
@@ -37,7 +37,7 @@ func TestGetSecrets(t *testing.T) {
 			},
 		},
 	})
-	rest := NewREST(fake.Core())
+	rest := NewREST(fake.CoreV1())
 	opts, _, _ := rest.NewGetOptions()
 	obj, err := rest.Get(apirequest.NewDefaultContext(), "", opts)
 	if err != nil {
diff --git a/pkg/image/apiserver/registryhostname/environmentresolvercache_test.go b/pkg/image/apiserver/registryhostname/environmentresolvercache_test.go
index 29f5a83bef07..aebf8cdc3b03 100644
--- a/pkg/image/apiserver/registryhostname/environmentresolvercache_test.go
+++ b/pkg/image/apiserver/registryhostname/environmentresolvercache_test.go
@@ -19,7 +19,7 @@ func TestServiceResolverCacheEmpty(t *testing.T) {
 			Ports: []corev1.ServicePort{{Port: 80}},
 		},
 	})
-	cache := newServiceResolverCache(fakeClient.Core().Services("default").Get)
+	cache := newServiceResolverCache(fakeClient.CoreV1().Services("default").Get)
 	if v, ok := cache.resolve("FOO_SERVICE_HOST"); v != "" || !ok {
 		t.Errorf("unexpected cache item")
 	}
diff --git a/pkg/image/controller/signature/signature_import_controller.go b/pkg/image/controller/signature/signature_import_controller.go
index 6e3dd2e45c93..76e7af09cd46 100644
--- a/pkg/image/controller/signature/signature_import_controller.go
+++ b/pkg/image/controller/signature/signature_import_controller.go
@@ -183,6 +183,6 @@ func (s *SignatureImportController) syncImageSignatures(key string) error {
 	}
 
 	glog.V(4).Infof("Image %s now has %d signatures", newImage.Name, len(newImage.Signatures))
-	_, err = s.imageClient.Image().Images().Update(newImage)
+	_, err = s.imageClient.ImageV1().Images().Update(newImage)
 	return err
 }
diff --git a/pkg/image/controller/trigger/image_trigger_controller_test.go b/pkg/image/controller/trigger/image_trigger_controller_test.go
index 9b6cadde045d..24b9898361f2 100644
--- a/pkg/image/controller/trigger/image_trigger_controller_test.go
+++ b/pkg/image/controller/trigger/image_trigger_controller_test.go
@@ -182,8 +182,8 @@ func fakeBuildConfigInstantiator(buildcfg *buildv1.BuildConfig, imageStream *ima
 	instantiator := &fakeInstantiator{}
 	instantiator.buildConfigUpdater = &fakeBuildConfigUpdater{}
 	generator := &buildgenerator.BuildGenerator{
-		Secrets:         fake.NewSimpleClientset().Core(),
-		ServiceAccounts: fake.NewSimpleClientset(&builderAccount).Core(),
+		Secrets:         fake.NewSimpleClientset().CoreV1(),
+		ServiceAccounts: fake.NewSimpleClientset(&builderAccount).CoreV1(),
 		Client: buildgenerator.TestingClient{
 			GetBuildConfigFunc: func(ctx context.Context, name string, options *metav1.GetOptions) (*buildv1.BuildConfig, error) {
 				return buildcfg, nil
diff --git a/pkg/image/trigger/deploymentconfigs/deploymentconfigs_test.go b/pkg/image/trigger/deploymentconfigs/deploymentconfigs_test.go
index 11fe5774626a..cd2d6975697e 100644
--- a/pkg/image/trigger/deploymentconfigs/deploymentconfigs_test.go
+++ b/pkg/image/trigger/deploymentconfigs/deploymentconfigs_test.go
@@ -397,7 +397,7 @@ func TestDeploymentConfigReactor(t *testing.T) {
 				return true, test.response, nil
 			})
 		}
-		r := DeploymentConfigReactor{Client: c.Apps()}
+		r := DeploymentConfigReactor{Client: c.AppsV1()}
 		initial := test.obj.DeepCopy()
 		err := r.ImageChanged(test.obj, fakeTagRetriever(test.tags))
 		if !kapihelper.Semantic.DeepEqual(initial, test.obj) {
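The podSpecUpdater hunks in controller/image.go dispatch on the concrete object type and call the matching versioned Update. A trimmed, runnable sketch of that switch, covering only three of the patch's cases and exercised via the fake clientset:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// update dispatches on the concrete type and always uses the versioned client.
func update(kclient kubernetes.Interface, obj runtime.Object) error {
	switch t := obj.(type) {
	case *appsv1.Deployment:
		_, err := kclient.AppsV1().Deployments(t.Namespace).Update(t)
		return err
	case *batchv1.Job:
		_, err := kclient.BatchV1().Jobs(t.Namespace).Update(t)
		return err
	case *corev1.Pod:
		_, err := kclient.CoreV1().Pods(t.Namespace).Update(t)
		return err
	default:
		return fmt.Errorf("no update possible for %T", obj)
	}
}

func main() {
	pod := &corev1.Pod{}
	pod.Name, pod.Namespace = "p", "ns"
	client := fake.NewSimpleClientset(pod)
	fmt.Println(update(client, pod))
}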
diff --git a/pkg/monitor/event.go b/pkg/monitor/event.go
index f2225ec418e1..3dc860604039 100644
--- a/pkg/monitor/event.go
+++ b/pkg/monitor/event.go
@@ -19,13 +19,13 @@ func startEventMonitoring(ctx context.Context, m Recorder, client kubernetes.Int
 				return
 			default:
 			}
-			events, err := client.Core().Events("").List(metav1.ListOptions{Limit: 1})
+			events, err := client.CoreV1().Events("").List(metav1.ListOptions{Limit: 1})
 			if err != nil {
 				continue
 			}
 			rv := events.ResourceVersion
 			for {
-				w, err := client.Core().Events("").Watch(metav1.ListOptions{ResourceVersion: rv})
+				w, err := client.CoreV1().Events("").Watch(metav1.ListOptions{ResourceVersion: rv})
 				if err != nil {
 					if errors.IsResourceExpired(err) {
 						break
diff --git a/pkg/network/common/common.go b/pkg/network/common/common.go
index ed11c70d4f15..4b5e6d55fc14 100644
--- a/pkg/network/common/common.go
+++ b/pkg/network/common/common.go
@@ -158,7 +158,7 @@ func (ni *NetworkInfo) CheckClusterObjects(subnets []networkapi.HostSubnet, pods
 }
 
 func GetNetworkInfo(networkClient networkclient.Interface) (*NetworkInfo, error) {
-	cn, err := networkClient.Network().ClusterNetworks().Get(networkapi.ClusterNetworkDefault, v1.GetOptions{})
+	cn, err := networkClient.NetworkV1().ClusterNetworks().Get(networkapi.ClusterNetworkDefault, v1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/network/master/egressip.go b/pkg/network/master/egressip.go
index 528d69d2a2c6..e51b8c751cd9 100644
--- a/pkg/network/master/egressip.go
+++ b/pkg/network/master/egressip.go
@@ -105,7 +105,7 @@ func (eim *egressIPManager) maybeDoUpdateEgressCIDRs() (bool, error) {
 			newIPs := sets.NewString(egressIPs...)
 			if !oldIPs.Equal(newIPs) {
 				hs.EgressIPs = egressIPs
-				_, err = eim.networkClient.Network().HostSubnets().Update(hs)
+				_, err = eim.networkClient.NetworkV1().HostSubnets().Update(hs)
 			}
 			return err
 		})
diff --git a/pkg/network/master/master.go b/pkg/network/master/master.go
index 4dcbfa3f6d6d..5510ceeda464 100644
--- a/pkg/network/master/master.go
+++ b/pkg/network/master/master.go
@@ -106,7 +106,7 @@ func Start(networkConfig openshiftcontrolplanev1.NetworkControllerConfig, networ
 	err = wait.PollImmediate(1*time.Second, time.Minute, func() (bool, error) {
 		// reset this so that failures come through correctly.
 		getError = nil
-		existingCN, err := master.networkClient.Network().ClusterNetworks().Get(networkapi.ClusterNetworkDefault, metav1.GetOptions{})
+		existingCN, err := master.networkClient.NetworkV1().ClusterNetworks().Get(networkapi.ClusterNetworkDefault, metav1.GetOptions{})
 		if err != nil {
 			if !kapierrors.IsNotFound(err) {
 				// the first request can fail on permissions
@@ -117,7 +117,7 @@ func Start(networkConfig openshiftcontrolplanev1.NetworkControllerConfig, networ
 				return false, err
 			}
 
-			if _, err = master.networkClient.Network().ClusterNetworks().Create(configCN); err != nil {
+			if _, err = master.networkClient.NetworkV1().ClusterNetworks().Create(configCN); err != nil {
 				return false, err
 			}
 			glog.Infof("Created ClusterNetwork %s", common.ClusterNetworkToString(configCN))
@@ -137,7 +137,7 @@ func Start(networkConfig openshiftcontrolplanev1.NetworkControllerConfig, networ
 				utilruntime.HandleError(fmt.Errorf("Attempting to modify cluster to exclude existing objects: %v", err))
 				return false, err
 			}
-			if _, err = master.networkClient.Network().ClusterNetworks().Update(configCN); err != nil {
+			if _, err = master.networkClient.NetworkV1().ClusterNetworks().Update(configCN); err != nil {
 				return false, err
 			}
 			glog.Infof("Updated ClusterNetwork %s", common.ClusterNetworkToString(configCN))
@@ -209,13 +209,13 @@ func (master *OsdnMaster) checkClusterNetworkAgainstClusterObjects() error {
 	var subnets []networkapi.HostSubnet
 	var pods []kapi.Pod
 	var services []kapi.Service
-	if subnetList, err := master.networkClient.Network().HostSubnets().List(metav1.ListOptions{}); err == nil {
+	if subnetList, err := master.networkClient.NetworkV1().HostSubnets().List(metav1.ListOptions{}); err == nil {
 		subnets = subnetList.Items
 	}
-	if podList, err := master.kClient.Core().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
+	if podList, err := master.kClient.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
 		pods = podList.Items
 	}
-	if serviceList, err := master.kClient.Core().Services(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
+	if serviceList, err := master.kClient.CoreV1().Services(metav1.NamespaceAll).List(metav1.ListOptions{}); err == nil {
 		services = serviceList.Items
 	}
 
diff --git a/pkg/network/master/subnet_allocator.go b/pkg/network/master/subnet_allocator.go
index 991a44d92440..876a89fffa66 100644
--- a/pkg/network/master/subnet_allocator.go
+++ b/pkg/network/master/subnet_allocator.go
@@ -161,7 +161,7 @@ func (master *OsdnMaster) initSubnetAllocators() error {
 	}
 
 	// Populate subnet allocator
-	subnets, err := master.networkClient.Network().HostSubnets().List(metav1.ListOptions{})
+	subnets, err := master.networkClient.NetworkV1().HostSubnets().List(metav1.ListOptions{})
 	if err != nil {
 		return err
 	}
diff --git a/pkg/network/master/subnets.go b/pkg/network/master/subnets.go
index 21a56cf61d43..a6a65ae8ccd5 100644
--- a/pkg/network/master/subnets.go
+++ b/pkg/network/master/subnets.go
@@ -85,18 +85,18 @@ func (master *OsdnMaster) addNode(nodeName string, nodeUID string, nodeIP string
 	}
 
 	// Check if subnet needs to be created or updated
-	sub, err := master.networkClient.Network().HostSubnets().Get(nodeName, metav1.GetOptions{})
+	sub, err := master.networkClient.NetworkV1().HostSubnets().Get(nodeName, metav1.GetOptions{})
 	if err == nil {
 		if err = common.ValidateHostSubnet(sub); err != nil {
 			utilruntime.HandleError(fmt.Errorf("Deleting invalid HostSubnet %q: %v", nodeName, err))
-			_ = master.networkClient.Network().HostSubnets().Delete(nodeName, &metav1.DeleteOptions{})
+			_ = master.networkClient.NetworkV1().HostSubnets().Delete(nodeName, &metav1.DeleteOptions{})
 			// fall through to create new subnet below
 		} else if sub.HostIP == nodeIP {
 			return nodeIP, nil
 		} else {
 			// Node IP changed, update old subnet
 			sub.HostIP = nodeIP
-			sub, err = master.networkClient.Network().HostSubnets().Update(sub)
+			sub, err = master.networkClient.NetworkV1().HostSubnets().Update(sub)
 			if err != nil {
 				return "", fmt.Errorf("error updating subnet %s for node %s: %v", sub.Subnet, nodeName, err)
 			}
@@ -123,7 +123,7 @@
 		HostIP: nodeIP,
 		Subnet: network,
 	}
-	sub, err = master.networkClient.Network().HostSubnets().Create(sub)
+	sub, err = master.networkClient.NetworkV1().HostSubnets().Create(sub)
 	if err != nil {
 		if er := master.releaseNetwork(network); er != nil {
 			utilruntime.HandleError(er)
@@ -142,7 +142,7 @@ func (master *OsdnMaster) deleteNode(nodeName string) error {
 	if sub, err := master.hostSubnetInformer.Lister().Get(nodeName); err == nil {
 		subInfo = common.HostSubnetToString(sub)
 	}
-	if err := master.networkClient.Network().HostSubnets().Delete(nodeName, &metav1.DeleteOptions{}); err != nil {
+	if err := master.networkClient.NetworkV1().HostSubnets().Delete(nodeName, &metav1.DeleteOptions{}); err != nil {
 		return fmt.Errorf("error deleting subnet for node %q: %v", nodeName, err)
 	}
 
@@ -180,7 +180,7 @@ func (master *OsdnMaster) clearInitialNodeNetworkUnavailableCondition(origNode *
 			condition.Message = "openshift-sdn cleared kubelet-set NoRouteCreated"
 			condition.LastTransitionTime = metav1.Now()
 
-			if knode, err = master.kClient.Core().Nodes().UpdateStatus(knode); err == nil {
+			if knode, err = master.kClient.CoreV1().Nodes().UpdateStatus(knode); err == nil {
 				cleared = true
 			}
 		}
@@ -258,7 +258,7 @@ func (master *OsdnMaster) reconcileHostSubnet(subnet *networkapi.HostSubnet) err
 	var err error
 	node, err = master.nodeInformer.Lister().Get(subnet.Name)
 	if err != nil {
		node, err = master.kClient.CoreV1().Nodes().Get(subnet.Name, metav1.GetOptions{})
 		if err != nil {
 			if kerrs.IsNotFound(err) {
 				node = nil
@@ -278,19 +278,19 @@ func (master *OsdnMaster) reconcileHostSubnet(subnet *networkapi.HostSubnet) err
 			sn.Annotations = make(map[string]string)
 		}
 		sn.Annotations[networkapi.NodeUIDAnnotation] = string(node.UID)
-		if _, err = master.networkClient.Network().HostSubnets().Update(sn); err != nil {
+		if _, err = master.networkClient.NetworkV1().HostSubnets().Update(sn); err != nil {
 			return fmt.Errorf("error updating subnet %v for node %s: %v", sn, sn.Name, err)
 		}
 	} else if node == nil && len(subnet.Annotations[networkapi.NodeUIDAnnotation]) > 0 {
 		// Missed Node event, delete stale subnet.
 		glog.Infof("Setup found no node associated with hostsubnet %s, deleting the hostsubnet", subnet.Name)
-		if err = master.networkClient.Network().HostSubnets().Delete(subnet.Name, &metav1.DeleteOptions{}); err != nil {
+		if err = master.networkClient.NetworkV1().HostSubnets().Delete(subnet.Name, &metav1.DeleteOptions{}); err != nil {
 			return fmt.Errorf("error deleting subnet %v: %v", subnet, err)
 		}
 	} else if string(node.UID) != subnet.Annotations[networkapi.NodeUIDAnnotation] {
 		// Missed Node event, node with the same name exists delete stale subnet.
glog.Infof("Missed node event, hostsubnet %s has the UID of an incorrect object, deleting the hostsubnet", subnet.Name) - if err = master.networkClient.Network().HostSubnets().Delete(subnet.Name, &metav1.DeleteOptions{}); err != nil { + if err = master.networkClient.NetworkV1().HostSubnets().Delete(subnet.Name, &metav1.DeleteOptions{}); err != nil { return fmt.Errorf("error deleting subnet %v: %v", subnet, err) } } @@ -305,7 +305,7 @@ func (master *OsdnMaster) handleAssignHostSubnetAnnotation(hs *networkapi.HostSu // will skip the event if it finds that the hostsubnet has the same host // And we cannot fix the watchSubnets code for node because it will break migration if // nodes are upgraded after the master - if err := master.networkClient.Network().HostSubnets().Delete(hs.Name, &metav1.DeleteOptions{}); err != nil { + if err := master.networkClient.NetworkV1().HostSubnets().Delete(hs.Name, &metav1.DeleteOptions{}); err != nil { return fmt.Errorf("error in deleting annotated subnet: %s, %v", hs.Name, err) } glog.Infof("Deleted HostSubnet not backed by node: %s", common.HostSubnetToString(hs)) diff --git a/pkg/network/master/vnids.go b/pkg/network/master/vnids.go index ac10523fe04e..b13523bb5bf4 100644 --- a/pkg/network/master/vnids.go +++ b/pkg/network/master/vnids.go @@ -212,7 +212,7 @@ func (vmap *masterVNIDMap) assignVNID(networkClient networkclient.Interface, nsN NetName: nsName, NetID: netid, } - if _, err := networkClient.Network().NetNamespaces().Create(netns); err != nil { + if _, err := networkClient.NetworkV1().NetNamespaces().Create(netns); err != nil { if er := vmap.releaseNetID(nsName); er != nil { utilruntime.HandleError(er) } @@ -227,7 +227,7 @@ func (vmap *masterVNIDMap) revokeVNID(networkClient networkclient.Interface, nsN defer vmap.lock.Unlock() // Delete NetNamespace object - if err := networkClient.Network().NetNamespaces().Delete(nsName, &metav1.DeleteOptions{}); err != nil { + if err := networkClient.NetworkV1().NetNamespaces().Delete(nsName, &metav1.DeleteOptions{}); err != nil { return err } @@ -247,7 +247,7 @@ func (vmap *masterVNIDMap) updateVNID(networkClient networkclient.Interface, ori return nil } else if !vmap.allowRenumbering { networkapihelpers.DeleteChangePodNetworkAnnotation(netns) - _, _ = networkClient.Network().NetNamespaces().Update(netns) + _, _ = networkClient.NetworkV1().NetNamespaces().Update(netns) return fmt.Errorf("network plugin does not allow NetNamespace renumbering") } @@ -261,7 +261,7 @@ func (vmap *masterVNIDMap) updateVNID(networkClient networkclient.Interface, ori netns.NetID = netid networkapihelpers.DeleteChangePodNetworkAnnotation(netns) - if _, err := networkClient.Network().NetNamespaces().Update(netns); err != nil { + if _, err := networkClient.NetworkV1().NetNamespaces().Update(netns); err != nil { return err } return nil @@ -281,7 +281,7 @@ func (master *OsdnMaster) startVNIDMaster() error { } func (master *OsdnMaster) initNetIDAllocator() error { - netnsList, err := master.networkClient.Network().NetNamespaces().List(metav1.ListOptions{}) + netnsList, err := master.networkClient.NetworkV1().NetNamespaces().List(metav1.ListOptions{}) if err != nil { return err } diff --git a/pkg/network/node/egress_network_policy.go b/pkg/network/node/egress_network_policy.go index 955c638b16b1..ebb75b9a6c19 100644 --- a/pkg/network/node/egress_network_policy.go +++ b/pkg/network/node/egress_network_policy.go @@ -17,7 +17,7 @@ import ( ) func (plugin *OsdnNode) SetupEgressNetworkPolicy() error { - policies, err := 
plugin.networkClient.Network().EgressNetworkPolicies(metav1.NamespaceAll).List(metav1.ListOptions{}) + policies, err := plugin.networkClient.NetworkV1().EgressNetworkPolicies(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { return fmt.Errorf("could not get EgressNetworkPolicies: %s", err) } diff --git a/pkg/network/node/multitenant.go b/pkg/network/node/multitenant.go index 0aa418b1ae9d..ce79b5482d02 100644 --- a/pkg/network/node/multitenant.go +++ b/pkg/network/node/multitenant.go @@ -67,7 +67,7 @@ func (mp *multiTenantPlugin) updatePodNetwork(namespace string, oldNetID, netID if err != nil { utilruntime.HandleError(fmt.Errorf("Could not get list of local pods in namespace %q: %v", namespace, err)) } - services, err := mp.node.kClient.Core().Services(namespace).List(metav1.ListOptions{}) + services, err := mp.node.kClient.CoreV1().Services(namespace).List(metav1.ListOptions{}) if err != nil { utilruntime.HandleError(fmt.Errorf("Could not get list of services in namespace %q: %v", namespace, err)) services = &corev1.ServiceList{} diff --git a/pkg/network/node/networkpolicy.go b/pkg/network/node/networkpolicy.go index 07e6352d2669..35950560048f 100644 --- a/pkg/network/node/networkpolicy.go +++ b/pkg/network/node/networkpolicy.go @@ -123,7 +123,7 @@ func (np *networkPolicyPlugin) initNamespaces() error { inUseVNIDs := np.node.oc.FindPolicyVNIDs() - namespaces, err := np.node.kClient.Core().Namespaces().List(metav1.ListOptions{}) + namespaces, err := np.node.kClient.CoreV1().Namespaces().List(metav1.ListOptions{}) if err != nil { return err } diff --git a/pkg/network/node/node.go b/pkg/network/node/node.go index 3a8a0e9f9ed6..916c23e54c23 100644 --- a/pkg/network/node/node.go +++ b/pkg/network/node/node.go @@ -446,7 +446,7 @@ func (node *OsdnNode) GetLocalPods(namespace string) ([]corev1.Pod, error) { LabelSelector: labels.Everything().String(), FieldSelector: fieldSelector.String(), } - podList, err := node.kClient.Core().Pods(namespace).List(opts) + podList, err := node.kClient.CoreV1().Pods(namespace).List(opts) if err != nil { return nil, err } @@ -518,7 +518,7 @@ func (node *OsdnNode) handleDeleteService(obj interface{}) { func validateNetworkPluginName(networkClient networkclient.Interface, pluginName string) error { // Detect any plugin mismatches between node and master - clusterNetwork, err := networkClient.Network().ClusterNetworks().Get(networkapi.ClusterNetworkDefault, metav1.GetOptions{}) + clusterNetwork, err := networkClient.NetworkV1().ClusterNetworks().Get(networkapi.ClusterNetworkDefault, metav1.GetOptions{}) switch { case errors.IsNotFound(err): return fmt.Errorf("master has not created a default cluster network, network plugin %q can not start", pluginName) diff --git a/pkg/network/node/pod.go b/pkg/network/node/pod.go index 62bb43f6f8f1..552bfb236401 100644 --- a/pkg/network/node/pod.go +++ b/pkg/network/node/pod.go @@ -5,12 +5,13 @@ package node import ( "encoding/json" "fmt" - "github.com/containernetworking/cni/pkg/types/current" "net" "strconv" "sync" "time" + "github.com/containernetworking/cni/pkg/types/current" + networkv1 "github.com/openshift/api/network/v1" "github.com/openshift/origin/pkg/network/common" "github.com/openshift/origin/pkg/network/node/cniserver" @@ -509,7 +510,7 @@ func (m *podManager) setup(req *cniserver.PodRequest) (cnitypes.Result, *running } }() - v1Pod, err := m.kClient.Core().Pods(req.PodNamespace).Get(req.PodName, metav1.GetOptions{}) + v1Pod, err := m.kClient.CoreV1().Pods(req.PodNamespace).Get(req.PodName, 
metav1.GetOptions{}) if err != nil { return nil, nil, err } diff --git a/pkg/network/node/subnets.go b/pkg/network/node/subnets.go index 5102b3551563..ed2e68c20dee 100644 --- a/pkg/network/node/subnets.go +++ b/pkg/network/node/subnets.go @@ -144,7 +144,7 @@ func (node *OsdnNode) getLocalSubnet() (string, error) { } err := utilwait.ExponentialBackoff(backoff, func() (bool, error) { var err error - subnet, err = node.networkClient.Network().HostSubnets().Get(node.hostName, metav1.GetOptions{}) + subnet, err = node.networkClient.NetworkV1().HostSubnets().Get(node.hostName, metav1.GetOptions{}) if err == nil { if err = common.ValidateHostSubnet(subnet); err != nil { return false, err diff --git a/pkg/network/node/vnids.go b/pkg/network/node/vnids.go index 6e41fd9af94b..ea68f505ed97 100644 --- a/pkg/network/node/vnids.go +++ b/pkg/network/node/vnids.go @@ -114,7 +114,7 @@ func (vmap *nodeVNIDMap) WaitAndGetVNID(name string) (uint32, error) { // So that we can imply insufficient timeout if we see many VnidNotFoundErrors. VnidNotFoundErrors.Inc() - netns, err := vmap.networkClient.Network().NetNamespaces().Get(name, metav1.GetOptions{}) + netns, err := vmap.networkClient.NetworkV1().NetNamespaces().Get(name, metav1.GetOptions{}) if err != nil { return 0, fmt.Errorf("failed to find netid for namespace: %s, %v", name, err) } @@ -169,7 +169,7 @@ func netnsIsMulticastEnabled(netns *networkapi.NetNamespace) bool { } func (vmap *nodeVNIDMap) populateVNIDs() error { - nets, err := vmap.networkClient.Network().NetNamespaces().List(metav1.ListOptions{}) + nets, err := vmap.networkClient.NetworkV1().NetNamespaces().List(metav1.ListOptions{}) if err != nil { return err } diff --git a/pkg/network/proxy/proxy.go b/pkg/network/proxy/proxy.go index d6323660f7f0..a8c71bab1114 100644 --- a/pkg/network/proxy/proxy.go +++ b/pkg/network/proxy/proxy.go @@ -88,7 +88,7 @@ func (proxy *OsdnProxy) Start(baseHandler pconfig.EndpointsHandler) error { } proxy.baseEndpointsHandler = baseHandler - policies, err := proxy.networkClient.Network().EgressNetworkPolicies(metav1.NamespaceAll).List(metav1.ListOptions{}) + policies, err := proxy.networkClient.NetworkV1().EgressNetworkPolicies(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { return fmt.Errorf("could not get EgressNetworkPolicies: %s", err) } @@ -341,7 +341,7 @@ func (proxy *OsdnProxy) OnEndpointsSynced() { } func (proxy *OsdnProxy) syncEgressDNSProxyFirewall() { - policies, err := proxy.networkClient.Network().EgressNetworkPolicies(metav1.NamespaceAll).List(metav1.ListOptions{}) + policies, err := proxy.networkClient.NetworkV1().EgressNetworkPolicies(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { utilruntime.HandleError(fmt.Errorf("Could not get EgressNetworkPolicies: %v", err)) return @@ -355,7 +355,7 @@ func (proxy *OsdnProxy) syncEgressDNSProxyFirewall() { policy, ok := getPolicy(policyUpdates.UID, policies) if !ok { - policies, err = proxy.networkClient.Network().EgressNetworkPolicies(metav1.NamespaceAll).List(metav1.ListOptions{}) + policies, err = proxy.networkClient.NetworkV1().EgressNetworkPolicies(metav1.NamespaceAll).List(metav1.ListOptions{}) if err != nil { utilruntime.HandleError(fmt.Errorf("Failed to update proxy firewall for policy: %v, Could not get EgressNetworkPolicies: %v", policyUpdates.UID, err)) continue diff --git a/pkg/oauthserver/oauth/registry/registry_test.go b/pkg/oauthserver/oauth/registry/registry_test.go index c63bb3498526..72e98a46f767 100644 --- a/pkg/oauthserver/oauth/registry/registry_test.go +++ 
b/pkg/oauthserver/oauth/registry/registry_test.go @@ -281,7 +281,7 @@ func TestRegistryAndServer(t *testing.T) { objs = append(objs, testCase.ClientAuth) } fakeOAuthClient := oauthfake.NewSimpleClientset(objs...) - storage := registrystorage.New(fakeOAuthClient.Oauth().OAuthAccessTokens(), fakeOAuthClient.Oauth().OAuthAuthorizeTokens(), fakeOAuthClient.Oauth().OAuthClients(), 0) + storage := registrystorage.New(fakeOAuthClient.OauthV1().OAuthAccessTokens(), fakeOAuthClient.OauthV1().OAuthAuthorizeTokens(), fakeOAuthClient.OauthV1().OAuthClients(), 0) config := osinserver.NewDefaultServerConfig() h.AuthorizeHandler = osinserver.AuthorizeHandlers{ @@ -291,7 +291,7 @@ func TestRegistryAndServer(t *testing.T) { h, ), handlers.NewGrantCheck( - NewClientAuthorizationGrantChecker(fakeOAuthClient.Oauth().OAuthClientAuthorizations()), + NewClientAuthorizationGrantChecker(fakeOAuthClient.OauthV1().OAuthClientAuthorizations()), h, h, ), diff --git a/pkg/oauthserver/server/grant/grant_test.go b/pkg/oauthserver/server/grant/grant_test.go index c4b8f9ea6617..808fbb5be32e 100644 --- a/pkg/oauthserver/server/grant/grant_test.go +++ b/pkg/oauthserver/server/grant/grant_test.go @@ -41,7 +41,7 @@ func badAuth(err error) *testAuth { } func emptyClientRegistry() api.OAuthClientGetter { - return oauthfake.NewSimpleClientset().Oauth().OAuthClients() + return oauthfake.NewSimpleClientset().OauthV1().OAuthClients() } func goodClientRegistry(clientID string, redirectURIs []string, literalScopes []string) api.OAuthClientGetter { @@ -52,14 +52,14 @@ func goodClientRegistry(clientID string, redirectURIs []string, literalScopes [] } fakeOAuthClient := oauthfake.NewSimpleClientset(client) - return fakeOAuthClient.Oauth().OAuthClients() + return fakeOAuthClient.OauthV1().OAuthClients() } func badClientRegistry(err error) api.OAuthClientGetter { fakeOAuthClient := oauthfake.NewSimpleClientset() fakeOAuthClient.PrependReactor("get", "oauthclients", func(action clienttesting.Action) (handled bool, ret runtime.Object, err error) { return true, nil, err }) - return fakeOAuthClient.Oauth().OAuthClients() + return fakeOAuthClient.OauthV1().OAuthClients() } func emptyAuthRegistry() *oauthfake.Clientset { @@ -395,7 +395,7 @@ func TestGrant(t *testing.T) { } for k, testCase := range testCases { - server := httptest.NewServer(NewGrant(testCase.CSRF, testCase.Auth, DefaultFormRenderer, testCase.ClientRegistry, testCase.AuthRegistry.Oauth().OAuthClientAuthorizations())) + server := httptest.NewServer(NewGrant(testCase.CSRF, testCase.Auth, DefaultFormRenderer, testCase.ClientRegistry, testCase.AuthRegistry.OauthV1().OAuthClientAuthorizations())) var resp *http.Response if testCase.PostValues != nil { diff --git a/pkg/oc/cli/admin/policy/modify_roles_test.go b/pkg/oc/cli/admin/policy/modify_roles_test.go index 0c5075c3235d..4798267c1272 100644 --- a/pkg/oc/cli/admin/policy/modify_roles_test.go +++ b/pkg/oc/cli/admin/policy/modify_roles_test.go @@ -320,7 +320,7 @@ func TestModifyNamedClusterRoleBinding(t *testing.T) { RoleKind: "ClusterRole", RoleBindingName: tc.inputRoleBindingName, Users: tc.inputSubjects, - RbacClient: fakeclient.NewSimpleClientset(tc.existingClusterRoleBindings).Rbac(), + RbacClient: fakeclient.NewSimpleClientset(tc.existingClusterRoleBindings).RbacV1(), PrintFlags: genericclioptions.NewPrintFlags(""), ToPrinter: func(string) (printers.ResourcePrinter, error) { return printers.NewDiscardingPrinter(), nil }, } @@ -579,7 +579,7 @@ func TestModifyNamedLocalRoleBinding(t *testing.T) { RoleBindingName: 
tc.inputRoleBindingName, RoleKind: "Role", RoleName: tc.inputRole, - RbacClient: fakeclient.NewSimpleClientset(tc.existingRoleBindings).Rbac(), + RbacClient: fakeclient.NewSimpleClientset(tc.existingRoleBindings).RbacV1(), Users: tc.inputSubjects, PrintFlags: genericclioptions.NewPrintFlags(""), ToPrinter: func(string) (printers.ResourcePrinter, error) { return printers.NewDiscardingPrinter(), nil }, @@ -1210,12 +1210,12 @@ func TestModifyRoleBindingWarnings(t *testing.T) { RoleBindingName: tt.inputs.roleBindingName, RoleKind: tt.inputs.roleKind, RoleName: tt.inputs.roleName, - RbacClient: fakeclient.NewSimpleClientset(tt.initialState.roles, tt.initialState.clusterRoles, tt.initialState.roleBindings, tt.initialState.clusterRoleBindings).Rbac(), + RbacClient: fakeclient.NewSimpleClientset(tt.initialState.roles, tt.initialState.clusterRoles, tt.initialState.roleBindings, tt.initialState.clusterRoleBindings).RbacV1(), Users: tt.inputs.userNames, Groups: tt.inputs.groupNames, Subjects: tt.inputs.serviceAccounts, - UserClient: fakeuserclient.NewSimpleClientset(tt.initialState.users, tt.initialState.groups).User(), - ServiceAccountClient: fakeclient.NewSimpleClientset(tt.initialState.serviceAccounts).Core(), + UserClient: fakeuserclient.NewSimpleClientset(tt.initialState.users, tt.initialState.groups).UserV1(), + ServiceAccountClient: fakeclient.NewSimpleClientset(tt.initialState.serviceAccounts).CoreV1(), PrintFlags: genericclioptions.NewPrintFlags(""), ToPrinter: func(string) (printers.ResourcePrinter, error) { return printers.NewDiscardingPrinter(), nil }, PrintErrf: func(format string, args ...interface{}) { diff --git a/pkg/oc/cli/admin/prune/images/images.go b/pkg/oc/cli/admin/prune/images/images.go index da1b59a3c795..4afe0665fa49 100644 --- a/pkg/oc/cli/admin/prune/images/images.go +++ b/pkg/oc/cli/admin/prune/images/images.go @@ -273,12 +273,12 @@ func (o PruneImagesOptions) Validate() error { // Run contains all the necessary functionality for the OpenShift cli prune images command. 
func (o PruneImagesOptions) Run() error { - allPods, err := o.KubeClient.Core().Pods(o.Namespace).List(metav1.ListOptions{}) + allPods, err := o.KubeClient.CoreV1().Pods(o.Namespace).List(metav1.ListOptions{}) if err != nil { return err } - allRCs, err := o.KubeClient.Core().ReplicationControllers(o.Namespace).List(metav1.ListOptions{}) + allRCs, err := o.KubeClient.CoreV1().ReplicationControllers(o.Namespace).List(metav1.ListOptions{}) if err != nil { return err } @@ -297,7 +297,7 @@ func (o PruneImagesOptions) Run() error { return err } - allDSs, err := o.KubeClient.Apps().DaemonSets(o.Namespace).List(metav1.ListOptions{}) + allDSs, err := o.KubeClient.AppsV1().DaemonSets(o.Namespace).List(metav1.ListOptions{}) if err != nil { // TODO: remove in future (3.9) release if !kerrors.IsForbidden(err) { @@ -306,7 +306,7 @@ func (o PruneImagesOptions) Run() error { fmt.Fprintf(o.ErrOut, "Failed to list daemonsets: %v\n - * Make sure to update clusterRoleBindings.\n", err) } - allDeployments, err := o.KubeClient.Apps().Deployments(o.Namespace).List(metav1.ListOptions{}) + allDeployments, err := o.KubeClient.AppsV1().Deployments(o.Namespace).List(metav1.ListOptions{}) if err != nil { // TODO: remove in future (3.9) release if !kerrors.IsForbidden(err) { @@ -320,7 +320,7 @@ func (o PruneImagesOptions) Run() error { return err } - allRSs, err := o.KubeClient.Apps().ReplicaSets(o.Namespace).List(metav1.ListOptions{}) + allRSs, err := o.KubeClient.AppsV1().ReplicaSets(o.Namespace).List(metav1.ListOptions{}) if err != nil { // TODO: remove in future (3.9) release if !kerrors.IsForbidden(err) { @@ -329,7 +329,7 @@ func (o PruneImagesOptions) Run() error { fmt.Fprintf(o.ErrOut, "Failed to list replicasets: %v\n - * Make sure to update clusterRoleBindings.\n", err) } - limitRangesList, err := o.KubeClient.Core().LimitRanges(o.Namespace).List(metav1.ListOptions{}) + limitRangesList, err := o.KubeClient.CoreV1().LimitRanges(o.Namespace).List(metav1.ListOptions{}) if err != nil { return err } diff --git a/pkg/oc/cli/admin/release/extract.go b/pkg/oc/cli/admin/release/extract.go index e73b29a72036..6c11584c8a71 100644 --- a/pkg/oc/cli/admin/release/extract.go +++ b/pkg/oc/cli/admin/release/extract.go @@ -104,7 +104,7 @@ func (o *ExtractOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args [ if err != nil { return fmt.Errorf("info expects one argument, or a connection to an OpenShift 4.x server: %v", err) } - cv, err := client.Config().ClusterVersions().Get("version", metav1.GetOptions{}) + cv, err := client.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{}) if err != nil { if apierrors.IsNotFound(err) { return fmt.Errorf("you must be connected to an OpenShift 4.x server to fetch the current version") diff --git a/pkg/oc/cli/admin/release/info.go b/pkg/oc/cli/admin/release/info.go index 6a578022fe2c..b2c190384e20 100644 --- a/pkg/oc/cli/admin/release/info.go +++ b/pkg/oc/cli/admin/release/info.go @@ -111,7 +111,7 @@ func (o *InfoOptions) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []st if err != nil { return fmt.Errorf("info expects one argument, or a connection to an OpenShift 4.x server: %v", err) } - cv, err := client.Config().ClusterVersions().Get("version", metav1.GetOptions{}) + cv, err := client.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return fmt.Errorf("you must be connected to an OpenShift 4.x server to fetch the current version") diff --git a/pkg/oc/cli/admin/upgrade/upgrade.go 
b/pkg/oc/cli/admin/upgrade/upgrade.go index 238024f90542..a7e0e826556e 100644 --- a/pkg/oc/cli/admin/upgrade/upgrade.go +++ b/pkg/oc/cli/admin/upgrade/upgrade.go @@ -128,7 +128,7 @@ func (o *Options) Complete(f kcmdutil.Factory, cmd *cobra.Command, args []string } func (o *Options) Run() error { - cv, err := o.Client.Config().ClusterVersions().Get("version", metav1.GetOptions{}) + cv, err := o.Client.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{}) if err != nil { if errors.IsNotFound(err) { return fmt.Errorf("No cluster version information available - you must be connected to a v4.0 OpenShift server to fetch the current version") @@ -144,7 +144,7 @@ func (o *Options) Run() error { } original := cv.Spec.DesiredUpdate cv.Spec.DesiredUpdate = nil - updated, err := o.Client.Config().ClusterVersions().Patch(cv.Name, types.MergePatchType, []byte(`{"spec":{"desiredUpdate":null}}`)) + updated, err := o.Client.ConfigV1().ClusterVersions().Patch(cv.Name, types.MergePatchType, []byte(`{"spec":{"desiredUpdate":null}}`)) if err != nil { return fmt.Errorf("Unable to cancel current rollout: %v", err) } @@ -171,7 +171,7 @@ func (o *Options) Run() error { update := cv.Status.AvailableUpdates[len(cv.Status.AvailableUpdates)-1] cv.Spec.DesiredUpdate = &update - _, err := o.Client.Config().ClusterVersions().Update(cv) + _, err := o.Client.ConfigV1().ClusterVersions().Update(cv) if err != nil { return fmt.Errorf("Unable to upgrade to latest version %s: %v", update.Version, err) } @@ -226,7 +226,7 @@ func (o *Options) Run() error { cv.Spec.DesiredUpdate = update - _, err := o.Client.Config().ClusterVersions().Update(cv) + _, err := o.Client.ConfigV1().ClusterVersions().Update(cv) if err != nil { return fmt.Errorf("Unable to upgrade: %v", err) } diff --git a/pkg/oc/cli/cancelbuild/cancelbuild_test.go b/pkg/oc/cli/cancelbuild/cancelbuild_test.go index b1bdda7dd596..44a280baee55 100644 --- a/pkg/oc/cli/cancelbuild/cancelbuild_test.go +++ b/pkg/oc/cli/cancelbuild/cancelbuild_test.go @@ -175,7 +175,7 @@ func TestCancelBuildRun(t *testing.T) { test.opts.timeout = 1 * time.Second test.opts.Client = client.BuildV1() - test.opts.BuildClient = client.Build().Builds(test.opts.Namespace) + test.opts.BuildClient = client.BuildV1().Builds(test.opts.Namespace) test.opts.ReportError = func(err error) { test.opts.HasError = true t.Logf("got error: %v", err) diff --git a/pkg/oc/cli/importimage/importimage_test.go b/pkg/oc/cli/importimage/importimage_test.go index dd99cee22a30..fec900ade305 100644 --- a/pkg/oc/cli/importimage/importimage_test.go +++ b/pkg/oc/cli/importimage/importimage_test.go @@ -590,7 +590,7 @@ func TestCreateImageImport(t *testing.T) { Scheduled: test.scheduled, ReferencePolicy: test.referencePolicy, Confirm: test.confirm, - isClient: fake.Image().ImageStreams("other"), + isClient: fake.ImageV1().ImageStreams("other"), } if test.insecure != nil { diff --git a/pkg/oc/cli/logs/logs_test.go b/pkg/oc/cli/logs/logs_test.go index 2e2b0f9e39cd..5193d69e3a30 100644 --- a/pkg/oc/cli/logs/logs_test.go +++ b/pkg/oc/cli/logs/logs_test.go @@ -96,7 +96,7 @@ func TestRunLogForPipelineStrategy(t *testing.T) { Object: tc.o, Namespace: "foo", }, - Client: fakebc.Build(), + Client: fakebc.BuildV1(), } if err := o.runLogPipeline(); err != nil { t.Errorf("%#v: RunLog error %v", tc.o, err) diff --git a/pkg/oc/cli/registry/info/info.go b/pkg/oc/cli/registry/info/info.go index ec3bfc0bd3ad..e6ca85903608 100644 --- a/pkg/oc/cli/registry/info/info.go +++ b/pkg/oc/cli/registry/info/info.go @@ -116,7 +116,7 @@ func (i 
*RegistryInfo) HostPort() (string, bool) { func findRegistryInfo(client imageclient.Interface, namespaces ...string) (*RegistryInfo, error) { for _, ns := range namespaces { - imageStreams, err := client.Image().ImageStreams(ns).List(metav1.ListOptions{}) + imageStreams, err := client.ImageV1().ImageStreams(ns).List(metav1.ListOptions{}) if err != nil || len(imageStreams.Items) == 0 { continue } diff --git a/pkg/oc/cli/registry/login/login.go b/pkg/oc/cli/registry/login/login.go index f09f5e463c94..cc2e2219fd7e 100644 --- a/pkg/oc/cli/registry/login/login.go +++ b/pkg/oc/cli/registry/login/login.go @@ -140,7 +140,7 @@ func (o *LoginOptions) Complete(f kcmdutil.Factory, args []string) error { if err != nil { return err } - sa, err := client.Core().ServiceAccounts(ns).Get(o.ServiceAccount, metav1.GetOptions{}) + sa, err := client.CoreV1().ServiceAccounts(ns).Get(o.ServiceAccount, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { return fmt.Errorf("the service account %s does not exist in namespace %s", o.ServiceAccount, ns) @@ -149,7 +149,7 @@ func (o *LoginOptions) Complete(f kcmdutil.Factory, args []string) error { } var lastErr error for _, ref := range sa.Secrets { - secret, err := client.Core().Secrets(ns).Get(ref.Name, metav1.GetOptions{}) + secret, err := client.CoreV1().Secrets(ns).Get(ref.Name, metav1.GetOptions{}) if err != nil { lastErr = err continue @@ -213,7 +213,7 @@ func (o *LoginOptions) Complete(f kcmdutil.Factory, args []string) error { func findPublicHostname(client *imageclient.Clientset, namespaces ...string) (name string, internal bool) { for _, ns := range namespaces { - imageStreams, err := client.Image().ImageStreams(ns).List(metav1.ListOptions{}) + imageStreams, err := client.ImageV1().ImageStreams(ns).List(metav1.ListOptions{}) if err != nil || len(imageStreams.Items) == 0 { continue } diff --git a/pkg/oc/cli/rollout/retry.go b/pkg/oc/cli/rollout/retry.go index 7c14a93400c9..4940b074c9f7 100644 --- a/pkg/oc/cli/rollout/retry.go +++ b/pkg/oc/cli/rollout/retry.go @@ -166,7 +166,7 @@ func (o RetryOptions) Run() error { } latestDeploymentName := appsutil.LatestDeploymentNameForConfig(config) - rc, err := o.Clientset.Core().ReplicationControllers(config.Namespace).Get(latestDeploymentName, metav1.GetOptions{}) + rc, err := o.Clientset.CoreV1().ReplicationControllers(config.Namespace).Get(latestDeploymentName, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("unable to find the latest rollout (#%d).\nYou can start a new rollout with 'oc rollout latest dc/%s'.", config.Status.LatestVersion, config.Name))) @@ -189,7 +189,7 @@ func (o RetryOptions) Run() error { } // Delete the deployer pod as well as the deployment hooks pods, if any - pods, err := o.Clientset.Core().Pods(config.Namespace).List(metav1.ListOptions{LabelSelector: appsutil.DeployerPodSelector( + pods, err := o.Clientset.CoreV1().Pods(config.Namespace).List(metav1.ListOptions{LabelSelector: appsutil.DeployerPodSelector( latestDeploymentName).String()}) if err != nil { allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("failed to list deployer/hook pods for deployment #%d: %v", config.Status.LatestVersion, err))) @@ -197,7 +197,7 @@ func (o RetryOptions) Run() error { } hasError := false for _, pod := range pods.Items { - err := o.Clientset.Core().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) + err := 
o.Clientset.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0)) if err != nil { allErrs = append(allErrs, kcmdutil.AddSourceToErr("retrying", info.Source, fmt.Errorf("failed to delete deployer/hook pod %s for deployment #%d: %v", pod.Name, config.Status.LatestVersion, err))) hasError = true diff --git a/pkg/oc/cli/rsync/util.go b/pkg/oc/cli/rsync/util.go index b6819a024f3c..1d3cb5f994c0 100644 --- a/pkg/oc/cli/rsync/util.go +++ b/pkg/oc/cli/rsync/util.go @@ -133,6 +133,6 @@ type podAPIChecker struct { // CheckPods will check if pods exists in the provided context func (p podAPIChecker) CheckPod() error { - _, err := p.client.Core().Pods(p.namespace).Get(p.podName, metav1.GetOptions{}) + _, err := p.client.CoreV1().Pods(p.namespace).Get(p.podName, metav1.GetOptions{}) return err } diff --git a/pkg/oc/cli/set/volume_test.go b/pkg/oc/cli/set/volume_test.go index b4d16bbebc37..dd658224606f 100644 --- a/pkg/oc/cli/set/volume_test.go +++ b/pkg/oc/cli/set/volume_test.go @@ -112,7 +112,7 @@ func getFakeMapping() *meta.RESTMapping { func getFakeInfo(podInfo *corev1.Pod) ([]*resource.Info, *VolumeOptions) { fakeMapping := getFakeMapping() info := &resource.Info{ - Client: fake.NewSimpleClientset().Core().RESTClient(), + Client: fake.NewSimpleClientset().CoreV1().RESTClient(), Mapping: fakeMapping, Namespace: "default", Name: "fakepod", diff --git a/pkg/oc/cli/tag/tag_test.go b/pkg/oc/cli/tag/tag_test.go index 06276ab22d88..012cc93f37e1 100644 --- a/pkg/oc/cli/tag/tag_test.go +++ b/pkg/oc/cli/tag/tag_test.go @@ -177,7 +177,7 @@ func TestTag(t *testing.T) { }) test.opts.IOStreams = genericclioptions.NewTestIOStreamsDiscard() - test.opts.client = client.Image() + test.opts.client = client.ImageV1() err := test.opts.Validate() if (err == nil && len(test.validateErr) != 0) || (err != nil && err.Error() != test.validateErr) { @@ -225,7 +225,7 @@ func TestRunTag_DeleteOld(t *testing.T) { }{ opts: &TagOptions{ IOStreams: genericclioptions.NewTestIOStreamsDiscard(), - client: client.Image(), + client: client.ImageV1(), deleteTag: true, destNamespace: []string{"yourproject"}, destNameAndTag: []string{"rails:tip"}, @@ -268,7 +268,7 @@ func TestRunTag_AddNew(t *testing.T) { }{ opts: &TagOptions{ IOStreams: genericclioptions.NewTestIOStreamsDiscard(), - client: client.Image(), + client: client.ImageV1(), ref: imagev1.DockerImageReference{ Namespace: "openshift", Name: "ruby", @@ -316,7 +316,7 @@ func TestRunTag_AddRestricted(t *testing.T) { }{ opts: &TagOptions{ IOStreams: genericclioptions.NewTestIOStreamsDiscard(), - client: client.Image(), + client: client.ImageV1(), ref: imagev1.DockerImageReference{ Namespace: "openshift", Name: "ruby", @@ -362,7 +362,7 @@ func TestRunTag_DeleteNew(t *testing.T) { }{ opts: &TagOptions{ IOStreams: genericclioptions.NewTestIOStreamsDiscard(), - client: client.Image(), + client: client.ImageV1(), deleteTag: true, destNamespace: []string{"yourproject"}, destNameAndTag: []string{"rails:tip"}, diff --git a/pkg/oc/lib/describe/deployments.go b/pkg/oc/lib/describe/deployments.go index 17f1df15b724..b2e70411217b 100644 --- a/pkg/oc/lib/describe/deployments.go +++ b/pkg/oc/lib/describe/deployments.go @@ -85,7 +85,7 @@ func (d *DeploymentConfigDescriber) Describe(namespace, name string, settings de ) if d.config == nil { - if rcs, err := d.kubeClient.Core().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: appsutil.ConfigSelector(deploymentConfig.Name).String()}); err == nil { + if rcs, err := 
d.kubeClient.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: appsutil.ConfigSelector(deploymentConfig.Name).String()}); err == nil { deploymentsHistory = make([]*corev1.ReplicationController, 0, len(rcs.Items)) for i := range rcs.Items { deploymentsHistory = append(deploymentsHistory, &rcs.Items[i]) @@ -150,7 +150,7 @@ func (d *DeploymentConfigDescriber) Describe(namespace, name string, settings de if settings.ShowEvents { // Events - if events, err := d.kubeClient.Core().Events(deploymentConfig.Namespace).Search(legacyscheme.Scheme, deploymentConfig); err == nil && events != nil { + if events, err := d.kubeClient.CoreV1().Events(deploymentConfig.Namespace).Search(legacyscheme.Scheme, deploymentConfig); err == nil && events != nil { latestDeploymentEvents := &corev1.EventList{Items: []corev1.Event{}} for i := len(events.Items); i != 0 && i > len(events.Items)-maxDisplayDeploymentsEvents; i-- { latestDeploymentEvents.Items = append(latestDeploymentEvents.Items, events.Items[i-1]) @@ -325,7 +325,7 @@ func printDeploymentConfigSpec(kc kubernetes.Interface, dc appsv1.DeploymentConf // TODO: Move this upstream func printAutoscalingInfo(res []schema.GroupResource, namespace, name string, kclient kubernetes.Interface, w *tabwriter.Writer) { - hpaList, err := kclient.Autoscaling().HorizontalPodAutoscalers(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) + hpaList, err := kclient.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(metav1.ListOptions{LabelSelector: labels.Everything().String()}) if err != nil { return } @@ -424,7 +424,7 @@ func printDeploymentRc(deployment *corev1.ReplicationController, kubeClient kube func getPodStatusForDeployment(deployment *corev1.ReplicationController, kubeClient kubernetes.Interface) (running, waiting, succeeded, failed int, err error) { - rcPods, err := kubeClient.Core().Pods(deployment.Namespace).List(metav1.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector().String()}) + rcPods, err := kubeClient.CoreV1().Pods(deployment.Namespace).List(metav1.ListOptions{LabelSelector: labels.Set(deployment.Spec.Selector).AsSelector().String()}) if err != nil { return } @@ -469,14 +469,14 @@ func (d *LatestDeploymentsDescriber) Describe(namespace, name string) (string, e var deployments []corev1.ReplicationController if d.count == -1 || d.count > 1 { - list, err := d.kubeClient.Core().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: appsutil.ConfigSelector(name).String()}) + list, err := d.kubeClient.CoreV1().ReplicationControllers(namespace).List(metav1.ListOptions{LabelSelector: appsutil.ConfigSelector(name).String()}) if err != nil && !kerrors.IsNotFound(err) { return "", err } deployments = list.Items } else { deploymentName := appsutil.LatestDeploymentNameForConfig(config) - deployment, err := d.kubeClient.Core().ReplicationControllers(config.Namespace).Get(deploymentName, metav1.GetOptions{}) + deployment, err := d.kubeClient.CoreV1().ReplicationControllers(config.Namespace).Get(deploymentName, metav1.GetOptions{}) if err != nil && !kerrors.IsNotFound(err) { return "", err } diff --git a/pkg/oc/lib/describe/deployments_test.go b/pkg/oc/lib/describe/deployments_test.go index 46991d424895..bdfaf1bb7cf4 100644 --- a/pkg/oc/lib/describe/deployments_test.go +++ b/pkg/oc/lib/describe/deployments_test.go @@ -48,7 +48,7 @@ func TestDeploymentConfigDescriber(t *testing.T) { }) d := &DeploymentConfigDescriber{ - appsClient: fake.Apps(), + appsClient: 
fake.AppsV1(), kubeClient: kFake, } diff --git a/pkg/oc/lib/newapp/app/templatelookup_test.go b/pkg/oc/lib/newapp/app/templatelookup_test.go index 7af373dc794a..c1ed3c49741c 100644 --- a/pkg/oc/lib/newapp/app/templatelookup_test.go +++ b/pkg/oc/lib/newapp/app/templatelookup_test.go @@ -29,7 +29,7 @@ func testTemplateClient(templates *templatev1.TemplateList) templatev1client.Tem return true, templates, nil } }) - return fake.Template() + return fake.TemplateV1() } func TestTemplateSearcher(t *testing.T) { diff --git a/pkg/oc/util/clientcmd/resolve_test.go b/pkg/oc/util/clientcmd/resolve_test.go index 3888220aca6e..fe713909ed08 100644 --- a/pkg/oc/util/clientcmd/resolve_test.go +++ b/pkg/oc/util/clientcmd/resolve_test.go @@ -87,7 +87,7 @@ func TestResolveImagePullSpec(t *testing.T) { for i, test := range testCases { t.Logf("[%d] trying to resolve %q %s and expecting %q (expectErr=%t)", i, test.source, test.input, test.expect, test.expectErr) - result, err := resolveImagePullSpec(test.client.Image(), test.source, test.input, "default") + result, err := resolveImagePullSpec(test.client.ImageV1(), test.source, test.input, "default") if err != nil && !test.expectErr { t.Errorf("[%d] unexpected error: %v", i, err) } else if err == nil && test.expectErr { diff --git a/pkg/pod/envresolve/env.go b/pkg/pod/envresolve/env.go index 1abf048e79fc..faf6d919299c 100644 --- a/pkg/pod/envresolve/env.go +++ b/pkg/pod/envresolve/env.go @@ -48,7 +48,7 @@ func getConfigMapRefValue(client kubernetes.Interface, namespace string, store * configMap, ok := store.ConfigMapStore[configMapSelector.Name] if !ok { var err error - configMap, err = client.Core().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{}) + configMap, err = client.CoreV1().ConfigMaps(namespace).Get(configMapSelector.Name, metav1.GetOptions{}) if err != nil { return "", err } diff --git a/pkg/project/apiserver/registry/project/proxy/proxy_test.go b/pkg/project/apiserver/registry/project/proxy/proxy_test.go index 25c768198b9e..39197fc2c01e 100644 --- a/pkg/project/apiserver/registry/project/proxy/proxy_test.go +++ b/pkg/project/apiserver/registry/project/proxy/proxy_test.go @@ -36,7 +36,7 @@ func TestListProjects(t *testing.T) { } mockClient := fake.NewSimpleClientset(&namespaceList) storage := REST{ - client: mockClient.Core().Namespaces(), + client: mockClient.CoreV1().Namespaces(), lister: &mockLister{&namespaceList}, } user := &user.DefaultInfo{ @@ -73,7 +73,7 @@ func TestCreateProjectBadObject(t *testing.T) { func TestCreateInvalidProject(t *testing.T) { mockClient := &fake.Clientset{} - storage := NewREST(mockClient.Core().Namespaces(), &mockLister{}, nil, nil) + storage := NewREST(mockClient.CoreV1().Namespaces(), &mockLister{}, nil, nil) _, err := storage.Create(apirequest.NewContext(), &projectapi.Project{ ObjectMeta: metav1.ObjectMeta{ Annotations: map[string]string{oapi.OpenShiftDisplayName: "h\t\ni"}, @@ -86,7 +86,7 @@ func TestCreateInvalidProject(t *testing.T) { func TestCreateProjectOK(t *testing.T) { mockClient := &fake.Clientset{} - storage := NewREST(mockClient.Core().Namespaces(), &mockLister{}, nil, nil) + storage := NewREST(mockClient.CoreV1().Namespaces(), &mockLister{}, nil, nil) _, err := storage.Create(apirequest.NewContext(), &projectapi.Project{ ObjectMeta: metav1.ObjectMeta{Name: "foo"}, }, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}) @@ -103,7 +103,7 @@ func TestCreateProjectOK(t *testing.T) { func TestGetProjectOK(t *testing.T) { mockClient := 
fake.NewSimpleClientset(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}) - storage := NewREST(mockClient.Core().Namespaces(), &mockLister{}, nil, nil) + storage := NewREST(mockClient.CoreV1().Namespaces(), &mockLister{}, nil, nil) project, err := storage.Get(apirequest.NewContext(), "foo", &metav1.GetOptions{}) if project == nil { t.Error("Unexpected nil project") @@ -119,7 +119,7 @@ func TestGetProjectOK(t *testing.T) { func TestDeleteProject(t *testing.T) { mockClient := &fake.Clientset{} storage := REST{ - client: mockClient.Core().Namespaces(), + client: mockClient.CoreV1().Namespaces(), } obj, _, err := storage.Delete(apirequest.NewContext(), "foo", nil) if obj == nil { diff --git a/pkg/project/apiserver/registry/projectrequest/delegated/delegated.go b/pkg/project/apiserver/registry/projectrequest/delegated/delegated.go index 6dddf3b4275a..3731878902ea 100644 --- a/pkg/project/apiserver/registry/projectrequest/delegated/delegated.go +++ b/pkg/project/apiserver/registry/projectrequest/delegated/delegated.go @@ -279,7 +279,7 @@ func (r *REST) getTemplate() (*templatev1.Template, error) { return DefaultTemplate(), nil } - return r.templateClient.Template().Templates(r.templateNamespace).Get(r.templateName, metav1.GetOptions{}) + return r.templateClient.TemplateV1().Templates(r.templateNamespace).Get(r.templateName, metav1.GetOptions{}) } var _ = rest.Lister(&REST{}) diff --git a/pkg/project/controller/project_finalizer_controller.go b/pkg/project/controller/project_finalizer_controller.go index f9c54ae2eb57..43d3e2596f26 100644 --- a/pkg/project/controller/project_finalizer_controller.go +++ b/pkg/project/controller/project_finalizer_controller.go @@ -149,6 +149,6 @@ func (c *ProjectFinalizerController) finalize(namespace *v1.Namespace) error { } // we have removed content, so mark it finalized by us - _, err := c.client.Core().Namespaces().Finalize(namespace) + _, err := c.client.CoreV1().Namespaces().Finalize(namespace) return err } diff --git a/pkg/quota/apiserver/admission/clusterresourcequota/accessor_test.go b/pkg/quota/apiserver/admission/clusterresourcequota/accessor_test.go index 8251593d88ed..950d7f40de59 100644 --- a/pkg/quota/apiserver/admission/clusterresourcequota/accessor_test.go +++ b/pkg/quota/apiserver/admission/clusterresourcequota/accessor_test.go @@ -123,7 +123,7 @@ func TestUpdateQuota(t *testing.T) { client := fakequotaclient.NewSimpleClientset(objs...) 
- accessor := newQuotaAccessor(quotaLister, nil, client.Quota(), nil) + accessor := newQuotaAccessor(quotaLister, nil, client.QuotaV1(), nil) actualErr := accessor.UpdateQuotaStatus(tc.quotaToUpdate) switch { @@ -313,7 +313,7 @@ func TestGetQuota(t *testing.T) { client := fakequotaclient.NewSimpleClientset() - accessor := newQuotaAccessor(quotaLister, namespaceLister, client.Quota(), tc.mapperFunc()) + accessor := newQuotaAccessor(quotaLister, namespaceLister, client.QuotaV1(), tc.mapperFunc()) actualQuotas, actualErr := accessor.GetQuotas(tc.requestedNamespace) switch { diff --git a/pkg/quota/controller/clusterquotareconciliation/reconciliation_controller_test.go b/pkg/quota/controller/clusterquotareconciliation/reconciliation_controller_test.go index 5ca89c1b10f6..9415a3ce7e91 100644 --- a/pkg/quota/controller/clusterquotareconciliation/reconciliation_controller_test.go +++ b/pkg/quota/controller/clusterquotareconciliation/reconciliation_controller_test.go @@ -255,7 +255,7 @@ func TestSyncFunc(t *testing.T) { // we only need these fields to test the sync func controller := ClusterQuotaReconcilationController{ clusterQuotaMapper: tc.mapperFunc(), - clusterQuotaClient: client.Quota().ClusterResourceQuotas(), + clusterQuotaClient: client.QuotaV1().ClusterResourceQuotas(), } actualErr, actualRetries := controller.syncQuotaForNamespaces(tc.startingQuota(), tc.workItems) diff --git a/pkg/quota/image/imagestreamtag_evaluator_test.go b/pkg/quota/image/imagestreamtag_evaluator_test.go index 95cf9209b9ad..6f000ababe1b 100644 --- a/pkg/quota/image/imagestreamtag_evaluator_test.go +++ b/pkg/quota/image/imagestreamtag_evaluator_test.go @@ -208,7 +208,7 @@ func TestImageStreamTagEvaluatorUsage(t *testing.T) { for _, is := range tc.iss { isInformer.Informer().GetIndexer().Add(&is) } - evaluator := NewImageStreamTagEvaluator(isInformer.Lister(), fakeClient.Image()) + evaluator := NewImageStreamTagEvaluator(isInformer.Lister(), fakeClient.ImageV1()) usage, err := evaluator.Usage(&tc.ist) if err != nil { diff --git a/pkg/route/controller/ingress/ingress_test.go b/pkg/route/controller/ingress/ingress_test.go index 50686350dc43..468239eeabfe 100644 --- a/pkg/route/controller/ingress/ingress_test.go +++ b/pkg/route/controller/ingress/ingress_test.go @@ -244,7 +244,7 @@ func TestController_stabilizeAfterCreate(t *testing.T) { c := &Controller{ queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ingress-to-route-test"), - client: kc.Route(), + client: kc.RouteV1(), ingressLister: i, routeLister: r, secretLister: s, @@ -1515,7 +1515,7 @@ func TestController_sync(t *testing.T) { c := &Controller{ queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ingress-to-route-test"), - client: kc.Route(), + client: kc.RouteV1(), ingressLister: tt.fields.i, routeLister: tt.fields.r, secretLister: tt.fields.s, diff --git a/pkg/route/controller/ingressip/service_ingressip_controller.go b/pkg/route/controller/ingressip/service_ingressip_controller.go index 43336b1df4df..0327cd519e62 100644 --- a/pkg/route/controller/ingressip/service_ingressip_controller.go +++ b/pkg/route/controller/ingressip/service_ingressip_controller.go @@ -85,7 +85,7 @@ func NewIngressIPController(services cache.SharedIndexInformer, kc kclientset.In recorder := eventBroadcaster.NewRecorder(legacyscheme.Scheme, v1.EventSource{Component: "ingressip-controller"}) ic := &IngressIPController{ - client: kc.Core(), + client: kc.CoreV1(), queue: 
workqueue.NewRateLimitingQueue(workqueue.DefaultControllerRateLimiter()), maxRetries: 10, recorder: recorder, diff --git a/pkg/serviceaccounts/controllers/create_dockercfg_secrets.go b/pkg/serviceaccounts/controllers/create_dockercfg_secrets.go index 324a2b73c727..9c05fe5f9d83 100644 --- a/pkg/serviceaccounts/controllers/create_dockercfg_secrets.go +++ b/pkg/serviceaccounts/controllers/create_dockercfg_secrets.go @@ -346,7 +346,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error { // Clear the pending token annotation when updating delete(serviceAccount.Annotations, PendingTokenAnnotation) - updatedSA, err := e.client.Core().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount) + updatedSA, err := e.client.CoreV1().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount) if err == nil { e.serviceAccountCache.Mutation(updatedSA) } @@ -372,7 +372,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error { if !exists || !needsDockercfgSecret(obj.(*v1.ServiceAccount)) || serviceAccount.UID != obj.(*v1.ServiceAccount).UID { // somehow a dockercfg secret appeared or the SA disappeared. cleanup the secret we made and return glog.V(2).Infof("Deleting secret because the work is already done %s/%s", dockercfgSecret.Namespace, dockercfgSecret.Name) - e.client.Core().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil) + e.client.CoreV1().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil) return nil } @@ -385,7 +385,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error { // Clear the pending token annotation when updating delete(serviceAccount.Annotations, PendingTokenAnnotation) - updatedSA, err := e.client.Core().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount) + updatedSA, err := e.client.CoreV1().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount) if err == nil { e.serviceAccountCache.Mutation(updatedSA) } @@ -396,7 +396,7 @@ func (e *DockercfgController) syncServiceAccount(key string) error { // nothing to do. Our choice was stale or we got a conflict. Either way that means that the service account was updated. We simply need to return because we'll get an update notification later // we do need to clean up our dockercfgSecret. 
token secrets are cleaned up by the controller handling service account dockercfg secret deletes glog.V(2).Infof("Deleting secret %s/%s (err=%v)", dockercfgSecret.Namespace, dockercfgSecret.Name, err) - e.client.Core().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil) + e.client.CoreV1().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil) } return err } @@ -412,7 +412,7 @@ func (e *DockercfgController) createTokenSecret(serviceAccount *v1.ServiceAccoun serviceAccount.Annotations = map[string]string{} } serviceAccount.Annotations[PendingTokenAnnotation] = pendingTokenName - updatedServiceAccount, err := e.client.Core().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount) + updatedServiceAccount, err := e.client.CoreV1().ServiceAccounts(serviceAccount.Namespace).Update(serviceAccount) // Conflicts mean we'll get called to sync this service account again if kapierrors.IsConflict(err) { return nil, false, nil @@ -449,7 +449,7 @@ func (e *DockercfgController) createTokenSecret(serviceAccount *v1.ServiceAccoun } glog.V(4).Infof("Creating token secret %q for service account %s/%s", tokenSecret.Name, serviceAccount.Namespace, serviceAccount.Name) - token, err := e.client.Core().Secrets(tokenSecret.Namespace).Create(tokenSecret) + token, err := e.client.CoreV1().Secrets(tokenSecret.Namespace).Create(tokenSecret) // Already exists but not in cache means we'll get an add watch event and resync if kapierrors.IsAlreadyExists(err) { return nil, false, nil @@ -506,7 +506,7 @@ func (e *DockercfgController) createDockerPullSecret(serviceAccount *v1.ServiceA dockercfgSecret.Data[v1.DockerConfigKey] = dockercfgContent // Save the secret - createdSecret, err := e.client.Core().Secrets(tokenSecret.Namespace).Create(dockercfgSecret) + createdSecret, err := e.client.CoreV1().Secrets(tokenSecret.Namespace).Create(dockercfgSecret) return createdSecret, err == nil, err } diff --git a/pkg/serviceaccounts/controllers/deleted_dockercfg_secrets.go b/pkg/serviceaccounts/controllers/deleted_dockercfg_secrets.go index 4700aa8bad23..cf21efb571ee 100644 --- a/pkg/serviceaccounts/controllers/deleted_dockercfg_secrets.go +++ b/pkg/serviceaccounts/controllers/deleted_dockercfg_secrets.go @@ -103,7 +103,7 @@ func (e *DockercfgDeletedController) secretDeleted(obj interface{}) { } // remove the reference token secret - if err := e.client.Core().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey], nil); (err != nil) && !kapierrors.IsNotFound(err) { + if err := e.client.CoreV1().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Annotations[ServiceAccountTokenSecretNameKey], nil); (err != nil) && !kapierrors.IsNotFound(err) { utilruntime.HandleError(err) } } @@ -144,7 +144,7 @@ func (e *DockercfgDeletedController) removeDockercfgSecretReference(dockercfgSec serviceAccount.ImagePullSecrets = imagePullSecrets if changed { - _, err = e.client.Core().ServiceAccounts(dockercfgSecret.Namespace).Update(serviceAccount) + _, err = e.client.CoreV1().ServiceAccounts(dockercfgSecret.Namespace).Update(serviceAccount) if err != nil { return err } @@ -160,7 +160,7 @@ func (e *DockercfgDeletedController) getServiceAccount(secret *v1.Secret) (*v1.S return nil, nil } - serviceAccount, err := e.client.Core().ServiceAccounts(secret.Namespace).Get(saName, metav1.GetOptions{}) + serviceAccount, err := e.client.CoreV1().ServiceAccounts(secret.Namespace).Get(saName, metav1.GetOptions{}) if err != nil { return nil, err } diff --git 
a/pkg/serviceaccounts/controllers/deleted_token_secrets.go b/pkg/serviceaccounts/controllers/deleted_token_secrets.go index 65157715ef17..df414032dab1 100644 --- a/pkg/serviceaccounts/controllers/deleted_token_secrets.go +++ b/pkg/serviceaccounts/controllers/deleted_token_secrets.go @@ -92,7 +92,7 @@ func (e *DockercfgTokenDeletedController) secretDeleted(obj interface{}) { // remove the reference token secrets for _, dockercfgSecret := range dockercfgSecrets { - if err := e.client.Core().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil); (err != nil) && !apierrors.IsNotFound(err) { + if err := e.client.CoreV1().Secrets(dockercfgSecret.Namespace).Delete(dockercfgSecret.Name, nil); (err != nil) && !apierrors.IsNotFound(err) { utilruntime.HandleError(err) } } @@ -103,7 +103,7 @@ func (e *DockercfgTokenDeletedController) findDockercfgSecrets(tokenSecret *v1.S dockercfgSecrets := []*v1.Secret{} options := metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector(api.SecretTypeField, string(v1.SecretTypeDockercfg)).String()} - potentialSecrets, err := e.client.Core().Secrets(tokenSecret.Namespace).List(options) + potentialSecrets, err := e.client.CoreV1().Secrets(tokenSecret.Namespace).List(options) if err != nil { return nil, err } diff --git a/pkg/serviceaccounts/controllers/docker_registry_service.go b/pkg/serviceaccounts/controllers/docker_registry_service.go index 073fe640af2e..4ae190a32019 100644 --- a/pkg/serviceaccounts/controllers/docker_registry_service.go +++ b/pkg/serviceaccounts/controllers/docker_registry_service.go @@ -402,7 +402,7 @@ func (e *DockerRegistryServiceController) syncSecretUpdate(key string) error { } dockercfgSecret.Data[v1.DockerConfigKey] = dockercfgContent - if _, err := e.client.Core().Secrets(dockercfgSecret.Namespace).Update(dockercfgSecret); err != nil { + if _, err := e.client.CoreV1().Secrets(dockercfgSecret.Namespace).Update(dockercfgSecret); err != nil { return err } diff --git a/pkg/serviceaccounts/oauthclient/oauthclientregistry_test.go b/pkg/serviceaccounts/oauthclient/oauthclientregistry_test.go index 848276de7d78..b698135c0fb9 100644 --- a/pkg/serviceaccounts/oauthclient/oauthclientregistry_test.go +++ b/pkg/serviceaccounts/oauthclient/oauthclientregistry_test.go @@ -570,10 +570,10 @@ func TestGetClient(t *testing.T) { delegate := &fakeDelegate{} fakerecorder := record.NewFakeRecorder(100) getter := saOAuthClientAdapter{ - saClient: tc.kubeClient.Core(), - secretClient: tc.kubeClient.Core(), + saClient: tc.kubeClient.CoreV1(), + secretClient: tc.kubeClient.CoreV1(), eventRecorder: fakerecorder, - routeClient: tc.routeClient.Route(), + routeClient: tc.routeClient.RouteV1(), delegate: delegate, grantMethod: oauthapiv1.GrantHandlerPrompt, decoder: codecFactory.UniversalDecoder(), @@ -1236,7 +1236,7 @@ func buildRouteClient(routes []*routeapi.Route) saOAuthClientAdapter { objects = append(objects, route) } return saOAuthClientAdapter{ - routeClient: routefake.NewSimpleClientset(objects...).Route(), + routeClient: routefake.NewSimpleClientset(objects...).RouteV1(), eventRecorder: record.NewFakeRecorder(100), } } diff --git a/pkg/template/controller/metrics_test.go b/pkg/template/controller/metrics_test.go index 04489d865cac..cb62eb0ac2ba 100644 --- a/pkg/template/controller/metrics_test.go +++ b/pkg/template/controller/metrics_test.go @@ -26,7 +26,7 @@ type fakeLister struct { } func (f *fakeLister) List(labels.Selector) ([]*templateapi.TemplateInstance, error) { - list, err := 
f.templateClient.Template().TemplateInstances("").List(metav1.ListOptions{}) + list, err := f.templateClient.TemplateV1().TemplateInstances("").List(metav1.ListOptions{}) if err != nil { return nil, err } @@ -38,7 +38,7 @@ func (f *fakeLister) List(labels.Selector) ([]*templateapi.TemplateInstance, err } func (f *fakeLister) Get(name string) (*templateapi.TemplateInstance, error) { - return f.templateClient.Template().TemplateInstances("").Get(name, metav1.GetOptions{}) + return f.templateClient.TemplateV1().TemplateInstances("").Get(name, metav1.GetOptions{}) } func (f *fakeLister) TemplateInstances(string) templatelister.TemplateInstanceNamespaceLister { diff --git a/pkg/templateservicebroker/servicebroker/bind.go b/pkg/templateservicebroker/servicebroker/bind.go index c1fbd929d640..ca4339c281d8 100644 --- a/pkg/templateservicebroker/servicebroker/bind.go +++ b/pkg/templateservicebroker/servicebroker/bind.go @@ -172,7 +172,7 @@ func (b *Broker) Bind(u user.Info, instanceID, bindingID string, breq *api.BindR // end users are not expected to have access to BrokerTemplateInstance // objects; SAR on the TemplateInstance instead. - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "get", Group: templateapi.GroupName, @@ -214,7 +214,7 @@ func (b *Broker) Bind(u user.Info, instanceID, bindingID string, breq *api.BindR return api.InternalServerError(err) } - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: object.Ref.Namespace, Verb: "get", Group: mapping.Resource.Group, @@ -252,7 +252,7 @@ func (b *Broker) Bind(u user.Info, instanceID, bindingID string, breq *api.BindR // end users are not expected to have access to BrokerTemplateInstance // objects; SAR on the TemplateInstance instead. - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "update", Group: templateapi.GroupName, diff --git a/pkg/templateservicebroker/servicebroker/deprovision.go b/pkg/templateservicebroker/servicebroker/deprovision.go index a0e92cf6461d..f614e8954c51 100644 --- a/pkg/templateservicebroker/servicebroker/deprovision.go +++ b/pkg/templateservicebroker/servicebroker/deprovision.go @@ -35,7 +35,7 @@ func (b *Broker) Deprovision(u user.Info, instanceID string) *api.Response { // end users are not expected to have access to BrokerTemplateInstance // objects; SAR on the TemplateInstance instead. 
- if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "get", Group: templateapi.GroupName, @@ -45,7 +45,7 @@ func (b *Broker) Deprovision(u user.Info, instanceID string) *api.Response { return api.Forbidden(err) } - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "delete", Group: templateapi.GroupName, diff --git a/pkg/templateservicebroker/servicebroker/lastoperation.go b/pkg/templateservicebroker/servicebroker/lastoperation.go index 7d4f6ff8778d..25dee33bd7ca 100644 --- a/pkg/templateservicebroker/servicebroker/lastoperation.go +++ b/pkg/templateservicebroker/servicebroker/lastoperation.go @@ -46,7 +46,7 @@ func (b *Broker) lastOperationProvisioning(u user.Info, instanceID string) *api. namespace := brokerTemplateInstance.Spec.TemplateInstance.Namespace - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "get", Group: templateapi.GroupName, @@ -91,7 +91,7 @@ func (b *Broker) lastOperationDeprovisioning(u user.Info, instanceID string) *ap namespace := brokerTemplateInstance.Spec.TemplateInstance.Namespace - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "get", Group: templateapi.GroupName, diff --git a/pkg/templateservicebroker/servicebroker/provision.go b/pkg/templateservicebroker/servicebroker/provision.go index d15a17a6998d..14ab6d457a9a 100644 --- a/pkg/templateservicebroker/servicebroker/provision.go +++ b/pkg/templateservicebroker/servicebroker/provision.go @@ -51,7 +51,7 @@ func (b *Broker) ensureSecret(u user.Info, namespace string, brokerTemplateInsta } } } - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "create", Group: kapiv1.GroupName, @@ -61,14 +61,14 @@ func (b *Broker) ensureSecret(u user.Info, namespace string, brokerTemplateInsta return nil, api.Forbidden(err) } - createdSec, err := b.kc.Core().Secrets(namespace).Create(secret) + createdSec, err := b.kc.CoreV1().Secrets(namespace).Create(secret) if err == nil { *didWork = true return createdSec, nil } if kerrors.IsAlreadyExists(err) { - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "get", Group: kapiv1.GroupName, @@ -78,7 +78,7 @@ func (b *Broker) ensureSecret(u user.Info, namespace string, brokerTemplateInsta return nil, api.Forbidden(err) } - existingSec, err := b.kc.Core().Secrets(namespace).Get(secret.Name, metav1.GetOptions{}) + existingSec, err := b.kc.CoreV1().Secrets(namespace).Get(secret.Name, metav1.GetOptions{}) 
if err == nil && reflect.DeepEqual(secret.Data, existingSec.Data) { return existingSec, nil } @@ -132,7 +132,7 @@ func (b *Broker) ensureTemplateInstance(u user.Info, namespace string, brokerTem }, } - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "create", Group: templateapiv1.GroupName, @@ -149,7 +149,7 @@ func (b *Broker) ensureTemplateInstance(u user.Info, namespace string, brokerTem } if kerrors.IsAlreadyExists(err) { - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "get", Group: templateapiv1.GroupName, @@ -181,7 +181,7 @@ func (b *Broker) ensureBrokerTemplateInstanceUIDs(u user.Info, namespace string, // end users are not expected to have access to BrokerTemplateInstance // objects; SAR on the TemplateInstance instead. - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "update", Group: templateapiv1.GroupName, @@ -243,7 +243,7 @@ func (b *Broker) ensureBrokerTemplateInstance(u user.Info, namespace, instanceID // end users are not expected to have access to BrokerTemplateInstance // objects; SAR on the TemplateInstance instead. - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "create", Group: templateapiv1.GroupName, @@ -262,7 +262,7 @@ func (b *Broker) ensureBrokerTemplateInstance(u user.Info, namespace, instanceID if kerrors.IsAlreadyExists(err) { // end users are not expected to have access to BrokerTemplateInstance // objects; SAR on the TemplateInstance instead. - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "get", Group: templateapiv1.GroupName, @@ -333,7 +333,7 @@ func (b *Broker) Provision(u user.Info, instanceID string, preq *api.ProvisionRe } // with groups in the user.Info vs. 
the username only form of auth, we can SAR for get access on template resources - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: template.Namespace, Verb: "get", Group: templateapiv1.GroupName, @@ -343,7 +343,7 @@ func (b *Broker) Provision(u user.Info, instanceID string, preq *api.ProvisionRe return api.Forbidden(err) } - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "create", Group: templateapiv1.GroupName, diff --git a/pkg/templateservicebroker/servicebroker/servicebroker.go b/pkg/templateservicebroker/servicebroker/servicebroker.go index 06582b2f7750..ef130fc0a3af 100644 --- a/pkg/templateservicebroker/servicebroker/servicebroker.go +++ b/pkg/templateservicebroker/servicebroker/servicebroker.go @@ -85,7 +85,7 @@ func NewBroker(saKubeClientConfig *restclient.Config, informer templateinformer. } b := &Broker{ kc: kubeClient, - templateclient: templateClient.Template(), + templateclient: templateClient.TemplateV1(), lister: informer.Lister(), hasSynced: informer.Informer().HasSynced, templateNamespaces: templateNamespaces, diff --git a/pkg/templateservicebroker/servicebroker/unbind.go b/pkg/templateservicebroker/servicebroker/unbind.go index 7892ddcc7f38..44c6f9d80665 100644 --- a/pkg/templateservicebroker/servicebroker/unbind.go +++ b/pkg/templateservicebroker/servicebroker/unbind.go @@ -34,7 +34,7 @@ func (b *Broker) Unbind(u user.Info, instanceID, bindingID string) *api.Response namespace := brokerTemplateInstance.Spec.TemplateInstance.Namespace - if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "get", Group: templateapi.GroupName, @@ -64,7 +64,7 @@ func (b *Broker) Unbind(u user.Info, instanceID, bindingID string) *api.Response // Note that this specific templateinstance object might not actually exist // anymore, but the SAR check is still valid to confirm the user can update // templateinstances in this namespace. 
- if err := util.Authorize(b.kc.Authorization().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ + if err := util.Authorize(b.kc.AuthorizationV1().SubjectAccessReviews(), u, &authorizationv1.ResourceAttributes{ Namespace: namespace, Verb: "delete", Group: templateapi.GroupName, diff --git a/pkg/unidling/controller/unidling_controller_test.go b/pkg/unidling/controller/unidling_controller_test.go index 59c75f6ffe25..35c09331e2d8 100644 --- a/pkg/unidling/controller/unidling_controller_test.go +++ b/pkg/unidling/controller/unidling_controller_test.go @@ -187,9 +187,9 @@ func TestControllerHandlesStaleEvents(t *testing.T) { nowTime := time.Now().Truncate(time.Second) fakeClient, fakeDeployClient, res := prepFakeClient(t, nowTime) controller := &UnidlingController{ - endpointsNamespacer: fakeClient.Core(), - rcNamespacer: fakeClient.Core(), - dcNamespacer: fakeDeployClient.Apps(), + endpointsNamespacer: fakeClient.CoreV1(), + rcNamespacer: fakeClient.CoreV1(), + dcNamespacer: fakeDeployClient.AppsV1(), } retry, err := controller.handleRequest(types.NamespacedName{ @@ -243,9 +243,9 @@ func TestControllerIgnoresAlreadyScaledObjects(t *testing.T) { fakeClient, fakeDeployClient, res := prepFakeClient(t, idledTime, baseScales...) controller := &UnidlingController{ - endpointsNamespacer: fakeClient.Core(), - rcNamespacer: fakeClient.Core(), - dcNamespacer: fakeDeployClient.Apps(), + endpointsNamespacer: fakeClient.CoreV1(), + rcNamespacer: fakeClient.CoreV1(), + dcNamespacer: fakeDeployClient.AppsV1(), } retry, err := controller.handleRequest(types.NamespacedName{ @@ -355,9 +355,9 @@ func TestControllerUnidlesProperly(t *testing.T) { fakeClient, fakeDeployClient, res := prepFakeClient(t, nowTime.Add(-10*time.Second), baseScales...) controller := &UnidlingController{ - endpointsNamespacer: fakeClient.Core(), - rcNamespacer: fakeClient.Core(), - dcNamespacer: fakeDeployClient.Apps(), + endpointsNamespacer: fakeClient.CoreV1(), + rcNamespacer: fakeClient.CoreV1(), + dcNamespacer: fakeDeployClient.AppsV1(), } retry, err := controller.handleRequest(types.NamespacedName{ @@ -695,9 +695,9 @@ func TestControllerPerformsCorrectlyOnFailures(t *testing.T) { for _, test := range tests { fakeClient, fakeDeployClient := prepareFakeClientForFailureTest(test) controller := &UnidlingController{ - endpointsNamespacer: fakeClient.Core(), - rcNamespacer: fakeClient.Core(), - dcNamespacer: fakeDeployClient.Apps(), + endpointsNamespacer: fakeClient.CoreV1(), + rcNamespacer: fakeClient.CoreV1(), + dcNamespacer: fakeDeployClient.AppsV1(), } var retry bool diff --git a/pkg/user/apiserver/apiserver.go b/pkg/user/apiserver/apiserver.go index 835cb643e1f9..de0757df2f43 100644 --- a/pkg/user/apiserver/apiserver.go +++ b/pkg/user/apiserver/apiserver.go @@ -102,7 +102,7 @@ func (c *completedConfig) newV1RESTStorage() (map[string]rest.Storage, error) { if err != nil { return nil, err } - userIdentityMappingStorage := useridentitymapping.NewREST(userClient.User().Users(), userClient.User().Identities()) + userIdentityMappingStorage := useridentitymapping.NewREST(userClient.UserV1().Users(), userClient.UserV1().Identities()) groupStorage, err := groupetcd.NewREST(c.GenericConfig.RESTOptionsGetter) if err != nil { return nil, err diff --git a/test/extended/builds/build_pruning.go b/test/extended/builds/build_pruning.go index 66a9663173d8..4c41b1e8f0bd 100644 --- a/test/extended/builds/build_pruning.go +++ b/test/extended/builds/build_pruning.go @@ -69,7 +69,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds 
based on settings in br.AssertSuccess() } - buildConfig, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) + buildConfig, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) } @@ -78,7 +78,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in g.By("waiting up to one minute for pruning to complete") err = wait.PollImmediate(pollingInterval, timeout, func() (bool, error) { - builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{}) + builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) return false, err @@ -114,7 +114,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in br.AssertFailure() } - buildConfig, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) + buildConfig, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) } @@ -123,7 +123,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in g.By("waiting up to one minute for pruning to complete") err = wait.PollImmediate(pollingInterval, timeout, func() (bool, error) { - builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{}) + builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) return false, err @@ -159,7 +159,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in err = oc.Run("cancel-build").Args(fmt.Sprintf("myphp-%d", i)).Execute() } - buildConfig, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) + buildConfig, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) } @@ -168,7 +168,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in g.By("waiting up to one minute for pruning to complete") err = wait.PollImmediate(pollingInterval, timeout, func() (bool, error) { - builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{}) + builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) return false, err @@ -204,7 +204,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in br.AssertFailure() } - buildConfig, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) + buildConfig, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) } @@ -213,7 +213,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in g.By("waiting up to one minute for pruning to complete") err = wait.PollImmediate(pollingInterval, timeout, func() (bool, error) { - builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{}) + builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) return false, err @@ -255,7 +255,7 @@ var _ = 
g.Describe("[Feature:Builds][pruning] prune builds based on settings in g.By("patching the build config to leave 1 build") err = oc.Run("patch").Args("bc/myphp", "-p", `{"spec":{"failedBuildsHistoryLimit": 1}}`).Execute() - buildConfig, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) + buildConfig, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) } @@ -264,7 +264,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in g.By("waiting up to one minute for pruning to complete") err = wait.PollImmediate(pollingInterval, timeout, func() (bool, error) { - builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{}) + builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) return false, err @@ -294,7 +294,7 @@ var _ = g.Describe("[Feature:Builds][pruning] prune builds based on settings in err := oc.Run("create").Args("-f", groupBuildConfig).Execute() o.Expect(err).NotTo(o.HaveOccurred()) - buildConfig, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) + buildConfig, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("myphp", metav1.GetOptions{}) if err != nil { fmt.Fprintf(g.GinkgoWriter, "%v", err) } diff --git a/test/extended/builds/cluster_config.go b/test/extended/builds/cluster_config.go index 7155727203aa..e957cd225105 100644 --- a/test/extended/builds/cluster_config.go +++ b/test/extended/builds/cluster_config.go @@ -29,10 +29,10 @@ var _ = g.Describe("[Feature:Builds][Serial][Slow][Disruptive] alter builds via g.JustBeforeEach(func() { g.By("waiting for default service account") - err := exutil.WaitForServiceAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()), "default") + err := exutil.WaitForServiceAccount(oc.KubeClient().CoreV1().ServiceAccounts(oc.Namespace()), "default") o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for builder service account") - err = exutil.WaitForServiceAccount(oc.KubeClient().Core().ServiceAccounts(oc.Namespace()), "builder") + err = exutil.WaitForServiceAccount(oc.KubeClient().CoreV1().ServiceAccounts(oc.Namespace()), "builder") o.Expect(err).NotTo(o.HaveOccurred()) oc.Run("create").Args("-f", buildFixture).Execute() }) diff --git a/test/extended/builds/completiondeadlineseconds.go b/test/extended/builds/completiondeadlineseconds.go index dbdc5679db19..33ff552c5d92 100644 --- a/test/extended/builds/completiondeadlineseconds.go +++ b/test/extended/builds/completiondeadlineseconds.go @@ -49,7 +49,7 @@ var _ = g.Describe("[Feature:Builds][Slow] builds should have deadlines", func() o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildv1.BuildPhaseFailed)) // the build should have failed g.By("verifying the build pod status") - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(buildutil.GetBuildPodName(br.Build), metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(buildutil.GetBuildPodName(br.Build), metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(corev1.PodFailed)) o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded")) @@ -73,7 +73,7 @@ var _ = g.Describe("[Feature:Builds][Slow] builds should have deadlines", func() 
o.Expect(br.Build.Status.Phase).Should(o.BeEquivalentTo(buildv1.BuildPhaseFailed)) // the build should have failed g.By("verifying the build pod status") - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(buildutil.GetBuildPodName(br.Build), metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(buildutil.GetBuildPodName(br.Build), metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(pod.Status.Phase).Should(o.BeEquivalentTo(corev1.PodFailed)) o.Expect(pod.Status.Reason).Should(o.ContainSubstring("DeadlineExceeded")) diff --git a/test/extended/builds/contextdir.go b/test/extended/builds/contextdir.go index 54455c635fd3..9ce676c77f77 100644 --- a/test/extended/builds/contextdir.go +++ b/test/extended/builds/contextdir.go @@ -56,7 +56,7 @@ var _ = g.Describe("[Feature:Builds][Slow] builds with a context directory", fun o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for build to finish") - err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), s2iBuildName, exutil.CheckBuildSuccess, exutil.CheckBuildFailed, nil) + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), s2iBuildName, exutil.CheckBuildSuccess, exutil.CheckBuildFailed, nil) if err != nil { exutil.DumpBuildLogs("s2icontext", oc) } @@ -73,7 +73,7 @@ var _ = g.Describe("[Feature:Builds][Slow] builds with a context directory", fun o.Expect(err).NotTo(o.HaveOccurred()) assertPageContent := func(content string) { - _, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute) + _, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) result, err := imageeco.CheckPageContains(oc, "frontend", "", content) @@ -85,7 +85,7 @@ var _ = g.Describe("[Feature:Builds][Slow] builds with a context directory", fun assertPageContent("Hello world!") g.By("checking the pod count") - pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: dcLabel.String()}) + pods, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: dcLabel.String()}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods.Items)).To(o.Equal(1)) @@ -117,7 +117,7 @@ var _ = g.Describe("[Feature:Builds][Slow] builds with a context directory", fun // build will fail if we don't use the right context dir because there won't be a dockerfile present. 
g.By("waiting for build to finish") - err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), dockerBuildName, exutil.CheckBuildSuccess, exutil.CheckBuildFailed, nil) + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), dockerBuildName, exutil.CheckBuildSuccess, exutil.CheckBuildFailed, nil) if err != nil { exutil.DumpBuildLogs("dockercontext", oc) } diff --git a/test/extended/builds/controller_compat.go b/test/extended/builds/controller_compat.go index 19b15556b7fc..16c32e72e120 100644 --- a/test/extended/builds/controller_compat.go +++ b/test/extended/builds/controller_compat.go @@ -28,32 +28,32 @@ var _ = g.Describe("[bldcompat][Slow][Compatibility] build controller", func() { g.Describe("RunBuildControllerTest", func() { g.It("should succeed", func() { - build.RunBuildControllerTest(g.GinkgoT(), oc.BuildClient().Build(), oc.AdminKubeClient(), oc.Namespace()) + build.RunBuildControllerTest(g.GinkgoT(), oc.BuildClient().BuildV1(), oc.AdminKubeClient(), oc.Namespace()) }) }) g.Describe("RunBuildControllerPodSyncTest", func() { g.It("should succeed", func() { - build.RunBuildControllerPodSyncTest(g.GinkgoT(), oc.BuildClient().Build(), oc.AdminKubeClient(), oc.Namespace()) + build.RunBuildControllerPodSyncTest(g.GinkgoT(), oc.BuildClient().BuildV1(), oc.AdminKubeClient(), oc.Namespace()) }) }) g.Describe("RunImageChangeTriggerTest [SkipPrevControllers]", func() { g.It("should succeed", func() { - build.RunImageChangeTriggerTest(g.GinkgoT(), oc.AdminBuildClient().Build(), oc.AdminImageClient().Image(), oc.Namespace()) + build.RunImageChangeTriggerTest(g.GinkgoT(), oc.AdminBuildClient().BuildV1(), oc.AdminImageClient().ImageV1(), oc.Namespace()) }) }) g.Describe("RunBuildDeleteTest", func() { g.It("should succeed", func() { - build.RunBuildDeleteTest(g.GinkgoT(), oc.AdminBuildClient().Build(), oc.AdminKubeClient(), oc.Namespace()) + build.RunBuildDeleteTest(g.GinkgoT(), oc.AdminBuildClient().BuildV1(), oc.AdminKubeClient(), oc.Namespace()) }) }) g.Describe("RunBuildRunningPodDeleteTest", func() { g.It("should succeed", func() { - build.RunBuildRunningPodDeleteTest(g.GinkgoT(), oc.AdminBuildClient().Build(), oc.AdminKubeClient(), oc.Namespace()) + build.RunBuildRunningPodDeleteTest(g.GinkgoT(), oc.AdminBuildClient().BuildV1(), oc.AdminKubeClient(), oc.Namespace()) }) }) g.Describe("RunBuildConfigChangeControllerTest", func() { g.It("should succeed", func() { - build.RunBuildConfigChangeControllerTest(g.GinkgoT(), oc.AdminBuildClient().Build(), oc.Namespace()) + build.RunBuildConfigChangeControllerTest(g.GinkgoT(), oc.AdminBuildClient().BuildV1(), oc.Namespace()) }) }) }) diff --git a/test/extended/builds/dockerfile.go b/test/extended/builds/dockerfile.go index f8271a063801..083f125b5257 100644 --- a/test/extended/builds/dockerfile.go +++ b/test/extended/builds/dockerfile.go @@ -48,14 +48,14 @@ USER 1001 o.Expect(err).NotTo(o.HaveOccurred()) g.By("checking the buildconfig content") - bc, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("busybox", metav1.GetOptions{}) + bc, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("busybox", metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(bc.Spec.Source.Git).To(o.BeNil()) o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile)) buildName := "busybox-1" g.By("expecting the Dockerfile build is in Complete phase") - err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), buildName, nil, nil, nil) + err = 
exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), buildName, nil, nil, nil) //debug for failures if err != nil { exutil.DumpBuildLogs("busybox", oc) @@ -74,7 +74,7 @@ USER 1001 o.Expect(err).NotTo(o.HaveOccurred()) g.By("checking the buildconfig content") - bc, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("centos", metav1.GetOptions{}) + bc, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("centos", metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(bc.Spec.Source.Git).To(o.BeNil()) o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile2)) @@ -83,7 +83,7 @@ USER 1001 buildName := "centos-1" g.By("expecting the Dockerfile build is in Complete phase") - err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), buildName, nil, nil, nil) + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), buildName, nil, nil, nil) //debug for failures if err != nil { exutil.DumpBuildLogs("centos", oc) @@ -106,13 +106,13 @@ USER 1001 o.Expect(err).NotTo(o.HaveOccurred()) g.By("checking the buildconfig content") - bc, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("scratch", metav1.GetOptions{}) + bc, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("scratch", metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile3)) buildName := "scratch-1" g.By("expecting the Dockerfile build is in Complete phase") - err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), buildName, nil, nil, nil) + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), buildName, nil, nil, nil) //debug for failures if err != nil { exutil.DumpBuildLogs("scratch", oc) diff --git a/test/extended/builds/failure_status.go b/test/extended/builds/failure_status.go index cdcafd7f7226..9647f4f9b48a 100644 --- a/test/extended/builds/failure_status.go +++ b/test/extended/builds/failure_status.go @@ -61,18 +61,18 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { br.AssertFailure() br.DumpLogs() - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonGenericBuildFailed)) o.Expect(build.Status.Message).To(o.Equal(buildutil.StatusMessageGenericBuildFailed)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) // wait for the build to be updated w/ completiontimestamp which should also mean the logsnippet // is set if one is going to be set. 
err = wait.Poll(time.Second, 30*time.Second, func() (bool, error) { // note this is the same build variable used in the test scope - build, err = oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) if err != nil { return true, err } @@ -96,12 +96,12 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { br.AssertFailure() br.DumpLogs() - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonFetchSourceFailed)) o.Expect(build.Status.Message).To(o.Equal(StatusMessageFetchSourceFailed)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) }) }) @@ -115,12 +115,12 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { br.AssertFailure() br.DumpLogs() - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonFetchSourceFailed)) o.Expect(build.Status.Message).To(o.Equal(StatusMessageFetchSourceFailed)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) }) }) @@ -136,7 +136,7 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { var build *buildv1.Build wait.PollImmediate(200*time.Millisecond, 30*time.Second, func() (bool, error) { - build, err = oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) if build.Status.Reason != buildv1.StatusReasonOutOfMemoryKilled { return false, nil } @@ -146,7 +146,7 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonOutOfMemoryKilled)) o.Expect(build.Status.Message).To(o.Equal(buildutil.StatusMessageOutOfMemoryKilled)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) }) }) @@ -160,12 +160,12 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { br.AssertFailure() br.DumpLogs() - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonInvalidContextDirectory)) 
o.Expect(build.Status.Message).To(o.Equal(StatusMessageInvalidContextDirectory)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) }) }) @@ -179,12 +179,12 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { br.AssertFailure() br.DumpLogs() - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonPullBuilderImageFailed)) o.Expect(build.Status.Message).To(o.Equal(StatusMessagePullBuilderImageFailed)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) }) }) @@ -198,12 +198,12 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { br.AssertFailure() br.DumpLogs() - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonPushImageToRegistryFailed)) o.Expect(build.Status.Message).To(o.Equal(StatusMessagePushImageToRegistryFailed)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) }) }) @@ -217,12 +217,12 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { br.AssertFailure() br.DumpLogs() - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonGenericBuildFailed)) o.Expect(build.Status.Message).To(o.Equal(StatusMessageGenericBuildFailed)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) }) }) @@ -236,12 +236,12 @@ var _ = g.Describe("[Feature:Builds][Slow] update failure status", func() { br.AssertFailure() br.DumpLogs() - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Status.Reason).To(o.Equal(buildv1.StatusReasonGenericBuildFailed)) o.Expect(build.Status.Message).To(o.Equal(StatusMessageGenericBuildFailed)) - exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) + exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, 
buildutil.BuildFailedEventReason, buildutil.BuildFailedEventMessage) }) }) }) diff --git a/test/extended/builds/gitauth.go b/test/extended/builds/gitauth.go index 53c2edb8726d..c65f3a4ec815 100644 --- a/test/extended/builds/gitauth.go +++ b/test/extended/builds/gitauth.go @@ -85,11 +85,11 @@ var _ = g.Describe("[Feature:Builds][Slow] can use private repositories as build testGitAuth("gitserver", gitServerFixture, sourceURLTemplate, func() string { g.By(fmt.Sprintf("creating a new secret for the gitserver by calling oc secrets new-basicauth %s --username=%s --password=%s", sourceSecretName, gitUserName, gitPassword)) - sa, err := oc.KubeClient().Core().ServiceAccounts(oc.Namespace()).Get("builder", metav1.GetOptions{}) + sa, err := oc.KubeClient().CoreV1().ServiceAccounts(oc.Namespace()).Get("builder", metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) for _, s := range sa.Secrets { if strings.Contains(s.Name, "token") { - secret, err := oc.KubeClient().Core().Secrets(oc.Namespace()).Get(s.Name, metav1.GetOptions{}) + secret, err := oc.KubeClient().CoreV1().Secrets(oc.Namespace()).Get(s.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) err = oc.Run("create").Args( "secret", diff --git a/test/extended/builds/hooks.go b/test/extended/builds/hooks.go index 687127fc2cd5..0bf4baaadbf4 100644 --- a/test/extended/builds/hooks.go +++ b/test/extended/builds/hooks.go @@ -95,12 +95,12 @@ var _ = g.Describe("[Feature:Builds][Slow] testing build configuration hooks", f g.By("expecting the pod to deploy successfully") deploymentConfigLabel := exutil.ParseLabelsOrDie("app=mys2itest") - pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), deploymentConfigLabel, exutil.CheckPodIsRunning, 1, 2*time.Minute) + pods, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), deploymentConfigLabel, exutil.CheckPodIsRunning, 1, 2*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) g.By("getting the pod information") - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the postCommit hook did not modify the final image") @@ -171,12 +171,12 @@ var _ = g.Describe("[Feature:Builds][Slow] testing build configuration hooks", f g.By("expecting the pod to deploy successfully") deploymentConfigLabel := exutil.ParseLabelsOrDie("app=mydockertest") - pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), deploymentConfigLabel, exutil.CheckPodIsRunning, 1, 2*time.Minute) + pods, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), deploymentConfigLabel, exutil.CheckPodIsRunning, 1, 2*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) g.By("getting the pod information") - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) g.By("verifying the postCommit hook did not modify the final image") diff --git a/test/extended/builds/image_source.go b/test/extended/builds/image_source.go index 4a3a7da7400b..5e1576a79937 100644 --- a/test/extended/builds/image_source.go +++ b/test/extended/builds/image_source.go @@ -64,10 +64,10 @@ var _ = g.Describe("[Feature:Builds][Slow] build can have Docker image source", 
br.AssertSuccess() g.By("expecting the pod to deploy successfully") - pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), imageSourceLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute) + pods, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), imageSourceLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the pod to contain the file from the input image") @@ -91,10 +91,10 @@ var _ = g.Describe("[Feature:Builds][Slow] build can have Docker image source", br.AssertSuccess() g.By("expect the pod to deploy successfully") - pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), imageDockerLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute) + pods, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), imageDockerLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the pod to contain the file from the input image") @@ -118,10 +118,10 @@ var _ = g.Describe("[Feature:Builds][Slow] build can have Docker image source", o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build pod to exist") - pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), sourceBuildLabel, exutil.CheckPodNoOp, 1, 4*time.Minute) + pods, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), sourceBuildLabel, exutil.CheckPodNoOp, 1, 4*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) foundEnv := false @@ -163,10 +163,10 @@ var _ = g.Describe("[Feature:Builds][Slow] build can have Docker image source", o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build pod to exist") - pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dockerBuildLabel, exutil.CheckPodNoOp, 1, 4*time.Minute) + pods, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), dockerBuildLabel, exutil.CheckPodNoOp, 1, 4*time.Minute) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) foundEnv := false @@ -208,10 +208,10 @@ var _ = g.Describe("[Feature:Builds][Slow] build can have Docker image source", o.Expect(err).NotTo(o.HaveOccurred()) g.By("expecting the build pod to exist") - pods, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), customBuildLabel, exutil.CheckPodNoOp, 1, 4*time.Minute) + pods, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), customBuildLabel, exutil.CheckPodNoOp, 1, 4*time.Minute) 
o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(len(pods)).To(o.Equal(1)) - pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) + pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(pods[0], metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) foundBuildEnv := false diff --git a/test/extended/builds/imagechangetriggers.go b/test/extended/builds/imagechangetriggers.go index 72d57f44b9ae..a62591550acf 100644 --- a/test/extended/builds/imagechangetriggers.go +++ b/test/extended/builds/imagechangetriggers.go @@ -40,7 +40,7 @@ var _ = g.Describe("[Feature:Builds][Conformance] imagechangetriggers", func() { err = wait.Poll(time.Second, 30*time.Second, func() (done bool, err error) { for _, build := range []string{"bc-docker-1", "bc-jenkins-1", "bc-source-1", "bc-custom-1"} { - _, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(build, metav1.GetOptions{}) + _, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(build, metav1.GetOptions{}) if err == nil { continue } diff --git a/test/extended/builds/multistage.go b/test/extended/builds/multistage.go index 1670754106a3..a5b846f6a32a 100644 --- a/test/extended/builds/multistage.go +++ b/test/extended/builds/multistage.go @@ -44,7 +44,7 @@ COPY --from=test /usr/bin/curl /test/ o.Expect(is.Status.DockerImageRepository).NotTo(o.BeEmpty(), "registry not yet configured?") registry := strings.Split(is.Status.DockerImageRepository, "/")[0] - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Create(&buildv1.Build{ + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Create(&buildv1.Build{ ObjectMeta: metav1.ObjectMeta{ Name: "multi-stage", }, @@ -70,7 +70,7 @@ COPY --from=test /usr/bin/curl /test/ }) o.Expect(err).NotTo(o.HaveOccurred()) result := exutil.NewBuildResult(oc, build) - err = exutil.WaitForBuildResult(oc.AdminBuildClient().Build().Builds(oc.Namespace()), result) + err = exutil.WaitForBuildResult(oc.AdminBuildClient().BuildV1().Builds(oc.Namespace()), result) o.Expect(err).NotTo(o.HaveOccurred()) pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(build.Name+"-build", metav1.GetOptions{}) diff --git a/test/extended/builds/new_app.go b/test/extended/builds/new_app.go index ecbdc2346526..d156f0e62da3 100644 --- a/test/extended/builds/new_app.go +++ b/test/extended/builds/new_app.go @@ -49,7 +49,7 @@ var _ = g.Describe("[Feature:Builds][Conformance] oc new-app", func() { o.Expect(err).NotTo(o.HaveOccurred()) g.By("waiting for the build to complete") - err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), a58+"-1", nil, nil, nil) + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), a58+"-1", nil, nil, nil) if err != nil { exutil.DumpBuildLogs(a58, oc) } diff --git a/test/extended/builds/nosrc.go b/test/extended/builds/nosrc.go index 7163d262cae9..dd8b1a1cadcd 100644 --- a/test/extended/builds/nosrc.go +++ b/test/extended/builds/nosrc.go @@ -44,7 +44,7 @@ var _ = g.Describe("[Feature:Builds] build with empty source", func() { br.AssertSuccess() g.By(fmt.Sprintf("verifying the status of %q", br.BuildPath)) - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Spec.Source.Dockerfile).To(o.BeNil()) o.Expect(build.Spec.Source.Git).To(o.BeNil()) diff --git a/test/extended/builds/optimized.go 
b/test/extended/builds/optimized.go index 6d2887b84d3a..5b471af97d71 100644 --- a/test/extended/builds/optimized.go +++ b/test/extended/builds/optimized.go @@ -42,7 +42,7 @@ USER 1001 g.It("should succeed [Conformance]", func() { g.By("creating a build directly") - build, err := oc.AdminBuildClient().Build().Builds(oc.Namespace()).Create(&buildv1.Build{ + build, err := oc.AdminBuildClient().BuildV1().Builds(oc.Namespace()).Create(&buildv1.Build{ ObjectMeta: metav1.ObjectMeta{ Name: "optimized", }, @@ -62,7 +62,7 @@ USER 1001 o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(build.Spec.Strategy.DockerStrategy.ImageOptimizationPolicy).ToNot(o.BeNil()) result := exutil.NewBuildResult(oc, build) - err = exutil.WaitForBuildResult(oc.AdminBuildClient().Build().Builds(oc.Namespace()), result) + err = exutil.WaitForBuildResult(oc.AdminBuildClient().BuildV1().Builds(oc.Namespace()), result) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(result.BuildSuccess).To(o.BeTrue(), "Build did not succeed: %v", result) diff --git a/test/extended/builds/pipeline_jenkins_e2e.go b/test/extended/builds/pipeline_jenkins_e2e.go index ad0abc471ba5..321dddb69853 100644 --- a/test/extended/builds/pipeline_jenkins_e2e.go +++ b/test/extended/builds/pipeline_jenkins_e2e.go @@ -199,7 +199,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin setupJenkins(jenkinsPersistentTemplatePath) // additionally ensure that the build works in a memory constrained // environment - _, err := oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Create(&corev1.LimitRange{ + _, err := oc.AdminKubeClient().CoreV1().LimitRanges(oc.Namespace()).Create(&corev1.LimitRange{ ObjectMeta: metav1.ObjectMeta{ Name: "limitrange", }, @@ -215,7 +215,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin }, }) o.Expect(err).NotTo(o.HaveOccurred()) - defer oc.AdminKubeClient().Core().LimitRanges(oc.Namespace()).Delete("limitrange", &metav1.DeleteOptions{}) + defer oc.AdminKubeClient().CoreV1().LimitRanges(oc.Namespace()).Delete("limitrange", &metav1.DeleteOptions{}) g.By("delete jenkins job runs when jenkins re-establishes communications") g.By("should delete job runs when the associated build is deleted - jenkins unreachable") @@ -236,7 +236,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin br.AssertSuccess() // get the build information - build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(fmt.Sprintf("sample-pipeline-withenvs-%d", i), metav1.GetOptions{}) + build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(fmt.Sprintf("sample-pipeline-withenvs-%d", i), metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) jenkinsBuildURI, err := url.Parse(build.Annotations[buildutil.BuildJenkinsBuildURIAnnotation]) @@ -251,7 +251,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin for buildName, buildInfo := range buildNameToBuildInfoMap { _, status, err := j.GetResource(buildInfo.jenkinsBuildURI) o.Expect(err).NotTo(o.HaveOccurred()) - _, err = oc.BuildClient().Build().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{}) + _, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(status == http.StatusOK).To(o.BeTrue(), "Jenkins job run does not exist for %s but should.", buildName) } @@ -264,7 +264,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin for buildName, buildInfo := 
range buildNameToBuildInfoMap {
   if buildInfo.number%2 == 0 {
     fmt.Fprintf(g.GinkgoWriter, "Deleting build: %s", buildName)
-    err := oc.BuildClient().Build().Builds(oc.Namespace()).Delete(buildName, &metav1.DeleteOptions{})
+    err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Delete(buildName, &metav1.DeleteOptions{})
     o.Expect(err).NotTo(o.HaveOccurred())
   }
@@ -302,11 +302,11 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   o.Expect(err).NotTo(o.HaveOccurred())
   fmt.Fprintf(g.GinkgoWriter, "Checking %s, status: %v\n", buildName, status)
   if buildInfo.number%2 == 0 {
-    _, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
+    _, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
     o.Expect(err).To(o.HaveOccurred())
     o.Expect(status != http.StatusOK).To(o.BeTrue(), "Jenkins job run exists for %s but shouldn't.", buildName)
   } else {
-    _, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
+    _, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
     o.Expect(err).NotTo(o.HaveOccurred())
     o.Expect(status == http.StatusOK).To(o.BeTrue(), "Jenkins job run does not exist for %s but should.", buildName)
   }
@@ -370,7 +370,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   // start the build
   g.By("waiting for the build to complete")
   br := &exutil.BuildResult{Oc: oc, BuildName: "gradle-1"}
-  err = exutil.WaitForBuildResult(oc.BuildClient().Build().Builds(oc.Namespace()), br)
+  err = exutil.WaitForBuildResult(oc.BuildClient().BuildV1().Builds(oc.Namespace()), br)
   if err != nil || !br.BuildSuccess {
     exutil.DumpBuilds(oc)
     exutil.DumpPodLogsStartingWith("maven", oc)
@@ -446,7 +446,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   err = oc.Run("new-app").Args(repo.RepoPath, "--strategy=pipeline", "--build-env=FOO1=BAR1").Execute()
   o.Expect(err).NotTo(o.HaveOccurred())
-  bc, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get(envVarsPipelineGitRepoBuildConfig, metav1.GetOptions{})
+  bc, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get(envVarsPipelineGitRepoBuildConfig, metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   envs := bc.Spec.Strategy.JenkinsPipelineStrategy.Env
   o.Expect(len(envs)).To(o.Equal(1))
@@ -480,7 +480,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   g.By("Waiting for the build uri")
   var jenkinsBuildURI string
   for {
-    build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.BuildName, metav1.GetOptions{})
+    build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.BuildName, metav1.GetOptions{})
     if err != nil {
       errs <- fmt.Errorf("error getting build: %s", err)
       return
@@ -530,7 +530,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   defer g.GinkgoRecover()
   for {
-    build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.BuildName, metav1.GetOptions{})
+    build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.BuildName, metav1.GetOptions{})
     switch {
     case err != nil:
       errs <- fmt.Errorf("error getting build: %s", err)
@@ -608,7 +608,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   br.AssertSuccess()
   // get the build information
-  build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(fmt.Sprintf("sample-pipeline-withenvs-%d", i), metav1.GetOptions{})
+  build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(fmt.Sprintf("sample-pipeline-withenvs-%d", i), metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   jenkinsBuildURI, err := url.Parse(build.Annotations[buildutil.BuildJenkinsBuildURIAnnotation])
@@ -622,7 +622,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   for buildName, buildInfo := range buildNameToBuildInfoMap {
     _, status, err := j.GetResource(buildInfo.jenkinsBuildURI)
     o.Expect(err).NotTo(o.HaveOccurred())
-    _, err = oc.BuildClient().Build().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
+    _, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
     o.Expect(err).NotTo(o.HaveOccurred())
     o.Expect(status == http.StatusOK).To(o.BeTrue(), "Jenkins job run does not exist for %s but should.", buildName)
   }
@@ -631,7 +631,7 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   for buildName, buildInfo := range buildNameToBuildInfoMap {
     if buildInfo.number%2 == 0 {
       fmt.Fprintf(g.GinkgoWriter, "Deleting build: %s", buildName)
-      err := oc.BuildClient().Build().Builds(oc.Namespace()).Delete(buildName, &metav1.DeleteOptions{})
+      err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Delete(buildName, &metav1.DeleteOptions{})
       o.Expect(err).NotTo(o.HaveOccurred())
     }
@@ -659,11 +659,11 @@ var _ = g.Describe("[Slow]jenkins repos e2e openshift using slow samples pipelin
   _, status, err := j.GetResource(buildInfo.jenkinsBuildURI)
   o.Expect(err).NotTo(o.HaveOccurred())
   if buildInfo.number%2 == 0 {
-    _, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
+    _, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
     o.Expect(err).To(o.HaveOccurred())
     o.Expect(status != http.StatusOK).To(o.BeTrue(), "Jenkins job run exists for %s but shouldn't.", buildName)
   } else {
-    _, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
+    _, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
     o.Expect(err).NotTo(o.HaveOccurred())
     o.Expect(status == http.StatusOK).To(o.BeTrue(), "Jenkins job run does not exist for %s but should.", buildName)
   }
diff --git a/test/extended/builds/pipeline_origin_bld.go b/test/extended/builds/pipeline_origin_bld.go
index e867761af278..f93305cdb3de 100644
--- a/test/extended/builds/pipeline_origin_bld.go
+++ b/test/extended/builds/pipeline_origin_bld.go
@@ -619,7 +619,7 @@ var _ = g.Describe("[Feature:Builds][Slow] openshift pipeline build", func() {
     br.AssertSuccess()
   }
-  buildConfig, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("successful-pipeline", metav1.GetOptions{})
+  buildConfig, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("successful-pipeline", metav1.GetOptions{})
   if err != nil {
     fmt.Fprintf(g.GinkgoWriter, "%v", err)
   }
@@ -628,7 +628,7 @@ var _ = g.Describe("[Feature:Builds][Slow] openshift pipeline build", func() {
   g.By("waiting up to one minute for pruning to complete")
   err = wait.PollImmediate(pollingInterval, timeout, func() (bool, error) {
-    builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{LabelSelector: buildutil.BuildConfigSelector("successful-pipeline").String()})
+    builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{LabelSelector: buildutil.BuildConfigSelector("successful-pipeline").String()})
     if err != nil {
       fmt.Fprintf(g.GinkgoWriter, "%v", err)
       return false, err
@@ -660,14 +660,14 @@ var _ = g.Describe("[Feature:Builds][Slow] openshift pipeline build", func() {
     br.AssertFailure()
   }
-  buildConfig, err = oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get("failed-pipeline", metav1.GetOptions{})
+  buildConfig, err = oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get("failed-pipeline", metav1.GetOptions{})
   if err != nil {
     fmt.Fprintf(g.GinkgoWriter, "%v", err)
   }
   g.By("waiting up to one minute for pruning to complete")
   err = wait.PollImmediate(pollingInterval, timeout, func() (bool, error) {
-    builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{LabelSelector: buildutil.BuildConfigSelector("successful-pipeline").String()})
+    builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{LabelSelector: buildutil.BuildConfigSelector("successful-pipeline").String()})
     if err != nil {
       fmt.Fprintf(g.GinkgoWriter, "%v", err)
       return false, err
diff --git a/test/extended/builds/revision.go b/test/extended/builds/revision.go
index 8f1ef1b43595..2c5712018ebb 100644
--- a/test/extended/builds/revision.go
+++ b/test/extended/builds/revision.go
@@ -42,7 +42,7 @@ var _ = g.Describe("[Feature:Builds] build have source revision metadata", func(
   br.AssertSuccess()
   g.By(fmt.Sprintf("verifying the status of %q", br.BuildPath))
-  build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{})
+  build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(br.Build.Name, metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(build.Spec.Revision).NotTo(o.BeNil())
   o.Expect(build.Spec.Revision.Git).NotTo(o.BeNil())
diff --git a/test/extended/builds/run_policy.go b/test/extended/builds/run_policy.go
index 3a4759824812..a028d5496f7b 100644
--- a/test/extended/builds/run_policy.go
+++ b/test/extended/builds/run_policy.go
@@ -51,7 +51,7 @@ var _ = g.Describe("[Feature:Builds][Slow] using build configuration runPolicy",
   )
   bcName := "sample-parallel-build"
-  buildWatch, err := oc.BuildClient().Build().Builds(oc.Namespace()).Watch(metav1.ListOptions{
+  buildWatch, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Watch(metav1.ListOptions{
     LabelSelector: buildutil.BuildConfigSelector(bcName).String(),
   })
   defer buildWatch.Stop()
@@ -137,7 +137,7 @@ var _ = g.Describe("[Feature:Builds][Slow] using build configuration runPolicy",
     startedBuilds = append(startedBuilds, strings.TrimSpace(strings.Split(stdout, "/")[1]))
   }
-  buildWatch, err := oc.BuildClient().Build().Builds(oc.Namespace()).Watch(metav1.ListOptions{
+  buildWatch, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Watch(metav1.ListOptions{
     LabelSelector: buildutil.BuildConfigSelector(bcName).String(),
   })
   defer buildWatch.Stop()
@@ -165,7 +165,7 @@ var _ = g.Describe("[Feature:Builds][Slow] using build configuration runPolicy",
   }
   // Verify there are no other running or pending builds than this
   // build as serial build always runs alone.
-  c := buildclient.NewClientBuildLister(oc.BuildClient().Build())
+  c := buildclient.NewClientBuildLister(oc.BuildClient().BuildV1())
   builds, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b *buildv1.Build) bool {
     if b.Name == build.Name {
       return false
@@ -202,7 +202,7 @@ var _ = g.Describe("[Feature:Builds][Slow] using build configuration runPolicy",
     o.Expect(err).NotTo(o.HaveOccurred())
   }
-  buildWatch, err := oc.BuildClient().Build().Builds(oc.Namespace()).Watch(metav1.ListOptions{
+  buildWatch, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Watch(metav1.ListOptions{
     LabelSelector: buildutil.BuildConfigSelector(bcName).String(),
   })
   defer buildWatch.Stop()
@@ -245,7 +245,7 @@ var _ = g.Describe("[Feature:Builds][Slow] using build configuration runPolicy",
     o.Expect(err).NotTo(o.HaveOccurred())
   }
-  buildWatch, err := oc.BuildClient().Build().Builds(oc.Namespace()).Watch(metav1.ListOptions{
+  buildWatch, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Watch(metav1.ListOptions{
     LabelSelector: buildutil.BuildConfigSelector(bcName).String(),
   })
   defer buildWatch.Stop()
@@ -323,7 +323,7 @@ var _ = g.Describe("[Feature:Builds][Slow] using build configuration runPolicy",
     o.Expect(err).NotTo(o.HaveOccurred())
   }
-  buildWatch, err := oc.BuildClient().Build().Builds(oc.Namespace()).Watch(metav1.ListOptions{
+  buildWatch, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Watch(metav1.ListOptions{
     LabelSelector: buildutil.BuildConfigSelector(bcName).String(),
   })
   defer buildWatch.Stop()
@@ -376,7 +376,7 @@ var _ = g.Describe("[Feature:Builds][Slow] using build configuration runPolicy",
   bcName := "sample-serial-latest-only-build"
   buildVerified := map[string]bool{}
-  buildWatch, err := oc.BuildClient().Build().Builds(oc.Namespace()).Watch(metav1.ListOptions{
+  buildWatch, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Watch(metav1.ListOptions{
     LabelSelector: buildutil.BuildConfigSelector(bcName).String(),
   })
   defer buildWatch.Stop()
@@ -430,7 +430,7 @@ var _ = g.Describe("[Feature:Builds][Slow] using build configuration runPolicy",
   }
   // Verify there are no other running or pending builds than this
   // build as serial build always runs alone.
-  c := buildclient.NewClientBuildLister(oc.BuildClient().Build())
+  c := buildclient.NewClientBuildLister(oc.BuildClient().BuildV1())
   builds, err := buildutil.BuildConfigBuilds(c, oc.Namespace(), bcName, func(b *buildv1.Build) bool {
     e2e.Logf("[%s] build %s is %s", build.Name, b.Name, b.Status.Phase)
     if b.Name == build.Name {
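The run_policy.go hunks above are representative of the whole patch: only the group accessor on the generated clientset changes, from the deprecated Build() to the versioned BuildV1(), and the call sites are otherwise untouched. A minimal sketch of the call-site difference, assuming a *rest.Config is already in hand (package and function names here are illustrative, not part of the patch):

    package sketch // illustrative

    import (
        buildv1client "github.com/openshift/client-go/build/clientset/versioned"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/rest"
    )

    // listBuilds shows the only change this patch makes at call sites:
    // the accessor on the generated clientset is now explicitly versioned.
    func listBuilds(config *rest.Config, ns string) error {
        client, err := buildv1client.NewForConfig(config)
        if err != nil {
            return err
        }
        // before: client.Build().Builds(ns).List(metav1.ListOptions{})  (deprecated)
        // after:
        _, err = client.BuildV1().Builds(ns).List(metav1.ListOptions{})
        return err
    }

Pinning the accessor to BuildV1 makes the wire version explicit at every call site, so a future API version bump shows up as a reviewable code change rather than a silent default.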
diff --git a/test/extended/builds/s2i_quota.go b/test/extended/builds/s2i_quota.go
index a88a1bf531fe..1a20dd440312 100644
--- a/test/extended/builds/s2i_quota.go
+++ b/test/extended/builds/s2i_quota.go
@@ -58,12 +58,12 @@ var _ = g.Describe("[Feature:Builds][Conformance] s2i build with a quota", func(
   // TODO: re-enable this check when https://github.com/containers/buildah/issues/1213 is resolved.
   //o.Expect(buildLog).To(o.ContainSubstring("MEMORYSWAP=209715200"))
-  events, err := oc.KubeClient().Core().Events(oc.Namespace()).Search(legacyscheme.Scheme, br.Build)
+  events, err := oc.KubeClient().CoreV1().Events(oc.Namespace()).Search(legacyscheme.Scheme, br.Build)
   o.Expect(err).NotTo(o.HaveOccurred(), "Should be able to get events from the build")
   o.Expect(events).NotTo(o.BeNil(), "Build event list should not be nil")
-  exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildStartedEventReason, buildutil.BuildStartedEventMessage)
-  exutil.CheckForBuildEvent(oc.KubeClient().Core(), br.Build, buildutil.BuildCompletedEventReason, buildutil.BuildCompletedEventMessage)
+  exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildStartedEventReason, buildutil.BuildStartedEventMessage)
+  exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), br.Build, buildutil.BuildCompletedEventReason, buildutil.BuildCompletedEventMessage)
   })
 })
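The s2i_quota.go hunk also shows the knock-on effect on helpers: exutil.CheckForBuildEvent now receives the versioned CoreV1 interface, which any kubernetes.Interface can supply via its CoreV1() method. A sketch of a helper written against that interface type (helper name and logic are illustrative):

    package sketch // illustrative

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    // countEventsWithReason accepts the typed interface, so callers can pass
    // oc.KubeClient().CoreV1() directly, exactly as the test above does.
    func countEventsWithReason(c corev1client.CoreV1Interface, ns, reason string) (int, error) {
        events, err := c.Events(ns).List(metav1.ListOptions{})
        if err != nil {
            return 0, err
        }
        n := 0
        for _, ev := range events.Items {
            if ev.Reason == reason {
                n++
            }
        }
        return n, nil
    }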
diff --git a/test/extended/builds/s2i_root.go b/test/extended/builds/s2i_root.go
index f5ec456e1b87..e02dde2db487 100644
--- a/test/extended/builds/s2i_root.go
+++ b/test/extended/builds/s2i_root.go
@@ -38,17 +38,17 @@ var _ = g.Describe("[Feature:Builds][Conformance] s2i build with a root user ima
   err := oc.Run("new-app").Args("docker.io/openshift/test-build-roots2i~https://github.com/sclorg/nodejs-ex", "--name", "nodejsfail").Execute()
   o.Expect(err).NotTo(o.HaveOccurred())
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), "nodejsfail-1", nil, nil, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), "nodejsfail-1", nil, nil, nil)
   o.Expect(err).To(o.HaveOccurred())
-  build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get("nodejsfail-1", metav1.GetOptions{})
+  build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get("nodejsfail-1", metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(build.Status.Phase).To(o.Equal(buildv1.BuildPhaseFailed))
   o.Expect(build.Status.Reason).To(o.BeEquivalentTo(s2istatus.ReasonPullBuilderImageFailed))
   o.Expect(build.Status.Message).To(o.BeEquivalentTo(s2istatus.ReasonMessagePullBuilderImageFailed))
   podname := build.Annotations[buildutil.BuildPodNameAnnotation]
-  pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(podname, metav1.GetOptions{})
+  pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(podname, metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   containers := make([]corev1.Container, len(pod.Spec.Containers)+len(pod.Spec.InitContainers))
@@ -82,14 +82,14 @@ var _ = g.Describe("[Feature:Builds][Conformance] s2i build with a root user ima
   err = oc.Run("new-build").Args("docker.io/openshift/test-build-roots2i~https://github.com/sclorg/nodejs-ex", "--name", "nodejspass").Execute()
   o.Expect(err).NotTo(o.HaveOccurred())
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), "nodejspass-1", nil, nil, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), "nodejspass-1", nil, nil, nil)
   o.Expect(err).NotTo(o.HaveOccurred())
-  build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get("nodejspass-1", metav1.GetOptions{})
+  build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get("nodejspass-1", metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   podname := build.Annotations[buildutil.BuildPodNameAnnotation]
-  pod, err := oc.KubeClient().Core().Pods(oc.Namespace()).Get(podname, metav1.GetOptions{})
+  pod, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).Get(podname, metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   containers := make([]corev1.Container, len(pod.Spec.Containers)+len(pod.Spec.InitContainers))
diff --git a/test/extended/builds/service.go b/test/extended/builds/service.go
index 4591ee1594a6..66277d564e9c 100644
--- a/test/extended/builds/service.go
+++ b/test/extended/builds/service.go
@@ -51,7 +51,7 @@ RUN curl -vvv hello-openshift:8080
   o.Expect(err).NotTo(o.HaveOccurred())
   g.By("expecting the build is in Complete phase")
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), "centos-1", nil, nil, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), "centos-1", nil, nil, nil)
   //debug for failures
   if err != nil {
     exutil.DumpBuildLogs("centos", oc)
diff --git a/test/extended/builds/start.go b/test/extended/builds/start.go
index a399c12596d6..7f7344a8ee3c 100644
--- a/test/extended/builds/start.go
+++ b/test/extended/builds/start.go
@@ -270,10 +270,10 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
     build = b
     return exutil.CheckBuildCancelled(b)
   }
-  err := exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), "sample-build-binary-invalidnodeselector-1", nil, nil, cancelFn)
+  err := exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), "sample-build-binary-invalidnodeselector-1", nil, nil, cancelFn)
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(build.Status.Phase).To(o.Equal(buildv1.BuildPhaseCancelled))
-  exutil.CheckForBuildEvent(oc.KubeClient().Core(), build, buildutil.BuildCancelledEventReason,
+  exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), build, buildutil.BuildCancelledEventReason,
     buildutil.BuildCancelledEventMessage)
   })
 })
@@ -306,7 +306,7 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
   })
   o.Expect(buildName).ToNot(o.BeEmpty())
-  build, err := oc.BuildClient().Build().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
+  build, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(buildName, metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(build).NotTo(o.BeNil(), "build object should exist")
@@ -314,7 +314,7 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
   err = oc.Run("cancel-build").Args(buildName).Execute()
   o.Expect(err).ToNot(o.HaveOccurred())
   wg.Wait()
-  exutil.CheckForBuildEvent(oc.KubeClient().Core(), build, buildutil.BuildCancelledEventReason,
+  exutil.CheckForBuildEvent(oc.KubeClient().CoreV1(), build, buildutil.BuildCancelledEventReason,
     buildutil.BuildCancelledEventMessage)
   })
@@ -358,7 +358,7 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
   o.Expect(err).NotTo(o.HaveOccurred())
   g.By("waiting for the build to complete")
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), "ruby-hello-world-1", nil, nil, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), "ruby-hello-world-1", nil, nil, nil)
   if err != nil {
     exutil.DumpBuildLogs("ruby-hello-world", oc)
   }
@@ -375,7 +375,7 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
   g.By("clearing existing builds")
   _, err := oc.Run("delete").Args("builds", "--all").Output()
   o.Expect(err).NotTo(o.HaveOccurred())
-  builds, err := oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{})
+  builds, err := oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(builds.Items).To(o.BeEmpty())
@@ -400,14 +400,14 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
   curlOut, err := exec.Command("curl", curlArgs...).Output()
   o.Expect(err).NotTo(o.HaveOccurred())
   e2e.Logf("curl cmd: %v, output: %s", curlArgs, string(curlOut))
-  builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{})
+  builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(builds.Items).NotTo(o.BeEmpty())
   g.By("clearing existing builds")
   _, err = oc.Run("delete").Args("builds", "--all").Output()
   o.Expect(err).NotTo(o.HaveOccurred())
-  builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{})
+  builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(builds.Items).To(o.BeEmpty())
@@ -421,14 +421,14 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
   curlOut, err = exec.Command("curl", curlArgs...).Output()
   o.Expect(err).NotTo(o.HaveOccurred())
   e2e.Logf("curl cmd: %s, output: %s", curlArgs, string(curlOut))
-  builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{})
+  builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(builds.Items).NotTo(o.BeEmpty())
   g.By("clearing existing builds")
   _, err = oc.Run("delete").Args("builds", "--all").Output()
   o.Expect(err).NotTo(o.HaveOccurred())
-  builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{})
+  builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(builds.Items).To(o.BeEmpty())
@@ -442,7 +442,7 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
   curlOut, err = exec.Command("curl", curlArgs...).Output()
   o.Expect(err).NotTo(o.HaveOccurred())
   e2e.Logf("curl cmd: %v, output: %s", curlArgs, string(curlOut))
-  builds, err = oc.BuildClient().Build().Builds(oc.Namespace()).List(metav1.ListOptions{})
+  builds, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).List(metav1.ListOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(builds.Items).To(o.BeEmpty())
@@ -471,7 +471,7 @@ var _ = g.Describe("[Feature:Builds][Slow] starting a build using CLI", func() {
   o.Expect(err).NotTo(o.HaveOccurred())
   g.By("waiting for build to finish")
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), "symlink-bc-1", exutil.CheckBuildSuccess, exutil.CheckBuildFailed, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), "symlink-bc-1", exutil.CheckBuildSuccess, exutil.CheckBuildFailed, nil)
   if err != nil {
     exutil.DumpBuildLogs("symlink-bc", oc)
   }
diff --git a/test/extended/cli/rsync.go b/test/extended/cli/rsync.go
index ccf709fbfdc5..9785fe19e82c 100644
--- a/test/extended/cli/rsync.go
+++ b/test/extended/cli/rsync.go
@@ -43,7 +43,7 @@ var _ = g.Describe("[cli][Slow] can use rsync to upload files to pods", func() {
   g.By("Getting the mariadb pod name")
   selector, _ := labels.Parse("name=mariadb")
-  pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: selector.String()})
+  pods, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: selector.String()})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(len(pods.Items)).ToNot(o.BeZero())
   podName = pods.Items[0].Name
diff --git a/test/extended/cluster/audit.go b/test/extended/cluster/audit.go
index da845cbca6c4..e531988d99a1 100644
--- a/test/extended/cluster/audit.go
+++ b/test/extended/cluster/audit.go
@@ -46,15 +46,15 @@ var _ = g.Describe("[Feature:Audit] Basic audit", func() {
     "top-secret": []byte("foo-bar"),
   },
 }
-  _, err := f.ClientSet.Core().Secrets(f.Namespace.Name).Create(secret)
+  _, err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
   framework.ExpectNoError(err, "failed to create audit-secret")
-  _, err = f.ClientSet.Core().Secrets(f.Namespace.Name).Get(secret.Name, metav1.GetOptions{})
+  _, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Get(secret.Name, metav1.GetOptions{})
   framework.ExpectNoError(err, "failed to get audit-secret")
-  err = f.ClientSet.Core().Secrets(f.Namespace.Name).Delete(secret.Name, &metav1.DeleteOptions{})
+  err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(secret.Name, &metav1.DeleteOptions{})
   framework.ExpectNoError(err, "failed to delete audit-secret")
   // /version should not be audited
-  _, err = f.ClientSet.Core().RESTClient().Get().AbsPath("/version").DoRaw()
+  _, err = f.ClientSet.CoreV1().RESTClient().Get().AbsPath("/version").DoRaw()
   framework.ExpectNoError(err, "failed to query version")
   expectedEvents := []auditEvent{{
diff --git a/test/extended/cluster/cl.go b/test/extended/cluster/cl.go
index fd208458b407..ce5979458f63 100644
--- a/test/extended/cluster/cl.go
+++ b/test/extended/cluster/cl.go
@@ -149,7 +149,7 @@ var _ = g.Describe("[Feature:Performance][Serial][Slow] Load cluster", func() {
   configMapName := InjectConfigMap(c, nsName, pod.Parameters, config)
   // Cleanup ConfigMap at some point after the Pods are created
   defer func() {
-    _ = c.Core().ConfigMaps(nsName).Delete(configMapName, nil)
+    _ = c.CoreV1().ConfigMaps(nsName).Delete(configMapName, nil)
   }()
 }
 // TODO sjug: pass label via config
@@ -186,14 +186,14 @@ var _ = g.Describe("[Feature:Performance][Serial][Slow] Load cluster", func() {
   // Wait for builds and deployments to complete
   for _, ns := range namespaces {
-    buildList, err := oc.BuildClient().Build().Builds(ns).List(metav1.ListOptions{})
+    buildList, err := oc.BuildClient().BuildV1().Builds(ns).List(metav1.ListOptions{})
     if err != nil {
       e2e.Logf("Error listing builds: %v", err)
     }
     if len(buildList.Items) > 0 {
       buildName := buildList.Items[0].Name
       e2e.Logf("Waiting for build: %q", buildName)
-      err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(ns), buildName, nil, nil, nil)
+      err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(ns), buildName, nil, nil, nil)
       if err != nil {
         exutil.DumpBuildLogs(buildName, oc)
       }
diff --git a/test/extended/image_ecosystem/mongodb_ephemeral.go b/test/extended/image_ecosystem/mongodb_ephemeral.go
index 4e838c07e699..ced775c4caa3 100644
--- a/test/extended/image_ecosystem/mongodb_ephemeral.go
+++ b/test/extended/image_ecosystem/mongodb_ephemeral.go
@@ -43,7 +43,7 @@ var _ = g.Describe("[image_ecosystem][mongodb] openshift mongodb image", func()
   g.By("expecting the mongodb pod is running")
   podNames, err := exutil.WaitForPods(
-    oc.KubeClient().Core().Pods(oc.Namespace()),
+    oc.KubeClient().CoreV1().Pods(oc.Namespace()),
     exutil.ParseLabelsOrDie("name=mongodb"),
     exutil.CheckPodIsRunning,
     1,
diff --git a/test/extended/image_ecosystem/s2i_perl.go b/test/extended/image_ecosystem/s2i_perl.go
index a9eae20329f3..e7a702d4d77b 100644
--- a/test/extended/image_ecosystem/s2i_perl.go
+++ b/test/extended/image_ecosystem/s2i_perl.go
@@ -55,7 +55,7 @@ var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl
   br.AssertSuccess()
   g.By("waiting for build to finish")
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), rcNameOne, nil, nil, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), rcNameOne, nil, nil, nil)
   if err != nil {
     exutil.DumpBuildLogs(dcName, oc)
   }
@@ -67,11 +67,11 @@ var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl
   g.By("waiting for endpoint")
   err = e2e.WaitForEndpoint(oc.KubeFramework().ClientSet, oc.Namespace(), dcName)
   o.Expect(err).NotTo(o.HaveOccurred())
-  oldEndpoint, err := oc.KubeFramework().ClientSet.Core().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{})
+  oldEndpoint, err := oc.KubeFramework().ClientSet.CoreV1().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   checkPage := func(expected string, dcLabel labels.Selector) {
-    _, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute)
+    _, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute)
     o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
     result, err := CheckPageContains(oc, dcName, "", expected)
     o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
@@ -99,7 +99,7 @@ var _ = g.Describe("[image_ecosystem][perl][Slow] hot deploy for openshift perl
   // request timeouts against the previous pod's ip. So make sure the endpoint is pointing to the
   // new pod before hitting it.
   err = wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
-    newEndpoint, err := oc.KubeFramework().ClientSet.Core().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{})
+    newEndpoint, err := oc.KubeFramework().ClientSet.CoreV1().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{})
     if err != nil {
       return false, err
     }
diff --git a/test/extended/image_ecosystem/s2i_php.go b/test/extended/image_ecosystem/s2i_php.go
index 3704e7b81dc4..d80e9165ecfc 100644
--- a/test/extended/image_ecosystem/s2i_php.go
+++ b/test/extended/image_ecosystem/s2i_php.go
@@ -47,7 +47,7 @@ var _ = g.Describe("[image_ecosystem][php][Slow] hot deploy for openshift php im
   o.Expect(err).NotTo(o.HaveOccurred())
   g.By("waiting for build to finish")
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), dcName, nil, nil, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), dcName, nil, nil, nil)
   if err != nil {
     exutil.DumpBuildLogs("cakephp-mysql-example", oc)
   }
@@ -61,7 +61,7 @@ var _ = g.Describe("[image_ecosystem][php][Slow] hot deploy for openshift php im
   o.Expect(err).NotTo(o.HaveOccurred())
   assertPageCountRegexp := func(priorValue string) string {
-    _, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute)
+    _, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute)
     o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
     result, val, err := CheckPageRegexp(oc, "cakephp-mysql-example", "", pageRegexpCount, 1)
@@ -87,7 +87,7 @@ var _ = g.Describe("[image_ecosystem][php][Slow] hot deploy for openshift php im
   o.Expect(err).NotTo(o.HaveOccurred())
   assertPageCountIs := func(i int) {
-    _, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute)
+    _, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute)
     o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
     result, err := CheckPageContains(oc, "cakephp-mysql-example", "", fmt.Sprintf(pageExactCount, i))
diff --git a/test/extended/image_ecosystem/s2i_python.go b/test/extended/image_ecosystem/s2i_python.go
index c93feb9b633e..13793f998d70 100644
--- a/test/extended/image_ecosystem/s2i_python.go
+++ b/test/extended/image_ecosystem/s2i_python.go
@@ -58,7 +58,7 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
   o.Expect(err).NotTo(o.HaveOccurred())
   g.By("waiting for build to finish")
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), rcNameOne, nil, nil, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), rcNameOne, nil, nil, nil)
   if err != nil {
     exutil.DumpBuildLogs(dcName, oc)
   }
@@ -70,11 +70,11 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
   g.By("waiting for endpoint")
   err = e2e.WaitForEndpoint(oc.KubeFramework().ClientSet, oc.Namespace(), dcName)
   o.Expect(err).NotTo(o.HaveOccurred())
-  oldEndpoint, err := oc.KubeFramework().ClientSet.Core().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{})
+  oldEndpoint, err := oc.KubeFramework().ClientSet.CoreV1().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   assertPageCountIs := func(i int, dcLabel labels.Selector) {
-    _, err := exutil.WaitForPods(oc.KubeClient().Core().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute)
+    _, err := exutil.WaitForPods(oc.KubeClient().CoreV1().Pods(oc.Namespace()), dcLabel, exutil.CheckPodIsRunning, 1, 4*time.Minute)
     o.ExpectWithOffset(1, err).NotTo(o.HaveOccurred())
     result, err := CheckPageContains(oc, dcName, "", pageCountFn(i))
@@ -94,7 +94,7 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
   o.Expect(err).NotTo(o.HaveOccurred())
   assertPageCountIs(3, dcLabelOne)
-  pods, err := oc.KubeClient().Core().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: dcLabelOne.String()})
+  pods, err := oc.KubeClient().CoreV1().Pods(oc.Namespace()).List(metav1.ListOptions{LabelSelector: dcLabelOne.String()})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(len(pods.Items)).To(o.Equal(1))
@@ -112,7 +112,7 @@ var _ = g.Describe("[image_ecosystem][python][Slow] hot deploy for openshift pyt
   // request timeouts against the previous pod's ip. So make sure the endpoint is pointing to the
   // new pod before hitting it.
   err = wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
-    newEndpoint, err := oc.KubeFramework().ClientSet.Core().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{})
+    newEndpoint, err := oc.KubeFramework().ClientSet.CoreV1().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{})
     if err != nil {
       return false, err
     }
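The hot-deploy tests above share one more pattern worth noting: after a redeploy they re-read the Endpoints object through the versioned client until it stops matching the snapshot taken before the rollout, so requests are not sent to the old pod's IP. A condensed sketch, assuming "changed" can be approximated by a resourceVersion comparison (the function name and that comparison are illustrative, not the tests' exact logic):

    package sketch // illustrative

    import (
        "time"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/apimachinery/pkg/util/wait"
        corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    // waitForEndpointChange polls until the named endpoint differs from the
    // pre-rollout snapshot, mirroring the wait.Poll loops in the hunks above.
    func waitForEndpointChange(c corev1client.CoreV1Interface, ns, name string, old *corev1.Endpoints) error {
        return wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) {
            cur, err := c.Endpoints(ns).Get(name, metav1.GetOptions{})
            if err != nil {
                return false, err
            }
            return cur.ResourceVersion != old.ResourceVersion, nil
        })
    }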
var _ = g.Describe("[image_ecosystem][ruby][Slow] hot deploy for openshift ruby // request timeouts against the previous pod's ip. So make sure the endpoint is pointing to the // new pod before hitting it. err = wait.Poll(1*time.Second, 1*time.Minute, func() (bool, error) { - newEndpoint, err := oc.KubeFramework().ClientSet.Core().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{}) + newEndpoint, err := oc.KubeFramework().ClientSet.CoreV1().Endpoints(oc.Namespace()).Get(dcName, metav1.GetOptions{}) if err != nil { return false, err } diff --git a/test/extended/image_ecosystem/sample_repos.go b/test/extended/image_ecosystem/sample_repos.go index 19969a1dfe17..659692666724 100644 --- a/test/extended/image_ecosystem/sample_repos.go +++ b/test/extended/image_ecosystem/sample_repos.go @@ -62,7 +62,7 @@ func NewSampleRepoTest(c sampleRepoConfig) func() { buildName := c.buildConfigName + "-1" g.By("expecting the build is in the Complete phase") - err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), buildName, nil, nil, nil) + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), buildName, nil, nil, nil) if err != nil { exutil.DumpBuildLogs(c.buildConfigName, oc) } diff --git a/test/extended/imageapis/quota_admission.go b/test/extended/imageapis/quota_admission.go index c2c87c8291ec..1cef853ef07d 100644 --- a/test/extended/imageapis/quota_admission.go +++ b/test/extended/imageapis/quota_admission.go @@ -193,9 +193,9 @@ func waitForLimitSync(oc *exutil.CLI, hardLimit corev1.ResourceList) error { func createImageStreamMapping(oc *exutil.CLI, namespace, name, tag string) error { e2e.Logf("Creating image stream mapping for %s/%s:%s...", namespace, name, tag) - _, err := oc.AdminImageClient().Image().ImageStreams(namespace).Get(name, metav1.GetOptions{}) + _, err := oc.AdminImageClient().ImageV1().ImageStreams(namespace).Get(name, metav1.GetOptions{}) if kerrors.IsNotFound(err) { - _, err = oc.AdminImageClient().Image().ImageStreams(namespace).Create(&imagev1.ImageStream{ + _, err = oc.AdminImageClient().ImageV1().ImageStreams(namespace).Create(&imagev1.ImageStream{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -207,7 +207,7 @@ func createImageStreamMapping(oc *exutil.CLI, namespace, name, tag string) error } else if err != nil { return err } - _, err = oc.AdminImageClient().Image().ImageStreamMappings(namespace).Create(&imagev1.ImageStreamMapping{ + _, err = oc.AdminImageClient().ImageV1().ImageStreamMappings(namespace).Create(&imagev1.ImageStreamMapping{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, diff --git a/test/extended/images/append.go b/test/extended/images/append.go index 946ec3ebebed..a4fcec87955f 100644 --- a/test/extended/images/append.go +++ b/test/extended/images/append.go @@ -15,7 +15,7 @@ import ( ) func cliPodWithPullSecret(cli *exutil.CLI, shell string) *kapiv1.Pod { - sa, err := cli.KubeClient().Core().ServiceAccounts(cli.Namespace()).Get("builder", metav1.GetOptions{}) + sa, err := cli.KubeClient().CoreV1().ServiceAccounts(cli.Namespace()).Get("builder", metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(sa.ImagePullSecrets).NotTo(o.BeEmpty()) pullSecretName := sa.ImagePullSecrets[0].Name diff --git a/test/extended/images/extract.go b/test/extended/images/extract.go index 9d8e8855e2e7..551f05671f81 100644 --- a/test/extended/images/extract.go +++ b/test/extended/images/extract.go @@ -37,7 +37,7 @@ var _ = g.Describe("[Feature:ImageExtract] Image extract", func() { ns = 
oc.Namespace() cli := oc.KubeFramework().PodClient() - client := imageclientset.NewForConfigOrDie(oc.UserConfig()).Image() + client := imageclientset.NewForConfigOrDie(oc.UserConfig()).ImageV1() _, err = client.ImageStreamImports(ns).Create(&imageapi.ImageStreamImport{ ObjectMeta: metav1.ObjectMeta{ diff --git a/test/extended/images/hardprune.go b/test/extended/images/hardprune.go index eb17730cc0b0..98b61db4666b 100644 --- a/test/extended/images/hardprune.go +++ b/test/extended/images/hardprune.go @@ -406,7 +406,7 @@ func GetRegistryPod(podsGetter kcoreclient.PodsGetter) (*kapiv1.Pod, error) { // LogRegistryPod attempts to write registry log to a file in artifacts directory. func LogRegistryPod(oc *exutil.CLI) error { - pod, err := GetRegistryPod(oc.KubeClient().Core()) + pod, err := GetRegistryPod(oc.KubeClient().CoreV1()) if err != nil { return fmt.Errorf("failed to get registry pod: %v", err) } diff --git a/test/extended/images/helper.go b/test/extended/images/helper.go index 255adf0cf394..29079f47e1e3 100644 --- a/test/extended/images/helper.go +++ b/test/extended/images/helper.go @@ -168,13 +168,13 @@ func BuildAndPushImageOfSizeWithBuilder( istName += ":" + tag } - bc, err := oc.BuildClient().Build().BuildConfigs(namespace).Get(name, metav1.GetOptions{}) + bc, err := oc.BuildClient().BuildV1().BuildConfigs(namespace).Get(name, metav1.GetOptions{}) if err == nil { if bc.Spec.CommonSpec.Output.To.Kind != "ImageStreamTag" { return fmt.Errorf("Unexpected kind of buildspec's output (%s != %s)", bc.Spec.CommonSpec.Output.To.Kind, "ImageStreamTag") } bc.Spec.CommonSpec.Output.To.Name = istName - if _, err = oc.BuildClient().Build().BuildConfigs(namespace).Update(bc); err != nil { + if _, err = oc.BuildClient().BuildV1().BuildConfigs(namespace).Update(bc); err != nil { return err } } else { @@ -612,7 +612,7 @@ func IsBlobStoredInRegistry( // assumed to be in a read-only mode and using filesystem as a storage driver. It returns lists of deleted // files. 
func RunHardPrune(oc *exutil.CLI, dryRun bool) (*RegistryStorageFiles, error) { - pod, err := GetRegistryPod(oc.AsAdmin().KubeClient().Core()) + pod, err := GetRegistryPod(oc.AsAdmin().KubeClient().CoreV1()) if err != nil { return nil, err } diff --git a/test/extended/images/layers.go b/test/extended/images/layers.go index f92fa506eddf..ac3947970e80 100644 --- a/test/extended/images/layers.go +++ b/test/extended/images/layers.go @@ -36,7 +36,7 @@ var _ = g.Describe("[Feature:ImageLayers][registry] Image layer subresource", fu oc = exutil.NewCLI("image-layers", exutil.KubeConfigPath()) g.It("should identify a deleted image as missing", func() { - client := imagev1client.NewForConfigOrDie(oc.AdminConfig()).Image() + client := imagev1client.NewForConfigOrDie(oc.AdminConfig()).ImageV1() _, err := client.ImageStreams(oc.Namespace()).Create(&imagev1.ImageStream{ ObjectMeta: metav1.ObjectMeta{ Name: "test", @@ -74,7 +74,7 @@ var _ = g.Describe("[Feature:ImageLayers][registry] Image layer subresource", fu g.It("should return layers from tagged images", func() { ns = []string{oc.Namespace()} - client := imagev1client.NewForConfigOrDie(oc.UserConfig()).Image() + client := imagev1client.NewForConfigOrDie(oc.UserConfig()).ImageV1() isi, err := client.ImageStreamImports(oc.Namespace()).Create(&imagev1.ImageStreamImport{ ObjectMeta: metav1.ObjectMeta{ Name: "1", @@ -154,7 +154,7 @@ RUN mkdir -p /var/lib && echo "a" > /var/lib/file ` g.By("running a build based on our tagged layer") - buildClient := buildv1client.NewForConfigOrDie(oc.UserConfig()).Build() + buildClient := buildv1client.NewForConfigOrDie(oc.UserConfig()).BuildV1() _, err = buildClient.Builds(oc.Namespace()).Create(&buildv1.Build{ ObjectMeta: metav1.ObjectMeta{ Name: "output", diff --git a/test/extended/images/mirror.go b/test/extended/images/mirror.go index 1e52d79e62ab..5998590b16a6 100644 --- a/test/extended/images/mirror.go +++ b/test/extended/images/mirror.go @@ -246,12 +246,12 @@ RUN echo %s > /3 o.Expect(err).NotTo(o.HaveOccurred()) g.By("starting a test build") - bc, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get(isName, metav1.GetOptions{}) + bc, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get(isName, metav1.GetOptions{}) o.Expect(err).NotTo(o.HaveOccurred()) o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile)) g.By("expecting the Dockerfile build is in Complete phase") - err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), isName+"-1", nil, nil, nil) + err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), isName+"-1", nil, nil, nil) o.Expect(err).NotTo(o.HaveOccurred()) g.By(fmt.Sprintf("checking for the imported tag: %s", istName)) diff --git a/test/extended/images/prune.go b/test/extended/images/prune.go index f462bf10fe6e..d02f97b1a888 100644 --- a/test/extended/images/prune.go +++ b/test/extended/images/prune.go @@ -192,7 +192,7 @@ var _ = g.Describe("[Feature:ImagePrune][registry][Serial][Suite:openshift/regis }) func getImageName(oc *exutil.CLI, namespace, name, tag string) (string, error) { - istag, err := oc.AdminImageClient().Image().ImageStreamTags(namespace).Get(fmt.Sprintf("%s:%s", name, tag), metav1.GetOptions{}) + istag, err := oc.AdminImageClient().ImageV1().ImageStreamTags(namespace).Get(fmt.Sprintf("%s:%s", name, tag), metav1.GetOptions{}) if err != nil { return "", err } diff --git a/test/extended/machines/machines.go b/test/extended/machines/machines.go index c1a2dc3dfcb2..c9b054dd978e 100644 --- 
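layers.go above also shows where the clientset itself comes from: it is built once from a rest.Config, and the versioned accessor is taken at the point of use. The same construction with the image client, sketched (function name illustrative):

    package sketch // illustrative

    import (
        imagev1client "github.com/openshift/client-go/image/clientset/versioned"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/rest"
    )

    // getImageStream builds the versioned clientset and uses ImageV1(), the
    // replacement for the deprecated Image() accessor removed by this patch.
    func getImageStream(config *rest.Config, ns, name string) error {
        client, err := imagev1client.NewForConfig(config)
        if err != nil {
            return err
        }
        _, err = client.ImageV1().ImageStreams(ns).Get(name, metav1.GetOptions{})
        return err
    }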
diff --git a/test/extended/images/mirror.go b/test/extended/images/mirror.go
index 1e52d79e62ab..5998590b16a6 100644
--- a/test/extended/images/mirror.go
+++ b/test/extended/images/mirror.go
@@ -246,12 +246,12 @@ RUN echo %s > /3
   o.Expect(err).NotTo(o.HaveOccurred())
   g.By("starting a test build")
-  bc, err := oc.BuildClient().Build().BuildConfigs(oc.Namespace()).Get(isName, metav1.GetOptions{})
+  bc, err := oc.BuildClient().BuildV1().BuildConfigs(oc.Namespace()).Get(isName, metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(*bc.Spec.Source.Dockerfile).To(o.Equal(testDockerfile))
   g.By("expecting the Dockerfile build is in Complete phase")
-  err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), isName+"-1", nil, nil, nil)
+  err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), isName+"-1", nil, nil, nil)
   o.Expect(err).NotTo(o.HaveOccurred())
   g.By(fmt.Sprintf("checking for the imported tag: %s", istName))
diff --git a/test/extended/images/prune.go b/test/extended/images/prune.go
index f462bf10fe6e..d02f97b1a888 100644
--- a/test/extended/images/prune.go
+++ b/test/extended/images/prune.go
@@ -192,7 +192,7 @@ var _ = g.Describe("[Feature:ImagePrune][registry][Serial][Suite:openshift/regis
 })
 func getImageName(oc *exutil.CLI, namespace, name, tag string) (string, error) {
-  istag, err := oc.AdminImageClient().Image().ImageStreamTags(namespace).Get(fmt.Sprintf("%s:%s", name, tag), metav1.GetOptions{})
+  istag, err := oc.AdminImageClient().ImageV1().ImageStreamTags(namespace).Get(fmt.Sprintf("%s:%s", name, tag), metav1.GetOptions{})
   if err != nil {
     return "", err
   }
diff --git a/test/extended/machines/machines.go b/test/extended/machines/machines.go
index c1a2dc3dfcb2..c9b054dd978e 100644
--- a/test/extended/machines/machines.go
+++ b/test/extended/machines/machines.go
@@ -38,7 +38,7 @@ var _ = g.Describe("[Feature:Machines][Smoke] Managed cluster should", func() {
   g.By("checking for the openshift machine api operator")
   // TODO: skip if platform != aws
-  skipUnlessMachineAPIOperator(c.Core().Namespaces())
+  skipUnlessMachineAPIOperator(c.CoreV1().Namespaces())
   g.By("ensuring every node is linked to a machine api resource")
   allNodes, err := c.CoreV1().Nodes().List(metav1.ListOptions{})
diff --git a/test/extended/machines/workers.go b/test/extended/machines/workers.go
index a0d0c3c2c718..462e6e5eead2 100644
--- a/test/extended/machines/workers.go
+++ b/test/extended/machines/workers.go
@@ -119,7 +119,7 @@ var _ = g.Describe("[Feature:Machines][Disruptive] Managed cluster should", func
   g.By("checking for the openshift machine api operator")
   // TODO: skip if platform != aws
-  skipUnlessMachineAPIOperator(c.Core().Namespaces())
+  skipUnlessMachineAPIOperator(c.CoreV1().Namespaces())
   g.By("validating node and machine invariants")
   // fetch machines
diff --git a/test/extended/networking/util.go b/test/extended/networking/util.go
index b19b2540e4a8..1f1e31b74f8a 100644
--- a/test/extended/networking/util.go
+++ b/test/extended/networking/util.go
@@ -104,7 +104,7 @@ func waitForPodSuccessInNamespace(c kclientset.Interface, podName string, contNa
 func waitForEndpoint(c kclientset.Interface, ns, name string) error {
   for t := time.Now(); time.Since(t) < 3*time.Minute; time.Sleep(poll) {
-    endpoint, err := c.Core().Endpoints(ns).Get(name, metav1.GetOptions{})
+    endpoint, err := c.CoreV1().Endpoints(ns).Get(name, metav1.GetOptions{})
     if kapierrs.IsNotFound(err) {
       e2e.Logf("Endpoint %s/%s is not ready yet", ns, name)
       continue
@@ -175,10 +175,10 @@ func checkConnectivityToHost(f *e2e.Framework, nodeName string, podName string,
 })
 defer func() {
   e2e.Logf("Cleaning up the exec pod")
-  err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(execPodName, nil)
+  err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(execPodName, nil)
   Expect(err).NotTo(HaveOccurred())
 }()
-  execPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(execPodName, metav1.GetOptions{})
+  execPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(execPodName, metav1.GetOptions{})
   e2e.ExpectNoError(err)
   var stdout string
@@ -227,10 +227,10 @@ func checkConnectivityToHost(f *e2e.Framework, nodeName string, podName string,
   pod.Spec.Containers[0].SecurityContext = &corev1.SecurityContext{Privileged: &privileged}
 })
 defer func() {
-  err := f.ClientSet.Core().Pods(f.Namespace.Name).Delete(debugPodName, nil)
+  err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(debugPodName, nil)
   Expect(err).NotTo(HaveOccurred())
 }()
-  debugPod, err := f.ClientSet.Core().Pods(f.Namespace.Name).Get(debugPodName, metav1.GetOptions{})
+  debugPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(debugPodName, metav1.GetOptions{})
   e2e.ExpectNoError(err)
   stdout, err = e2e.RunHostCmd(debugPod.Namespace, debugPod.Name, "ovs-ofctl -O OpenFlow13 dump-flows br0")
diff --git a/test/extended/operators/cluster.go b/test/extended/operators/cluster.go
index 39c3b1b94787..5816799c1bf9 100644
--- a/test/extended/operators/cluster.go
+++ b/test/extended/operators/cluster.go
@@ -25,7 +25,7 @@ var _ = g.Describe("[Feature:Platform] Managed cluster should", func() {
   var lastPodsWithProblems []*corev1.Pod
   var pending map[string]*corev1.Pod
   wait.PollImmediate(5*time.Second, 2*time.Minute, func() (bool, error) {
-    allPods, err := c.Core().Pods("").List(metav1.ListOptions{})
+    allPods, err := c.CoreV1().Pods("").List(metav1.ListOptions{})
     o.Expect(err).NotTo(o.HaveOccurred())
     var pods []*corev1.Pod
diff --git a/test/extended/operators/operators.go b/test/extended/operators/operators.go
index ad41320461f5..3d1ebbeec432 100644
--- a/test/extended/operators/operators.go
+++ b/test/extended/operators/operators.go
@@ -41,7 +41,7 @@ var _ = g.Describe("[Feature:Platform][Smoke] Managed cluster should", func() {
   // presence of the CVO namespace gates this test
   g.By("checking for the cluster version operator")
-  skipUnlessCVO(c.Core().Namespaces())
+  skipUnlessCVO(c.CoreV1().Namespaces())
   g.By("waiting for the cluster version to be applied")
   cvc := dc.Resource(schema.GroupVersionResource{Group: "config.openshift.io", Resource: "clusterversions", Version: "v1"})
@@ -182,12 +182,12 @@ var _ = g.Describe("[Feature:Platform] Managed cluster should", func() {
   // presence of the CVO namespace gates this test
   g.By("checking for the cluster version operator")
-  skipUnlessCVO(coreclient.Core().Namespaces())
+  skipUnlessCVO(coreclient.CoreV1().Namespaces())
   // we need to get the list of versions
-  cv, err := c.Config().ClusterVersions().Get("version", metav1.GetOptions{})
+  cv, err := c.ConfigV1().ClusterVersions().Get("version", metav1.GetOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
-  coList, err := c.Config().ClusterOperators().List(metav1.ListOptions{})
+  coList, err := c.ConfigV1().ClusterOperators().List(metav1.ListOptions{})
   o.Expect(err).NotTo(o.HaveOccurred())
   o.Expect(coList.Items).NotTo(o.BeEmpty())
diff --git a/test/extended/prometheus/prometheus.go b/test/extended/prometheus/prometheus.go
index ee5a6fa72cea..a5c4019419d1 100644
--- a/test/extended/prometheus/prometheus.go
+++ b/test/extended/prometheus/prometheus.go
@@ -55,7 +55,7 @@ var _ = g.Describe("[Feature:Prometheus][Conformance] Prometheus", func() {
   ns := oc.Namespace()
   execPodName := e2e.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod", func(pod *v1.Pod) { pod.Spec.Containers[0].Image = "centos:7" })
-  defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+  defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
   tests := map[string][]metricTest{
     // should have successfully sent at least once to remote
@@ -72,7 +72,7 @@ var _ = g.Describe("[Feature:Prometheus][Conformance] Prometheus", func() {
   oc.SetupProject()
   ns := oc.Namespace()
   execPodName := e2e.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod", func(pod *v1.Pod) { pod.Spec.Containers[0].Image = "centos:7" })
-  defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+  defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
   g.By("checking the unsecured metrics path")
   var metrics map[string]*dto.MetricFamily
@@ -317,7 +317,7 @@ func getInsecureURLViaPod(ns, execPodName, url string) (string, error) {
 }
 func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
-  w, err := c.Core().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
+  w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
   if err != nil {
     return err
   }
@@ -328,14 +328,14 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
 }
 func locatePrometheus(oc *exutil.CLI) (url, bearerToken string, ok bool) {
-  _, err := oc.AdminKubeClient().Core().Services("openshift-monitoring").Get("prometheus-k8s", metav1.GetOptions{})
+  _, err := oc.AdminKubeClient().CoreV1().Services("openshift-monitoring").Get("prometheus-k8s", metav1.GetOptions{})
   if kapierrs.IsNotFound(err) {
     return "", "", false
   }
   waitForServiceAccountInNamespace(oc.AdminKubeClient(), "openshift-monitoring", "prometheus-k8s", 2*time.Minute)
   for i := 0; i < 30; i++ {
-    secrets, err := oc.AdminKubeClient().Core().Secrets("openshift-monitoring").List(metav1.ListOptions{})
+    secrets, err := oc.AdminKubeClient().CoreV1().Secrets("openshift-monitoring").List(metav1.ListOptions{})
     o.Expect(err).NotTo(o.HaveOccurred())
     for _, secret := range secrets.Items {
       if secret.Type != v1.SecretTypeServiceAccountToken {
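locatePrometheus above finds the prometheus-k8s bearer token by listing secrets in openshift-monitoring through CoreV1() and picking the service-account token. The scan reduces to roughly the following (function name illustrative; the real code also retries and filters by secret name):

    package sketch // illustrative

    import (
        "fmt"

        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
    )

    // findServiceAccountToken returns the first service-account token secret
    // in the namespace, the same shape of scan as locatePrometheus above.
    func findServiceAccountToken(c corev1client.CoreV1Interface, ns string) (string, error) {
        secrets, err := c.Secrets(ns).List(metav1.ListOptions{})
        if err != nil {
            return "", err
        }
        for _, s := range secrets.Items {
            if s.Type == corev1.SecretTypeServiceAccountToken {
                return string(s.Data[corev1.ServiceAccountTokenKey]), nil
            }
        }
        return "", fmt.Errorf("no service account token secret in %s", ns)
    }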
diff --git a/test/extended/prometheus/prometheus_builds.go b/test/extended/prometheus/prometheus_builds.go
index f51724a1deee..f0239fedfce0 100644
--- a/test/extended/prometheus/prometheus_builds.go
+++ b/test/extended/prometheus/prometheus_builds.go
@@ -50,7 +50,7 @@ var _ = g.Describe("[Feature:Prometheus][Feature:Builds] Prometheus", func() {
   appTemplate := exutil.FixturePath("testdata", "builds", "build-pruning", "successful-build-config.yaml")
   execPodName := e2e.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod", func(pod *corev1.Pod) { pod.Spec.Containers[0].Image = "centos:7" })
-  defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+  defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
   g.By("verifying the oauth-proxy reports a 403 on the root URL")
   // allow for some retry, a la prometheus.go and its initial hitting of the metrics endpoint after
diff --git a/test/extended/router/config_manager.go b/test/extended/router/config_manager.go
index becdd7399c18..93b4b96f5837 100644
--- a/test/extended/router/config_manager.go
+++ b/test/extended/router/config_manager.go
@@ -30,7 +30,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
   // hook
   g.AfterEach(func() {
     if g.CurrentGinkgoTestDescription().Failed {
-      client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).Route().Routes(ns)
+      client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).RouteV1().Routes(ns)
       if routes, _ := client.List(metav1.ListOptions{}); routes != nil {
         outputIngress(routes.Items...)
       }
diff --git a/test/extended/router/headers.go b/test/extended/router/headers.go
index 8e70d8d0d6c3..0ec156cb26ad 100644
--- a/test/extended/router/headers.go
+++ b/test/extended/router/headers.go
@@ -65,8 +65,8 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
   }()
   ns := oc.KubeFramework().Namespace.Name
-  execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().Core(), ns, "execpod")
-  defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+  execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, "execpod")
+  defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
   g.By(fmt.Sprintf("creating an http echo server from a config file %q", configPath))
@@ -75,7 +75,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
   var clientIP string
   err = wait.Poll(time.Second, changeTimeoutSeconds*time.Second, func() (bool, error) {
-    pod, err := oc.KubeFramework().ClientSet.Core().Pods(ns).Get("execpod", metav1.GetOptions{})
+    pod, err := oc.KubeFramework().ClientSet.CoreV1().Pods(ns).Get("execpod", metav1.GetOptions{})
    if err != nil {
      return false, err
    }
diff --git a/test/extended/router/metrics.go b/test/extended/router/metrics.go
index 832ad86f9bee..2f0cf7f581b2 100644
--- a/test/extended/router/metrics.go
+++ b/test/extended/router/metrics.go
@@ -86,8 +86,8 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
   g.Describe("The HAProxy router", func() {
     g.It("should expose a health check on the metrics port", func() {
-      execPodName = exutil.CreateExecPodOrFail(oc.AdminKubeClient().Core(), ns, "execpod")
-      defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+      execPodName = exutil.CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, "execpod")
+      defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
       g.By("listening on the health port")
       err := expectURLStatusCodeExec(ns, execPodName, fmt.Sprintf("http://%s:%d/healthz", host, statsPort), 200)
@@ -100,8 +100,8 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
       err := oc.Run("create").Args("-f", configPath).Execute()
       o.Expect(err).NotTo(o.HaveOccurred())
-      execPodName = exutil.CreateExecPodOrFail(oc.AdminKubeClient().Core(), ns, "execpod")
-      defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+      execPodName = exutil.CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, "execpod")
+      defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
       g.By("preventing access without a username and password")
       err = expectURLStatusCodeExec(ns, execPodName, fmt.Sprintf("http://%s:%d/metrics", host, statsPort), 401, 403)
@@ -207,8 +207,8 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
     })
     g.It("should expose the profiling endpoints", func() {
-      execPodName = exutil.CreateExecPodOrFail(oc.AdminKubeClient().Core(), ns, "execpod")
-      defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+      execPodName = exutil.CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, "execpod")
+      defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
       g.By("preventing access without a username and password")
       err := expectURLStatusCodeExec(ns, execPodName, fmt.Sprintf("http://%s:%d/debug/pprof/heap", host, statsPort), 401, 403)
@@ -227,7 +227,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
     }
     execPodName := e2e.CreateExecPodOrFail(oc.AdminKubeClient(), ns, "execpod", func(pod *corev1.Pod) { pod.Spec.Containers[0].Image = "centos:7" })
-    defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+    defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
     o.Expect(wait.PollImmediate(10*time.Second, 5*time.Minute, func() (bool, error) {
       contents, err := getBearerTokenURLViaPod(ns, execPodName, fmt.Sprintf("%s/api/v1/targets", prometheusURL), token)
@@ -295,7 +295,7 @@ func (t *promTargets) Expect(l promLabels, health, scrapeURLPattern string) erro
 }
 func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountName string, timeout time.Duration) error {
-  w, err := c.Core().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
+  w, err := c.CoreV1().ServiceAccounts(ns).Watch(metav1.SingleObject(metav1.ObjectMeta{Name: serviceAccountName}))
   if err != nil {
     return err
   }
@@ -306,14 +306,14 @@ func waitForServiceAccountInNamespace(c clientset.Interface, ns, serviceAccountN
 }
 func locatePrometheus(oc *exutil.CLI) (url, bearerToken string, ok bool) {
-  _, err := oc.AdminKubeClient().Core().Services("openshift-monitoring").Get("prometheus-k8s", metav1.GetOptions{})
+  _, err := oc.AdminKubeClient().CoreV1().Services("openshift-monitoring").Get("prometheus-k8s", metav1.GetOptions{})
   if kapierrs.IsNotFound(err) {
     return "", "", false
   }
   waitForServiceAccountInNamespace(oc.AdminKubeClient(), "openshift-monitoring", "prometheus-k8s", 2*time.Minute)
   for i := 0; i < 30; i++ {
-    secrets, err := oc.AdminKubeClient().Core().Secrets("openshift-monitoring").List(metav1.ListOptions{})
+    secrets, err := oc.AdminKubeClient().CoreV1().Secrets("openshift-monitoring").List(metav1.ListOptions{})
     o.Expect(err).NotTo(o.HaveOccurred())
     for _, secret := range secrets.Items {
       if secret.Type != corev1.SecretTypeServiceAccountToken {
@@ -449,7 +449,7 @@ func findStatsUsernameAndPassword(oc *exutil.CLI, ns string, env []corev1.EnvVar
   return "", "", fmt.Errorf("stats username and password not found, env: %v", env)
 }
-  secret, err := oc.AdminKubeClient().Core().Secrets(ns).Get(secretName, metav1.GetOptions{})
+  secret, err := oc.AdminKubeClient().CoreV1().Secrets(ns).Get(secretName, metav1.GetOptions{})
   if err != nil {
     return "", "", err
   }
@@ -463,7 +463,7 @@ func findStatsUsernameAndPassword(oc *exutil.CLI, ns string, env []corev1.EnvVar
 }
 func findMetricsBearerToken(oc *exutil.CLI) (string, error) {
-  sa, err := oc.AdminKubeClient().Core().ServiceAccounts("openshift-monitoring").Get("prometheus-k8s", metav1.GetOptions{})
+  sa, err := oc.AdminKubeClient().CoreV1().ServiceAccounts("openshift-monitoring").Get("prometheus-k8s", metav1.GetOptions{})
   if err != nil {
     return "", err
   }
@@ -479,7 +479,7 @@ func findMetricsBearerToken(oc *exutil.CLI) (string, error) {
   return "", fmt.Errorf("serviceaccount 'openshift-monitoring/prometheus-k8s' does not contain 'prometheus-k8s-token' secret")
 }
-  secret, err := oc.AdminKubeClient().Core().Secrets("openshift-monitoring").Get(secretName, metav1.GetOptions{})
+  secret, err := oc.AdminKubeClient().CoreV1().Secrets("openshift-monitoring").Get(secretName, metav1.GetOptions{})
   if err != nil {
     return "", err
   }
diff --git a/test/extended/router/reencrypt.go b/test/extended/router/reencrypt.go
index cc39c1c3f5b6..7059d03d5790 100644
--- a/test/extended/router/reencrypt.go
+++ b/test/extended/router/reencrypt.go
@@ -28,7 +28,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
   // hook
   g.AfterEach(func() {
     if g.CurrentGinkgoTestDescription().Failed {
-      client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).Route().Routes(ns)
+      client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).RouteV1().Routes(ns)
       if routes, _ := client.List(metav1.ListOptions{}); routes != nil {
         outputIngress(routes.Items...)
       }
@@ -54,8 +54,8 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
     g.It("should support reencrypt to services backed by a serving certificate automatically", func() {
       routerURL := fmt.Sprintf("https://%s", ip)
-      execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().Core(), ns, "execpod")
-      defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+      execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, "execpod")
+      defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
       g.By(fmt.Sprintf("deploying a service using a reencrypt route without a destinationCACertificate"))
       err := oc.Run("create").Args("-f", configPath).Execute()
       o.Expect(err).NotTo(o.HaveOccurred())
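The router tests repeat one AfterEach hook across files: on failure, dump the namespace's routes through the versioned route client. That hook reduces to a few lines (function name and output format are illustrative):

    package sketch // illustrative

    import (
        "fmt"

        routeclientset "github.com/openshift/client-go/route/clientset/versioned"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/rest"
    )

    // dumpRoutes lists routes via RouteV1(), the accessor this patch adopts
    // in place of the deprecated Route().
    func dumpRoutes(config *rest.Config, ns string) error {
        routes, err := routeclientset.NewForConfigOrDie(config).RouteV1().Routes(ns).List(metav1.ListOptions{})
        if err != nil {
            return err
        }
        for _, r := range routes.Items {
            fmt.Printf("route %s -> service %s\n", r.Name, r.Spec.To.Name)
        }
        return nil
    }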
 				}
@@ -53,7 +53,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
 			routerImage, _ = exutil.FindRouterImage(oc)
 			routerImage = strings.Replace(routerImage, "${component}", "haproxy-router", -1)

-			_, err := oc.AdminKubeClient().Rbac().RoleBindings(ns).Create(&rbacv1.RoleBinding{
+			_, err := oc.AdminKubeClient().RbacV1().RoleBindings(ns).Create(&rbacv1.RoleBinding{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "router",
 				},
@@ -74,7 +74,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
 	g.Describe("The HAProxy router", func() {
 		g.It("converges when multiple routers are writing status", func() {
 			g.By("deploying a scaled out namespace scoped router")
-			rs, err := oc.KubeClient().Extensions().ReplicaSets(ns).Create(
+			rs, err := oc.KubeClient().ExtensionsV1beta1().ReplicaSets(ns).Create(
 				scaledRouter(
 					routerImage,
 					[]string{
@@ -90,7 +90,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
 			o.Expect(err).NotTo(o.HaveOccurred())

 			g.By("creating multiple routes")
-			client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).Route().Routes(ns)
+			client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).RouteV1().Routes(ns)
 			var rv string
 			for i := 0; i < 10; i++ {
 				_, err := client.Create(&routev1.Route{
@@ -156,7 +156,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {

 		g.It("converges when multiple routers are writing conflicting status", func() {
 			g.By("deploying a scaled out namespace scoped router")
-			rs, err := oc.KubeClient().Extensions().ReplicaSets(ns).Create(
+			rs, err := oc.KubeClient().ExtensionsV1beta1().ReplicaSets(ns).Create(
 				scaledRouter(
 					routerImage,
 					[]string{
@@ -176,7 +176,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
 			o.Expect(err).NotTo(o.HaveOccurred())

 			g.By("creating multiple routes")
-			client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).Route().Routes(ns)
+			client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).RouteV1().Routes(ns)
 			var rv string
 			for i := 0; i < 20; i++ {
 				_, err := client.Create(&routev1.Route{
diff --git a/test/extended/router/unprivileged.go b/test/extended/router/unprivileged.go
index 7175ce4fbe1c..deb9b618b866 100644
--- a/test/extended/router/unprivileged.go
+++ b/test/extended/router/unprivileged.go
@@ -28,7 +28,7 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
 		// hook
 		g.AfterEach(func() {
 			if g.CurrentGinkgoTestDescription().Failed {
-				client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).Route().Routes(ns)
+				client := routeclientset.NewForConfigOrDie(oc.AdminConfig()).RouteV1().Routes(ns)
 				if routes, _ := client.List(metav1.ListOptions{}); routes != nil {
 					outputIngress(routes.Items...)
 				}
diff --git a/test/extended/router/weighted.go b/test/extended/router/weighted.go
index 7ca56814de24..140dac6c02d8 100644
--- a/test/extended/router/weighted.go
+++ b/test/extended/router/weighted.go
@@ -41,14 +41,14 @@ var _ = g.Describe("[Conformance][Area:Networking][Feature:Router]", func() {
 			}()

 			ns := oc.KubeFramework().Namespace.Name
-			execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().Core(), ns, "execpod")
-			defer func() { oc.AdminKubeClient().Core().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()
+			execPodName := exutil.CreateExecPodOrFail(oc.AdminKubeClient().CoreV1(), ns, "execpod")
+			defer func() { oc.AdminKubeClient().CoreV1().Pods(ns).Delete(execPodName, metav1.NewDeleteOptions(1)) }()

 			g.By(fmt.Sprintf("creating a weighted router from a config file %q", configPath))

 			var routerIP string
 			err := wait.Poll(time.Second, changeTimeoutSeconds*time.Second, func() (bool, error) {
-				pod, err := oc.KubeFramework().ClientSet.Core().Pods(oc.KubeFramework().Namespace.Name).Get("weighted-router", metav1.GetOptions{})
+				pod, err := oc.KubeFramework().ClientSet.CoreV1().Pods(oc.KubeFramework().Namespace.Name).Get("weighted-router", metav1.GetOptions{})
 				if err != nil {
 					return false, err
 				}
diff --git a/test/extended/templates/helpers.go b/test/extended/templates/helpers.go
index dedb618b1f76..2a071fb4b7ec 100644
--- a/test/extended/templates/helpers.go
+++ b/test/extended/templates/helpers.go
@@ -117,7 +117,7 @@ func setUser(cli *exutil.CLI, user *userapi.User) {

 // TSBClient returns a client to the running template service broker
 func TSBClient(oc *exutil.CLI) (osbclient.Client, error) {
-	svc, err := oc.AdminKubeClient().Core().Services("openshift-template-service-broker").Get("apiserver", metav1.GetOptions{})
+	svc, err := oc.AdminKubeClient().CoreV1().Services("openshift-template-service-broker").Get("apiserver", metav1.GetOptions{})
 	if err != nil {
 		return nil, err
 	}
diff --git a/test/extended/templates/templateinstance_readiness.go b/test/extended/templates/templateinstance_readiness.go
index ff6d9c5a590d..05aa0593fd06 100644
--- a/test/extended/templates/templateinstance_readiness.go
+++ b/test/extended/templates/templateinstance_readiness.go
@@ -42,7 +42,7 @@ var _ = g.Describe("[Conformance][templates] templateinstance readiness test", f
 				return false, err
 			}

-			build, err := cli.BuildClient().Build().Builds(cli.Namespace()).Get("simple-example-1", metav1.GetOptions{})
+			build, err := cli.BuildClient().BuildV1().Builds(cli.Namespace()).Get("simple-example-1", metav1.GetOptions{})
 			if err != nil {
 				if kerrors.IsNotFound(err) {
 					err = nil
@@ -168,7 +168,7 @@ var _ = g.Describe("[Conformance][templates] templateinstance readiness test", f
 		g.It("should report failed soon after an annotated objects has failed", func() {
 			var err error

-			secret, err := cli.KubeClient().Core().Secrets(cli.Namespace()).Create(&v1.Secret{
+			secret, err := cli.KubeClient().CoreV1().Secrets(cli.Namespace()).Create(&v1.Secret{
 				ObjectMeta: metav1.ObjectMeta{
 					Name: "secret",
 				},
diff --git a/test/extended/templates/templateservicebroker_bind.go b/test/extended/templates/templateservicebroker_bind.go
index f7ba77dde76c..2f018df212eb 100644
--- a/test/extended/templates/templateservicebroker_bind.go
+++ b/test/extended/templates/templateservicebroker_bind.go
@@ -106,7 +106,7 @@ var _ = g.Describe("[Conformance][templates] templateservicebroker bind test", f
 	})

 	g.It("should pass bind tests", func() {
-		svc, err := cli.KubeClient().Core().Services(cli.Namespace()).Get("service", metav1.GetOptions{})
+		svc, err := cli.KubeClient().CoreV1().Services(cli.Namespace()).Get("service", metav1.GetOptions{})
 		o.Expect(err).NotTo(o.HaveOccurred())

 		bind, err := brokercli.Bind(context.Background(), cliUser, instanceID, bindingID, &api.BindRequest{
diff --git a/test/extended/util/docker.go b/test/extended/util/docker.go
index aadb84be612f..2bcc4cde118a 100644
--- a/test/extended/util/docker.go
+++ b/test/extended/util/docker.go
@@ -36,7 +36,7 @@ func ListImages() ([]string, error) {
 //BuildAuthConfiguration constructs a non-standard dockerClient.AuthConfiguration that can be used to communicate with the openshift internal docker registry
 func BuildAuthConfiguration(credKey string, oc *CLI) (*dockerClient.AuthConfiguration, error) {
 	authCfg := &dockerClient.AuthConfiguration{}
-	secretList, err := oc.AdminKubeClient().Core().Secrets(oc.Namespace()).List(metav1.ListOptions{})
+	secretList, err := oc.AdminKubeClient().CoreV1().Secrets(oc.Namespace()).List(metav1.ListOptions{})

 	g.By(fmt.Sprintf("get secret list err %v ", err))
 	if err == nil {
diff --git a/test/extended/util/framework.go b/test/extended/util/framework.go
index 387bde2adbac..94e16f7d0566 100644
--- a/test/extended/util/framework.go
+++ b/test/extended/util/framework.go
@@ -643,7 +643,7 @@ func StartBuildAndWait(oc *CLI, args ...string) (result *BuildResult, err error)
 	if err != nil {
 		return result, err
 	}
-	return result, WaitForBuildResult(oc.BuildClient().Build().Builds(oc.Namespace()), result)
+	return result, WaitForBuildResult(oc.BuildClient().BuildV1().Builds(oc.Namespace()), result)
 }

 // WaitForBuildResult updates result wit the state of the build
diff --git a/test/extended/util/jenkins/ref.go b/test/extended/util/jenkins/ref.go
index 6c020a01851b..6a1371942be0 100644
--- a/test/extended/util/jenkins/ref.go
+++ b/test/extended/util/jenkins/ref.go
@@ -319,7 +319,7 @@ func (j *JenkinsRef) GetJobConsoleLogsAndMatchViaBuildResult(br *exutil.BuildRes
 		return "", fmt.Errorf("BuildResult oc should have been set up during BuildResult construction")
 	}
 	var err error // interestingly, removing this line and using := on the next got a compile error
-	br.Build, err = br.Oc.BuildClient().Build().Builds(br.Oc.Namespace()).Get(br.BuildName, metav1.GetOptions{})
+	br.Build, err = br.Oc.BuildClient().BuildV1().Builds(br.Oc.Namespace()).Get(br.BuildName, metav1.GetOptions{})
 	if err != nil {
 		return "", err
 	}
@@ -381,7 +381,7 @@ func SetupDockerhubImage(localImageName, snapshotImageStream string, newAppArgs
 	o.Expect(err).NotTo(o.HaveOccurred())

 	g.By("waiting for build to finish")
-	err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), snapshotImageStream+"-1", exutil.CheckBuildSuccess, exutil.CheckBuildFailed, exutil.CheckBuildCancelled)
+	err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), snapshotImageStream+"-1", exutil.CheckBuildSuccess, exutil.CheckBuildFailed, exutil.CheckBuildCancelled)
 	if err != nil {
 		exutil.DumpBuildLogs(snapshotImageStream, oc)
 	}
@@ -417,7 +417,7 @@ func SetupSnapshotImage(envVarName, localImageName, snapshotImageStream string,
 	o.Expect(err).NotTo(o.HaveOccurred())

 	g.By("waiting for build to finish")
-	err = exutil.WaitForABuild(oc.BuildClient().Build().Builds(oc.Namespace()), snapshotImageStream+"-1", exutil.CheckBuildSuccess, exutil.CheckBuildFailed, exutil.CheckBuildCancelled)
+	err = exutil.WaitForABuild(oc.BuildClient().BuildV1().Builds(oc.Namespace()), snapshotImageStream+"-1", exutil.CheckBuildSuccess, exutil.CheckBuildFailed, exutil.CheckBuildCancelled)
 	if err != nil {
 		exutil.DumpBuildLogs(snapshotImageStream, oc)
 	}
@@ -465,7 +465,7 @@ func ProcessLogURLAnnotations(oc *exutil.CLI, t *exutil.BuildResult) (*url.URL,
 func DumpLogs(oc *exutil.CLI, t *exutil.BuildResult) (string, error) {
 	var err error
 	if t.Build == nil {
-		t.Build, err = oc.BuildClient().Build().Builds(oc.Namespace()).Get(t.BuildName, metav1.GetOptions{})
+		t.Build, err = oc.BuildClient().BuildV1().Builds(oc.Namespace()).Get(t.BuildName, metav1.GetOptions{})
 		if err != nil {
 			return "", fmt.Errorf("cannot retrieve build %s: %v", t.BuildName, err)
 		}
diff --git a/test/extended/util/nfs.go b/test/extended/util/nfs.go
index 978fb6e05589..38227603ec2e 100644
--- a/test/extended/util/nfs.go
+++ b/test/extended/util/nfs.go
@@ -59,7 +59,7 @@ func SetupK8SNFSServerAndVolume(oc *CLI, count int) (*kapiv1.Pod, []*kapiv1.Pers
 			},
 		}
 		pvTemplate := e2e.MakePersistentVolume(pvConfig)
-		pv, err := oc.AdminKubeClient().Core().PersistentVolumes().Create(pvTemplate)
+		pv, err := oc.AdminKubeClient().CoreV1().PersistentVolumes().Create(pvTemplate)
 		if err != nil {
 			e2e.Logf("error creating persistent volume %#v", err)
 		}
diff --git a/test/extended/util/statefulsets.go b/test/extended/util/statefulsets.go
index 8efec4581890..1c3698576ebc 100644
--- a/test/extended/util/statefulsets.go
+++ b/test/extended/util/statefulsets.go
@@ -14,7 +14,7 @@ func RemoveStatefulSets(oc *CLI, sets ...string) error {
 	errs := []error{}
 	for _, set := range sets {
 		e2e.Logf("Removing stateful set %s/%s", oc.Namespace(), set)
-		if err := oc.AdminKubeClient().Apps().StatefulSets(oc.Namespace()).Delete(set, &metav1.DeleteOptions{}); err != nil {
+		if err := oc.AdminKubeClient().AppsV1().StatefulSets(oc.Namespace()).Delete(set, &metav1.DeleteOptions{}); err != nil {
 			e2e.Logf("Error occurred removing stateful set: %v", err)
 			errs = append(errs, err)
 		}
diff --git a/test/extended/util/url/url.go b/test/extended/util/url/url.go
index 84a36de81084..04c690a2d3d0 100644
--- a/test/extended/util/url/url.go
+++ b/test/extended/util/url/url.go
@@ -35,7 +35,7 @@ func NewTester(client kclientset.Interface, ns string) *Tester {
 }

 func (ut *Tester) Close() {
-	if err := ut.client.Core().Pods(ut.namespace).Delete(ut.podName, metav1.NewDeleteOptions(1)); err != nil {
+	if err := ut.client.CoreV1().Pods(ut.namespace).Delete(ut.podName, metav1.NewDeleteOptions(1)); err != nil {
 		e2e.Logf("Failed to delete exec pod %s: %v", ut.podName, err)
 	}
 	ut.podName = ""
diff --git a/test/integration/aggregator_test.go b/test/integration/aggregator_test.go
index 82ff05f3152e..495ecf21a71f 100644
--- a/test/integration/aggregator_test.go
+++ b/test/integration/aggregator_test.go
@@ -95,11 +95,11 @@ func TestAggregator(t *testing.T) {
 		t.Fatal(err)
 	}
 	// Legacy openshift resource
-	if _, err := openshiftProjectClient.Project().Projects().Get("default", metav1.GetOptions{}); err != nil {
+	if _, err := openshiftProjectClient.ProjectV1().Projects().Get("default", metav1.GetOptions{}); err != nil {
 		t.Fatal(err)
 	}
 	// Groupified openshift resource
-	if _, err := openshiftProjectClient.Project().Projects().Get("default", metav1.GetOptions{}); err != nil {
+	if _, err := openshiftProjectClient.ProjectV1().Projects().Get("default", metav1.GetOptions{}); err != nil {
 		t.Fatal(err)
 	}

diff --git a/test/integration/authorization_rbac_proxy_test.go b/test/integration/authorization_rbac_proxy_test.go
index e83720c54097..563fe0fe105b 100644
--- a/test/integration/authorization_rbac_proxy_test.go
+++ b/test/integration/authorization_rbac_proxy_test.go
@@ -692,7 +692,7 @@ func TestLegacyEndpointConfirmNoEscalation(t *testing.T) {
 	for _, group := range rule.APIGroups {
 		for _, resource := range rule.Resources {
 			if err := testutil.WaitForClusterPolicyUpdate(
-				userInternalClient.Authorization(),
+				userInternalClient.AuthorizationV1(),
 				verb,
 				schema.GroupResource{Group: group, Resource: resource},
 				true,
diff --git a/test/integration/bootstrap_policy_test.go b/test/integration/bootstrap_policy_test.go
index 2962e97cc58e..e87e0d165c90 100644
--- a/test/integration/bootstrap_policy_test.go
+++ b/test/integration/bootstrap_policy_test.go
@@ -95,7 +95,7 @@ func TestBootstrapPolicySelfSubjectAccessReviews(t *testing.T) {
 		description:       "can I get a subjectaccessreview on myself even if I have no rights to do it generally",
 		localInterface:    valerieAuthorizationClient.LocalSubjectAccessReviews("openshift"),
 		localReview:       askCanICreatePolicyBindings,
-		kubeAuthInterface: valerieKubeClient.Authorization(),
+		kubeAuthInterface: valerieKubeClient.AuthorizationV1(),
 		response: authorizationapi.SubjectAccessReviewResponse{
 			Allowed: false,
 			Reason:  ``,
@@ -111,7 +111,7 @@ func TestBootstrapPolicySelfSubjectAccessReviews(t *testing.T) {
 		description:       "I shouldn't be allowed to ask whether someone else can perform an action",
 		localInterface:    valerieAuthorizationClient.LocalSubjectAccessReviews("openshift"),
 		localReview:       askCanClusterAdminsCreateProject,
-		kubeAuthInterface: valerieKubeClient.Authorization(),
+		kubeAuthInterface: valerieKubeClient.AuthorizationV1(),
 		kubeNamespace:     "openshift",
 		err:               `localsubjectaccessreviews.authorization.openshift.io is forbidden: User "valerie" cannot create resource "localsubjectaccessreviews" in API group "authorization.openshift.io" in the namespace "openshift"`,
 		kubeErr:           `localsubjectaccessreviews.authorization.k8s.io is forbidden: User "valerie" cannot create resource "localsubjectaccessreviews" in API group "authorization.k8s.io" in the namespace "openshift"`,
@@ -145,7 +145,7 @@ func TestSelfSubjectAccessReviewsNonExistingNamespace(t *testing.T) {
 		description:       "ensure SAR for non-existing namespace does not leak namespace info",
 		localInterface:    authorizationclient.NewForConfigOrDie(valerieClientConfig).Authorization().LocalSubjectAccessReviews("foo"),
 		localReview:       askCanICreatePodsInNonExistingNamespace,
-		kubeAuthInterface: valerieKubeClient.Authorization(),
+		kubeAuthInterface: valerieKubeClient.AuthorizationV1(),
 		response: authorizationapi.SubjectAccessReviewResponse{
 			Allowed: false,
 			Reason:  ``,
diff --git a/test/integration/buildpod_admission_test.go b/test/integration/buildpod_admission_test.go
index d1cf98538175..998a04a232de 100644
--- a/test/integration/buildpod_admission_test.go
+++ b/test/integration/buildpod_admission_test.go
@@ -250,7 +250,7 @@ func runBuildPodAdmissionTest(t *testing.T, client buildclient.Interface, kclien
 	*v1.Pod) {

 	ns := testutil.Namespace()
-	_, err := client.Build().Builds(ns).Create(build)
+	_, err := client.BuildV1().Builds(ns).Create(build)
 	if err != nil {
 		t.Fatalf("%v", err)
 	}
@@ -261,7 +261,7 @@ func runBuildPodAdmissionTest(t *testing.T, client buildclient.Interface, kclien
 			buildutil.GetBuildPodName(build),
 		).String(),
 	}
-	podWatch, err := kclientset.Core().Pods(ns).Watch(watchOpt)
+	podWatch, err := kclientset.CoreV1().Pods(ns).Watch(watchOpt)
 	if err != nil {
 		t.Fatalf("%v", err)
 	}
@@ -329,7 +329,7 @@ func setupBuildPodAdmissionTest(t *testing.T, pluginConfig map[string]*configapi
 		t.Fatal(err)
 	}

-	_, err = clusterAdminKubeClientset.Core().Namespaces().Create(&v1.Namespace{
+	_, err = clusterAdminKubeClientset.CoreV1().Namespaces().Create(&v1.Namespace{
 		ObjectMeta: metav1.ObjectMeta{Name: testutil.Namespace()},
 	})
 	if err != nil {
diff --git a/test/integration/clusterquota_test.go b/test/integration/clusterquota_test.go
index c01b5a73f29a..921a1480e15c 100644
--- a/test/integration/clusterquota_test.go
+++ b/test/integration/clusterquota_test.go
@@ -108,7 +108,7 @@ func TestClusterQuota(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}
 	if _, err := clusterAdminKubeClient.CoreV1().ConfigMaps("second").Create(configmap); !apierrors.IsForbidden(err) {
-		list, err := clusterAdminQuotaClient.Quota().AppliedClusterResourceQuotas("second").List(metav1.ListOptions{})
+		list, err := clusterAdminQuotaClient.QuotaV1().AppliedClusterResourceQuotas("second").List(metav1.ListOptions{})
 		if err == nil {
 			t.Errorf("quota is %#v", list)
 		}
@@ -137,7 +137,7 @@ func TestClusterQuota(t *testing.T) {
 	}

 	if _, err := clusterAdminImageClient.ImageStreams("second").Create(imagestream); !apierrors.IsForbidden(err) {
-		list, err := clusterAdminQuotaClient.Quota().AppliedClusterResourceQuotas("second").List(metav1.ListOptions{})
+		list, err := clusterAdminQuotaClient.QuotaV1().AppliedClusterResourceQuotas("second").List(metav1.ListOptions{})
 		if err == nil {
 			t.Errorf("quota is %#v", list)
 		}
@@ -153,7 +153,7 @@ func TestClusterQuota(t *testing.T) {

 func waitForQuotaLabeling(clusterAdminClient quotaclient.Interface, namespaceName string) error {
 	return utilwait.PollImmediate(100*time.Millisecond, 10*time.Second, func() (done bool, err error) {
-		list, err := clusterAdminClient.Quota().AppliedClusterResourceQuotas(namespaceName).List(metav1.ListOptions{})
+		list, err := clusterAdminClient.QuotaV1().AppliedClusterResourceQuotas(namespaceName).List(metav1.ListOptions{})
 		if err != nil {
 			return false, nil
 		}
@@ -181,7 +181,7 @@ func labelNamespace(clusterAdminKubeClient corev1client.NamespacesGetter, namesp

 func waitForQuotaStatus(clusterAdminClient quotaclient.Interface, name string, conditionFn func(*quotav1.ClusterResourceQuota) bool) error {
 	err := utilwait.PollImmediate(100*time.Millisecond, 30*time.Second, func() (done bool, err error) {
-		quota, err := clusterAdminClient.Quota().ClusterResourceQuotas().Get(name, metav1.GetOptions{})
+		quota, err := clusterAdminClient.QuotaV1().ClusterResourceQuotas().Get(name, metav1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}
diff --git a/test/integration/deploy_defaults_test.go b/test/integration/deploy_defaults_test.go
index 747005afecac..10452fcae3ca 100644
--- a/test/integration/deploy_defaults_test.go
+++ b/test/integration/deploy_defaults_test.go
@@ -149,7 +149,7 @@ func TestDeploymentConfigDefaults(t *testing.T) {
 	t.Run("apps.openshift.io", func(t *testing.T) {
 		for _, tc := range ttApps {
 			t.Run("", func(t *testing.T) {
-				appsDC, err := appsClient.Apps().DeploymentConfigs(namespace).Create(tc.obj)
+				appsDC, err := appsClient.AppsV1().DeploymentConfigs(namespace).Create(tc.obj)
 				if err != nil {
 					t.Fatalf("Failed to create DC: %v", err)
 				}
diff --git a/test/integration/deploy_scale_test.go b/test/integration/deploy_scale_test.go
index b0b1aac29ea6..aa3113da6e9b 100644
--- a/test/integration/deploy_scale_test.go
+++ b/test/integration/deploy_scale_test.go
@@ -50,7 +50,7 @@ func TestDeployScale(t *testing.T) {
 	config.Spec.Triggers = []appsv1.DeploymentTriggerPolicy{}
 	config.Spec.Replicas = 1

-	dc, err := adminAppsClient.Apps().DeploymentConfigs(namespace).Create(config)
+	dc, err := adminAppsClient.AppsV1().DeploymentConfigs(namespace).Create(config)
 	if err != nil {
 		t.Fatalf("Couldn't create DeploymentConfig: %v %#v", err, config)
 	}
@@ -79,7 +79,7 @@ func TestDeployScale(t *testing.T) {
 	}

 	condition := func() (bool, error) {
-		config, err := adminAppsClient.Apps().DeploymentConfigs(namespace).Get(dc.Name, metav1.GetOptions{})
+		config, err := adminAppsClient.AppsV1().DeploymentConfigs(namespace).Get(dc.Name, metav1.GetOptions{})
 		if err != nil {
 			return false, nil
 		}
diff --git a/test/integration/deploy_trigger_test.go b/test/integration/deploy_trigger_test.go
index d042281cae2e..2295b982f1b3 100644
--- a/test/integration/deploy_trigger_test.go
+++ b/test/integration/deploy_trigger_test.go
@@ -43,7 +43,7 @@ func TestTriggers_manual(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	adminAppsClient := appsclient.NewForConfigOrDie(adminConfig).Apps()
+	adminAppsClient := appsclient.NewForConfigOrDie(adminConfig).AppsV1()

 	config := appstest.OkDeploymentConfig(0)
 	config.Namespace = namespace
@@ -118,7 +118,7 @@ func TestTriggers_imageChange(t *testing.T) {
 	if err != nil {
 		t.Fatalf("error creating project: %v", err)
 	}
-	projectAdminAppsClient := appsclient.NewForConfigOrDie(projectAdminClientConfig).Apps()
+	projectAdminAppsClient := appsclient.NewForConfigOrDie(projectAdminClientConfig).AppsV1()
 	projectAdminImageClient := imageclient.NewForConfigOrDie(projectAdminClientConfig).Image()

 	imageStream := &imageapi.ImageStream{ObjectMeta: metav1.ObjectMeta{Name: appstest.ImageStreamName}}
@@ -223,7 +223,7 @@ func TestTriggers_imageChange_nonAutomatic(t *testing.T) {
 	if err != nil {
 		t.Fatalf("error creating project: %v", err)
 	}
-	adminAppsClient := appsclient.NewForConfigOrDie(adminConfig).Apps()
+	adminAppsClient := appsclient.NewForConfigOrDie(adminConfig).AppsV1()
 	adminImageClient := imageclient.NewForConfigOrDie(adminConfig).Image()

 	imageStream := &imageapi.ImageStream{ObjectMeta: metav1.ObjectMeta{Name: appstest.ImageStreamName}}
@@ -402,7 +402,7 @@ func TestTriggers_MultipleICTs(t *testing.T) {
 	if err != nil {
 		t.Fatalf("error creating project: %v", err)
 	}
-	adminAppsClient := appsclient.NewForConfigOrDie(adminConfig).Apps()
+	adminAppsClient := appsclient.NewForConfigOrDie(adminConfig).AppsV1()
 	adminImageClient := imageclient.NewForConfigOrDie(adminConfig).Image()

 	imageStream := &imageapi.ImageStream{ObjectMeta: metav1.ObjectMeta{Name: appstest.ImageStreamName}}
@@ -568,7 +568,7 @@ func TestTriggers_configChange(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	adminAppsClient := appsclient.NewForConfigOrDie(adminConfig).Apps()
+	adminAppsClient := appsclient.NewForConfigOrDie(adminConfig).AppsV1()

 	config := appstest.OkDeploymentConfig(0)
 	config.Namespace = namespace
diff --git a/test/integration/groups_test.go b/test/integration/groups_test.go
index 4d2f2c25861e..7590d053947a 100644
--- a/test/integration/groups_test.go
+++ b/test/integration/groups_test.go
@@ -97,7 +97,7 @@ func TestBasicUserBasedGroupManipulation(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
-	if err := testutil.WaitForPolicyUpdate(valerieKubeClient.Authorization(), "empty", "get", kapi.Resource("pods"), true); err != nil {
+	if err := testutil.WaitForPolicyUpdate(valerieKubeClient.AuthorizationV1(), "empty", "get", kapi.Resource("pods"), true); err != nil {
 		t.Error(err)
 	}

@@ -155,7 +155,7 @@ func TestBasicGroupManipulation(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
-	if err := testutil.WaitForPolicyUpdate(valerieKubeClient.Authorization(), "empty", "get", kapi.Resource("pods"), true); err != nil {
+	if err := testutil.WaitForPolicyUpdate(valerieKubeClient.AuthorizationV1(), "empty", "get", kapi.Resource("pods"), true); err != nil {
 		t.Error(err)
 	}

diff --git a/test/integration/imagechange_buildtrigger_test.go b/test/integration/imagechange_buildtrigger_test.go
index c83f3925d0f5..57bf08be77c3 100644
--- a/test/integration/imagechange_buildtrigger_test.go
+++ b/test/integration/imagechange_buildtrigger_test.go
@@ -97,7 +97,7 @@ func TestSimpleImageChangeBuildTriggerFromImageStreamTagCustom(t *testing.T) {
 	}
 	options.AddRole()

-	if err := testutil.WaitForPolicyUpdate(projectAdminKubeClient.Authorization(), testutil.Namespace(), "create", build.Resource(bootstrappolicy.CustomBuildResource), true); err != nil {
+	if err := testutil.WaitForPolicyUpdate(projectAdminKubeClient.AuthorizationV1(), testutil.Namespace(), "create", build.Resource(bootstrappolicy.CustomBuildResource), true); err != nil {
 		t.Fatal(err)
 	}

@@ -130,7 +130,7 @@ func TestSimpleImageChangeBuildTriggerFromImageStreamTagCustomWithConfigChange(t
 	}
 	options.AddRole()

-	if err := testutil.WaitForPolicyUpdate(projectAdminKubeClient.Authorization(), testutil.Namespace(), "create", build.Resource(bootstrappolicy.CustomBuildResource), true); err != nil {
+	if err := testutil.WaitForPolicyUpdate(projectAdminKubeClient.AuthorizationV1(), testutil.Namespace(), "create", build.Resource(bootstrappolicy.CustomBuildResource), true); err != nil {
 		t.Fatal(err)
 	}

diff --git a/test/integration/ingressip_test.go b/test/integration/ingressip_test.go
index 3f3c13dac3a8..fa1810864059 100644
--- a/test/integration/ingressip_test.go
+++ b/test/integration/ingressip_test.go
@@ -57,10 +57,10 @@ func TestIngressIPAllocation(t *testing.T) {
 	_, informerController := cache.NewInformer(
 		&cache.ListWatch{
 			ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
-				return kc.Core().Services(metav1.NamespaceAll).List(options)
+				return kc.CoreV1().Services(metav1.NamespaceAll).List(options)
 			},
 			WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
-				return kc.Core().Services(metav1.NamespaceAll).Watch(options)
+				return kc.CoreV1().Services(metav1.NamespaceAll).Watch(options)
 			},
 		},
 		&v1.Service{},
@@ -95,7 +95,7 @@ func TestIngressIPAllocation(t *testing.T) {

 	// Validate that all services of type load balancer have a unique
 	// ingress ip and corresponding external ip.
-	services, err := kc.Core().Services(metav1.NamespaceDefault).List(metav1.ListOptions{})
+	services, err := kc.CoreV1().Services(metav1.NamespaceDefault).List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error: %v", err)
 	}
@@ -158,7 +158,7 @@ func generateServiceEvents(t *testing.T, kc kclientset.Interface) {
 		case updateOp:
 			targetIndex := rand.Intn(len(services))
 			name := services[targetIndex].Name
-			s, err := kc.Core().Services(metav1.NamespaceDefault).Get(name, metav1.GetOptions{})
+			s, err := kc.CoreV1().Services(metav1.NamespaceDefault).Get(name, metav1.GetOptions{})
 			if err != nil {
 				continue
 			}
@@ -169,7 +169,7 @@ func generateServiceEvents(t *testing.T, kc kclientset.Interface) {
 			} else {
 				s.Spec.Type = v1.ServiceTypeLoadBalancer
 			}
-			s, err = kc.Core().Services(metav1.NamespaceDefault).Update(s)
+			s, err = kc.CoreV1().Services(metav1.NamespaceDefault).Update(s)
 			if err != nil {
 				continue
 			}
@@ -177,7 +177,7 @@ func generateServiceEvents(t *testing.T, kc kclientset.Interface) {
 		case deleteOp:
 			targetIndex := rand.Intn(len(services))
 			name := services[targetIndex].Name
-			err := kc.Core().Services(metav1.NamespaceDefault).Delete(name, nil)
+			err := kc.CoreV1().Services(metav1.NamespaceDefault).Delete(name, nil)
 			if err != nil {
 				continue
 			}
@@ -217,5 +217,5 @@ func createService(kc kclientset.Interface, name string, typeLoadBalancer bool)
 			}},
 		},
 	}
-	return kc.Core().Services(metav1.NamespaceDefault).Create(service)
+	return kc.CoreV1().Services(metav1.NamespaceDefault).Create(service)
 }
diff --git a/test/integration/master_routes_test.go b/test/integration/master_routes_test.go
index aba80a263afe..90625530dce4 100644
--- a/test/integration/master_routes_test.go
+++ b/test/integration/master_routes_test.go
@@ -266,7 +266,7 @@ func TestApiGroups(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
-	defer kclientset.Core().Namespaces().Delete(ns, &metav1.DeleteOptions{})
+	defer kclientset.CoreV1().Namespaces().Delete(ns, &metav1.DeleteOptions{})

 	t.Logf("GETting builds")
 	req, err := http.NewRequest("GET", masterConfig.OAuthConfig.MasterPublicURL+fmt.Sprintf("/apis/%s/%s", buildv1.GroupName, buildv1.SchemeGroupVersion.Version), nil)
@@ -281,7 +281,7 @@ func TestApiGroups(t *testing.T) {

 	t.Logf("Creating a Build")
 	originalBuild := testBuild()
-	_, err = buildv1client.NewForConfigOrDie(clusterAdminClientConfig).Build().Builds(ns).Create(originalBuild)
+	_, err = buildv1client.NewForConfigOrDie(clusterAdminClientConfig).BuildV1().Builds(ns).Create(originalBuild)
 	if err != nil {
 		t.Fatalf("Unexpected BuildConfig create error: %v", err)
 	}
diff --git a/test/integration/oauth_disabled_test.go b/test/integration/oauth_disabled_test.go
index d6c038649101..e0388210f28d 100644
--- a/test/integration/oauth_disabled_test.go
+++ b/test/integration/oauth_disabled_test.go
@@ -39,7 +39,7 @@ func TestOAuthDisabled(t *testing.T) {
 	}

 	// Make sure cert auth still works
-	namespaces, err := client.Core().Namespaces().List(metav1.ListOptions{})
+	namespaces, err := client.CoreV1().Namespaces().List(metav1.ListOptions{})
 	if err != nil {
 		t.Fatalf("Unexpected error %v", err)
 	}
diff --git a/test/integration/oauth_serviceaccount_client_test.go b/test/integration/oauth_serviceaccount_client_test.go
index 86bece195a10..4a5f14879d25 100644
--- a/test/integration/oauth_serviceaccount_client_test.go
+++ b/test/integration/oauth_serviceaccount_client_test.go
@@ -93,7 +93,7 @@ func TestOAuthServiceAccountClient(t *testing.T) {

 	// retry this a couple times. We seem to be flaking on update conflicts and missing secrets all together
 	err = retry.RetryOnConflict(retry.DefaultRetry, func() error {
-		defaultSA, err = clusterAdminKubeClientset.Core().ServiceAccounts(projectName).Get("default", metav1.GetOptions{})
+		defaultSA, err = clusterAdminKubeClientset.CoreV1().ServiceAccounts(projectName).Get("default", metav1.GetOptions{})
 		if err != nil {
 			return err
 		}
@@ -102,7 +102,7 @@ func TestOAuthServiceAccountClient(t *testing.T) {
 		}
 		defaultSA.Annotations[saoauth.OAuthRedirectModelAnnotationURIPrefix+"one"] = redirectURL
 		defaultSA.Annotations[saoauth.OAuthWantChallengesAnnotationPrefix] = "true"
-		defaultSA, err = clusterAdminKubeClientset.Core().ServiceAccounts(projectName).Update(defaultSA)
+		defaultSA, err = clusterAdminKubeClientset.CoreV1().ServiceAccounts(projectName).Update(defaultSA)
 		return err
 	})
 	if err != nil {
@@ -112,7 +112,7 @@ func TestOAuthServiceAccountClient(t *testing.T) {
 	var oauthSecret *corev1.Secret
 	// retry this a couple times. We seem to be flaking on update conflicts and missing secrets all together
 	err = wait.PollImmediate(30*time.Millisecond, 10*time.Second, func() (done bool, err error) {
-		allSecrets, err := clusterAdminKubeClientset.Core().Secrets(projectName).List(metav1.ListOptions{})
+		allSecrets, err := clusterAdminKubeClientset.CoreV1().Secrets(projectName).List(metav1.ListOptions{})
 		if err != nil {
 			return false, err
 		}
@@ -590,7 +590,7 @@ func runOAuthFlow(

 	whoamiConfig := restclient.AnonymousClientConfig(clusterAdminClientConfig)
 	whoamiConfig.BearerToken = accessData.AccessToken
-	whoamiBuildClient := buildv1client.NewForConfigOrDie(whoamiConfig).Build()
+	whoamiBuildClient := buildv1client.NewForConfigOrDie(whoamiConfig).BuildV1()
 	whoamiUserClient := userclient.NewForConfigOrDie(whoamiConfig)

 	_, err = whoamiBuildClient.Builds(projectName).List(metav1.ListOptions{})
diff --git a/test/integration/ownerrefs_test.go b/test/integration/ownerrefs_test.go
index a2445b3357da..d68a87679797 100644
--- a/test/integration/ownerrefs_test.go
+++ b/test/integration/ownerrefs_test.go
@@ -60,7 +60,7 @@ func TestOwnerRefRestriction(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}
-	if err := testutil.WaitForPolicyUpdate(creatorClient.Authorization(), "foo", "create", kapi.Resource("services"), true); err != nil {
+	if err := testutil.WaitForPolicyUpdate(creatorClient.AuthorizationV1(), "foo", "create", kapi.Resource("services"), true); err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}

diff --git a/test/integration/pod_node_constraints_test.go b/test/integration/pod_node_constraints_test.go
index 6612ea02ef99..4a4fdb49fc32 100644
--- a/test/integration/pod_node_constraints_test.go
+++ b/test/integration/pod_node_constraints_test.go
@@ -273,6 +273,6 @@ func testPodNodeConstraintsObjectCreationWithPodTemplate(t *testing.T, name stri

 	// DeploymentConfig
 	dc := testPodNodeConstraintsDeploymentConfig(nodeName, nodeSelector)
-	_, err = appsClient.Apps().DeploymentConfigs(testutil.Namespace()).Create(dc)
+	_, err = appsClient.AppsV1().DeploymentConfigs(testutil.Namespace()).Create(dc)
 	checkForbiddenErr("dc", err)
 }
diff --git a/test/integration/project_request_test.go b/test/integration/project_request_test.go
index e9796199ba7d..97f6534fbd9f 100644
--- a/test/integration/project_request_test.go
+++ b/test/integration/project_request_test.go
@@ -74,15 +74,15 @@ func TestProjectRequestError(t *testing.T) {
 	}

 	// Watch the project, rolebindings, and configmaps
-	nswatch, err := kubeClientset.Core().Namespaces().Watch(metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", ns).String()})
+	nswatch, err := kubeClientset.CoreV1().Namespaces().Watch(metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", ns).String()})
 	if err != nil {
 		t.Fatal(err)
 	}
-	roleWatch, err := kubeClientset.Rbac().RoleBindings(ns).Watch(metav1.ListOptions{})
+	roleWatch, err := kubeClientset.RbacV1().RoleBindings(ns).Watch(metav1.ListOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
-	cmwatch, err := kubeClientset.Core().ConfigMaps(ns).Watch(metav1.ListOptions{})
+	cmwatch, err := kubeClientset.CoreV1().ConfigMaps(ns).Watch(metav1.ListOptions{})
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -137,7 +137,7 @@ func TestProjectRequestError(t *testing.T) {
 	}

 	// Verify project is deleted
-	if nsObj, err := kubeClientset.Core().Namespaces().Get(ns, metav1.GetOptions{}); !kapierrors.IsNotFound(err) {
+	if nsObj, err := kubeClientset.CoreV1().Namespaces().Get(ns, metav1.GetOptions{}); !kapierrors.IsNotFound(err) {
 		t.Errorf("Expected namespace to be gone, got %#v, %#v", nsObj, err)
 	}
 }
diff --git a/test/integration/scopes_test.go b/test/integration/scopes_test.go
index 3f71b3773fe1..d54de74ee676 100644
--- a/test/integration/scopes_test.go
+++ b/test/integration/scopes_test.go
@@ -56,7 +56,7 @@ func TestScopedTokens(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}

-	if _, err := buildv1client.NewForConfigOrDie(haroldConfig).Build().Builds(projectName).List(metav1.ListOptions{}); err != nil {
+	if _, err := buildv1client.NewForConfigOrDie(haroldConfig).BuildV1().Builds(projectName).List(metav1.ListOptions{}); err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}

@@ -80,7 +80,7 @@ func TestScopedTokens(t *testing.T) {

 	whoamiConfig := rest.AnonymousClientConfig(clusterAdminClientConfig)
 	whoamiConfig.BearerToken = whoamiOnlyToken.Name
-	if _, err := buildv1client.NewForConfigOrDie(whoamiConfig).Build().Builds(projectName).List(metav1.ListOptions{}); !kapierrors.IsForbidden(err) {
+	if _, err := buildv1client.NewForConfigOrDie(whoamiConfig).BuildV1().Builds(projectName).List(metav1.ListOptions{}); !kapierrors.IsForbidden(err) {
 		t.Fatalf("unexpected error: %v", err)
 	}

@@ -119,7 +119,7 @@ func TestScopedImpersonation(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}

-	err = clusterAdminBuildClient.Build().RESTClient().Get().
+	err = clusterAdminBuildClient.BuildV1().RESTClient().Get().
 		SetHeader(authenticationv1.ImpersonateUserHeader, "harold").
 		SetHeader(authenticationv1.ImpersonateUserExtraHeaderPrefix+authorizationapi.ScopesKey, "user:info").
 		Namespace(projectName).Resource("builds").Name("name").Do().Into(&buildapi.Build{})
@@ -160,7 +160,7 @@ func TestScopeEscalations(t *testing.T) {
 		t.Fatalf("unexpected error: %v", err)
 	}

-	if _, err := buildv1client.NewForConfigOrDie(haroldConfig).Build().Builds(projectName).List(metav1.ListOptions{}); err != nil {
+	if _, err := buildv1client.NewForConfigOrDie(haroldConfig).BuildV1().Builds(projectName).List(metav1.ListOptions{}); err != nil {
 		t.Fatalf("unexpected error: %v", err)
 	}

diff --git a/test/integration/webhook_test.go b/test/integration/webhook_test.go
index ae19c13ab06e..df74c1aa3590 100644
--- a/test/integration/webhook_test.go
+++ b/test/integration/webhook_test.go
@@ -36,7 +36,7 @@ func TestWebhook(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to get osClient: %v", err)
 	}
-	clusterAdminBuildClient := buildv1client.NewForConfigOrDie(clusterAdminClientConfig).Build()
+	clusterAdminBuildClient := buildv1client.NewForConfigOrDie(clusterAdminClientConfig).BuildV1()

 	kubeClient.CoreV1().Namespaces().Create(&corev1.Namespace{
 		ObjectMeta: metav1.ObjectMeta{Name: testutil.Namespace()},
@@ -146,8 +146,8 @@ func TestWebhookGitHubPushWithImage(t *testing.T) {
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	clusterAdminImageClient := imagev1client.NewForConfigOrDie(clusterAdminClientConfig).Image()
-	clusterAdminBuildClient := buildv1client.NewForConfigOrDie(clusterAdminClientConfig).Build()
+	clusterAdminImageClient := imagev1client.NewForConfigOrDie(clusterAdminClientConfig).ImageV1()
+	clusterAdminBuildClient := buildv1client.NewForConfigOrDie(clusterAdminClientConfig).BuildV1()

 	err = testutil.CreateNamespace(clusterAdminKubeConfig, testutil.Namespace())
 	if err != nil {
@@ -259,8 +259,8 @@ func TestWebhookGitHubPushWithImageStream(t *testing.T) {
 	if err != nil {
 		t.Errorf("unexpected error: %v", err)
 	}
-	clusterAdminImageClient := imagev1client.NewForConfigOrDie(clusterAdminClientConfig).Image()
-	clusterAdminBuildClient := buildv1client.NewForConfigOrDie(clusterAdminClientConfig).Build()
+	clusterAdminImageClient := imagev1client.NewForConfigOrDie(clusterAdminClientConfig).ImageV1()
+	clusterAdminBuildClient := buildv1client.NewForConfigOrDie(clusterAdminClientConfig).BuildV1()

 	clusterAdminKubeClient, err := testutil.GetClusterAdminKubeClient(clusterAdminKubeConfig)
 	if err != nil {
@@ -365,7 +365,7 @@ func TestWebhookGitHubPing(t *testing.T) {
 	if err != nil {
 		t.Fatalf("unable to get osClient: %v", err)
 	}
-	clusterAdminBuildClient := buildv1client.NewForConfigOrDie(clusterAdminClientConfig).Build()
+	clusterAdminBuildClient := buildv1client.NewForConfigOrDie(clusterAdminClientConfig).BuildV1()

 	kubeClient.CoreV1().Namespaces().Create(&corev1.Namespace{
 		ObjectMeta: metav1.ObjectMeta{Name: testutil.Namespace()},
diff --git a/test/util/client.go b/test/util/client.go
index dc7d79e3c675..c1e780a3f1e0 100644
--- a/test/util/client.go
+++ b/test/util/client.go
@@ -160,7 +160,7 @@ func GetScopedClientForUser(clusterAdminClientConfig *restclient.Config, usernam
 }

 func GetClientForServiceAccount(adminClient kubernetes.Interface, clientConfig restclient.Config, namespace, name string) (*kubernetes.Clientset, *restclient.Config, error) {
-	_, err := adminClient.Core().Namespaces().Create(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
+	_, err := adminClient.CoreV1().Namespaces().Create(&corev1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}})
 	if err != nil && !kerrs.IsAlreadyExists(err) {
 		return nil, nil, err
 	}
diff --git a/test/util/server/server.go b/test/util/server/server.go
index fc5c0399ccdb..a0dab972a88b 100644
--- a/test/util/server/server.go
+++ b/test/util/server/server.go
@@ -785,7 +785,7 @@ func serviceAccountSecretsExist(clientset kubernetes.Interface, namespace string
 		if len(secret.Namespace) > 0 {
 			ns = secret.Namespace
 		}
-		secret, err := clientset.Core().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
+		secret, err := clientset.CoreV1().Secrets(ns).Get(secret.Name, metav1.GetOptions{})
 		if err == nil {
 			switch secret.Type {
 			case corev1.SecretTypeServiceAccountToken:
@@ -821,7 +821,7 @@ func WaitForPodCreationServiceAccounts(clientset kubernetes.Interface, namespace
 			glog.Warningf("Error attempting to create test pod: %v", err)
 			return false, nil
 		}
-		err = clientset.Core().Pods(namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
+		err = clientset.CoreV1().Pods(namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
 		if err != nil {
 			return false, err
 		}
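-- 
Note (illustrative, not part of the patch): every hunk above makes the same mechanical
substitution. The deprecated group accessor on a generated clientset (Core(), Build(),
Route(), Apps(), Quota(), Rbac(), ...) is replaced by its versioned equivalent (CoreV1(),
BuildV1(), RouteV1(), AppsV1(), QuotaV1(), RbacV1(), ...). A minimal sketch of the pattern,
assuming a client-go vintage contemporary with this commit (list/get calls did not yet take
a context argument); the kubeconfig path is a placeholder:

	package main

	import (
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/kubernetes"
		"k8s.io/client-go/tools/clientcmd"
	)

	func main() {
		// Build a clientset from a kubeconfig (path is illustrative).
		config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
		if err != nil {
			panic(err)
		}
		client := kubernetes.NewForConfigOrDie(config)

		// Deprecated accessor, removed in later client-go releases:
		//   pods, err := client.Core().Pods("default").List(metav1.ListOptions{})
		// Versioned accessor, same REST calls underneath:
		pods, err := client.CoreV1().Pods("default").List(metav1.ListOptions{})
		if err != nil {
			panic(err)
		}
		fmt.Printf("%d pods in default\n", len(pods.Items))
	}

In the client-go of this era both accessors return the same CoreV1Interface, so the
rewrite is purely syntactic; only the deprecated alias goes away.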